On Thu, Apr 05, 2018 at 22:13:01 -0400, Emilio G. Cota wrote:
> +/*
> + * Lock a range of pages ([@start,@end[) as well as the pages of all
> + * intersecting TBs.
> + * Locking order: acquire locks in ascending order of page index.
> + */
> +struct page_collection *
> +page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
> +{
> +    struct page_collection *set = g_malloc(sizeof(*set));
> +    tb_page_addr_t index;
> +    PageDesc *pd;
> +
> +    start >>= TARGET_PAGE_BITS;
> +    end >>= TARGET_PAGE_BITS;
> +    g_assert(start <= end);
> +
> +    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
> +                                page_entry_destroy);
> +    set->max = NULL;
> +
> + retry:
> +    g_tree_foreach(set->tree, page_entry_lock, NULL);
> +
> +    for (index = start; index <= end; index++) {
> +        TranslationBlock *tb;
> +        int n;
> +
> +        pd = page_find(index);
> +        if (pd == NULL) {
> +            continue;
> +        }
Just noticed there's a bug here: we need to acquire pd's page lock
before walking its TB list, since that lock is what protects the
traversal. Fixed in v3 as shown in the diff below.
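
To make the hazard concrete, here is a minimal standalone sketch
(hypothetical names, plain pthreads rather than the QEMU page locks)
of the rule the v2 code breaks: whoever walks an intrusive list must
hold the same lock that writers take to unlink nodes, or the walker
can follow a dangling next pointer.

    #include <pthread.h>

    struct node {
        struct node *next;
    };

    struct list {
        pthread_mutex_t lock; /* protects head and every next link */
        struct node *head;
    };

    /* writer: unlinks @n under the lock */
    static void list_remove(struct list *l, struct node *n)
    {
        struct node **p;

        pthread_mutex_lock(&l->lock);
        for (p = &l->head; *p; p = &(*p)->next) {
            if (*p == n) {
                *p = n->next;
                break;
            }
        }
        pthread_mutex_unlock(&l->lock);
    }

    /* reader: must take the same lock for the traversal to be safe */
    static void list_walk(struct list *l, void (*fn)(struct node *))
    {
        struct node *n;

        pthread_mutex_lock(&l->lock);
        for (n = l->head; n; n = n->next) {
            fn(n);
        }
        pthread_mutex_unlock(&l->lock);
    }

In page_collection_lock, pd's page lock plays the role of list->lock
and PAGE_FOR_EACH_TB is the traversal, hence the hunk below.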
> +        PAGE_FOR_EACH_TB(pd, tb, n) {
> +            if (page_trylock_add(set, tb->page_addr[0]) ||
> +                (tb->page_addr[1] != -1 &&
> +                 page_trylock_add(set, tb->page_addr[1]))) {
> +                /* drop all locks, and reacquire in order */
> +                g_tree_foreach(set->tree, page_entry_unlock, NULL);
> +                goto retry;
> +            }
> +        }
> +    }
> +    return set;
> +}
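
For anyone who hasn't seen this locking scheme before: locks are kept
sorted by page index; a page above the current maximum can be locked
outright, anything below it is only trylocked, and on contention we
drop the whole set and reacquire it in ascending order before
rescanning. A stripped-down sketch of that helper contract
(hypothetical names, plain pthread mutexes, initialisation elided):

    #include <pthread.h>
    #include <stdbool.h>

    #define NLOCKS 8

    /* assume all initialised with PTHREAD_MUTEX_INITIALIZER */
    static pthread_mutex_t locks[NLOCKS];

    /*
     * Add lock @idx to the set of locks we hold, whose largest index
     * so far is *@max. In-order requests (idx > *max) may block;
     * out-of-order requests only try. Returns true if the caller
     * must release everything and start over.
     */
    static bool trylock_add(int idx, int *max, bool held[NLOCKS])
    {
        if (held[idx]) {
            return false; /* already in the set */
        }
        if (idx > *max) {
            /* ascending acquisition cannot deadlock: safe to block */
            pthread_mutex_lock(&locks[idx]);
            *max = idx;
        } else if (pthread_mutex_trylock(&locks[idx])) {
            return true; /* out of order and busy: restart */
        }
        held[idx] = true;
        return false;
    }

On a restart, the caller relocks the pages it has collected so far in
ascending index order (that is what the g_tree_foreach with
page_entry_lock at retry: does, since GTree iterates in key order)
before rescanning, so the ordering invariant is never violated and two
threads locking overlapping ranges cannot deadlock.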
Thanks,
Emilio
---
@@ -840,6 +840,12 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
         if (pd == NULL) {
             continue;
         }
+        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
+            g_tree_foreach(set->tree, page_entry_unlock, NULL);
+            set_page_collection_locked(false);
+            goto retry;
+        }
+        assert_page_locked(pd);
         PAGE_FOR_EACH_TB(pd, tb, n) {
             if (page_trylock_add(set, tb->page_addr[0]) ||
                 (tb->page_addr[1] != -1 &&
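
For context, the function is meant to be paired with
page_collection_unlock() from the same patch; a caller that wants to
touch every TB intersecting a range does roughly:

    struct page_collection *pages;

    /* locks every page in [start, end) plus intersecting TB pages */
    pages = page_collection_lock(start, end);

    /* ... invalidate/modify the TBs in the range ... */

    /* drops all the page locks and frees the set */
    page_collection_unlock(pages);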