Authored-by: Sergey Bugaev <buga...@gmail.com>
---
 vm/vm_map.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/vm/vm_map.c b/vm/vm_map.c
index f221c532..e4672260 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -1424,8 +1424,9 @@ vm_map_pageable_scan(struct vm_map *map,
 	struct vm_map_entry	*start,
 	struct vm_map_entry	*end)
 {
-	struct vm_map_entry	*entry;
-	boolean_t		do_wire_faults;
+	struct vm_map_entry	*entry;
+	struct vm_map_entry	entry_copy;
+	boolean_t		do_wire_faults;
 
 	/*
 	 * Pass 1. Update counters and prepare wiring faults.
@@ -1549,15 +1550,30 @@ vm_map_pageable_scan(struct vm_map *map,
 	 * while we have it unlocked. We cannot trust user threads
 	 * to do the same.
 	 *
+	 * Once we unlock the map, even well-intentioned operations
+	 * on adjacent VM regions can end up affecting our entry,
+	 * due to clipping and coalescing entries. So, make a
+	 * temporary copy of the entry, and pass that to vm_fault_wire()
+	 * instead of the original.
+	 *
 	 * HACK HACK HACK HACK
 	 */
 	if (vm_map_pmap(map) == kernel_pmap) {
+		/*
+		 * TODO: Support wiring more than one entry
+		 * in the kernel map at a time.
+		 */
+		assert(start->vme_next == end);
+		entry_copy = *start;
 		vm_map_unlock(map); /* trust me ... */
-	} else {
-		vm_map_lock_set_recursive(map);
-		vm_map_lock_write_to_read(map);
+		vm_fault_wire(map, &entry_copy);
+		vm_map_lock(map);
+		return;
 	}
 
+	vm_map_lock_set_recursive(map);
+	vm_map_lock_write_to_read(map);
+
 	for (entry = start; entry != end; entry = entry->vme_next) {
 		/*
 		 * The wiring count can only be 1 if it was
@@ -1572,11 +1588,7 @@ vm_map_pageable_scan(struct vm_map *map,
 		}
 	}
 
-	if (vm_map_pmap(map) == kernel_pmap) {
-		vm_map_lock(map);
-	} else {
-		vm_map_lock_clear_recursive(map);
-	}
+	vm_map_lock_clear_recursive(map);
 }
 
 /*
-- 
2.43.0
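
For readers skimming the hunks, here is roughly how the tail of
vm_map_pageable_scan() reads once the patch is applied. This is only a
reconstruction stitched together from the context lines above; the body
of the wiring loop is not visible in the hunks and is abridged into a
comment rather than guessed at:

	if (vm_map_pmap(map) == kernel_pmap) {
		/*
		 * TODO: Support wiring more than one entry
		 * in the kernel map at a time.
		 */
		assert(start->vme_next == end);

		/*
		 * Snapshot the entry: once the map is unlocked,
		 * clipping or coalescing triggered by operations on
		 * neighbouring regions may resize or free the
		 * original entry, so vm_fault_wire() gets the copy.
		 */
		entry_copy = *start;
		vm_map_unlock(map);	/* trust me ... */
		vm_fault_wire(map, &entry_copy);
		vm_map_lock(map);
		return;
	}

	/*
	 * User maps: instead of unlocking, downgrade to a recursive
	 * read lock, so the entries cannot change under our feet.
	 */
	vm_map_lock_set_recursive(map);
	vm_map_lock_write_to_read(map);

	for (entry = start; entry != end; entry = entry->vme_next) {
		/*
		 * ... wire each entry whose wiring count was set to 1
		 * by pass 1 above (loop body elided in the hunks) ...
		 */
	}

	vm_map_lock_clear_recursive(map);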