Hi,

This diff implements yielding in the vm system, with the intention of
making expensive system calls preempt every once in a while, so that
system calls from other processes can make progress.

It's a very conservative application at the moment, interleaving only
during memory unmapping.  If this works out, we can start adding the
mechanism to other expensive system calls, not just in uvm but anywhere
the kernel stays locked for long stretches of time.

Technically the diff is incomplete, since it does not address the very
big problem of the biglock.  That is because the biglock lacks an
__mp_lock_is_contended() call: if we hold the biglock and another cpu
wants it, that cpu should ideally inform us, so that we can step aside
and temporarily grant it access.
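
To illustrate the idea, here is a minimal sketch of what uvm_ileave()
could do with such a call.  Note that __mp_lock_is_contended() is
hypothetical, and the exact way of naming the biglock object here is an
assumption on my part:

	/*
	 * Hypothetical: __mp_lock_is_contended() does not exist (yet).
	 * If another cpu is spinning on the biglock, briefly drop it so
	 * that cpu can enter the kernel, then queue up for it again.
	 */
	if (__mp_lock_is_contended(&kernel_lock)) {
		KERNEL_UNLOCK();
		KERNEL_LOCK();
	}

Nothing guarantees the other cpu wins the race for the lock on any
single iteration, but over a long unmap loop it would get its chance.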

Most of the changes in the diff are actually about adding flags that
propagate whether we should do the yielding.  The uvm_ileave() function
implements the actual yield and carries a big comment where I bring up
the biglock problem again.
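
For example, sys_munmap() now requests the yielding by passing
UVM_OP_ILEAVE down the unmap path (taken from the diff below):

	uvm_unmap_remove(map, addr, addr + size, &dead_entries, UVM_OP_ILEAVE);
	...
	uvm_unmap_detach(&dead_entries, 0, UVM_OP_ILEAVE);

Both functions call uvm_ileave(flags) from their loops, and uvm_ileave()
is a noop unless UVM_OP_ILEAVE is set, so all other callers simply pass 0.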

The diff has a name of course:
vmyield.2.diff

Please test this on your machines.
-- 
Ariane


diff --git arch/i386/i386/pmap.c arch/i386/i386/pmap.c
index f8f05cb..f0f1257 100644
--- arch/i386/i386/pmap.c
+++ arch/i386/i386/pmap.c
@@ -1269,10 +1269,10 @@ pmap_free_pvpage(void)
                /* unmap the page */
                TAILQ_INIT(&dead_entries);
                uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
-                   &dead_entries, FALSE, TRUE);
+                   &dead_entries, 0);
                vm_map_unlock(map);
 
-               uvm_unmap_detach(&dead_entries, 0);
+               uvm_unmap_detach(&dead_entries, 0, 0);
 
                pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
        }
diff --git arch/sparc/sparc/iommu.c arch/sparc/sparc/iommu.c
index 4f7261b..dda9e35 100644
--- arch/sparc/sparc/iommu.c
+++ arch/sparc/sparc/iommu.c
@@ -753,7 +753,7 @@ iommu_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
        size = round_page(size);
        pmap_kremove((vaddr_t)kva, size);
        pmap_update(pmap_kernel());
-       uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
+       uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size, 0);
 }
 
 
diff --git dev/pci/drm/i915_drv.c dev/pci/drm/i915_drv.c
index e73fa15..96a2ebb 100644
--- dev/pci/drm/i915_drv.c
+++ dev/pci/drm/i915_drv.c
@@ -4079,7 +4079,7 @@ i915_gem_cleanup_hws(struct inteldrm_softc *dev_priv)
        obj = dev_priv->hws_obj;
 
        uvm_unmap(kernel_map, (vaddr_t)dev_priv->hw_status_page,
-           (vaddr_t)dev_priv->hw_status_page + PAGE_SIZE);
+           (vaddr_t)dev_priv->hw_status_page + PAGE_SIZE, 0);
        dev_priv->hw_status_page = NULL;
        drm_hold_object(obj);
        i915_gem_object_unpin(obj);
diff --git kern/kern_exec.c kern/kern_exec.c
index e7a2b3a..6682511 100644
--- kern/kern_exec.c
+++ kern/kern_exec.c
@@ -825,7 +825,7 @@ exec_sigcode_map(struct proc *p, struct emul *e)
                        return (ENOMEM);
                }
                memcpy((void *)va, e->e_sigcode, sz);
-               uvm_unmap(kernel_map, va, va + round_page(sz));
+               uvm_unmap(kernel_map, va, va + round_page(sz), 0);
        }
 
        p->p_sigcode = 0; /* no hint */
diff --git kern/kern_malloc_debug.c kern/kern_malloc_debug.c
index 68ee1e0..2b80838 100644
--- kern/kern_malloc_debug.c
+++ kern/kern_malloc_debug.c
@@ -259,7 +259,7 @@ debug_malloc_allocate_free(int wait)
                        break;
 
                if (wait == 0) {
-                       uvm_unmap(kmem_map, va, va + PAGE_SIZE * 2);
+                       uvm_unmap(kmem_map, va, va + PAGE_SIZE * 2, 0);
                        pool_put(&debug_malloc_pool, md);
                        return;
                }
diff --git kern/kern_sysctl.c kern/kern_sysctl.c
index 5e51607..4f69bad 100644
--- kern/kern_sysctl.c
+++ kern/kern_sysctl.c
@@ -1758,7 +1758,7 @@ more:
        error = copyout(&rarg, rargv, sizeof(rarg));
 
 out:
-       uvmspace_free(vm);
+       uvmspace_free(vm, 0);
        free(buf, M_TEMP);
        return (error);
 }
diff --git kern/sys_process.c kern/sys_process.c
index b36da4f..be75012 100644
--- kern/sys_process.c
+++ kern/sys_process.c
@@ -728,7 +728,7 @@ process_domem(struct proc *curp, struct proc *p, struct uio *uio, int req)
        error = uvm_io(&vm->vm_map, uio,
            (req == PT_WRITE_I) ? UVM_IO_FIXPROT : 0);
 
-       uvmspace_free(vm);
+       uvmspace_free(vm, 0);
 
        if (error == 0 && req == PT_WRITE_I)
                pmap_proc_iflush(p, addr, len);
diff --git miscfs/procfs/procfs_cmdline.c miscfs/procfs/procfs_cmdline.c
index b80aff9..9956bdf 100644
--- miscfs/procfs/procfs_cmdline.c
+++ miscfs/procfs/procfs_cmdline.c
@@ -180,7 +180,7 @@ procfs_docmdline(struct proc *curp, struct proc *p, struct pfsnode *pfs, struct
 
 
  bad:
-       uvmspace_free(vm);
+       uvmspace_free(vm, 0);
        free(arg, M_TEMP);
        return (error);
 }
diff --git uvm/uvm.h uvm/uvm.h
index c236fb4..8e9ff81 100644
--- uvm/uvm.h
+++ uvm/uvm.h
@@ -172,6 +172,9 @@ struct uvm_addr_state       *uvm_map_uaddr_e(struct vm_map*, struct vm_map_entry*);
 #define VMMAP_FREE_END(_entry)         ((_entry)->end + (_entry)->guard + \
                                            (_entry)->fspace)
 
+/* uvm interleave functionality. */
+void                    uvm_ileave(int);
+
 #endif /* _KERNEL */
 
 #endif /* _UVM_UVM_H_ */
diff --git uvm/uvm_extern.h uvm/uvm_extern.h
index 991a44d..10e5b76 100644
--- uvm/uvm_extern.h
+++ uvm/uvm_extern.h
@@ -236,6 +236,17 @@ typedef int                vm_prot_t;
 #define        PHYSLOAD_DEVICE 0x01    /* don't add to the page queue */
 
 /*
+ * Flags to uvm_unmap_detach.
+ */
+#define UVM_UNMAP_NO_FREE      0x0001  /* Don't mark unmapped as free. */
+#define UVM_UNMAP_RM_HOLES     0x0002  /* Also remove holes in the map. */
+
+/*
+ * Operation flags.
+ */
+#define UVM_OP_ILEAVE  0x80000000      /* Yield/preempt during request. */
+
+/*
  * structures
  */
 
@@ -661,7 +672,7 @@ void                        uvmspace_init(struct vmspace *, struct pmap *,
                                vaddr_t, vaddr_t, boolean_t, boolean_t);
 void                   uvmspace_exec(struct proc *, vaddr_t, vaddr_t);
 struct vmspace         *uvmspace_fork(struct vmspace *);
-void                   uvmspace_free(struct vmspace *);
+void                   uvmspace_free(struct vmspace *, int);
 void                   uvmspace_share(struct proc *, struct proc *);
 
 
diff --git uvm/uvm_glue.c uvm/uvm_glue.c
index c2d205c..fa1eda9 100644
--- uvm/uvm_glue.c
+++ uvm/uvm_glue.c
@@ -347,7 +347,7 @@ uvm_fork(struct proc *p1, struct proc *p2, boolean_t shared, void *stack,
 void
 uvm_exit(struct proc *p)
 {
-       uvmspace_free(p->p_vmspace);
+       uvmspace_free(p->p_vmspace, UVM_OP_ILEAVE);
        p->p_vmspace = NULL;
        uvm_km_free(kernel_map, (vaddr_t)p->p_addr, USPACE);
        p->p_addr = NULL;
@@ -478,3 +478,41 @@ uvm_atopg(vaddr_t kva)
        KASSERT(pg != NULL);
        return (pg);
 } 
+
+/*
+ * Allow other processes to run, if UVM_OP_ILEAVE is set in flags.
+ * Otherwise, this is a noop.
+ */
+void
+uvm_ileave(int flags)
+{
+       /* Only interleave if allowed. */
+       if (!(flags & UVM_OP_ILEAVE))
+               return;
+
+       /* Must be allowed to sleep. */
+       assertwaitok();
+
+       /*
+        * XXX Code below is incomplete: it is missing an important case.
+        *
+        * When another process wants to enter the kernel but we hold biglock,
+        * it will spin, basically incrementing the number of cpus on the
+        * current task by 1.
+        *
+        * We should detect that another process is entering a syscall and
+        * allow it to enter and do its work.  If this is a short system
+        * call, it will mean the other process can continue quickly with
+        * userspace work.  If it is an expensive system call, the process
+        * should have a similar mechanic as here to preempt.
+        *
+        * Unfortunately, kernel_lock does not currently allow us to figure
+        * out if another cpu is blocked.
+        */
+
+       /* Reaper yields, other procs preempt. */
+       if (curproc == reaperproc && !curcpu_is_idle())
+               yield();
+       else if (curcpu()->ci_schedstate.spc_schedflags & SPCF_SHOULDYIELD)
+               preempt(NULL);
+}
diff --git uvm/uvm_io.c uvm/uvm_io.c
index bfeea50..32e4bad 100644
--- uvm/uvm_io.c
+++ uvm/uvm_io.c
@@ -141,9 +141,9 @@ uvm_io(vm_map_t map, struct uio *uio, int flags)
                vm_map_lock(kernel_map);
                TAILQ_INIT(&dead_entries);
                uvm_unmap_remove(kernel_map, kva, kva+chunksz,
-                   &dead_entries, FALSE, TRUE);
+                   &dead_entries, 0);
                vm_map_unlock(kernel_map);
-               uvm_unmap_detach(&dead_entries, AMAP_REFALL);
+               uvm_unmap_detach(&dead_entries, AMAP_REFALL, 0);
 
                /*
                 * We defer checking the error return from uiomove until
diff --git uvm/uvm_km.c uvm/uvm_km.c
index aa97110..dcaabab 100644
--- uvm/uvm_km.c
+++ uvm/uvm_km.c
@@ -415,7 +415,7 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
        if (uvm_pglistalloc(size, low, high, alignment, boundary, &pgl, nsegs,
            pla_flags) != 0) {
                /* Failed. */
-               uvm_unmap(map, kva, kva + size);
+               uvm_unmap(map, kva, kva + size, 0);
                return (0);
        }
 
@@ -456,7 +456,7 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
 void
 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
 {
-       uvm_unmap(map, trunc_page(addr), round_page(addr+size));
+       uvm_unmap(map, trunc_page(addr), round_page(addr+size), 0);
 }
 
 /*
@@ -474,11 +474,11 @@ uvm_km_free_wakeup(struct vm_map *map, vaddr_t addr, vsize_t size)
        vm_map_lock(map);
        TAILQ_INIT(&dead_entries);
        uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), 
-            &dead_entries, FALSE, TRUE);
+            &dead_entries, 0);
        wakeup(map);
        vm_map_unlock(map);
 
-       uvm_unmap_detach(&dead_entries, 0);
+       uvm_unmap_detach(&dead_entries, 0, 0);
 }
 
 /*
@@ -536,7 +536,7 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
                                 * sleep for memory, so free what we have
                                 * allocated and fail.
                                 */
-                               uvm_unmap(map, kva, loopva - kva);
+                               uvm_unmap(map, kva, loopva - kva, 0);
                                return (0);
                        } else {
                                uvm_wait("km_alloc1w"); /* wait for memory */
@@ -809,7 +809,7 @@ uvm_km_thread(void *arg)
                        for (; i < nitems(pg); i++) {
                                if (pg[i] != 0) {
                                        uvm_unmap(kernel_map,
-                                           pg[i], pg[i] + PAGE_SIZE);
+                                           pg[i], pg[i] + PAGE_SIZE, 0);
                                }
                        }
                }
@@ -840,7 +840,7 @@ uvm_km_doputpage(struct uvm_km_free_page *fp)
        mtx_leave(&uvm_km_pages.mtx);
 
        if (freeva)
-               uvm_unmap(kernel_map, va, va + PAGE_SIZE);
+               uvm_unmap(kernel_map, va, va + PAGE_SIZE, 0);
 
        uvm_pagefree(pg);
        return (nextfp);
@@ -1043,7 +1043,7 @@ km_free(void *v, size_t sz, const struct kmem_va_mode *kv,
                uvm_pglistfree(&pgl);
        }
 free_va:
-       uvm_unmap(*kv->kv_map, sva, eva);
+       uvm_unmap(*kv->kv_map, sva, eva, 0);
        if (kv->kv_wait)
                wakeup(*kv->kv_map);
 }
diff --git uvm/uvm_map.c uvm/uvm_map.c
index 5ac0e9f..443a067 100644
--- uvm/uvm_map.c
+++ uvm/uvm_map.c
@@ -137,7 +137,7 @@ int                  uvm_map_pageable_wire(struct vm_map*,
                            vaddr_t, vaddr_t, int);
 void                    uvm_map_setup_entries(struct vm_map*);
 void                    uvm_map_setup_md(struct vm_map*);
-void                    uvm_map_teardown(struct vm_map*);
+void                    uvm_map_teardown(struct vm_map*, int);
 void                    uvm_map_vmspace_update(struct vm_map*,
                            struct uvm_map_deadq*, int);
 void                    uvm_map_kmem_grow(struct vm_map*,
@@ -1208,7 +1208,7 @@ unlock:
         * uvm_map_mkentry may also create dead entries, when it attempts to
         * destroy free-space entries.
         */
-       uvm_unmap_detach(&dead, 0);
+       uvm_unmap_detach(&dead, 0, 0);
        return error;
 }
 
@@ -1364,7 +1364,7 @@ uvm_mapent_tryjoin(struct vm_map *map, struct vm_map_entry *entry,
  * Kill entries that are no longer in a map.
  */
 void
-uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
+uvm_unmap_detach(struct uvm_map_deadq *deadq, int amapflags, int flags)
 {
        struct vm_map_entry *entry;
 
@@ -1376,7 +1376,7 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
                        amap_unref(entry->aref.ar_amap,
                            entry->aref.ar_pageoff,
                            atop(entry->end - entry->start),
-                           flags);
+                           amapflags);
 
                /*
                 * Drop reference to our backing object, if we've got one.
@@ -1395,6 +1395,9 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
                 */
                TAILQ_REMOVE(deadq, entry, dfree.deadq);
                uvm_mapent_free(entry);
+
+               /* Allow other calls into kernel. */
+               uvm_ileave(flags);
        }
 }
 
@@ -1634,18 +1637,21 @@ uvm_map_pie(vaddr_t align)
 }
 
 void
-uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
+uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
 {
        struct uvm_map_deadq dead;
 
        KASSERT((start & (vaddr_t)PAGE_MASK) == 0 &&
            (end & (vaddr_t)PAGE_MASK) == 0);
+       /* Cannot interleave on intrsafe maps. */
+       KASSERT(!(flags & UVM_OP_ILEAVE) || !(map->flags & VM_MAP_INTRSAFE));
+
        TAILQ_INIT(&dead);
        vm_map_lock(map);
-       uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE);
+       uvm_unmap_remove(map, start, end, &dead, flags);
        vm_map_unlock(map);
 
-       uvm_unmap_detach(&dead, 0);
+       uvm_unmap_detach(&dead, 0, flags);
 }
 
 /*
@@ -1782,14 +1788,13 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
 /*
  * Remove all entries from start to end.
  *
- * If remove_holes, then remove ET_HOLE entries as well.
- * If markfree, entry will be properly marked free, otherwise, no replacement
- * entry will be put in the tree (corrupting the tree).
+ * If UVM_UNMAP_RM_HOLES, then remove ET_HOLE entries as well.
+ * Unless UVM_UNMAP_NO_FREE is given, entries will be properly marked free;
+ * otherwise, no replacement entry will be put in the tree (corrupting the tree).
  */
 void
 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
-    struct uvm_map_deadq *dead, boolean_t remove_holes,
-    boolean_t markfree)
+    struct uvm_map_deadq *dead, int flags)
 {
        struct vm_map_entry *prev_hint, *next, *entry;
 
@@ -1808,7 +1813,7 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
         */
        entry = uvm_map_entrybyaddr(&map->addr, start);
        KDASSERT(entry != NULL && entry->start <= start);
-       if (entry->end <= start && markfree)
+       if (entry->end <= start && !(flags & UVM_UNMAP_NO_FREE))
                entry = RB_NEXT(uvm_map_addr, &map->addr, entry);
        else
                UVM_MAP_CLIP_START(map, entry, start);
@@ -1820,14 +1825,14 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
        prev_hint = NULL;
        for (; entry != NULL && entry->start < end; entry = next) {
                KDASSERT(entry->start >= start);
-               if (entry->end > end || !markfree)
+               if (entry->end > end || (flags & UVM_UNMAP_NO_FREE))
                        UVM_MAP_CLIP_END(map, entry, end);
                KDASSERT(entry->start >= start && entry->end <= end);
                next = RB_NEXT(uvm_map_addr, &map->addr, entry);
 
                /* Don't remove holes unless asked to do so. */
                if (UVM_ET_ISHOLE(entry)) {
-                       if (!remove_holes) {
+                       if (!(flags & UVM_UNMAP_RM_HOLES)) {
                                prev_hint = entry;
                                continue;
                        }
@@ -1851,13 +1856,17 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
                /*
                 * Actual removal of entry.
                 */
-               uvm_mapent_mkfree(map, entry, &prev_hint, dead, markfree);
+               uvm_mapent_mkfree(map, entry, &prev_hint, dead,
+                   !(flags & UVM_UNMAP_NO_FREE));
+
+               /* Allow other calls into kernel. */
+               uvm_ileave(flags);
        }
 
        pmap_update(vm_map_pmap(map));
 
 #ifdef VMMAP_DEBUG
-       if (markfree) {
+       if (!(flags & UVM_UNMAP_NO_FREE)) {
                for (entry = uvm_map_entrybyaddr(&map->addr, start);
                    entry != NULL && entry->start < end;
                    entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) {
@@ -2302,7 +2311,7 @@ uvm_map_setup(struct vm_map *map, vaddr_t min, vaddr_t max, int flags)
  * This is the inverse operation to uvm_map_setup.
  */
 void
-uvm_map_teardown(struct vm_map *map)
+uvm_map_teardown(struct vm_map *map, int flags)
 {
        struct uvm_map_deadq     dead_entries;
        int                      i;
@@ -2341,7 +2350,7 @@ uvm_map_teardown(struct vm_map *map)
         *   is black, the rest is grey.
         * The set [entry, end] is also referred to as the wavefront.
         *
-        * Since the tree is always a fully connected graph, the breadth-first
+        * Since the tree is always a minimal connected graph, the breadth-first
         * search guarantees that each vmmap_entry is visited exactly once.
         * The vm_map is broken down in linear time.
         */
@@ -2356,6 +2365,9 @@ uvm_map_teardown(struct vm_map *map)
                        DEAD_ENTRY_PUSH(&dead_entries, tmp);
                /* Update wave-front. */
                entry = TAILQ_NEXT(entry, dfree.deadq);
+
+               /* Allow other calls into kernel. */
+               uvm_ileave(flags);
        }
 
        if ((map->flags & VM_MAP_INTRSAFE) == 0)
@@ -2369,7 +2381,7 @@ uvm_map_teardown(struct vm_map *map)
                numq++;
        KASSERT(numt == numq);
 #endif
-       uvm_unmap_detach(&dead_entries, 0);
+       uvm_unmap_detach(&dead_entries, 0, flags);
        pmap_destroy(map->pmap);
        map->pmap = NULL;
 }
@@ -3169,7 +3181,7 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
                 * (as in, not replace them with free-memory entries).
                 */
                uvm_unmap_remove(map, map->min_offset, map->max_offset,
-                   &dead_entries, TRUE, FALSE);
+                   &dead_entries, UVM_UNMAP_NO_FREE | UVM_UNMAP_RM_HOLES);
 
                KDASSERT(RB_EMPTY(&map->addr));
 
@@ -3217,13 +3229,13 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
                p->p_vmspace = nvm;
                pmap_activate(p);
 
-               uvmspace_free(ovm);
+               uvmspace_free(ovm, 0);
        }
 
        /*
         * Release dead entries
         */
-       uvm_unmap_detach(&dead_entries, 0);
+       uvm_unmap_detach(&dead_entries, 0, 0);
 }
 
 /*
@@ -3233,7 +3245,7 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
  */
 
 void
-uvmspace_free(struct vmspace *vm)
+uvmspace_free(struct vmspace *vm, int flags)
 {
        if (--vm->vm_refcnt == 0) {
                /*
@@ -3247,7 +3259,7 @@ uvmspace_free(struct vmspace *vm)
                        shmexit(vm);
 #endif
 
-               uvm_map_teardown(&vm->vm_map);
+               uvm_map_teardown(&vm->vm_map, flags);
                pool_put(&uvm_vmspace_pool, vm);
        }
 }
@@ -3595,7 +3607,7 @@ uvmspace_fork(struct vmspace *vm1)
         * This can actually happen, if multiple entries described a
         * space in which an entry was inherited.
         */
-       uvm_unmap_detach(&dead, 0);
+       uvm_unmap_detach(&dead, 0, 0);
 
 #ifdef SYSVSHM
        if (vm1->vm_shm)
@@ -3752,30 +3764,20 @@ void
 uvm_map_deallocate(vm_map_t map)
 {
        int c;
-       struct uvm_map_deadq dead;
 
        simple_lock(&map->ref_lock);
        c = --map->ref_count;
        simple_unlock(&map->ref_lock);
-       if (c > 0) {
+       if (c > 0)
                return;
-       }
 
        /*
         * all references gone.   unmap and free.
         *
         * No lock required: we are only one to access this map.
         */
-
-       TAILQ_INIT(&dead);
-       uvm_tree_sanity(map, __FILE__, __LINE__);
-       uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead,
-           TRUE, FALSE);
-       pmap_destroy(map->pmap);
-       KASSERT(RB_EMPTY(&map->addr));
+       uvm_map_teardown(map, 0);
        free(map, M_VMMAP);
-
-       uvm_unmap_detach(&dead, 0);
 }
 
 /* 
@@ -4030,10 +4032,8 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
         * Unmap copied entries on failure.
         */
 fail2_unmap:
-       if (error) {
-               uvm_unmap_remove(kernel_map, dstaddr, dstaddr + len, &dead,
-                   FALSE, TRUE);
-       }
+       if (error)
+               uvm_unmap_remove(kernel_map, dstaddr, dstaddr + len, &dead, 0);
 
        /*
         * Release maps, release dead entries.
@@ -4044,7 +4044,7 @@ fail2:
 fail:
        vm_map_unlock(srcmap);
 
-       uvm_unmap_detach(&dead, 0);
+       uvm_unmap_detach(&dead, 0, 0);
 
        return error;
 }
@@ -4623,7 +4623,7 @@ uvm_map_set_uaddr(struct vm_map *map, struct uvm_addr_state **which,
 
        uvm_map_freelist_update_refill(map, 0);
        vm_map_unlock(map);
-       uvm_unmap_detach(&dead, 0);
+       uvm_unmap_detach(&dead, 0, 0);
 }
 
 /*
diff --git uvm/uvm_map.h uvm/uvm_map.h
index 9cf6827..8e38afc 100644
--- uvm/uvm_map.h
+++ uvm/uvm_map.h
@@ -397,14 +397,14 @@ int               uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
                    vaddr_t *);
 void           uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
 int            uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
-void           uvm_unmap(vm_map_t, vaddr_t, vaddr_t);
+void           uvm_unmap(vm_map_t, vaddr_t, vaddr_t, int);
 void           uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
                    struct uvm_addr_state*);
 int            uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);
 
-void           uvm_unmap_detach(struct uvm_map_deadq*, int);
+void           uvm_unmap_detach(struct uvm_map_deadq*, int, int);
 void           uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
-                   struct uvm_map_deadq*, boolean_t, boolean_t);
+                   struct uvm_map_deadq*, int);
 
 #endif /* _KERNEL */
 
diff --git uvm/uvm_mmap.c uvm/uvm_mmap.c
index da9a291..de63fb4 100644
--- uvm/uvm_mmap.c
+++ uvm/uvm_mmap.c
@@ -625,7 +625,7 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
 
        addr = (vaddr_t) SCARG(uap, addr);
        size = (vsize_t) SCARG(uap, len);
-       
+
        /*
         * align the address to a page boundary, and adjust the size accordingly
         */
@@ -660,11 +660,11 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
         * doit!
         */
        TAILQ_INIT(&dead_entries);
-       uvm_unmap_remove(map, addr, addr + size, &dead_entries, FALSE, TRUE);
+       uvm_unmap_remove(map, addr, addr + size, &dead_entries, UVM_OP_ILEAVE);
 
        vm_map_unlock(map);     /* and unlock */
 
-       uvm_unmap_detach(&dead_entries, 0);
+       uvm_unmap_detach(&dead_entries, 0, UVM_OP_ILEAVE);
 
        return (0);
 }
@@ -995,7 +995,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
                if (*addr & PAGE_MASK)
                        return(EINVAL);
                uvmflag |= UVM_FLAG_FIXED;
-               uvm_unmap(map, *addr, *addr + size);    /* zap! */
+               uvm_unmap(map, *addr, *addr + size, 0);
        }
 
        /*
@@ -1116,7 +1116,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
                                error = ENOMEM;
                                vm_map_unlock(map);
                                /* unmap the region! */
-                               uvm_unmap(map, *addr, *addr + size);
+                               uvm_unmap(map, *addr, *addr + size, 0);
                                goto bad;
                        }
                        /*
@@ -1127,7 +1127,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
                            FALSE, UVM_LK_ENTER);
                        if (error != 0) {
                                /* unmap the region! */
-                               uvm_unmap(map, *addr, *addr + size);
+                               uvm_unmap(map, *addr, *addr + size, 0);
                                goto bad;
                        }
                        return (0);
diff --git uvm/uvm_user.c uvm/uvm_user.c
index 6875b55..6f1878f 100644
--- uvm/uvm_user.c
+++ uvm/uvm_user.c
@@ -60,5 +60,5 @@ uvm_deallocate(struct vm_map *map, vaddr_t start, vsize_t size)
        if (size == 0)
                return;
 
-       uvm_unmap(map, trunc_page(start), round_page(start+size));
+       uvm_unmap(map, trunc_page(start), round_page(start+size), 0);
 }
