On 20/10/22(Thu) 16:17, Martin Pieuchot wrote:
> On 11/09/22(Sun) 12:26, Martin Pieuchot wrote:
> > Diff below adds a minimalist set of assertions to ensure proper locks
> > are held in uvm_mapanon() and uvm_unmap_remove() which are the guts of
> > mmap(2) for anons and munmap(2).
> > 
> > Please test it with WITNESS enabled and report back.
> 
> New version of the diff that includes a lock/unlock dance in
> uvm_map_teardown().  While grabbing this lock should not be strictly
> necessary, because no other reference to the map should exist when the
> reaper is holding it, it helps make progress with the asserts.  Grabbing
> the lock is easy and it can also save us a lot of time if there are any
> reference counting bugs (like the ones we've discovered w/ vnode and swapping).

Here's an updated version that adds a lock/unlock dance in
uvm_map_deallocate() to satisfy the assert in uvm_unmap_remove().
Thanks to tb@ for pointing this out.

I have received plenty of positive feedback and test reports, so I'm now
asking for oks.
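
For reviewers, here is a minimal sketch of the locking discipline the
new asserts encode.  It is a hypothetical caller fragment, not part of
the diff: it mirrors what uvm_unmap() already does, using the
seven-argument uvm_unmap_remove() from this tree.  The write lock must
be held across the call, otherwise the new vm_map_assert_wrlock()
fires:

	struct uvm_map_deadq dead;

	TAILQ_INIT(&dead);
	vm_map_lock(map);	/* take the exclusive map lock */
	/* write lock held: vm_map_assert_wrlock() in the callee passes */
	uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE, FALSE);
	vm_map_unlock(map);
	/* dead entries are freed without the map lock held */
	uvm_unmap_detach(&dead, 0);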


Index: uvm/uvm_addr.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_addr.c,v
retrieving revision 1.31
diff -u -p -r1.31 uvm_addr.c
--- uvm/uvm_addr.c      21 Feb 2022 10:26:20 -0000      1.31
+++ uvm/uvm_addr.c      28 Oct 2022 08:41:30 -0000
@@ -416,6 +416,8 @@ uvm_addr_invoke(struct vm_map *map, stru
            !(hint >= uaddr->uaddr_minaddr && hint < uaddr->uaddr_maxaddr))
                return ENOMEM;
 
+       vm_map_assert_anylock(map);
+
        error = (*uaddr->uaddr_functions->uaddr_select)(map, uaddr,
            entry_out, addr_out, sz, align, offset, prot, hint);
 
Index: uvm/uvm_fault.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.132
diff -u -p -r1.132 uvm_fault.c
--- uvm/uvm_fault.c     31 Aug 2022 01:27:04 -0000      1.132
+++ uvm/uvm_fault.c     28 Oct 2022 08:41:30 -0000
@@ -1626,6 +1626,7 @@ uvm_fault_unwire_locked(vm_map_t map, va
        struct vm_page *pg;
 
        KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
+       vm_map_assert_anylock(map);
 
        /*
         * we assume that the area we are unwiring has actually been wired
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.301
diff -u -p -r1.301 uvm_map.c
--- uvm/uvm_map.c       24 Oct 2022 15:11:56 -0000      1.301
+++ uvm/uvm_map.c       28 Oct 2022 08:46:28 -0000
@@ -491,6 +491,8 @@ uvmspace_dused(struct vm_map *map, vaddr
        vaddr_t stack_begin, stack_end; /* Position of stack. */
 
        KASSERT(map->flags & VM_MAP_ISVMSPACE);
+       vm_map_assert_anylock(map);
+
        vm = (struct vmspace *)map;
        stack_begin = MIN((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
        stack_end = MAX((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
@@ -570,6 +572,8 @@ uvm_map_isavail(struct vm_map *map, stru
        if (addr + sz < addr)
                return 0;
 
+       vm_map_assert_anylock(map);
+
        /*
         * Kernel memory above uvm_maxkaddr is considered unavailable.
         */
@@ -1457,6 +1461,8 @@ uvm_map_mkentry(struct vm_map *map, stru
        entry->guard = 0;
        entry->fspace = 0;
 
+       vm_map_assert_wrlock(map);
+
        /* Reset free space in first. */
        free = uvm_map_uaddr_e(map, first);
        uvm_mapent_free_remove(map, free, first);
@@ -1584,6 +1590,8 @@ boolean_t
 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
     struct vm_map_entry **entry)
 {
+       vm_map_assert_anylock(map);
+
        *entry = uvm_map_entrybyaddr(&map->addr, address);
        return *entry != NULL && !UVM_ET_ISHOLE(*entry) &&
            (*entry)->start <= address && (*entry)->end > address;
@@ -1704,6 +1712,8 @@ uvm_map_is_stack_remappable(struct vm_ma
        vaddr_t end = addr + sz;
        struct vm_map_entry *first, *iter, *prev = NULL;
 
+       vm_map_assert_anylock(map);
+
        if (!uvm_map_lookup_entry(map, addr, &first)) {
                printf("map stack 0x%lx-0x%lx of map %p failed: no mapping\n",
                    addr, end, map);
@@ -1868,6 +1878,8 @@ uvm_mapent_mkfree(struct vm_map *map, st
        vaddr_t                  addr;  /* Start of freed range. */
        vaddr_t                  end;   /* End of freed range. */
 
+       UVM_MAP_REQ_WRITE(map);
+
        prev = *prev_ptr;
        if (prev == entry)
                *prev_ptr = prev = NULL;
@@ -1996,10 +2008,7 @@ uvm_unmap_remove(struct vm_map *map, vad
        if (start >= end)
                return 0;
 
-       if ((map->flags & VM_MAP_INTRSAFE) == 0)
-               splassert(IPL_NONE);
-       else
-               splassert(IPL_VM);
+       vm_map_assert_wrlock(map);
 
        /* Find first affected entry. */
        entry = uvm_map_entrybyaddr(&map->addr, start);
@@ -2531,6 +2540,8 @@ uvm_map_teardown(struct vm_map *map)
 
        KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
 
+       vm_map_lock(map);
+
        /* Remove address selectors. */
        uvm_addr_destroy(map->uaddr_exe);
        map->uaddr_exe = NULL;
@@ -2572,6 +2583,8 @@ uvm_map_teardown(struct vm_map *map)
                entry = TAILQ_NEXT(entry, dfree.deadq);
        }
 
+       vm_map_unlock(map);
+
 #ifdef VMMAP_DEBUG
        numt = numq = 0;
        RBT_FOREACH(entry, uvm_map_addr, &map->addr)
@@ -4082,6 +4095,8 @@ uvm_map_checkprot(struct vm_map *map, va
 {
        struct vm_map_entry *entry;
 
+       vm_map_assert_anylock(map);
+
        if (start < map->min_offset || end > map->max_offset || start > end)
                return FALSE;
        if (start == end)
@@ -4142,8 +4157,10 @@ uvm_map_deallocate(vm_map_t map)
         */
        TAILQ_INIT(&dead);
        uvm_tree_sanity(map, __FILE__, __LINE__);
+       vm_map_lock(map);
        uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead,
            TRUE, FALSE, FALSE);
+       vm_map_unlock(map);
        pmap_destroy(map->pmap);
        KASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
        free(map, M_VMMAP, sizeof *map);
@@ -4980,6 +4997,7 @@ uvm_map_freelist_update(struct vm_map *m
     vaddr_t b_start, vaddr_t b_end, vaddr_t s_start, vaddr_t s_end, int flags)
 {
        KDASSERT(b_end >= b_start && s_end >= s_start);
+       vm_map_assert_wrlock(map);
 
        /* Clear all free lists. */
        uvm_map_freelist_update_clear(map, dead);
@@ -5039,6 +5057,8 @@ uvm_map_fix_space(struct vm_map *map, st
        KDASSERT((entry != NULL && VMMAP_FREE_END(entry) == min) ||
            min == map->min_offset);
 
+       UVM_MAP_REQ_WRITE(map);
+
        /*
         * During the function, entfree will always point at the uaddr state
         * for entry.
@@ -5402,6 +5422,27 @@ vm_map_unbusy_ln(struct vm_map *map, cha
        mtx_leave(&map->flags_lock);
        if (oflags & VM_MAP_WANTLOCK)
                wakeup(&map->flags);
+}
+
+void
+vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
+{
+       LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, 
line));
+       if ((map->flags & VM_MAP_INTRSAFE) == 0)
+               rw_assert_anylock(&map->lock);
+       else
+               MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line)
+{
+       LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
+       if ((map->flags & VM_MAP_INTRSAFE) == 0) {
+               splassert(IPL_NONE);
+               rw_assert_wrlock(&map->lock);
+       } else
+               MUTEX_ASSERT_LOCKED(&map->mtx);
 }
 
 #ifndef SMALL_KERNEL
Index: uvm/uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
retrieving revision 1.79
diff -u -p -r1.79 uvm_map.h
--- uvm/uvm_map.h       21 Oct 2022 19:13:33 -0000      1.79
+++ uvm/uvm_map.h       28 Oct 2022 08:41:30 -0000
@@ -420,6 +420,8 @@ void                vm_map_downgrade_ln(struct vm_map*
 void           vm_map_upgrade_ln(struct vm_map*, char*, int);
 void           vm_map_busy_ln(struct vm_map*, char*, int);
 void           vm_map_unbusy_ln(struct vm_map*, char*, int);
+void           vm_map_assert_anylock_ln(struct vm_map*, char*, int);
+void           vm_map_assert_wrlock_ln(struct vm_map*, char*, int);
 
 #ifdef DIAGNOSTIC
 #define vm_map_lock_try(map)   vm_map_lock_try_ln(map, __FILE__, __LINE__)
@@ -431,6 +433,10 @@ void               vm_map_unbusy_ln(struct vm_map*, c
 #define vm_map_upgrade(map)    vm_map_upgrade_ln(map, __FILE__, __LINE__)
 #define vm_map_busy(map)       vm_map_busy_ln(map, __FILE__, __LINE__)
 #define vm_map_unbusy(map)     vm_map_unbusy_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_anylock(map)     \
+               vm_map_assert_anylock_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_wrlock(map)      \
+               vm_map_assert_wrlock_ln(map, __FILE__, __LINE__)
 #else
 #define vm_map_lock_try(map)   vm_map_lock_try_ln(map, NULL, 0)
 #define vm_map_lock(map)       vm_map_lock_ln(map, NULL, 0)
@@ -441,6 +447,8 @@ void                vm_map_unbusy_ln(struct vm_map*, c
 #define vm_map_upgrade(map)    vm_map_upgrade_ln(map, NULL, 0)
 #define vm_map_busy(map)       vm_map_busy_ln(map, NULL, 0)
 #define vm_map_unbusy(map)     vm_map_unbusy_ln(map, NULL, 0)
+#define vm_map_assert_anylock(map)     vm_map_assert_anylock_ln(map, NULL, 0)
+#define vm_map_assert_wrlock(map)      vm_map_assert_wrlock_ln(map, NULL, 0)
 #endif
 
 void           uvm_map_lock_entry(struct vm_map_entry *);
