On Sun, Sep 11, 2022 at 12:26:31PM +0200, Martin Pieuchot wrote:
> Diff below adds a minimalist set of assertions to ensure proper locks
> are held in uvm_mapanon() and uvm_unmap_remove(), which are the guts of
> mmap(2) for anons and munmap(2).
>
> Please test it with WITNESS enabled and report back.
>

Do you want this tested in conjunction with the aiodoned diff or by itself?
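For anyone else testing, my reading of the diff below: vm_map_assert_anylock()
is satisfied by either a shared or an exclusive hold of the map's rwlock (or
the mutex for VM_MAP_INTRSAFE maps), while vm_map_assert_wrlock() demands the
exclusive lock.  A minimal sketch of the caller pattern the assertions encode;
the example_* functions are made up here for illustration, not part of the
diff:

/*
 * Sketch only: the locking discipline the new assertions enforce.
 */
static void
example_lookup(struct vm_map *map, vaddr_t addr)
{
	struct vm_map_entry *entry;

	vm_map_lock_read(map);		/* a shared hold suffices...      */
	vm_map_assert_anylock(map);	/* ...so this assertion passes    */
	(void)uvm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
}

static void
example_modify(struct vm_map *map)
{
	vm_map_lock(map);		/* exclusive hold required...     */
	vm_map_assert_wrlock(map);	/* ...on map-modifying paths      */
	/* ...modify entries here, e.g. via uvm_unmap_remove()... */
	vm_map_unlock(map);
}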

> Index: uvm/uvm_addr.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_addr.c,v
> retrieving revision 1.31
> diff -u -p -r1.31 uvm_addr.c
> --- uvm/uvm_addr.c    21 Feb 2022 10:26:20 -0000      1.31
> +++ uvm/uvm_addr.c    11 Sep 2022 09:08:10 -0000
> @@ -416,6 +416,8 @@ uvm_addr_invoke(struct vm_map *map, stru
>           !(hint >= uaddr->uaddr_minaddr && hint < uaddr->uaddr_maxaddr))
>               return ENOMEM;
>
> +     vm_map_assert_anylock(map);
> +
>       error = (*uaddr->uaddr_functions->uaddr_select)(map, uaddr,
>           entry_out, addr_out, sz, align, offset, prot, hint);
>
> Index: uvm/uvm_fault.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
> retrieving revision 1.132
> diff -u -p -r1.132 uvm_fault.c
> --- uvm/uvm_fault.c   31 Aug 2022 01:27:04 -0000      1.132
> +++ uvm/uvm_fault.c   11 Sep 2022 08:57:35 -0000
> @@ -1626,6 +1626,7 @@ uvm_fault_unwire_locked(vm_map_t map, va
>       struct vm_page *pg;
>
>       KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
> +     vm_map_assert_anylock(map);
>
>       /*
>        * we assume that the area we are unwiring has actually been wired
> Index: uvm/uvm_map.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_map.c,v
> retrieving revision 1.294
> diff -u -p -r1.294 uvm_map.c
> --- uvm/uvm_map.c     15 Aug 2022 15:53:45 -0000      1.294
> +++ uvm/uvm_map.c     11 Sep 2022 09:37:44 -0000
> @@ -162,6 +162,8 @@ int                   uvm_map_inentry_recheck(u_long, v
>                            struct p_inentry *);
>  boolean_t             uvm_map_inentry_fix(struct proc *, struct p_inentry *,
>                            vaddr_t, int (*)(vm_map_entry_t), u_long);
> +boolean_t             uvm_map_is_stack_remappable(struct vm_map *,
> +                          vaddr_t, vsize_t);
>  /*
>   * Tree management functions.
>   */
> @@ -491,6 +493,8 @@ uvmspace_dused(struct vm_map *map, vaddr
>       vaddr_t stack_begin, stack_end; /* Position of stack. */
>
>       KASSERT(map->flags & VM_MAP_ISVMSPACE);
> +     vm_map_assert_anylock(map);
> +
>       vm = (struct vmspace *)map;
>       stack_begin = MIN((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
>       stack_end = MAX((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
> @@ -570,6 +574,8 @@ uvm_map_isavail(struct vm_map *map, stru
>       if (addr + sz < addr)
>               return 0;
>
> +     vm_map_assert_anylock(map);
> +
>       /*
>        * Kernel memory above uvm_maxkaddr is considered unavailable.
>        */
> @@ -1446,6 +1452,8 @@ uvm_map_mkentry(struct vm_map *map, stru
>       entry->guard = 0;
>       entry->fspace = 0;
>
> +     vm_map_assert_wrlock(map);
> +
>       /* Reset free space in first. */
>       free = uvm_map_uaddr_e(map, first);
>       uvm_mapent_free_remove(map, free, first);
> @@ -1573,6 +1581,8 @@ boolean_t
>  uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
>      struct vm_map_entry **entry)
>  {
> +     vm_map_assert_anylock(map);
> +
>       *entry = uvm_map_entrybyaddr(&map->addr, address);
>       return *entry != NULL && !UVM_ET_ISHOLE(*entry) &&
>           (*entry)->start <= address && (*entry)->end > address;
> @@ -1692,6 +1702,8 @@ uvm_map_is_stack_remappable(struct vm_ma
>       vaddr_t end = addr + sz;
>       struct vm_map_entry *first, *iter, *prev = NULL;
>
> +     vm_map_assert_anylock(map);
> +
>       if (!uvm_map_lookup_entry(map, addr, &first)) {
>               printf("map stack 0x%lx-0x%lx of map %p failed: no mapping\n",
>                   addr, end, map);
> @@ -1843,6 +1855,8 @@ uvm_mapent_mkfree(struct vm_map *map, st
>       vaddr_t                  addr;  /* Start of freed range. */
>       vaddr_t                  end;   /* End of freed range. */
>
> +     UVM_MAP_REQ_WRITE(map);
> +
>       prev = *prev_ptr;
>       if (prev == entry)
>               *prev_ptr = prev = NULL;
> @@ -1971,10 +1985,7 @@ uvm_unmap_remove(struct vm_map *map, vad
>       if (start >= end)
>               return;
>
> -     if ((map->flags & VM_MAP_INTRSAFE) == 0)
> -             splassert(IPL_NONE);
> -     else
> -             splassert(IPL_VM);
> +     vm_map_assert_wrlock(map);
>
>       /* Find first affected entry. */
>       entry = uvm_map_entrybyaddr(&map->addr, start);
> @@ -4027,6 +4038,8 @@ uvm_map_checkprot(struct vm_map *map, va
>  {
>       struct vm_map_entry *entry;
>
> +     vm_map_assert_anylock(map);
> +
>       if (start < map->min_offset || end > map->max_offset || start > end)
>               return FALSE;
>       if (start == end)
> @@ -4886,6 +4899,7 @@ uvm_map_freelist_update(struct vm_map *m
>      vaddr_t b_start, vaddr_t b_end, vaddr_t s_start, vaddr_t s_end, int flags)
>  {
>       KDASSERT(b_end >= b_start && s_end >= s_start);
> +     vm_map_assert_wrlock(map);
>
>       /* Clear all free lists. */
>       uvm_map_freelist_update_clear(map, dead);
> @@ -4945,6 +4959,8 @@ uvm_map_fix_space(struct vm_map *map, st
>       KDASSERT((entry != NULL && VMMAP_FREE_END(entry) == min) ||
>           min == map->min_offset);
>
> +     UVM_MAP_REQ_WRITE(map);
> +
>       /*
>        * During the function, entfree will always point at the uaddr state
>        * for entry.
> @@ -5308,6 +5324,27 @@ vm_map_unbusy_ln(struct vm_map *map, cha
>       mtx_leave(&map->flags_lock);
>       if (oflags & VM_MAP_WANTLOCK)
>               wakeup(&map->flags);
> +}
> +
> +void
> +vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
> +{
> +     LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, 
> line));
> +     if ((map->flags & VM_MAP_INTRSAFE) == 0)
> +             rw_assert_anylock(&map->lock);
> +     else
> +             MUTEX_ASSERT_LOCKED(&map->mtx);
> +}
> +
> +void
> +vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line)
> +{
> +     LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
> +     if ((map->flags & VM_MAP_INTRSAFE) == 0) {
> +             splassert(IPL_NONE);
> +             rw_assert_wrlock(&map->lock);
> +     } else
> +             MUTEX_ASSERT_LOCKED(&map->mtx);
>  }
>
>  #ifndef SMALL_KERNEL
> Index: uvm/uvm_map.h
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_map.h,v
> retrieving revision 1.75
> diff -u -p -r1.75 uvm_map.h
> --- uvm/uvm_map.h     12 Mar 2022 08:11:07 -0000      1.75
> +++ uvm/uvm_map.h     11 Sep 2022 09:31:23 -0000
> @@ -354,7 +354,6 @@ int               uvm_map_inherit(struct vm_map *, va
>  int          uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
>  void         uvm_map_init(void);
>  boolean_t    uvm_map_lookup_entry(struct vm_map *, vaddr_t, vm_map_entry_t *);
> -boolean_t    uvm_map_is_stack_remappable(struct vm_map *, vaddr_t, vsize_t);
>  int          uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t);
>  int          uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
>                   vm_map_entry_t, int);
> @@ -419,6 +418,8 @@ void              vm_map_downgrade_ln(struct vm_map*
>  void         vm_map_upgrade_ln(struct vm_map*, char*, int);
>  void         vm_map_busy_ln(struct vm_map*, char*, int);
>  void         vm_map_unbusy_ln(struct vm_map*, char*, int);
> +void         vm_map_assert_anylock_ln(struct vm_map*, char*, int);
> +void         vm_map_assert_wrlock_ln(struct vm_map*, char*, int);
>
>  #ifdef DIAGNOSTIC
>  #define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__)
> @@ -430,6 +431,10 @@ void             vm_map_unbusy_ln(struct vm_map*, c
>  #define vm_map_upgrade(map)  vm_map_upgrade_ln(map, __FILE__, __LINE__)
>  #define vm_map_busy(map)     vm_map_busy_ln(map, __FILE__, __LINE__)
>  #define vm_map_unbusy(map)   vm_map_unbusy_ln(map, __FILE__, __LINE__)
> +#define vm_map_assert_anylock(map)   \
> +             vm_map_assert_anylock_ln(map, __FILE__, __LINE__)
> +#define vm_map_assert_wrlock(map)    \
> +             vm_map_assert_wrlock_ln(map, __FILE__, __LINE__)
>  #else
>  #define vm_map_lock_try(map) vm_map_lock_try_ln(map, NULL, 0)
>  #define vm_map_lock(map)     vm_map_lock_ln(map, NULL, 0)
> @@ -440,6 +445,8 @@ void              vm_map_unbusy_ln(struct vm_map*, c
>  #define vm_map_upgrade(map)  vm_map_upgrade_ln(map, NULL, 0)
>  #define vm_map_busy(map)     vm_map_busy_ln(map, NULL, 0)
>  #define vm_map_unbusy(map)   vm_map_unbusy_ln(map, NULL, 0)
> +#define vm_map_assert_anylock(map)   vm_map_assert_anylock_ln(map, NULL, 0)
> +#define vm_map_assert_wrlock(map)    vm_map_assert_wrlock_ln(map, NULL, 0)
>  #endif
>
>  void         uvm_map_lock_entry(struct vm_map_entry *);
>