When the kernel runs low on memory, the pagedaemon thread tries various strategies to reclaim pages.
One of those strategies is to ask the pmap layer to free some memory. This is done in uvm_swapout_threads(), which is roughly a wrapper around invoking pmap_collect() on behalf of every process. However, most pmap layers do not implement pmap_collect() and only provide a stub that does nothing. It makes little sense to iterate over the whole process list only to invoke a function that does absolutely nothing.

The following diff makes pmap_collect() an optional interface: pmaps that implement it define __HAVE_PMAP_COLLECT. This feature macro is used to omit uvm_swapout_threads() entirely when pmap_collect() is not available, and the swap-out check in uvm_pdaemon.c now keys on it instead of hppa's __SWAP_BROKEN, which can go away.

Index: arch/alpha/include/pmap.h
===================================================================
RCS file: /OpenBSD/src/sys/arch/alpha/include/pmap.h,v
retrieving revision 1.40
diff -u -p -r1.40 pmap.h
--- arch/alpha/include/pmap.h	20 Apr 2016 05:24:18 -0000	1.40
+++ arch/alpha/include/pmap.h	10 Sep 2022 08:00:10 -0000
@@ -197,6 +197,8 @@ extern pt_entry_t *VPT;	/* Virtual Page
 
 paddr_t vtophys(vaddr_t);
 
+#define __HAVE_PMAP_COLLECT
+
 /* Machine-specific functions. */
 void	pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids);
 int	pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int type);
Index: arch/amd64/amd64/pmap.c
===================================================================
RCS file: /OpenBSD/src/sys/arch/amd64/amd64/pmap.c,v
retrieving revision 1.153
diff -u -p -r1.153 pmap.c
--- arch/amd64/amd64/pmap.c	30 Jun 2022 13:51:24 -0000	1.153
+++ arch/amd64/amd64/pmap.c	10 Sep 2022 08:00:10 -0000
@@ -2206,6 +2206,7 @@ pmap_unwire(struct pmap *pmap, vaddr_t v
 #endif
 }
 
+#if 0
 /*
  * pmap_collect: free resources held by a pmap
  *
@@ -2221,10 +2222,10 @@ pmap_collect(struct pmap *pmap)
 	 * for its entire address space.
 	 */
 
-/*	pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS,
+	pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS,
 	    PMAP_REMOVE_SKIPWIRED);
-*/
 }
+#endif
 
 /*
  * pmap_copy: copy mappings from one pmap to another
Index: arch/arm/arm/pmap7.c
===================================================================
RCS file: /OpenBSD/src/sys/arch/arm/arm/pmap7.c,v
retrieving revision 1.63
diff -u -p -r1.63 pmap7.c
--- arch/arm/arm/pmap7.c	21 Feb 2022 19:15:58 -0000	1.63
+++ arch/arm/arm/pmap7.c	10 Sep 2022 08:00:10 -0000
@@ -1743,21 +1743,6 @@ dab_access(trapframe_t *tf, u_int fsr, u
 }
 
 /*
- * pmap_collect: free resources held by a pmap
- *
- * => optional function.
- * => called when a process is swapped out to free memory.
- */
-void
-pmap_collect(pmap_t pm)
-{
-	/*
-	 * Nothing to do.
-	 * We don't even need to free-up the process' L1.
-	 */
-}
-
-/*
  * Routine:	pmap_proc_iflush
  *
  * Function:
Index: arch/arm64/arm64/pmap.c
===================================================================
RCS file: /OpenBSD/src/sys/arch/arm64/arm64/pmap.c,v
retrieving revision 1.84
diff -u -p -r1.84 pmap.c
--- arch/arm64/arm64/pmap.c	10 Jan 2022 09:20:27 -0000	1.84
+++ arch/arm64/arm64/pmap.c	10 Sep 2022 08:00:10 -0000
@@ -856,24 +856,6 @@ pmap_fill_pte(pmap_t pm, vaddr_t va, pad
 }
 
 /*
- * Garbage collects the physical map system for pages which are
- * no longer used. Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but others may be collected
- * Called by the pageout daemon when pages are scarce.
- */
-void
-pmap_collect(pmap_t pm)
-{
-	/* This could return unused v->p table layers which
-	 * are empty.
-	 * could malicious programs allocate memory and eat
-	 * these wired pages? These are allocated via pool.
-	 * Are there pool functions which could be called
-	 * to lower the pool usage here?
-	 */
-}
-
-/*
  * Fill the given physical page with zeros.
  */
 void
Index: arch/hppa/hppa/pmap.c
===================================================================
RCS file: /OpenBSD/src/sys/arch/hppa/hppa/pmap.c,v
retrieving revision 1.177
diff -u -p -r1.177 pmap.c
--- arch/hppa/hppa/pmap.c	14 Sep 2021 16:16:51 -0000	1.177
+++ arch/hppa/hppa/pmap.c	10 Sep 2022 08:00:10 -0000
@@ -734,13 +734,6 @@ pmap_reference(struct pmap *pmap)
 	atomic_inc_int(&pmap->pm_obj.uo_refs);
 }
 
-void
-pmap_collect(struct pmap *pmap)
-{
-	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_collect(%p)\n", pmap));
-	/* nothing yet */
-}
-
 int
 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 {
Index: arch/hppa/include/param.h
===================================================================
RCS file: /OpenBSD/src/sys/arch/hppa/include/param.h,v
retrieving revision 1.47
diff -u -p -r1.47 param.h
--- arch/hppa/include/param.h	14 Sep 2018 13:58:20 -0000	1.47
+++ arch/hppa/include/param.h	10 Sep 2022 08:00:10 -0000
@@ -66,6 +66,5 @@
 #endif /* _KERNEL */
 
 #define	MACHINE_STACK_GROWS_UP	1	/* stack grows to higher addresses */
-#define	__SWAP_BROKEN
 
 #endif /* _MACHINE_PARAM_H_ */
Index: arch/m88k/include/pmap.h
===================================================================
RCS file: /OpenBSD/src/sys/arch/m88k/include/pmap.h,v
retrieving revision 1.26
diff -u -p -r1.26 pmap.h
--- arch/m88k/include/pmap.h	25 Jul 2015 20:45:05 -0000	1.26
+++ arch/m88k/include/pmap.h	10 Sep 2022 08:00:10 -0000
@@ -80,6 +80,7 @@ int pmap_translation_info(pmap_t, vaddr_
 #define	pmap_unmap_direct(va)	PHYS_TO_VM_PAGE((paddr_t)va)
 #define	__HAVE_PMAP_DIRECT
 
 #define	PMAP_STEAL_MEMORY
+#define	__HAVE_PMAP_COLLECT
 
 #endif	/* _KERNEL */
Index: arch/mips64/include/pmap.h
===================================================================
RCS file: /OpenBSD/src/sys/arch/mips64/include/pmap.h,v
retrieving revision 1.49
diff -u -p -r1.49 pmap.h
--- arch/mips64/include/pmap.h	1 May 2021 16:11:11 -0000	1.49
+++ arch/mips64/include/pmap.h	10 Sep 2022 08:00:10 -0000
@@ -170,6 +170,8 @@ void	pmap_page_cache(vm_page_t, u_int);
 vaddr_t	pmap_map_direct(vm_page_t);
 vm_page_t	pmap_unmap_direct(vaddr_t);
 
+#define	__HAVE_PMAP_COLLECT
+
 /*
  * MD flags to pmap_enter:
  */
Index: arch/powerpc/powerpc/pmap.c
===================================================================
RCS file: /OpenBSD/src/sys/arch/powerpc/powerpc/pmap.c,v
retrieving revision 1.176
diff -u -p -r1.176 pmap.c
--- arch/powerpc/powerpc/pmap.c	7 Feb 2022 23:20:09 -0000	1.176
+++ arch/powerpc/powerpc/pmap.c	10 Sep 2022 08:00:10 -0000
@@ -1052,24 +1052,6 @@ pmap_clear_attrs(struct vm_page *pg, u_i
 }
 
 /*
- * Garbage collects the physical map system for pages which are
- * no longer used. Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but others may be collected
- * Called by the pageout daemon when pages are scarce.
- */
-void
-pmap_collect(pmap_t pm)
-{
-	/* This could return unused v->p table layers which
-	 * are empty.
-	 * could malicious programs allocate memory and eat
-	 * these wired pages? These are allocated via pool.
-	 * Are there pool functions which could be called
-	 * to lower the pool usage here?
-	 */
-}
-
-/*
  * Fill the given physical page with zeros.
  */
 void
Index: arch/powerpc64/powerpc64/pmap.c
===================================================================
RCS file: /OpenBSD/src/sys/arch/powerpc64/powerpc64/pmap.c,v
retrieving revision 1.57
diff -u -p -r1.57 pmap.c
--- arch/powerpc64/powerpc64/pmap.c	12 Oct 2021 18:06:15 -0000	1.57
+++ arch/powerpc64/powerpc64/pmap.c	10 Sep 2022 08:00:10 -0000
@@ -1465,11 +1465,6 @@ pmap_unwire(pmap_t pm, vaddr_t va)
 }
 
 void
-pmap_collect(pmap_t pm)
-{
-}
-
-void
 pmap_zero_page(struct vm_page *pg)
 {
 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
Index: arch/riscv64/riscv64/pmap.c
===================================================================
RCS file: /OpenBSD/src/sys/arch/riscv64/riscv64/pmap.c,v
retrieving revision 1.22
diff -u -p -r1.22 pmap.c
--- arch/riscv64/riscv64/pmap.c	29 Aug 2022 02:01:18 -0000	1.22
+++ arch/riscv64/riscv64/pmap.c	10 Sep 2022 08:00:10 -0000
@@ -761,24 +761,6 @@ pmap_fill_pte(pmap_t pm, vaddr_t va, pad
 }
 
 /*
- * Garbage collects the physical map system for pages which are
- * no longer used. Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but others may be collected
- * Called by the pageout daemon when pages are scarce.
- */
-void
-pmap_collect(pmap_t pm)
-{
-	/* This could return unused v->p table layers which
-	 * are empty.
-	 * could malicious programs allocate memory and eat
-	 * these wired pages? These are allocated via pool.
-	 * Are there pool functions which could be called
-	 * to lower the pool usage here?
-	 */
-}
-
-/*
  * Fill the given physical page with zeros.
  */
 void
Index: arch/sh/include/pmap.h
===================================================================
RCS file: /OpenBSD/src/sys/arch/sh/include/pmap.h,v
retrieving revision 1.14
diff -u -p -r1.14 pmap.h
--- arch/sh/include/pmap.h	15 Feb 2015 21:34:33 -0000	1.14
+++ arch/sh/include/pmap.h	10 Sep 2022 08:00:10 -0000
@@ -64,7 +64,6 @@ void	pmap_bootstrap(void);
 #define	pmap_deactivate(pmap)	do { /* nothing */ } while (0)
 #define	pmap_update(pmap)	do { /* nothing */ } while (0)
 #define	pmap_copy(dp,sp,d,l,s)	do { /* nothing */ } while (0)
-#define	pmap_collect(pmap)	do { /* nothing */ } while (0)
 #define	pmap_wired_count(pmap)	((pmap)->pm_stats.wired_count)
 #define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
 
Index: arch/sparc64/include/pmap.h
===================================================================
RCS file: /OpenBSD/src/sys/arch/sparc64/include/pmap.h,v
retrieving revision 1.31
diff -u -p -r1.31 pmap.h
--- arch/sparc64/include/pmap.h	8 Sep 2022 17:44:48 -0000	1.31
+++ arch/sparc64/include/pmap.h	10 Sep 2022 08:00:10 -0000
@@ -177,6 +177,8 @@ void pmap_bootstrap(u_long, u_long, u_in
 
 #define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
 
+#define __HAVE_PMAP_COLLECT
+
 /* SPARC specific? */
 void	pmap_redzone(void);
 int	pmap_dumpsize(void);
Index: arch/sparc64/sparc64/pmap.c
===================================================================
RCS file: /OpenBSD/src/sys/arch/sparc64/sparc64/pmap.c,v
retrieving revision 1.105
diff -u -p -r1.105 pmap.c
--- arch/sparc64/sparc64/pmap.c	8 Sep 2022 17:44:48 -0000	1.105
+++ arch/sparc64/sparc64/pmap.c	10 Sep 2022 08:00:10 -0000
@@ -1546,7 +1546,6 @@ pmap_copy(struct pmap *dst_pmap, struct
 void
 pmap_collect(struct pmap *pm)
 {
-#if 1
 	int i, j, k, n, m, s;
 	paddr_t *pdir, *ptbl;
 	/* This is a good place to scan the pmaps for page tables with
@@ -1584,7 +1583,6 @@ pmap_collect(struct pmap *pm)
 		}
 	}
 	splx(s);
-#endif
 }
 
 void
Index: uvm/uvm_glue.c
===================================================================
RCS file: /OpenBSD/src/sys/uvm/uvm_glue.c,v
retrieving revision 1.83
diff -u -p -r1.83 uvm_glue.c
--- uvm/uvm_glue.c	12 Mar 2022 08:11:07 -0000	1.83
+++ uvm/uvm_glue.c	10 Sep 2022 08:00:11 -0000
@@ -321,6 +321,8 @@ uvm_init_limits(struct plimit *limit0)
 	limit0->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
 }
 
+#ifdef __HAVE_PMAP_COLLECT
+
 #ifdef DEBUG
 int	enableswap = 1;
 int	swapdebug = 0;
@@ -413,6 +415,8 @@ next_process: ;
 		pmap_collect(outpr->ps_vmspace->vm_map.pmap);
 	}
 }
+
+#endif /* __HAVE_PMAP_COLLECT */
 
 /*
  * uvm_atopg: convert KVAs back to their page structures.
Index: uvm/uvm_pdaemon.c
===================================================================
RCS file: /OpenBSD/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.104
diff -u -p -r1.104 uvm_pdaemon.c
--- uvm/uvm_pdaemon.c	31 Aug 2022 09:26:04 -0000	1.104
+++ uvm/uvm_pdaemon.c	10 Sep 2022 08:00:11 -0000
@@ -890,7 +890,7 @@ uvmpd_scan(struct uvm_pmalloc *pma)
 	 */
 	free = uvmexp.free - BUFPAGES_DEFICIT;
 
-#ifndef __SWAP_BROKEN
+#ifdef __HAVE_PMAP_COLLECT
 	/*
 	 * swap out some processes if we are below our free target.
 	 * we need to unlock the page queues for this.
Index: uvm/uvm_pmap.h
===================================================================
RCS file: /OpenBSD/src/sys/uvm/uvm_pmap.h,v
retrieving revision 1.30
diff -u -p -r1.30 uvm_pmap.h
--- uvm/uvm_pmap.h	7 Aug 2022 19:39:25 -0000	1.30
+++ uvm/uvm_pmap.h	10 Sep 2022 08:00:11 -0000
@@ -126,7 +126,7 @@ boolean_t	 pmap_clear_modify(struct vm_p
 boolean_t	 pmap_clear_reference(struct vm_page *);
 #endif
 
-#if !defined(pmap_collect)
+#if !defined(pmap_collect) && defined(__HAVE_PMAP_COLLECT)
 void		 pmap_collect(pmap_t);
 #endif
 #if !defined(pmap_copy)