Currently it isn't safe to call mtx_enter_try(9) if you're already holding the mutex: on DIAGNOSTIC kernels it panics with "locking against myself". That means it isn't safe to call that function from hardclock(9), as is done with `windup_mtx'. That's why that mutex needs to be initialized at IPL_CLOCK: holding it blocks clock interrupts, so hardclock(9) can never interrupt the critical section and try to take the mutex again.
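For reference, the current usage pattern looks roughly like this (a sketch of the kern_tc.c code, not verbatim):

	/*
	 * Sketch: the IPL_CLOCK initialization means clock interrupts
	 * are blocked while windup_mtx is held, so the recursive-entry
	 * panic in mtx_enter_try(9) can never be reached from
	 * hardclock(9).
	 */
	struct mutex windup_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

	void
	tc_ticktock(void)
	{
		/* Called from hardclock(9) on every tick. */
		if (mtx_enter_try(&windup_mtx) == 0)
			return;
		tc_windup();
		mtx_leave(&windup_mtx);
	}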
I'm working on removing the SCHED_LOCK() from inside hardclock(9). That leads me to wonder whether I should initialize all such mutexes at IPL_SCHED, blocking clock interrupts while they are held, or whether we should change the mutex API so that mtx_enter_try(9) can deal with recursion. The diff below disables the recursion check for mtx_enter_try(9) while keeping it for mtx_enter(9).

Comments? Oks?

Index: kern/kern_lock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_lock.c,v
retrieving revision 1.69
diff -u -p -r1.69 kern_lock.c
--- kern/kern_lock.c	23 Apr 2019 13:35:12 -0000	1.69
+++ kern/kern_lock.c	1 Jun 2019 18:26:39 -0000
@@ -251,6 +251,8 @@ __mtx_init(struct mutex *mtx, int wantip
 }
 
 #ifdef MULTIPROCESSOR
+int _mtx_enter_try(struct mutex *, int);
+
 void
 mtx_enter(struct mutex *mtx)
 {
@@ -263,7 +265,7 @@ mtx_enter(struct mutex *mtx)
 	    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);
 
 	spc->spc_spinning++;
-	while (mtx_enter_try(mtx) == 0) {
+	while (_mtx_enter_try(mtx, 0) == 0) {
 		CPU_BUSY_CYCLE();
 
 #ifdef MP_LOCKDEBUG
@@ -278,7 +280,7 @@ mtx_enter(struct mutex *mtx)
 }
 
 int
-mtx_enter_try(struct mutex *mtx)
+_mtx_enter_try(struct mutex *mtx, int try)
 {
 	struct cpu_info *owner, *ci = curcpu();
 	int s;
@@ -292,7 +294,7 @@ mtx_enter_try(struct mutex *mtx)
 
 	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
 #ifdef DIAGNOSTIC
-	if (__predict_false(owner == ci))
+	if (!try && __predict_false(owner == ci))
 		panic("mtx %p: locking against myself", mtx);
 #endif
 	if (owner == NULL) {
@@ -310,6 +312,12 @@ mtx_enter_try(struct mutex *mtx)
 		splx(s);
 
 	return (0);
+}
+
+int
+mtx_enter_try(struct mutex *mtx)
+{
+	return _mtx_enter_try(mtx, 1);
 }
 #else
 void
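To make the relaxed semantics concrete: with the diff applied, a recursive mtx_enter_try(9) fails instead of panicking. A hypothetical illustration (not a proposed caller):

	int taken;

	mtx_enter(&windup_mtx);
	/*
	 * Recursive try-lock: before the diff this panics "locking
	 * against myself" on DIAGNOSTIC kernels; after it, the try
	 * simply fails and the caller can skip the work, which is
	 * exactly what a clock interrupt poking at a mutex held by
	 * the code it interrupted needs.
	 */
	taken = mtx_enter_try(&windup_mtx);
	KASSERT(taken == 0);
	mtx_leave(&windup_mtx);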