All the platforms have switched to clockintr. Let's start by isolating statclock() from hardclock(). stathz is now always non-zero: statclock() must be called separately. Update several of the stathz users to reflect that the value is always non-zero.
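For illustration only (not part of the diff): a tiny standalone userland sketch of the psratio arithmetic, comparing the old fallback logic with the new direct division. The hz/stathz/profhz values below are made up.

/* Standalone sketch; the rates are invented for illustration. */
#include <stdio.h>

int
main(void)
{
	int hz = 100, stathz = 128, profhz = 1024;
	int i, psratio_old, psratio_new;

	/* Old: fall back to hz when there is no separate stat clock. */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio_old = profhz / i;

	/* New: stathz is guaranteed non-zero, so divide directly. */
	psratio_new = profhz / stathz;

	printf("old psratio %d, new psratio %d\n", psratio_old, psratio_new);
	return 0;
}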
This is a first step toward making hardclock and statclock into
schedulable entities.

ok?

Index: kern_clock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clock.c,v
retrieving revision 1.105
diff -u -p -r1.105 kern_clock.c
--- kern_clock.c	14 Aug 2022 01:58:27 -0000	1.105
+++ kern_clock.c	21 Jan 2023 22:59:34 -0000
@@ -98,8 +98,6 @@ volatile unsigned long jiffies;	/* XXX
 void
 initclocks(void)
 {
-	int i;
-
 	ticks = INT_MAX - (15 * 60 * hz);
 	jiffies = ULONG_MAX - (10 * 60 * hz);
 
@@ -111,12 +109,9 @@ initclocks(void)
 	cpu_initclocks();
 
 	/*
-	 * Compute profhz/stathz, and fix profhz if needed.
+	 * Compute profhz/stathz.
 	 */
-	i = stathz ? stathz : hz;
-	if (profhz == 0)
-		profhz = i;
-	psratio = profhz / i;
+	psratio = profhz / stathz;
 
 	inittimecounter();
 }
@@ -158,12 +153,6 @@ hardclock(struct clockframe *frame)
 		}
 	}
 
-	/*
-	 * If no separate statistics clock is available, run it from here.
-	 */
-	if (stathz == 0)
-		statclock(frame);
-
 	if (--ci->ci_schedstate.spc_rrticks <= 0)
 		roundrobin(ci);
 
@@ -268,7 +257,7 @@ startprofclock(struct process *pr)
 
 	if ((pr->ps_flags & PS_PROFIL) == 0) {
 		atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
-		if (++profprocs == 1 && stathz != 0) {
+		if (++profprocs == 1) {
 			s = splstatclock();
 			psdiv = pscnt = psratio;
 			setstatclockrate(profhz);
@@ -287,7 +276,7 @@ stopprofclock(struct process *pr)
 
 	if (pr->ps_flags & PS_PROFIL) {
 		atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
-		if (--profprocs == 0 && stathz != 0) {
+		if (--profprocs == 0) {
 			s = splstatclock();
 			psdiv = pscnt = 1;
 			setstatclockrate(stathz);
@@ -415,6 +404,6 @@ sysctl_clockrate(char *where, size_t *si
 	clkinfo.tick = tick;
 	clkinfo.hz = hz;
 	clkinfo.profhz = profhz;
-	clkinfo.stathz = stathz ? stathz : hz;
+	clkinfo.stathz = stathz;
 	return (sysctl_rdstruct(where, sizep, newp, &clkinfo, sizeof(clkinfo)));
 }
Index: kern_time.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_time.c,v
retrieving revision 1.161
diff -u -p -r1.161 kern_time.c
--- kern_time.c	2 Jan 2023 23:09:48 -0000	1.161
+++ kern_time.c	21 Jan 2023 22:59:34 -0000
@@ -218,10 +218,9 @@ sys_clock_getres(struct proc *p, void *v
 	struct timespec ts;
 	struct proc *q;
 	u_int64_t scale;
-	int error = 0, realstathz;
+	int error = 0;
 
 	memset(&ts, 0, sizeof(ts));
-	realstathz = (stathz == 0) ? hz : stathz;
 	clock_id = SCARG(uap, clock_id);
 
 	switch (clock_id) {
@@ -238,7 +237,7 @@ sys_clock_getres(struct proc *p, void *v
 		break;
 	case CLOCK_PROCESS_CPUTIME_ID:
 	case CLOCK_THREAD_CPUTIME_ID:
-		ts.tv_nsec = 1000000000 / realstathz;
+		ts.tv_nsec = 1000000000 / stathz;
 		break;
 	default:
 		/* check for clock from pthread_getcpuclockid() */
@@ -248,7 +247,7 @@ sys_clock_getres(struct proc *p, void *v
 			if (q == NULL)
 				error = ESRCH;
 			else
-				ts.tv_nsec = 1000000000 / realstathz;
+				ts.tv_nsec = 1000000000 / stathz;
 			KERNEL_UNLOCK();
 		} else
 			error = EINVAL;
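As a quick userland sanity check (again, not part of the diff): with this change, clock_getres(2) on the CPU-time clocks reports 1/stathz directly, so the value printed by something like the snippet below depends only on the kernel's stathz.

#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec ts;

	/* Resolution of the per-process CPU-time clock. */
	if (clock_getres(CLOCK_PROCESS_CPUTIME_ID, &ts) == -1) {
		perror("clock_getres");
		return 1;
	}
	printf("CPU-time clock resolution: %lld nsec\n",
	    (long long)ts.tv_nsec);
	return 0;
}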