On Sat, Jan 21, 2023 at 05:02:49PM -0600, Scott Cheloha wrote:
> All the platforms have switched to clockintr.
> 
> Let's start by isolating statclock() from hardclock(). stathz is now
> always non-zero: statclock() must be called separately. Update
> several of the stathz users to reflect that the value is always
> non-zero.
> 
> This is a first step toward making hardclock and statclock into
> schedulable entities.
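To make the invariant concrete before the diff: every "stathz might be
zero" fallback is now dead code. Here is a minimal before/after sketch
of the idiom this patch deletes (illustrative only, not copied verbatim
from any single file; stathz, hz, profhz and ts are the usual kernel
globals/locals):

	/* Before: callers guessed a frequency in case stathz was 0. */
	int freq = stathz ? stathz : hz;	/* schedcpu() fell back to profhz */
	ts.tv_nsec = 1000000000 / freq;

	/* After: stathz is guaranteed non-zero, divide by it directly. */
	ts.tv_nsec = 1000000000 / stathz;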
Here are a few more.

The big comment in kern_clock.c needs to be updated, as does the
hardclock.9 manpage, but I think those can wait until the dust has
settled. More stuff is going to move around in this area.

ok?

Index: kern_clock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clock.c,v
retrieving revision 1.105
diff -u -p -r1.105 kern_clock.c
--- kern_clock.c	14 Aug 2022 01:58:27 -0000	1.105
+++ kern_clock.c	24 Jan 2023 00:03:06 -0000
@@ -98,8 +98,6 @@ volatile unsigned long jiffies;	/* XXX
 void
 initclocks(void)
 {
-	int i;
-
 	ticks = INT_MAX - (15 * 60 * hz);
 	jiffies = ULONG_MAX - (10 * 60 * hz);
 
@@ -111,12 +109,9 @@ initclocks(void)
 	cpu_initclocks();
 
 	/*
-	 * Compute profhz/stathz, and fix profhz if needed.
+	 * Compute profhz/stathz.
 	 */
-	i = stathz ? stathz : hz;
-	if (profhz == 0)
-		profhz = i;
-	psratio = profhz / i;
+	psratio = profhz / stathz;
 
 	inittimecounter();
 }
@@ -158,12 +153,6 @@ hardclock(struct clockframe *frame)
 		}
 	}
 
-	/*
-	 * If no separate statistics clock is available, run it from here.
-	 */
-	if (stathz == 0)
-		statclock(frame);
-
 	if (--ci->ci_schedstate.spc_rrticks <= 0)
 		roundrobin(ci);
 
@@ -268,7 +257,7 @@ startprofclock(struct process *pr)
 
 	if ((pr->ps_flags & PS_PROFIL) == 0) {
 		atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
-		if (++profprocs == 1 && stathz != 0) {
+		if (++profprocs == 1) {
 			s = splstatclock();
 			psdiv = pscnt = psratio;
 			setstatclockrate(profhz);
@@ -287,7 +276,7 @@ stopprofclock(struct process *pr)
 
 	if (pr->ps_flags & PS_PROFIL) {
 		atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
-		if (--profprocs == 0 && stathz != 0) {
+		if (--profprocs == 0) {
 			s = splstatclock();
 			psdiv = pscnt = 1;
 			setstatclockrate(stathz);
@@ -415,6 +404,6 @@ sysctl_clockrate(char *where, size_t *si
 	clkinfo.tick = tick;
 	clkinfo.hz = hz;
 	clkinfo.profhz = profhz;
-	clkinfo.stathz = stathz ? stathz : hz;
+	clkinfo.stathz = stathz;
 	return (sysctl_rdstruct(where, sizep, newp, &clkinfo, sizeof(clkinfo)));
 }
Index: kern_resource.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_resource.c,v
retrieving revision 1.76
diff -u -p -r1.76 kern_resource.c
--- kern_resource.c	17 Nov 2022 18:53:13 -0000	1.76
+++ kern_resource.c	24 Jan 2023 00:03:06 -0000
@@ -410,7 +410,6 @@ calctsru(struct tusage *tup, struct time
     struct timespec *ip)
 {
 	u_quad_t st, ut, it;
-	int freq;
 
 	st = tup->tu_sticks;
 	ut = tup->tu_uticks;
@@ -424,16 +423,14 @@ calctsru(struct tusage *tup, struct time
 		return;
 	}
 
-	freq = stathz ? stathz : hz;
-
-	st = st * 1000000000 / freq;
+	st = st * 1000000000 / stathz;
 	sp->tv_sec = st / 1000000000;
 	sp->tv_nsec = st % 1000000000;
-	ut = ut * 1000000000 / freq;
+	ut = ut * 1000000000 / stathz;
 	up->tv_sec = ut / 1000000000;
 	up->tv_nsec = ut % 1000000000;
 	if (ip != NULL) {
-		it = it * 1000000000 / freq;
+		it = it * 1000000000 / stathz;
 		ip->tv_sec = it / 1000000000;
 		ip->tv_nsec = it % 1000000000;
 	}
Index: sched_bsd.c
===================================================================
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
retrieving revision 1.73
diff -u -p -r1.73 sched_bsd.c
--- sched_bsd.c	5 Dec 2022 23:18:37 -0000	1.73
+++ sched_bsd.c	24 Jan 2023 00:03:06 -0000
@@ -189,16 +189,6 @@ schedcpu(void *arg)
 	struct proc *p;
 	int s;
 	unsigned int newcpu;
-	int phz;
-
-	/*
-	 * If we have a statistics clock, use that to calculate CPU
-	 * time, otherwise revert to using the profiling clock (which,
-	 * in turn, defaults to hz if there is no separate profiling
-	 * clock available)
-	 */
-	phz = stathz ? stathz : profhz;
-	KASSERT(phz);
 
 	LIST_FOREACH(p, &allproc, p_list) {
 		/*
@@ -225,13 +215,13 @@ schedcpu(void *arg)
 		 * p_pctcpu is only for diagnostic tools such as ps.
 		 */
 #if	(FSHIFT >= CCPU_SHIFT)
-		p->p_pctcpu += (phz == 100)?
+		p->p_pctcpu += (stathz == 100)?
 			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
 		        100 * (((fixpt_t) p->p_cpticks)
-				<< (FSHIFT - CCPU_SHIFT)) / phz;
+				<< (FSHIFT - CCPU_SHIFT)) / stathz;
 #else
 		p->p_pctcpu += ((FSCALE - ccpu) *
-			(p->p_cpticks * FSCALE / phz)) >> FSHIFT;
+			(p->p_cpticks * FSCALE / stathz)) >> FSHIFT;
 #endif
 		p->p_cpticks = 0;
 		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
Index: kern_time.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_time.c,v
retrieving revision 1.161
diff -u -p -r1.161 kern_time.c
--- kern_time.c	2 Jan 2023 23:09:48 -0000	1.161
+++ kern_time.c	24 Jan 2023 00:03:06 -0000
@@ -218,10 +218,9 @@ sys_clock_getres(struct proc *p, void *v
 	struct timespec ts;
 	struct proc *q;
 	u_int64_t scale;
-	int error = 0, realstathz;
+	int error = 0;
 
 	memset(&ts, 0, sizeof(ts));
-	realstathz = (stathz == 0) ? hz : stathz;
 	clock_id = SCARG(uap, clock_id);
 
 	switch (clock_id) {
@@ -238,7 +237,7 @@ sys_clock_getres(struct proc *p, void *v
 		break;
 	case CLOCK_PROCESS_CPUTIME_ID:
 	case CLOCK_THREAD_CPUTIME_ID:
-		ts.tv_nsec = 1000000000 / realstathz;
+		ts.tv_nsec = 1000000000 / stathz;
 		break;
 	default:
 		/* check for clock from pthread_getcpuclockid() */
@@ -248,7 +247,7 @@ sys_clock_getres(struct proc *p, void *v
 		if (q == NULL)
 			error = ESRCH;
 		else
-			ts.tv_nsec = 1000000000 / realstathz;
+			ts.tv_nsec = 1000000000 / stathz;
 		KERNEL_UNLOCK();
 	} else
 		error = EINVAL;
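
If you want to sanity-check the calctsru() arithmetic above from
userland, here is a small standalone sketch of the same conversion.
The rate (128) and the tick count are made-up example numbers; the
kernel gets the real stathz from the platform clock code:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Assumed example rate, for illustration only. */
#define STATHZ	128

int
main(void)
{
	uint64_t sticks = 345;		/* hypothetical tu_sticks sample */
	uint64_t st;
	struct timespec sp;

	/*
	 * Same scaling calctsru() now does unconditionally:
	 * ticks -> nanoseconds, then split into sec/nsec.
	 */
	st = sticks * 1000000000ULL / STATHZ;
	sp.tv_sec = st / 1000000000ULL;
	sp.tv_nsec = st % 1000000000ULL;

	printf("%llu ticks at %d Hz = %lld.%09ld s of system time\n",
	    (unsigned long long)sticks, STATHZ,
	    (long long)sp.tv_sec, sp.tv_nsec);
	return 0;
}

The integer math is unchanged by the patch; the only difference is that
the divisor is read straight from stathz instead of a locally computed
fallback.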