Diff below is mostly a refactoring to reduce the amount of spaghetti in
the scheduler logic:

- resetpriority() is renamed to setpriority() and is now the only place
  that sets `p_estcpu' and `p_usrpri'.  These two fields represent the
  priority of a running process.  They should stay in sync and will
  hopefully be protected by a different lock soon.

- resched_proc() has been moved out of setpriority().  It isn't
  necessary in schedclock() because we're dealing with curproc.  

- updatepri() has been renamed to decay_aftersleep() to match
  decay_cpu() and no longer updates any per-thread field.

- The two resched_proc() calls inside setrunnable() have been merged.
  Note that at this point `p_priority' is almost always smaller than
  `p_usrpri'.  The exceptions are threads with a negative nice(1)
  value, like sndiod(8), which generally have a running priority
  smaller than their sleeping priority; see the sketch below the list.
  Careful readers can find an existing bug related to that logic, but
  I'll address it in another diff ;)

ok?

Index: kern/kern_resource.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_resource.c,v
retrieving revision 1.65
diff -u -p -r1.65 kern_resource.c
--- kern/kern_resource.c        21 Jun 2019 09:39:48 -0000      1.65
+++ kern/kern_resource.c        21 Jun 2019 21:32:42 -0000
@@ -212,8 +212,10 @@ donice(struct proc *curp, struct process
                return (EACCES);
        chgpr->ps_nice = n;
        SCHED_LOCK(s);
-       TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link)
-               (void)resetpriority(p);
+       TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
+               setpriority(p, p->p_estcpu, n);
+               resched_proc(p, p->p_usrpri);
+       }
        SCHED_UNLOCK(s);
        return (0);
 }
Index: kern/sched_bsd.c
===================================================================
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
retrieving revision 1.53
diff -u -p -r1.53 sched_bsd.c
--- kern/sched_bsd.c    1 Jun 2019 14:11:17 -0000       1.53
+++ kern/sched_bsd.c    21 Jun 2019 21:51:12 -0000
@@ -62,7 +62,7 @@ struct __mp_lock sched_lock;
 #endif
 
 void    schedcpu(void *);
-void    updatepri(struct proc *);
+uint32_t decay_aftersleep(uint32_t, uint32_t);
 
 void
 scheduler_start(void)
@@ -252,8 +252,9 @@ schedcpu(void *arg)
 #endif
                p->p_cpticks = 0;
                newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
-               p->p_estcpu = newcpu;
-               resetpriority(p);
+               setpriority(p, newcpu, p->p_p->ps_nice);
+               resched_proc(p, p->p_usrpri);
+
                if (p->p_priority >= PUSER) {
                        if (p->p_stat == SRUN &&
                            (p->p_priority / SCHED_PPQ) !=
@@ -276,23 +277,23 @@ schedcpu(void *arg)
  * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
  * least six times the loadfactor will decay p_estcpu to zero.
  */
-void
-updatepri(struct proc *p)
+uint32_t
+decay_aftersleep(uint32_t estcpu, uint32_t slptime)
 {
-       unsigned int newcpu = p->p_estcpu;
        fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
+       uint32_t newcpu;
 
-       SCHED_ASSERT_LOCKED();
-
-       if (p->p_slptime > 5 * loadfac)
-               p->p_estcpu = 0;
+       if (slptime > 5 * loadfac)
+               newcpu = 0;
        else {
-               p->p_slptime--; /* the first time was done in schedcpu */
-               while (newcpu && --p->p_slptime)
-                       newcpu = (int) decay_cpu(loadfac, newcpu);
-               p->p_estcpu = newcpu;
+               newcpu = estcpu;
+               slptime--;      /* the first time was done in schedcpu */
+               while (newcpu && --slptime)
+                       newcpu = decay_cpu(loadfac, newcpu);
+
        }
-       resetpriority(p);
+
+       return (newcpu);
 }
 
 /*
@@ -441,7 +442,7 @@ mi_switch(void)
 #endif
 }
 
-static __inline void
+inline void
 resched_proc(struct proc *p, u_char pri)
 {
        struct cpu_info *ci;
@@ -496,10 +497,14 @@ setrunnable(struct proc *p)
        p->p_stat = SRUN;
        p->p_cpu = sched_choosecpu(p);
        setrunqueue(p);
-       if (p->p_slptime > 1)
-               updatepri(p);
+       if (p->p_slptime > 1) {
+               uint32_t newcpu;
+
+               newcpu = decay_aftersleep(p->p_estcpu, p->p_slptime);
+               setpriority(p, newcpu, p->p_p->ps_nice);
+       }
        p->p_slptime = 0;
-       resched_proc(p, p->p_priority);
+       resched_proc(p, MIN(p->p_priority, p->p_usrpri));
 }
 
 /*
@@ -508,17 +513,15 @@ setrunnable(struct proc *p)
  * than that of the current process.
  */
 void
-resetpriority(struct proc *p)
+setpriority(struct proc *p, uint32_t newcpu, uint8_t nice)
 {
-       unsigned int newpriority;
+       unsigned int newprio;
 
-       SCHED_ASSERT_LOCKED();
+       newprio = min((PUSER + newcpu + NICE_WEIGHT * (nice - NZERO)), MAXPRI);
 
-       newpriority = PUSER + p->p_estcpu +
-           NICE_WEIGHT * (p->p_p->ps_nice - NZERO);
-       newpriority = min(newpriority, MAXPRI);
-       p->p_usrpri = newpriority;
-       resched_proc(p, p->p_usrpri);
+       SCHED_ASSERT_LOCKED();
+       p->p_estcpu = newcpu;
+       p->p_usrpri = newprio;
 }
 
 /*
@@ -540,14 +543,15 @@ schedclock(struct proc *p)
 {
        struct cpu_info *ci = curcpu();
        struct schedstate_percpu *spc = &ci->ci_schedstate;
+       uint32_t newcpu;
        int s;
 
        if (p == spc->spc_idleproc || spc->spc_spinning)
                return;
 
        SCHED_LOCK(s);
-       p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
-       resetpriority(p);
+       newcpu = ESTCPULIM(p->p_estcpu + 1);
+       setpriority(p, newcpu, p->p_p->ps_nice);
        if (p->p_priority >= PUSER)
                p->p_priority = p->p_usrpri;
        SCHED_UNLOCK(s);
Index: sys/proc.h
===================================================================
RCS file: /cvs/src/sys/sys/proc.h,v
retrieving revision 1.270
diff -u -p -r1.270 proc.h
--- sys/proc.h  21 Jun 2019 09:39:48 -0000      1.270
+++ sys/proc.h  21 Jun 2019 21:25:11 -0000
@@ -559,7 +559,7 @@ void        leavepgrp(struct process *);
 void   killjobc(struct process *);
 void   preempt(void);
 void   procinit(void);
-void   resetpriority(struct proc *);
+void   setpriority(struct proc *, uint32_t, uint8_t);
 void   setrunnable(struct proc *);
 void   endtsleep(void *);
 void   unsleep(struct proc *);
Index: sys/sched.h
===================================================================
RCS file: /cvs/src/sys/sys/sched.h,v
retrieving revision 1.52
diff -u -p -r1.52 sched.h
--- sys/sched.h 16 May 2019 13:52:47 -0000      1.52
+++ sys/sched.h 21 Jun 2019 21:36:50 -0000
@@ -164,6 +164,7 @@ void cpu_idle_cycle(void);
 void cpu_idle_leave(void);
 void sched_peg_curproc(struct cpu_info *ci);
 void sched_barrier(struct cpu_info *ci);
+void resched_proc(struct proc *, u_char);
 
 int sysctl_hwsetperf(void *, size_t *, void *, size_t);
 int sysctl_hwperfpolicy(void *, size_t *, void *, size_t);
