From: Josef Bacik <[email protected]>

reweight_task only accounts for the load average change in the cfs_rq,
but doesn't account for the runnable_average change in the cfs_rq. We
need to do everything reweight_entity does, and then we just set our
inv_weight appropriately.
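Concretely, the rescale that reweight_entity() performs covers both
averages; a rough sketch of the work being delegated (reusing the helper
names visible in the hunk below, not the exact body of reweight_entity):

	/*
	 * Sketch only: detach the old contribution, install the new
	 * weight, rescale both averages from their PELT sums, then
	 * re-attach the new contribution to the cfs_rq.
	 */
	u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;

	__sub_load_avg(cfs_rq, se);
	se->load.weight = weight;

	se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
	se->avg.runnable_load_avg =
		div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);

	__add_load_avg(cfs_rq, se);

With that in place, reweight_task() only needs to hand in the new weight
and then set load->inv_weight from sched_prio_to_wmult[].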
Signed-off-by: Josef Bacik <[email protected]>
---
 kernel/sched/fair.c | 31 +++++++++++--------------------
 1 file changed, 11 insertions(+), 20 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0cff1b6..c336534 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2809,26 +2809,6 @@ __sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
 }
 
-void reweight_task(struct task_struct *p, int prio)
-{
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	struct load_weight *load = &p->se.load;
-
-	u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
-
-	__sub_load_avg(cfs_rq, se);
-
-	load->weight = scale_load(sched_prio_to_weight[prio]);
-	load->inv_weight = sched_prio_to_wmult[prio];
-
-	se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
-	se->avg.runnable_load_avg =
-		div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
-
-	__add_load_avg(cfs_rq, se);
-}
-
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight, unsigned long runnable)
 {
@@ -2858,6 +2838,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	}
 }
 
+void reweight_task(struct task_struct *p, int prio)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct load_weight *load = &se->load;
+	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+
+	reweight_entity(cfs_rq, se, weight, weight);
+	load->inv_weight = sched_prio_to_wmult[prio];
+}
+
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
 /*
-- 
2.7.4

