From: Byungchul Park <[email protected]>

In the case that rq->lock may not be held, care must be taken to get a
cfs_rq's last_update_time instead of just reading the variable: on
32-bit kernels a 64-bit value cannot be read atomically, so a plain
read may observe a torn value. Since this can be needed at several
places in the code in the future, this patch factors the safe read out
into a helper function.
Signed-off-by: Byungchul Park <[email protected]>
---
 kernel/sched/fair.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0f76903..08589a0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2809,15 +2809,10 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
-/*
- * Task first catches up with cfs_rq, and then subtract
- * itself from the cfs_rq (task must be off the queue now).
- */
-void remove_entity_load_avg(struct sched_entity *se)
+/* This function is useful for the case that rq->lock may not be held */
+static inline u64 get_last_update_time(struct cfs_rq *cfs_rq)
 {
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 last_update_time;
-
 #ifndef CONFIG_64BIT
 	u64 last_update_time_copy;
 
@@ -2829,6 +2824,17 @@ void remove_entity_load_avg(struct sched_entity *se)
 #else
 	last_update_time = cfs_rq->avg.last_update_time;
 #endif
+	return last_update_time;
+}
+
+/*
+ * Task first catches up with cfs_rq, and then subtract
+ * itself from the cfs_rq (task must be off the queue now).
+ */
+void remove_entity_load_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 last_update_time = get_last_update_time(cfs_rq);
 	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)),
 			  &se->avg, 0, 0, NULL);
 	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
-- 
1.7.9.5
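For context, the retry loop the new get_last_update_time() wraps is a
seqcount-style paired read: the writer side stores avg.last_update_time
first and then, after a write barrier, stores the separate
load_last_update_time_copy, so a reader that sees the two disagree
knows it raced with an update and retries. Below is a minimal
user-space sketch of the same ordering, assuming GCC/Clang's
__atomic_thread_fence() as a stand-in for the kernel's
smp_wmb()/smp_rmb(); every demo_-prefixed name here is hypothetical and
only illustrates the pattern, not the scheduler's actual API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the two fields the kernel keeps on cfs_rq. */
struct demo_avg {
	volatile uint64_t last_update_time;	/* written first by the writer */
	volatile uint64_t last_update_time_copy; /* written second, after a barrier */
};

/*
 * Writer side: publish the value, then the copy, with a release fence
 * in between (the kernel uses smp_wmb() at this point).
 */
static void demo_update(struct demo_avg *a, uint64_t now)
{
	a->last_update_time = now;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	a->last_update_time_copy = now;
}

/*
 * Reader side: the same retry loop the patch factors out. If a 32-bit
 * CPU observes a torn 64-bit value, value and copy disagree and the
 * loop retries (the kernel uses smp_rmb() at this point).
 */
static uint64_t demo_last_update_time(struct demo_avg *a)
{
	uint64_t copy, val;

	do {
		copy = a->last_update_time_copy;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		val = a->last_update_time;
	} while (val != copy);

	return val;
}

int main(void)
{
	struct demo_avg a = { 0, 0 };

	demo_update(&a, 42);
	printf("last_update_time = %llu\n",
	       (unsigned long long)demo_last_update_time(&a));
	return 0;
}

On 64-bit kernels none of this machinery is needed: as the #else branch
in the patch shows, the helper there is just a plain load, since an
aligned 64-bit read is already atomic on those architectures.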

