
Commit a05e8c51 authored by Byungchul Park, committed by Ingo Molnar

sched/fair: Factor out the {at,de}taching of the per entity load {to,from} the runqueue



Currently we open-code the addition/subtraction of the per entity load
to/from the runqueue; factor this out into helper functions.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
[ Rewrote the changelog. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1440069720-27038-2-git-send-email-byungchul.park@lge.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 973759c8
kernel/sched/fair.c +38 −39
@@ -2664,8 +2664,8 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
-	int decayed;
 	struct sched_avg *sa = &cfs_rq->avg;
+	int decayed;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
@@ -2695,33 +2695,52 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 static inline void update_load_avg(struct sched_entity *se, int update_tg)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	int cpu = cpu_of(rq_of(cfs_rq));
 	u64 now = cfs_rq_clock_task(cfs_rq);
+	int cpu = cpu_of(rq_of(cfs_rq));
 
 	/*
 	 * Track task load average for carrying it to new CPU after migrated, and
 	 * track group sched_entity load average for task_h_load calc in migration
 	 */
 	__update_load_avg(now, cpu, &se->avg,
-		se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
+			  se->on_rq * scale_load_down(se->load.weight),
+			  cfs_rq->curr == se, NULL);
 
 	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
 		update_tg_load_avg(cfs_rq, 0);
 }
 
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	se->avg.last_update_time = cfs_rq->avg.last_update_time;
+	cfs_rq->avg.load_avg += se->avg.load_avg;
+	cfs_rq->avg.load_sum += se->avg.load_sum;
+	cfs_rq->avg.util_avg += se->avg.util_avg;
+	cfs_rq->avg.util_sum += se->avg.util_sum;
+}
+
+static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+			  &se->avg, se->on_rq * scale_load_down(se->load.weight),
+			  cfs_rq->curr == se, NULL);
+
+	cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
+	cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
+	cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
+	cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+}
+
 /* Add the load generated by se into cfs_rq's load average */
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct sched_avg *sa = &se->avg;
 	u64 now = cfs_rq_clock_task(cfs_rq);
-	int migrated = 0, decayed;
+	int migrated, decayed;
 
-	if (sa->last_update_time == 0) {
-		sa->last_update_time = now;
-		migrated = 1;
-	}
-	else {
+	migrated = !sa->last_update_time;
+	if (!migrated) {
 		__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
 			se->on_rq * scale_load_down(se->load.weight),
 			cfs_rq->curr == se, NULL);
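
For readers without the kernel tree at hand, here is a minimal userspace sketch of what the two new helpers do. The structs, the sub_positive() macro, and the values in main() are simplified stand-ins for the kernel's struct sched_avg, struct cfs_rq and max_t(), not the real definitions, and the decay step detach_entity_load_avg() performs via __update_load_avg() is deliberately omitted (see the note after the switched_from_fair() hunk below). The point is the symmetry: attach adds the entity's load/util contributions into the runqueue totals and aligns its timestamp, while detach subtracts them with a clamp at zero so drift between independently maintained averages can never wrap the totals.

#include <stdio.h>

/* Toy stand-ins for the kernel types (assumption, heavily simplified). */
struct sched_avg {
	unsigned long		load_avg;
	unsigned long		util_avg;
	long long		load_sum;
	long			util_sum;
	unsigned long long	last_update_time;
};

struct cfs_rq		{ struct sched_avg avg; };
struct sched_entity	{ struct sched_avg avg; };

/* Clamped subtraction, standing in for max_t(type, a - b, 0). */
#define sub_positive(a, b)	((a) > (b) ? (a) - (b) : 0)

static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->avg.last_update_time = cfs_rq->avg.last_update_time;
	cfs_rq->avg.load_avg += se->avg.load_avg;
	cfs_rq->avg.load_sum += se->avg.load_sum;
	cfs_rq->avg.util_avg += se->avg.util_avg;
	cfs_rq->avg.util_sum += se->avg.util_sum;
}

static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* The kernel first decays se->avg up to cfs_rq->avg.last_update_time
	 * via __update_load_avg(); that step is omitted in this toy. */
	cfs_rq->avg.load_avg = sub_positive(cfs_rq->avg.load_avg, se->avg.load_avg);
	cfs_rq->avg.load_sum = sub_positive(cfs_rq->avg.load_sum, se->avg.load_sum);
	cfs_rq->avg.util_avg = sub_positive(cfs_rq->avg.util_avg, se->avg.util_avg);
	cfs_rq->avg.util_sum = sub_positive(cfs_rq->avg.util_sum, se->avg.util_sum);
}

int main(void)
{
	struct cfs_rq rq = { .avg = { .load_avg = 100, .util_avg = 40,
				      .load_sum = 100000, .util_sum = 40000,
				      .last_update_time = 1234 } };
	struct sched_entity se = { .avg = { .load_avg = 120, .util_avg = 10,
					    .load_sum = 120000, .util_sum = 10000 } };

	/* The entity claims more load than the queue total (possible once the
	 * two averages decay independently): the clamp yields 0 instead of
	 * wrapping the unsigned field to a huge value. */
	detach_entity_load_avg(&rq, &se);
	printf("load_avg=%lu util_avg=%lu\n", rq.avg.load_avg, rq.avg.util_avg);

	attach_entity_load_avg(&rq, &se);
	printf("load_avg=%lu last_update_time=%llu\n",
	       rq.avg.load_avg, se.avg.last_update_time);
	return 0;
}

Built with cc toy_attach.c and run, the first line prints load_avg=0 util_avg=30: the clamp caught the oversized subtraction instead of letting the unsigned total wrap.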
@@ -2732,12 +2751,8 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->runnable_load_avg += sa->load_avg;
 	cfs_rq->runnable_load_sum += sa->load_sum;
 
-	if (migrated) {
-		cfs_rq->avg.load_avg += sa->load_avg;
-		cfs_rq->avg.load_sum += sa->load_sum;
-		cfs_rq->avg.util_avg += sa->util_avg;
-		cfs_rq->avg.util_sum += sa->util_sum;
-	}
+	if (migrated)
+		attach_entity_load_avg(cfs_rq, se);
 
 	if (decayed || migrated)
 		update_tg_load_avg(cfs_rq, 0);
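
The enqueue rewrite is behavior-preserving for the migrated flag itself: sa->last_update_time == 0 marks an entity that has just migrated in and has no average to decay. What moves is the timestamp write: the old path stamped sa->last_update_time = now inline, while the new path leaves it to attach_entity_load_avg(), which aligns the entity with the cfs_rq's own clock instead. A trivial userspace check of the flag equivalence (toy sample values, purely illustrative):

#include <assert.h>

int main(void)
{
	/* Toy samples (assumption): 0 means "just migrated in, nothing to decay". */
	unsigned long long samples[] = { 0, 1, 42, 1ULL << 40 };
	int i;

	for (i = 0; i < (int)(sizeof(samples) / sizeof(samples[0])); i++) {
		unsigned long long last_update_time = samples[i];

		int old_migrated = (last_update_time == 0);	/* old branch test */
		int new_migrated = !last_update_time;		/* new derivation */

		assert(old_migrated == new_migrated);
	}
	return 0;
}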
@@ -2820,6 +2835,11 @@ static inline void
 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
+static inline void
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+
 static inline int idle_balance(struct rq *rq)
 {
 	return 0;
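
These empty stubs are what let the later hunks drop their #ifdef CONFIG_SMP / #endif pairs: callers can now invoke the helpers unconditionally, and a UP build compiles the calls away. A self-contained sketch of the pattern follows; CONFIG_SMP is abused as a plain compiler define here and the types are toys, not the kernel's:

#include <stdio.h>

/* Toy types, not the kernel's (assumption). */
struct cfs_rq		{ unsigned long load_avg; };
struct sched_entity	{ unsigned long load_avg; };

#ifdef CONFIG_SMP
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->load_avg -= se->load_avg;	/* real work on SMP */
}
#else
/* Empty inline stub: the unconditional call below costs nothing on UP,
 * so the caller no longer needs an #ifdef of its own. */
static inline void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
#endif

static void switched_from_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	detach_entity_load_avg(cfs_rq, se);	/* no #ifdef at the call site */
}

int main(void)
{
	struct cfs_rq rq = { .load_avg = 100 };
	struct sched_entity se = { .load_avg = 60 };

	switched_from_fair(&rq, &se);
	printf("load_avg=%lu\n", rq.load_avg);
	return 0;
}

Built with cc -DCONFIG_SMP stub_demo.c this prints load_avg=40; built without the define it prints load_avg=100, the call having compiled to nothing.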
@@ -7909,25 +7929,10 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 		se->vruntime -= cfs_rq->min_vruntime;
 	}
 
-#ifdef CONFIG_SMP
 	/* Catch up with the cfs_rq and remove our load when we leave */
-	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg,
-		se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
-
-	cfs_rq->avg.load_avg =
-		max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-	cfs_rq->avg.load_sum =
-		max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-	cfs_rq->avg.util_avg =
-		max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-	cfs_rq->avg.util_sum =
-		max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
-#endif
+	detach_entity_load_avg(cfs_rq, se);
 }
 
 /*
  * We switched to the sched_fair class.
  */
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
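
The comment "Catch up with the cfs_rq and remove our load when we leave" is the subtle half of detach_entity_load_avg(): the entity's average was last updated at its own timestamp, so it is first decayed forward to cfs_rq->avg.last_update_time (the __update_load_avg() call) and only then subtracted, so both sides of the subtraction are expressed at the same instant. A toy model of that catch-up, using a closed-form pow() in place of PELT's fixed-point lookup tables; the 32-unit half-life matches PELT's 32 ms, the rest is illustrative assumption:

#include <stdio.h>
#include <math.h>

/* Toy decay: halve an average every 32 time units. The shape matches
 * PELT's 32 ms half-life, but pow() replaces the kernel's fixed-point
 * tables (illustrative assumption, not kernel math). */
static unsigned long decay(unsigned long avg, unsigned long long delta)
{
	return (unsigned long)((double)avg * pow(0.5, (double)delta / 32.0));
}

int main(void)
{
	unsigned long long queue_clock = 1000;	/* cfs_rq->avg.last_update_time */
	unsigned long long se_clock    = 936;	/* entity last updated 64 units ago */
	unsigned long se_load_avg      = 80;
	unsigned long rq_load_avg      = 100;	/* already expressed at queue_clock */

	/* Catch up: decay the entity to the queue's clock before subtracting,
	 * as detach_entity_load_avg() does via __update_load_avg(). */
	se_load_avg = decay(se_load_avg, queue_clock - se_clock);
	rq_load_avg = rq_load_avg > se_load_avg ? rq_load_avg - se_load_avg : 0;

	printf("entity contributes %lu after decay, queue keeps %lu\n",
	       se_load_avg, rq_load_avg);
	return 0;
}

Compile with cc catchup.c -lm; after two half-lives the entity contributes 20, leaving the queue at 80. The attach side in the final hunk (task_move_group_fair()) is the dual: the averages are carried over as-is and only the timestamp is re-aligned to the new queue's clock.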
@@ -8040,14 +8045,8 @@ static void task_move_group_fair(struct task_struct *p, int queued)
 		cfs_rq = cfs_rq_of(se);
 		se->vruntime += cfs_rq->min_vruntime;
 
-#ifdef CONFIG_SMP
 		/* Virtually synchronize task with its new cfs_rq */
-		p->se.avg.last_update_time = cfs_rq->avg.last_update_time;
-		cfs_rq->avg.load_avg += p->se.avg.load_avg;
-		cfs_rq->avg.load_sum += p->se.avg.load_sum;
-		cfs_rq->avg.util_avg += p->se.avg.util_avg;
-		cfs_rq->avg.util_sum += p->se.avg.util_sum;
-#endif
+		attach_entity_load_avg(cfs_rq, se);
 	}
 }
}