Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 392edf49 authored by Joonwoo Park's avatar Joonwoo Park
Browse files

sched: avoid stale cumulative_runnable_avg HMP statistics



When a new window starts for a task that is on a rq, the scheduler
momentarily decreases the rq's cumulative_runnable_avg, re-accounts the
task's demand, and then increases the rq's cumulative_runnable_avg by the
newly accounted demand.  There is therefore a short period during which
the rq's cumulative_runnable_avg is lower than it should be.
Meanwhile, another CPU searching for the best CPU on which to place a
task may observe this momentarily stale cumulative_runnable_avg and make
a suboptimal decision.

Fix this issue by adding or subtracting the delta between the task's old
and new demand, instead of decrementing and then re-incrementing by the
task's entire load.

Change-Id: I3c9329961e6f96e269fa13359e7d1c39c4973ff2
Signed-off-by: default avatarJoonwoo Park <joonwoop@codeaurora.org>
parent 9fd50032
Loading
Loading
Loading
Loading
+14 −16
Original line number Diff line number Diff line
@@ -1695,30 +1695,28 @@ static void update_history(struct rq *rq, struct task_struct *p,

	p->ravg.sum = 0;

	if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
		demand = runtime;
	} else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
		demand = max;
	} else {
		avg = div64_u64(sum, sched_ravg_hist_size);
		if (sched_window_stats_policy == WINDOW_STATS_AVG)
			demand = avg;
		else
			demand = max(avg, runtime);
	}

	/*
	 * A throttled deadline sched class task gets dequeued without
	 * changing p->on_rq. Since the dequeue decrements hmp stats
	 * avoid decrementing it here again.
	 */
	if (p->on_rq && (!task_has_dl_policy(p) || !p->dl.dl_throttled))
		p->sched_class->dec_hmp_sched_stats(rq, p);

	avg = div64_u64(sum, sched_ravg_hist_size);

	if (sched_window_stats_policy == WINDOW_STATS_RECENT)
		demand = runtime;
	else if (sched_window_stats_policy == WINDOW_STATS_MAX)
		demand = max;
	else if (sched_window_stats_policy == WINDOW_STATS_AVG)
		demand = avg;
		p->sched_class->fixup_hmp_sched_stats(rq, p, demand);
	else
		demand = max(avg, runtime);

		p->ravg.demand = demand;

	if (p->on_rq && (!task_has_dl_policy(p) || !p->dl.dl_throttled))
		p->sched_class->inc_hmp_sched_stats(rq, p);

done:
	trace_sched_update_history(rq, p, runtime, samples, event);
}
+8 −0
Original line number Diff line number Diff line
@@ -739,6 +739,13 @@ dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}

/*
 * Re-account @p's demand in @rq's HMP stats in a single step.
 *
 * Delegates to fixup_cumulative_runnable_avg(), which applies the delta
 * between the old and new task load, so cumulative_runnable_avg never
 * transiently drops the way a dec/inc pair would (other CPUs may read
 * it concurrently while placing tasks — see commit description).
 */
static void
fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
			 u32 new_task_load)
{
	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
}

#else	/* CONFIG_SCHED_HMP */

static inline void
@@ -1715,5 +1722,6 @@ const struct sched_class dl_sched_class = {
#ifdef CONFIG_SCHED_HMP
	.inc_hmp_sched_stats	= inc_hmp_sched_stats_dl,
	.dec_hmp_sched_stats	= dec_hmp_sched_stats_dl,
	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_dl,
#endif
};
+41 −0
Original line number Diff line number Diff line
@@ -3637,6 +3637,37 @@ static void dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
	_dec_hmp_sched_stats_fair(rq, p, 1);
}

/*
 * Re-account @p's demand as @new_task_load throughout its sched_entity
 * hierarchy in one step, so that no level's cumulative_runnable_avg is
 * ever transiently decremented (CONFIG_CFS_BANDWIDTH variant).
 *
 * Walks each cfs_rq that @p's entities sit on, and stops early at a
 * throttled cfs_rq; rq->hmp_stats is only adjusted when the walk
 * completed without finding one (se == NULL after the loop).
 */
static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
				       u32 new_task_load)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	/* Remember the pre-update demand so it can be restored per level. */
	u32 old_task_load = p->ravg.demand;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);

		/*
		 * Drop the big/small classification under the old demand,
		 * apply the demand delta, then re-classify — presumably
		 * nr_big_small_task buckets are keyed off p->ravg.demand
		 * (TODO confirm against the inc/dec helpers).
		 */
		dec_nr_big_small_task(&cfs_rq->hmp_stats, p);
		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
					      new_task_load);
		inc_nr_big_small_task(&cfs_rq->hmp_stats, p);
		if (cfs_rq_throttled(cfs_rq))
			break;
		/*
		 * fixup_cumulative_runnable_avg() sets p->ravg.demand to
		 * new_task_load.
		 */
		p->ravg.demand = old_task_load;
	}

	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
	if (!se) {
		dec_nr_big_small_task(&rq->hmp_stats, p);
		fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
		inc_nr_big_small_task(&rq->hmp_stats, p);
	}
}

static int task_will_be_throttled(struct task_struct *p);

#else	/* CONFIG_CFS_BANDWIDTH */
@@ -3655,6 +3686,15 @@ dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}

/*
 * Re-account @p's demand in @rq's HMP stats in a single step
 * (!CONFIG_CFS_BANDWIDTH variant — no group hierarchy/throttling to walk).
 *
 * Ordering matters: drop the big/small classification while the old
 * demand is still in effect, apply the old->new demand delta, then
 * re-classify under the new demand (fixup_cumulative_runnable_avg()
 * presumably updates p->ravg.demand — see the bandwidth-enabled variant).
 */
static void
fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
			   u32 new_task_load)
{
	dec_nr_big_small_task(&rq->hmp_stats, p);
	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
	inc_nr_big_small_task(&rq->hmp_stats, p);
}

/*
 * Without CFS bandwidth control there is no throttling, so a task can
 * never become throttled.
 */
static inline int task_will_be_throttled(struct task_struct *p)
{
	return 0;
}
#ifdef CONFIG_SCHED_HMP
	.inc_hmp_sched_stats	= inc_hmp_sched_stats_fair,
	.dec_hmp_sched_stats	= dec_hmp_sched_stats_fair,
	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_fair,
#endif
};

+7 −0
Original line number Diff line number Diff line
@@ -91,6 +91,12 @@ dec_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p)
{
}

/*
 * No-op: like the inc/dec stubs above, the idle class keeps no per-task
 * HMP demand statistics, so there is nothing to fix up.
 */
static void
fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
			   u32 new_task_load)
{
}

#endif

/*
@@ -123,5 +129,6 @@ const struct sched_class idle_sched_class = {
#ifdef CONFIG_SCHED_HMP
	.inc_hmp_sched_stats	= inc_hmp_sched_stats_idle,
	.dec_hmp_sched_stats	= dec_hmp_sched_stats_idle,
	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_idle,
#endif
};
+8 −0
Original line number Diff line number Diff line
@@ -1178,6 +1178,13 @@ dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}

/*
 * Re-account @p's demand in @rq's HMP stats in a single step.
 *
 * Delegates to fixup_cumulative_runnable_avg(), which applies the delta
 * between the old and new task load, avoiding the transient dip in
 * cumulative_runnable_avg that a separate dec/inc pair would expose to
 * other CPUs selecting a placement target.
 */
static void
fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
			 u32 new_task_load)
{
	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
}

#else	/* CONFIG_SCHED_HMP */

static inline void
@@ -2312,6 +2319,7 @@ const struct sched_class rt_sched_class = {
#ifdef CONFIG_SCHED_HMP
	.inc_hmp_sched_stats	= inc_hmp_sched_stats_rt,
	.dec_hmp_sched_stats	= dec_hmp_sched_stats_rt,
	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_rt,
#endif
};

Loading