
Commit bad0cb41 authored by Joonwoo Park

sched: clean up fixup_hmp_sched_stats()



The commit 392edf49 ("sched: avoid stale cumulative_runnable_avg
HMP statistics") introduced the callback function fixup_hmp_sched_stats()
so that update_history() can avoid a decrement/increment pair of HMP
stats updates.  However, the commit also made the fixup function perform
an obscure update of p->ravg.demand, which isn't the cleanest approach.

Revise the function fixup_hmp_sched_stats() so the caller can update
p->ravg.demand directly.

Change-Id: Id54667d306495d2109c26362813f80f08a1385ad
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 69ac846d
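For orientation, here is a minimal, self-contained model of the convention this commit moves to. It is an illustrative sketch, not the kernel code: the struct names, the update_demand() helper, and the values are made up, and userspace assert()/printf() stand in for BUG_ON() and tracing. The point is that the fixup helper now consumes a signed load delta and never writes the task's demand field; the caller updates demand directly in one place.

/* Illustrative userspace model of the new accounting convention. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct hmp_stats { int64_t cumulative_runnable_avg; };
struct task      { uint32_t demand; };  /* stands in for p->ravg.demand */

/* The fixup helper applies a signed delta; it no longer touches demand. */
static void fixup_cumulative_runnable_avg(struct hmp_stats *stats,
					  int64_t task_load_delta)
{
	stats->cumulative_runnable_avg += task_load_delta;
	assert(stats->cumulative_runnable_avg >= 0);  /* mirrors the BUG_ON() */
}

/* Caller, cf. update_history(): compute the delta, fix up the stats,
 * then update the demand directly. */
static void update_demand(struct hmp_stats *stats, struct task *p,
			  uint32_t new_demand)
{
	int64_t delta = (int64_t)new_demand - p->demand;

	fixup_cumulative_runnable_avg(stats, delta);
	p->demand = new_demand;
}

int main(void)
{
	struct hmp_stats stats = { .cumulative_runnable_avg = 100 };
	struct task p = { .demand = 100 };

	update_demand(&stats, &p, 140);  /* delta of +40 */
	printf("cra=%lld demand=%u\n",
	       (long long)stats.cumulative_runnable_avg, p.demand);
	return 0;
}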
+2 −4
@@ -1880,7 +1880,7 @@ static void update_history(struct rq *rq, struct task_struct *p,
 	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
 						!p->dl.dl_throttled))
 		p->sched_class->fixup_hmp_sched_stats(rq, p, demand);
-	else
-		p->ravg.demand = demand;
+
+	p->ravg.demand = demand;
 
 done:
@@ -2264,8 +2264,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
 #endif
 		reset_cpu_hmp_stats(cpu, 1);
-
-		fixup_nr_big_task(cpu, 0);
 	}
 
 	if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
+11 −0
@@ -739,12 +739,23 @@ dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
 	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
 
+#ifdef CONFIG_SCHED_QHMP
 static void
 fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
 			 u32 new_task_load)
 {
 	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
 }
+#else
+static void
+fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
+			 u32 new_task_load)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
+}
+#endif
 
 #else	/* CONFIG_SCHED_HMP */

+43 −37
@@ -2444,14 +2444,6 @@ unsigned int __read_mostly sched_init_task_load_pelt;
 unsigned int __read_mostly sched_init_task_load_windows;
 unsigned int __read_mostly sysctl_sched_init_task_load_pct = 100;
 
-static inline unsigned int task_load(struct task_struct *p)
-{
-	if (sched_use_pelt)
-		return p->se.avg.runnable_avg_sum_scaled;
-
-	return p->ravg.demand;
-}
-
 unsigned int max_task_load(void)
 {
 	if (sched_use_pelt)
@@ -2638,17 +2630,19 @@ static inline int upmigrate_discouraged(struct task_struct *p)
 #endif
 
 /* Is a task "big" on its current cpu */
-static inline int is_big_task(struct task_struct *p)
+static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
 {
-	u64 load = task_load(p);
 	int nice = task_nice(p);
 
 	if (nice > sched_upmigrate_min_nice || upmigrate_discouraged(p))
 		return 0;
 
-	load = scale_load_to_cpu(load, task_cpu(p));
+	return scaled_load > sched_upmigrate;
+}
 
-	return load > sched_upmigrate;
+static inline int is_big_task(struct task_struct *p)
+{
+	return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p)));
 }
 
 static inline u64 cpu_load(int cpu)
@@ -3181,6 +3175,29 @@ void reset_cpu_hmp_stats(int cpu, int reset_cra)
 	reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra);
 }
 
+static void
+fixup_nr_big_tasks(struct hmp_sched_stats *stats, struct task_struct *p,
+		   s64 delta)
+{
+	u64 new_task_load;
+	u64 old_task_load;
+
+	if (!sched_enable_hmp || sched_disable_window_stats)
+		return;
+
+	old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
+	new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p));
+
+	if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load))
+		stats->nr_big_tasks--;
+	else if (!__is_big_task(p, old_task_load) &&
+		 __is_big_task(p, new_task_load))
+		stats->nr_big_tasks++;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+
 #ifdef CONFIG_CFS_BANDWIDTH
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
@@ -3257,29 +3274,23 @@ static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
 				       u32 new_task_load)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
-	u32 old_task_load = p->ravg.demand;
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
-		dec_nr_big_task(&cfs_rq->hmp_stats, p);
 		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
-					      new_task_load);
-		inc_nr_big_task(&cfs_rq->hmp_stats, p);
+					      task_load_delta);
+		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
 		if (cfs_rq_throttled(cfs_rq))
 			break;
-		/*
-		 * fixup_cumulative_runnable_avg() sets p->ravg.demand to
-		 * new_task_load.
-		 */
-		p->ravg.demand = old_task_load;
 	}
 
 	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
 	if (!se) {
-		dec_nr_big_task(&rq->hmp_stats, p);
-		fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
-		inc_nr_big_task(&rq->hmp_stats, p);
+		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+					      task_load_delta);
+		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
 	}
 }

@@ -3300,14 +3311,14 @@ dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
 	dec_nr_big_task(&rq->hmp_stats, p);
 	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
 
 static void
 fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
 			   u32 new_task_load)
 {
-	dec_nr_big_task(&rq->hmp_stats, p);
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
-	inc_nr_big_task(&rq->hmp_stats, p);
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
+	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
 }
 
 static inline int task_will_be_throttled(struct task_struct *p)
@@ -3326,18 +3337,13 @@ _inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
 /*
  * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters.
  */
-void fixup_nr_big_task(int cpu, int reset_stats)
+static void update_nr_big_tasks(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *p;
 
-	/* fixup_nr_big_task() is called from two functions. In one of
-	 * them stats are already reset, don't waste time resetting them again
-	 */
-	if (reset_stats) {
-		/* Do not reset cumulative_runnable_avg */
-		reset_cpu_hmp_stats(cpu, 0);
-	}
+	/* Do not reset cumulative_runnable_avg */
+	reset_cpu_hmp_stats(cpu, 0);
 
 	list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
 		_inc_hmp_sched_stats_fair(rq, p, 0);
@@ -3363,7 +3369,7 @@ void post_big_task_count_change(const struct cpumask *cpus)
 
 	/* Assumes local_irq_disable() keeps online cpumap stable */
 	for_each_cpu(i, cpus)
-		fixup_nr_big_task(i, 1);
+		update_nr_big_tasks(i);
 
 	for_each_cpu(i, cpus)
 		raw_spin_unlock(&cpu_rq(i)->lock);
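Aside on the changes above: the new fixup_nr_big_tasks() adjusts nr_big_tasks only when a load change moves the task across the sched_upmigrate threshold. The following is a standalone model of that transition logic, for illustration only: the threshold value and load numbers are made up (in the kernel the threshold is a tunable), and plain integers replace the scheduler structures.

/* Illustrative userspace model of the nr_big_tasks transition logic. */
#include <assert.h>
#include <stdint.h>

static const uint64_t sched_upmigrate = 80;  /* made-up threshold */

static int is_big(uint64_t scaled_load)      /* cf. __is_big_task() */
{
	return scaled_load > sched_upmigrate;
}

/* Touch the counter only when the classification actually flips. */
static void fixup_nr_big_tasks(int *nr_big_tasks,
			       uint64_t old_load, uint64_t new_load)
{
	if (is_big(old_load) && !is_big(new_load))
		(*nr_big_tasks)--;
	else if (!is_big(old_load) && is_big(new_load))
		(*nr_big_tasks)++;

	assert(*nr_big_tasks >= 0);  /* mirrors the BUG_ON() */
}

int main(void)
{
	int nr_big = 1;

	fixup_nr_big_tasks(&nr_big, 90, 70);  /* big -> small: decrement */
	assert(nr_big == 0);
	fixup_nr_big_tasks(&nr_big, 70, 75);  /* still small: unchanged */
	assert(nr_big == 0);
	return 0;
}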
+11 −0
@@ -1178,12 +1178,23 @@ dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
 	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
 
+#ifdef CONFIG_SCHED_QHMP
 static void
 fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
 			 u32 new_task_load)
 {
 	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
 }
+#else
+static void
+fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
+			 u32 new_task_load)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
+}
+#endif
 
 #else	/* CONFIG_SCHED_HMP */

+9 −13
@@ -922,7 +922,6 @@ extern unsigned int sched_init_task_load_windows;
 extern unsigned int sched_heavy_task;
 extern unsigned int up_down_migrate_scale_factor;
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
-extern void fixup_nr_big_task(int cpu, int reset_stats);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
 				 u64 delta, u64 wallclock);
@@ -954,6 +953,13 @@ static inline int max_poss_capacity(struct rq *rq)
 	return rq->max_possible_capacity;
 }
 
+static inline unsigned int task_load(struct task_struct *p)
+{
+	if (sched_use_pelt)
+		return p->se.avg.runnable_avg_sum_scaled;
+
+	return p->ravg.demand;
+}
 
 static inline void
 inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
@@ -989,18 +995,12 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,

 static inline void
 fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-			      struct task_struct *p, u32 new_task_load)
+			      struct task_struct *p, s64 task_load_delta)
 {
-	u32 task_load;
-
-	task_load = sched_use_pelt ?
-		    p->se.avg.runnable_avg_sum_scaled : p->ravg.demand;
-	p->ravg.demand = new_task_load;
-
 	if (!sched_enable_hmp || sched_disable_window_stats)
 		return;
 
-	stats->cumulative_runnable_avg += ((s64)new_task_load - task_load);
+	stats->cumulative_runnable_avg += task_load_delta;
 	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
 }

@@ -1040,10 +1040,6 @@ static inline int sched_cpu_high_irqload(int cpu)

 struct hmp_sched_stats;
 
-static inline void fixup_nr_big_task(int cpu, int reset_stats)
-{
-}
-
 static inline u64 scale_load_to_cpu(u64 load, int cpu)
 {
 	return load;