Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 289dd294 authored by Pavankumar Kondeti
Browse files

sched: walt: Optimize task_util()



task_util() for WALT is currently defined as

p->ravg.demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT);

This math is required to scale the task demand to 1024 scale.
task_util() is used many times in task placement. So the calls
to this can be optimized by caching the scaled value when task
demand is calculated.

Change-Id: I0c170a10704ae3e8fe4e9f271e8e65c3923075e5
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 635bbd23
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -591,6 +591,8 @@ struct ravg {
	 *
	 * 'busy_buckets' groups historical busy time into different buckets
	 * used for prediction
	 *
	 * 'demand_scaled' represents task's demand scaled to 1024
	 */
	u64 mark_start;
	u32 sum, demand;
@@ -601,6 +603,7 @@ struct ravg {
	u16 active_windows;
	u32 pred_demand;
	u8 busy_buckets[NUM_BUSY_BUCKETS];
	u16 demand_scaled;
};
#else
static inline void sched_exit(struct task_struct *p) { }
+1 −2
Original line number Diff line number Diff line
@@ -3726,8 +3726,7 @@ static inline unsigned long task_util_est(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
	if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
		return (p->ravg.demand /
			(sched_ravg_window >> SCHED_CAPACITY_SHIFT));
		return p->ravg.demand_scaled;
#endif
	return max(task_util(p), _task_util_est(p));
}
+1 −2
Original line number Diff line number Diff line
@@ -1889,8 +1889,7 @@ static inline unsigned long task_util(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
	if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
		return p->ravg.demand /
		       (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
		return p->ravg.demand_scaled;
#endif
	return READ_ONCE(p->se.avg.util_avg);
}
+13 −1
Original line number Diff line number Diff line
@@ -122,6 +122,7 @@ __read_mostly unsigned int walt_cpu_util_freq_divisor;

/* Initial task load. Newly created tasks are assigned this load. */
unsigned int __read_mostly sched_init_task_load_windows;
unsigned int __read_mostly sched_init_task_load_windows_scaled;
unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;

/*
@@ -195,6 +196,9 @@ static int __init set_sched_predl(char *str)
}
early_param("sched_predl", set_sched_predl);

__read_mostly unsigned int walt_scale_demand_divisor;
#define scale_demand(d) ((d)/walt_scale_demand_divisor)

void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
	inc_nr_big_task(&rq->walt_stats, p);
@@ -1707,6 +1711,7 @@ static void update_history(struct rq *rq, struct task_struct *p,
	}

	p->ravg.demand = demand;
	p->ravg.demand_scaled = scale_demand(demand);
	p->ravg.coloc_demand = div64_u64(sum, sched_ravg_hist_size);
	p->ravg.pred_demand = pred_demand;

@@ -1957,6 +1962,7 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
{
	int i;
	u32 init_load_windows = sched_init_task_load_windows;
	u32 init_load_windows_scaled = sched_init_task_load_windows_scaled;
	u32 init_load_pct = current->init_load_pct;

	p->init_load_pct = 0;
@@ -1974,11 +1980,14 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
	if (idle_task)
		return;

	if (init_load_pct)
	if (init_load_pct) {
		init_load_windows = div64_u64((u64)init_load_pct *
			  (u64)sched_ravg_window, 100);
		init_load_windows_scaled = scale_demand(init_load_windows);
	}

	p->ravg.demand = init_load_windows;
	p->ravg.demand_scaled = init_load_windows_scaled;
	p->ravg.coloc_demand = init_load_windows;
	p->ravg.pred_demand = 0;
	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
@@ -3259,10 +3268,13 @@ static void walt_init_once(void)

	walt_cpu_util_freq_divisor =
	    (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;
	walt_scale_demand_divisor = sched_ravg_window >> SCHED_CAPACITY_SHIFT;

	sched_init_task_load_windows =
		div64_u64((u64)sysctl_sched_init_task_load_pct *
			  (u64)sched_ravg_window, 100);
	sched_init_task_load_windows_scaled =
		scale_demand(sched_init_task_load_windows);
}

void walt_sched_init_rq(struct rq *rq)