Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 57eb071c authored by Lingutla Chandrasekhar
Browse files

sched: Use initial_task_util load for new tasks



Currently, we expose the initial_task_util tunable but do not
honor it. Fix this by using the tunable as the initial load for
newly created tasks.

Since this tunable is used in end user builds, move the tunable
out of SCHED_DEBUG.

Change-Id: I17a89b7a99d43c9cc230536ad7d9238de9833473
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
parent be5c1d18
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -784,7 +784,6 @@ static void sched_debug_header(struct seq_file *m)
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#ifdef CONFIG_SCHED_WALT
	P(sched_init_task_load_windows);
	P(min_capacity);
	P(max_capacity);
	P(sched_ravg_window);
+0 −1
Original line number Diff line number Diff line
@@ -2391,7 +2391,6 @@ extern unsigned int max_load_scale_factor;
extern unsigned int max_possible_capacity;
extern unsigned int min_max_possible_capacity;
extern unsigned int max_power_cost;
extern unsigned int sched_init_task_load_windows;
extern unsigned int up_down_migrate_scale_factor;
extern unsigned int sysctl_sched_restrict_cluster_spill;
extern unsigned int sched_pred_alert_load;
+9 −10
Original line number Diff line number Diff line
@@ -119,7 +119,6 @@ __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
__read_mostly unsigned int walt_cpu_util_freq_divisor;

/* Initial task load. Newly created tasks are assigned this load. */
unsigned int __read_mostly sched_init_task_load_windows;
unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;

/*
@@ -1939,8 +1938,8 @@ int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
void init_new_task_load(struct task_struct *p, bool idle_task)
{
	int i;
	u32 init_load_windows = sched_init_task_load_windows;
	u32 init_load_pct = current->init_load_pct;
	u32 init_load_windows;
	u32 init_load_pct;

	p->init_load_pct = 0;
	rcu_assign_pointer(p->grp, NULL);
@@ -1957,7 +1956,11 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
	if (idle_task)
		return;

	if (init_load_pct)
	if (current->init_load_pct)
		init_load_pct = current->init_load_pct;
	else
		init_load_pct = sysctl_sched_init_task_load_pct;

	init_load_windows = div64_u64((u64)init_load_pct *
				(u64)sched_ravg_window, 100);

@@ -3242,8 +3245,4 @@ void walt_sched_init(struct rq *rq)

	walt_cpu_util_freq_divisor =
	    (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;

	sched_init_task_load_windows =
		div64_u64((u64)sysctl_sched_init_task_load_pct *
			  (u64)sched_ravg_window, 100);
}
+0 −1
Original line number Diff line number Diff line
@@ -40,7 +40,6 @@ extern unsigned int max_possible_efficiency;
extern unsigned int min_possible_efficiency;
extern unsigned int max_possible_freq;
extern unsigned int sched_major_task_runtime;
extern unsigned int __read_mostly sched_init_task_load_windows;
extern unsigned int __read_mostly sched_load_granule;

extern struct mutex cluster_lock;
+7 −7
Original line number Diff line number Diff line
@@ -334,6 +334,13 @@ static struct ctl_table kern_table[] = {
		.extra1		= &zero,
		.extra2		= &one,
	},
	{
		.procname	= "sched_initial_task_util",
		.data		= &sysctl_sched_init_task_load_pct,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{
		.procname	= "sched_upmigrate",
@@ -391,13 +398,6 @@ static struct ctl_table kern_table[] = {
		.proc_handler	= proc_dointvec,
	},
#endif
	{
		.procname	= "sched_initial_task_util",
		.data		= &sysctl_sched_initial_task_util,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sched_cstate_aware",
		.data		= &sysctl_sched_cstate_aware,