Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2e2260c3 authored by Pavankumar Kondeti
Browse files

sched/walt: Add the sched_user_hint sysctl tunable



This change adds a new sysctl tunable, sched_user_hint (range 0-1000),
which scales the load reported to the governor for the cluster hosting
the top application, when frequency aggregation is disabled. A write
that changes the value queues walt_migration_irq_work so the new hint
takes effect immediately.

Change-Id: I7d794ad1be10a6811602fabb388facd39c8f3c53
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 817c1496
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -34,6 +34,7 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_user_hint;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
@@ -49,6 +50,10 @@ extern int
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp,
			 loff_t *ppos);
extern int
walt_proc_user_hint_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp,
			 loff_t *ppos);

#endif

+7 −4
Original line number Diff line number Diff line
@@ -491,9 +491,10 @@ TRACE_EVENT(sched_load_to_gov,

	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
		int freq_aggr, u64 load, int policy,
		int big_task_rotation),
		int big_task_rotation,
		unsigned int user_hint),
	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy,
		big_task_rotation),
		big_task_rotation, user_hint),

	TP_STRUCT__entry(
		__field(int,	cpu)
@@ -509,6 +510,7 @@ TRACE_EVENT(sched_load_to_gov,
		__field(u64,	pl)
		__field(u64,    load)
		__field(int,    big_task_rotation)
		__field(unsigned int, user_hint)
	),

	TP_fast_assign(
@@ -526,13 +528,14 @@ TRACE_EVENT(sched_load_to_gov,
					rq->walt_stats.pred_demands_sum_scaled;
		__entry->load		= load;
		__entry->big_task_rotation = big_task_rotation;
		__entry->user_hint = user_hint;
	),

	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d user_hint=%u",
		__entry->cpu, __entry->policy, __entry->ed_task_pid,
		__entry->aggr_grp_load, __entry->freq_aggr,
		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
		__entry->big_task_rotation)
		__entry->big_task_rotation, __entry->user_hint)
);
#endif
+56 −1
Original line number Diff line number Diff line
@@ -475,6 +475,18 @@ static u32 top_task_load(struct rq *rq)
	}
}

/* User-supplied hint (0..1000, via /proc); non-zero enables the boost below. */
unsigned int sysctl_sched_user_hint;
static bool is_cluster_hosting_top_app(struct sched_cluster *cluster);

/*
 * should_apply_suh_freq_boost - should the user-hint load boost apply?
 * @cluster: the cluster whose reported load is being computed.
 *
 * The boost is considered only when frequency aggregation is disabled,
 * a non-zero hint has been set, and the cluster is carrying aggregated
 * group load; it then applies only to the cluster hosting the top app.
 */
static inline bool should_apply_suh_freq_boost(struct sched_cluster *cluster)
{
	bool eligible = !sched_freq_aggr_en && sysctl_sched_user_hint &&
			cluster->aggr_grp_load;

	return eligible && is_cluster_hosting_top_app(cluster);
}

static inline u64 freq_policy_load(struct rq *rq)
{
	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
@@ -510,9 +522,13 @@ static inline u64 freq_policy_load(struct rq *rq)
		break;
	}

	if (should_apply_suh_freq_boost(cluster))
		load = div64_u64(load * sysctl_sched_user_hint, (u64)100);

done:
	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, sched_freq_aggr_en,
				load, reporting_policy, walt_rotation_enabled);
				load, reporting_policy, walt_rotation_enabled,
				sysctl_sched_user_hint);
	return load;
}

@@ -2963,6 +2979,23 @@ int sync_cgroup_colocation(struct task_struct *p, bool insert)
}
#endif

/*
 * is_cluster_hosting_top_app - does @cluster currently host the top app?
 *
 * The top app is tracked via the default colocation group. If that group
 * prefers the min-capacity cluster (it is not skipping min and no
 * boost-on-big policy is active), the top app lives on the min cluster;
 * otherwise it lives on a non-min cluster. Returns false when no default
 * colocation group exists.
 */
static bool is_cluster_hosting_top_app(struct sched_cluster *cluster)
{
	struct related_thread_group *grp =
		lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
	bool top_app_on_min;

	if (!grp)
		return false;

	top_app_on_min = !grp->skip_min &&
			 sched_boost_policy() != SCHED_BOOST_ON_BIG;

	return is_min_capacity_cluster(cluster) == top_app_on_min;
}


static unsigned long max_cap[NR_CPUS];
static unsigned long thermal_cap_cpu[NR_CPUS];

@@ -3435,3 +3468,25 @@ void walt_sched_init_rq(struct rq *rq)
	rq->cum_window_demand_scaled = 0;
	rq->notif_pending = false;
}

/*
 * walt_proc_user_hint_handler - sysctl handler for sched_user_hint.
 *
 * Serializes concurrent writers with a local mutex, lets
 * proc_dointvec_minmax() validate and store the value (bounds come from
 * the ctl_table entry), and, when a write actually changed the hint,
 * queues walt_migration_irq_work so the scheduler reacts immediately.
 * Returns 0 on success or the error from proc_dointvec_minmax().
 */
int walt_proc_user_hint_handler(struct ctl_table *table,
				int write, void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	unsigned int old_value;
	int ret;

	mutex_lock(&mutex);

	old_value = sysctl_sched_user_hint;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	/* Only notify the scheduler on a successful write that changed the hint. */
	if (!ret && write && old_value != sysctl_sched_user_hint)
		irq_work_queue(&walt_migration_irq_work);

	mutex_unlock(&mutex);
	return ret;
}
+9 −0
Original line number Diff line number Diff line
@@ -344,6 +344,15 @@ static struct ctl_table kern_table[] = {
	},
#endif
#ifdef CONFIG_SCHED_WALT
	{
		.procname	= "sched_user_hint",
		.data           = &sysctl_sched_user_hint,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= walt_proc_user_hint_handler,
		.extra1		= &zero,
		.extra2		= &one_thousand,
	},
	{
		.procname       = "sched_cpu_high_irqload",
		.data           = &sysctl_sched_cpu_high_irqload,