Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 53ee423e authored by Abhijeet Dharmapurikar's avatar Abhijeet Dharmapurikar Committed by Pavankumar Kondeti
Browse files

sched/walt: improve the scheduler



This change introduces task_boost_policy() and a new sysctl,
sched_min_task_util_for_boost_colocation, so that under conservative boost
only tasks whose utilization exceeds the threshold are placed on the big
cluster or colocated with their related thread group's preferred cluster.

Change-Id: I01e6610bba2e8c66a628d6289eeed4e854264fdd
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
Signed-off-by: Abhijeet Dharmapurikar <adharmap@codeaurora.org>
[clingutla@codeaurora.org: resolve trivial merge conflicts.]
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 0011ca13
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -34,6 +34,7 @@ extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_sched_walt_rotate_big_tasks;
extern unsigned int sysctl_sched_min_task_util_for_boost_colocation;

extern int
walt_proc_update_handler(struct ctl_table *table, int write,
+12 −8
Original line number Diff line number Diff line
@@ -166,6 +166,10 @@ unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */
unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */
#define capacity_margin sysctl_sched_capacity_margin

#ifdef CONFIG_SCHED_WALT
unsigned int sysctl_sched_min_task_util_for_boost_colocation;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
@@ -6156,8 +6160,7 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
	if (capacity == max_capacity)
		return true;

	if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
					task_sched_boost(p))
	if (task_boost_policy(p) == SCHED_BOOST_ON_BIG)
		return false;

	return __task_fits(p, cpu, 0);
@@ -6823,7 +6826,7 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
struct find_best_target_env {
	struct cpumask *rtg_target;
	bool need_idle;
	bool placement_boost;
	int placement_boost;
	bool avoid_prev_cpu;
};

@@ -7213,7 +7216,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			if (best_idle_cpu != -1)
				break;

			if (fbt_env->placement_boost) {
			if (fbt_env->placement_boost != SCHED_BOOST_NONE) {
				target_capacity = ULONG_MAX;
				continue;
			}
@@ -7395,7 +7398,9 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
	rcu_read_lock();

	grp = task_related_thread_group(p);
	if (grp && grp->preferred_cluster) {
	if (grp && grp->preferred_cluster &&
			(task_util(p) >
			sysctl_sched_min_task_util_for_boost_colocation)) {
		rtg_target = &grp->preferred_cluster->cpus;
		if (!task_fits_max(p, cpumask_first(rtg_target)))
			rtg_target = NULL;
@@ -7453,9 +7458,8 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
	} else {
		fbt_env.need_idle = wake_to_idle(p);
	}
	fbt_env.placement_boost = task_sched_boost(p) ?
				  sched_boost_policy() != SCHED_BOOST_NONE :
				  false;

	fbt_env.placement_boost = task_boost_policy(p);
	fbt_env.avoid_prev_cpu = false;

	if (prefer_idle || fbt_env.need_idle)
+23 −1
Original line number Diff line number Diff line
@@ -2699,6 +2699,25 @@ static inline unsigned int power_cost(int cpu, bool max)
extern void walt_sched_energy_populated_callback(void);
extern void walt_update_min_max_capacity(void);

static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
{
	enum sched_boost_policy boost_on_big = task_sched_boost(p) ?
				sched_boost_policy() : SCHED_BOOST_NONE;

	if (boost_on_big) {
		/*
		 * Filter out tasks less than min task util threshold
		 * under conservative boost.
		 */
		if (sysctl_sched_boost == CONSERVATIVE_BOOST &&
				task_util(p) <=
				sysctl_sched_min_task_util_for_boost_colocation)
			boost_on_big = SCHED_BOOST_NONE;
	}

	return boost_on_big;
}

#else	/* CONFIG_SCHED_WALT */

struct walt_sched_stats;
@@ -2709,7 +2728,10 @@ static inline bool task_sched_boost(struct task_struct *p)
{
	return true;
}

/*
 * Stub for !CONFIG_SCHED_WALT builds: without WALT there is no per-task
 * boost accounting, so boost-based placement is never applied.
 */
static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
{
	return SCHED_BOOST_NONE;
}
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }

static inline int sched_boost(void)
+1 −2
Original line number Diff line number Diff line
@@ -2627,7 +2627,6 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
{
	struct task_struct *p;
	u64 combined_demand = 0;
	bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG;
	bool group_boost = false;
	u64 wallclock;

@@ -2651,7 +2650,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
		return;

	list_for_each_entry(p, &grp->tasks, grp_list) {
		if (boost_on_big && task_sched_boost(p)) {
		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG) {
			group_boost = true;
			break;
		}
+10 −3
Original line number Diff line number Diff line
@@ -127,9 +127,7 @@ static int __maybe_unused three = 3;
static int __maybe_unused four = 4;
static unsigned long one_ul = 1;
static int one_hundred = 100;
#ifdef CONFIG_PERF_EVENTS
static int one_thousand = 1000;
#endif
static int __maybe_unused one_thousand = 1000;
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -357,6 +355,15 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "sched_min_task_util_for_boost_colocation",
		.data		= &sysctl_sched_min_task_util_for_boost_colocation,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &one_thousand,
	},
#endif
	{
		.procname	= "sched_upmigrate",