Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1fd8d612 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: improve the scheduler"

parents cb30e42b 5be9b47a
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -41,7 +41,8 @@ extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_sched_walt_rotate_big_tasks;
extern unsigned int sysctl_sched_min_task_util_for_boost_colocation;
extern unsigned int sysctl_sched_min_task_util_for_boost;
extern unsigned int sysctl_sched_min_task_util_for_colocation;
extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;

extern int
+13 −4
Original line number Diff line number Diff line
@@ -199,7 +199,9 @@ unsigned int sched_capacity_margin_down[NR_CPUS] = {

#ifdef CONFIG_SCHED_WALT
/* 1ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_boost_colocation = 51;
unsigned int sysctl_sched_min_task_util_for_boost = 51;
/* 0.68ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_colocation = 35;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
@@ -7957,6 +7959,15 @@ static inline int wake_to_idle(struct task_struct *p)
}

#ifdef CONFIG_SCHED_WALT
/*
 * Return true when @p's utilization exceeds the applicable minimum
 * floor: the boost floor while conservative boost is active, the
 * colocation floor otherwise.
 */
static inline bool is_task_util_above_min_thresh(struct task_struct *p)
{
	unsigned int min_util;

	if (sysctl_sched_boost == CONSERVATIVE_BOOST)
		min_util = sysctl_sched_min_task_util_for_boost;
	else
		min_util = sysctl_sched_min_task_util_for_colocation;

	return task_util(p) > min_util;
}

static inline struct cpumask *find_rtg_target(struct task_struct *p)
{
	struct related_thread_group *grp;
@@ -7965,9 +7976,7 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
	rcu_read_lock();

	grp = task_related_thread_group(p);
	if (grp && grp->preferred_cluster &&
			(task_util(p) >
			sysctl_sched_min_task_util_for_boost_colocation)) {
	if (grp && grp->preferred_cluster && is_task_util_above_min_thresh(p)) {
		rtg_target = &grp->preferred_cluster->cpus;
		if (!task_fits_max(p, cpumask_first(rtg_target)))
			rtg_target = NULL;
+1 −1
Original line number Diff line number Diff line
@@ -2940,7 +2940,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
		 */
		if (sysctl_sched_boost == CONSERVATIVE_BOOST &&
				task_util(p) <=
				sysctl_sched_min_task_util_for_boost_colocation)
				sysctl_sched_min_task_util_for_boost)
			policy = SCHED_BOOST_NONE;
	}

+11 −2
Original line number Diff line number Diff line
@@ -378,8 +378,17 @@ static struct ctl_table kern_table[] = {
		.extra2		= &one,
	},
	{
		.procname	= "sched_min_task_util_for_boost_colocation",
		.data		= &sysctl_sched_min_task_util_for_boost_colocation,
		.procname	= "sched_min_task_util_for_boost",
		.data		= &sysctl_sched_min_task_util_for_boost,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &one_thousand,
	},
	{
		.procname	= "sched_min_task_util_for_colocation",
		.data		= &sysctl_sched_min_task_util_for_colocation,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,