Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1019b014 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/walt: improve the scheduler"

parents 1385c339 53ee423e
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -376,6 +376,7 @@
				2054400    876
				2169600    900
				2208000    924
				2304000    940
				2361600    948
				2400000   1170
				2457600   1200
@@ -418,6 +419,7 @@
				2054400    87
				2169600    90
				2208000    92
				2304000    93
				2361600    94
				2400000   117
				2457600   120
+1 −0
Original line number Diff line number Diff line
@@ -34,6 +34,7 @@ extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_sched_walt_rotate_big_tasks;
extern unsigned int sysctl_sched_min_task_util_for_boost_colocation;

extern int
walt_proc_update_handler(struct ctl_table *table, int write,
+14 −14
Original line number Diff line number Diff line
@@ -166,6 +166,10 @@ unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */
unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */
#define capacity_margin sysctl_sched_capacity_margin

#ifdef CONFIG_SCHED_WALT
unsigned int sysctl_sched_min_task_util_for_boost_colocation;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
@@ -6172,8 +6176,7 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
	if (capacity == max_capacity)
		return true;

	if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
					task_sched_boost(p))
	if (task_boost_policy(p) == SCHED_BOOST_ON_BIG)
		return false;

	return __task_fits(p, cpu, 0);
@@ -6839,7 +6842,7 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
struct find_best_target_env {
	struct cpumask *rtg_target;
	bool need_idle;
	bool placement_boost;
	int placement_boost;
	bool avoid_prev_cpu;
};

@@ -7229,7 +7232,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			if (best_idle_cpu != -1)
				break;

			if (fbt_env->placement_boost) {
			if (fbt_env->placement_boost != SCHED_BOOST_NONE) {
				target_capacity = ULONG_MAX;
				continue;
			}
@@ -7411,7 +7414,9 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
	rcu_read_lock();

	grp = task_related_thread_group(p);
	if (grp && grp->preferred_cluster) {
	if (grp && grp->preferred_cluster &&
			(task_util(p) >
			sysctl_sched_min_task_util_for_boost_colocation)) {
		rtg_target = &grp->preferred_cluster->cpus;
		if (!task_fits_max(p, cpumask_first(rtg_target)))
			rtg_target = NULL;
@@ -7469,9 +7474,8 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
	} else {
		fbt_env.need_idle = wake_to_idle(p);
	}
	fbt_env.placement_boost = task_sched_boost(p) ?
				  sched_boost_policy() != SCHED_BOOST_NONE :
				  false;

	fbt_env.placement_boost = task_boost_policy(p);
	fbt_env.avoid_prev_cpu = false;

	if (prefer_idle || fbt_env.need_idle)
@@ -8398,7 +8402,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
	env->flags &= ~LBF_ALL_PINNED;

	if (energy_aware() && !env->dst_rq->rd->overutilized &&
	    env->idle == CPU_NEWLY_IDLE) {
	    env->idle == CPU_NEWLY_IDLE &&
	    !task_in_related_thread_group(p)) {
		long util_cum_dst, util_cum_src;
		unsigned long demand;

@@ -9717,7 +9722,6 @@ static struct sched_group *find_busiest_group(struct lb_env *env)

	if (energy_aware() && !env->dst_rq->rd->overutilized) {
		int cpu_local, cpu_busiest;
		long util_cum;
		unsigned long energy_local, energy_busiest;

		if (env->idle != CPU_NEWLY_IDLE)
@@ -9737,10 +9741,6 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
		} else if (energy_local == energy_busiest) {
			if (cpu_rq(cpu_busiest)->nr_running < 2)
				goto out_balanced;

			util_cum = cpu_util_cum(cpu_busiest, 0);
			if (util_cum < cpu_util_cum(cpu_local, 0))
				goto out_balanced;
		}
	}

+23 −1
Original line number Diff line number Diff line
@@ -2701,6 +2701,25 @@ static inline unsigned int power_cost(int cpu, bool max)
extern void walt_sched_energy_populated_callback(void);
extern void walt_update_min_max_capacity(void);

/*
 * task_boost_policy - effective boost policy for a single task.
 *
 * Returns the global sched_boost_policy() only if @p itself is eligible
 * for boost (task_sched_boost()); otherwise SCHED_BOOST_NONE.  Under
 * CONSERVATIVE_BOOST, tasks whose utilization does not exceed
 * sysctl_sched_min_task_util_for_boost_colocation are demoted back to
 * SCHED_BOOST_NONE so that small tasks are not placed on big CPUs.
 *
 * NOTE(review): task_util() units are assumed comparable to the sysctl
 * value (capacity-scaled util) — confirm against the sysctl's writer.
 */
static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
{
	enum sched_boost_policy boost_on_big = task_sched_boost(p) ?
				sched_boost_policy() : SCHED_BOOST_NONE;

	if (boost_on_big) {
		/*
		 * Filter out tasks less than min task util threshold
		 * under conservative boost.
		 */
		if (sysctl_sched_boost == CONSERVATIVE_BOOST &&
				task_util(p) <=
				sysctl_sched_min_task_util_for_boost_colocation)
			boost_on_big = SCHED_BOOST_NONE;
	}

	return boost_on_big;
}

#else	/* CONFIG_SCHED_WALT */

struct walt_sched_stats;
@@ -2711,7 +2730,10 @@ static inline bool task_sched_boost(struct task_struct *p)
{
	return true;
}

/*
 * !CONFIG_SCHED_WALT stub: without WALT load tracking there is no
 * per-task boost policy, so every task reports SCHED_BOOST_NONE.
 */
static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
{
	return SCHED_BOOST_NONE;
}
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }

static inline int sched_boost(void)
+1 −2
Original line number Diff line number Diff line
@@ -2627,7 +2627,6 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
{
	struct task_struct *p;
	u64 combined_demand = 0;
	bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG;
	bool group_boost = false;
	u64 wallclock;

@@ -2651,7 +2650,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
		return;

	list_for_each_entry(p, &grp->tasks, grp_list) {
		if (boost_on_big && task_sched_boost(p)) {
		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG) {
			group_boost = true;
			break;
		}
Loading