Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 986001c9 authored by Syed Rameez Mustafa's avatar Syed Rameez Mustafa Committed by Joonwoo Park
Browse files

sched: Introduce scheduler boost related placement changes



Boost should:
1) Trigger tasks to run on the perf cluster whenever possible.
2) Ensure fair-class placement only takes CPU load into account
3) Ensure load balance and the scheduler tick make tasks upmigrate (active)
4) Ensure RT tasks run on the big cluster for FULL_THROTTLE boost

The change corresponding to (3) will change load balance behavior
in general whereby a misfit task will always be upmigrated to the
performance cluster. This is actually the desired behavior.

Change-Id: Ifeb89e58aa1b2ecaceb988415cb145959621c983
Signed-off-by: default avatarSyed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent 3e705e46
Loading
Loading
Loading
Loading
+21 −1
Original line number Diff line number Diff line
@@ -5940,6 +5940,10 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
	if (capacity == max_capacity)
		return true;

	if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
					task_sched_boost(p))
		return false;

	return __task_fits(p, cpu, 0);
}

@@ -6729,6 +6733,8 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
	int isolated_candidate = -1;
	bool need_idle;
	bool skip_ediff = false;
	enum sched_boost_policy placement_boost = task_sched_boost(p) ?
				sched_boost_policy() : SCHED_BOOST_NONE;

	sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));

@@ -6848,6 +6854,20 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
			if (sched_cpu_high_irqload(cpu))
				continue;

			/*
			 * Since this code is inside sched_is_big_little,
			 * we are going to assume that boost policy is
			 * SCHED_BOOST_ON_BIG.
			 */
			if (placement_boost != SCHED_BOOST_NONE) {
				new_util = cpu_util(i);
				if (new_util < min_util) {
					min_util_cpu = i;
					min_util = new_util;
				}
				continue;
			}

			/*
			 * p's blocked utilization is still accounted for on prev_cpu
			 * so prev_cpu will receive a negative bias due to the double
@@ -9431,7 +9451,7 @@ static int need_active_balance(struct lb_env *env)

	if ((env->idle != CPU_NOT_IDLE) &&
	    (capacity_orig_of(env->src_cpu) < capacity_orig_of(env->dst_cpu)) &&
	    env->src_rq->cfs.h_nr_running == 1 && env->src_rq->misfit_task)
	    env->src_rq->misfit_task)
		return 1;

	return unlikely(sd->nr_balance_failed >
+21 −4
Original line number Diff line number Diff line
@@ -1844,13 +1844,15 @@ static int find_lowest_rq(struct task_struct *task)
	int this_cpu = smp_processor_id();
	int cpu, best_cpu;
	struct cpumask search_cpu, backup_search_cpu;
	unsigned long cpu_capacity, capacity = ULONG_MAX;
	unsigned long cpu_capacity;
	unsigned long best_capacity;
	unsigned long util, best_cpu_util = ULONG_MAX;
	int best_cpu_idle_idx = INT_MAX;
	int cpu_idle_idx = -1;
	long new_util_cum;
	int max_spare_cap_cpu = -1;
	long max_spare_cap = -LONG_MAX;
	bool placement_boost;

#ifdef CONFIG_SCHED_HMP
	return find_lowest_rq_hmp(task);
@@ -1870,6 +1872,13 @@ static int find_lowest_rq(struct task_struct *task)
		sg_target = NULL;
		best_cpu = -1;

		/*
		 * Since this code is inside sched_is_big_little, we are going
		 * to assume that boost policy is SCHED_BOOST_ON_BIG
		 */
		placement_boost = sched_boost() == FULL_THROTTLE_BOOST;
		best_capacity = placement_boost ? 0 : ULONG_MAX;

		rcu_read_lock();
		sd = rcu_dereference(per_cpu(sd_ea, task_cpu(task)));
		if (!sd) {
@@ -1881,10 +1890,18 @@ static int find_lowest_rq(struct task_struct *task)
		do {
			cpu = group_first_cpu(sg);
			cpu_capacity = capacity_orig_of(cpu);
			if (cpu_capacity < capacity) {
				capacity = cpu_capacity;

			if (unlikely(placement_boost)) {
				if (cpu_capacity > best_capacity) {
					best_capacity = cpu_capacity;
					sg_target = sg;
				}
			} else {
				if (cpu_capacity < best_capacity) {
					best_capacity = cpu_capacity;
					sg_target = sg;
				}
			}
		} while (sg = sg->next, sg != sd->groups);
		rcu_read_unlock();