Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed177c32 authored by Joonwoo Park's avatar Joonwoo Park Committed by Gerrit - the friendly Code Review server
Browse files

sched: actively migrate big tasks on power CPU to idle performance CPU



When a performance CPU runs the idle or newly-idle load balancer to pull a
task from a power-efficient CPU, the load balancer always fails and the
performance CPU enters idle mode if the task running on the power-efficient
CPU is a big task.  This is suboptimal when that running task doesn't fit
on the power-efficient CPU: it's quite possible that the big task will
remain on the power-efficient CPU until it is preempted, while a
performance CPU sits idle.

Revise the load-balancer algorithm to actively migrate big tasks from a
power-efficient CPU to a performance CPU when the performance CPU runs the
idle or newly-idle load balancer.

Change-Id: Iaf05e0236955fdcc7ded0ff09af0880050a2be32
Signed-off-by: default avatarJoonwoo Park <joonwoop@codeaurora.org>
parent 20916112
Loading
Loading
Loading
Loading
+67 −17
Original line number Diff line number Diff line
@@ -5772,9 +5772,13 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
#define LBF_NEED_BREAK	0x02
#define LBF_SOME_PINNED 0x04
#define LBF_IGNORE_SMALL_TASKS 0x08
#define LBF_PWR_ACTIVE_BALANCE 0x10
#define LBF_SCHED_BOOST 0x20
#define LBF_IGNORE_BIG_TASKS 0x40
#define LBF_EA_ACTIVE_BALANCE 0x10
#define LBF_SCHED_BOOST_ACTIVE_BALANCE 0x20
#define LBF_BIG_TASK_ACTIVE_BALANCE 0x40
#define LBF_HMP_ACTIVE_BALANCE (LBF_EA_ACTIVE_BALANCE | \
				LBF_SCHED_BOOST_ACTIVE_BALANCE | \
				LBF_BIG_TASK_ACTIVE_BALANCE)
#define LBF_IGNORE_BIG_TASKS 0x80

struct lb_env {
	struct sched_domain	*sd;
@@ -6591,6 +6595,37 @@ static inline void update_sg_lb_stats(struct lb_env *env,
		sgs->group_has_capacity = 1;
}

#ifdef CONFIG_SCHED_HMP
/*
 * Decide whether @sg should be taken as the busiest group for an HMP
 * active balance.  Only considered when the destination CPU is idle (or
 * newly idle) and has strictly more capacity than @sg.  On success the
 * matching LBF_*_ACTIVE_BALANCE flag is set in @env->flags:
 * sched_boost with no busiest group picked yet wins first, otherwise a
 * group carrying more big tasks than the current busiest is picked.
 */
static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
						  struct sd_lb_stats *sds,
						  struct sched_group *sg,
						  struct sg_lb_stats *sgs)
{
	/* Bail out unless dst is an idle CPU faster than this group. */
	if (env->idle == CPU_NOT_IDLE ||
	    capacity(env->dst_rq) <= group_rq_capacity(sg))
		return false;

	if (sched_boost() && !sds->busiest && sgs->sum_nr_running) {
		env->flags |= LBF_SCHED_BOOST_ACTIVE_BALANCE;
		return true;
	}

	if (sgs->sum_nr_big_tasks > sds->busiest_nr_big_tasks) {
		env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
		return true;
	}

	return false;
}
#else
/* !CONFIG_SCHED_HMP: never request an HMP active balance. */
static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
						  struct sd_lb_stats *sds,
						  struct sched_group *sg,
						  struct sg_lb_stats *sgs)
{
	return false;
}
#endif

/**
 * update_sd_pick_busiest - return 1 on busiest group
 * @env: The load balancing environment.
@@ -6609,23 +6644,19 @@ static bool update_sd_pick_busiest(struct lb_env *env,
	int cpu, cpu_busiest;
	unsigned int pc, pc_busiest;

	if (sched_boost() && !sds->busiest && sgs->sum_nr_running &&
		(env->idle != CPU_NOT_IDLE) && (capacity(env->dst_rq) >
		group_rq_capacity(sg))) {
		env->flags |= LBF_SCHED_BOOST;
	if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs))
		return true;
	}

	if (sgs->avg_load < sds->max_load)
		return false;

	if (sgs->sum_nr_running > sgs->group_capacity) {
		env->flags &= ~LBF_PWR_ACTIVE_BALANCE;
		env->flags &= ~LBF_EA_ACTIVE_BALANCE;
		return true;
	}

	if (sgs->group_imb) {
		env->flags &= ~LBF_PWR_ACTIVE_BALANCE;
		env->flags &= ~LBF_EA_ACTIVE_BALANCE;
		return true;
	}

@@ -6636,7 +6667,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
	 */
	cpu = group_first_cpu(sg);
	if (sysctl_sched_enable_power_aware &&
	    (!sds->busiest || (env->flags & LBF_PWR_ACTIVE_BALANCE)) &&
	    (!sds->busiest || (env->flags & LBF_EA_ACTIVE_BALANCE)) &&
	    (capacity(env->dst_rq) == group_rq_capacity(sg)) &&
	    sgs->sum_nr_running && (env->idle != CPU_NOT_IDLE) &&
	    !is_task_migration_throttled(cpu_rq(cpu)->curr) &&
@@ -6649,7 +6680,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
				return true;
		} else {
			if (power_cost_at_freq(env->dst_cpu, 0) < pc) {
				env->flags |= LBF_PWR_ACTIVE_BALANCE;
				env->flags |= LBF_EA_ACTIVE_BALANCE;
				return true;
			}
		}
@@ -7049,8 +7080,10 @@ ret:
static struct rq *find_busiest_queue_hmp(struct lb_env *env,
				     struct sched_group *group)
{
	struct rq *busiest = NULL;
	u64 max_runnable_avg = 0;
	struct rq *busiest = NULL, *busiest_big = NULL;
	u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
	int max_nr_big = 0, nr_big;
	bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
	int i;

	for_each_cpu(i, sched_group_cpus(group)) {
@@ -7061,12 +7094,29 @@ static struct rq *find_busiest_queue_hmp(struct lb_env *env,
		if (!cpumask_test_cpu(i, env->cpus))
			continue;


		if (find_big) {
			nr_big = nr_big_tasks(rq);
			if (nr_big > max_nr_big ||
			    (nr_big > 0 && nr_big == max_nr_big &&
			     cumulative_runnable_avg > max_runnable_avg_big)) {
				max_runnable_avg_big = cumulative_runnable_avg;
				busiest_big = rq;
				max_nr_big = nr_big;
				continue;
			}
		}

		if (cumulative_runnable_avg > max_runnable_avg) {
			max_runnable_avg = cumulative_runnable_avg;
			busiest = rq;
		}
	}

	if (busiest_big)
		return busiest_big;

	env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
	return busiest;
}
#else
@@ -7142,7 +7192,7 @@ static int need_active_balance(struct lb_env *env)
{
	struct sched_domain *sd = env->sd;

	if (env->flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST))
	if (env->flags & LBF_HMP_ACTIVE_BALANCE)
		return 1;

	if (env->idle == CPU_NEWLY_IDLE) {
@@ -7317,7 +7367,7 @@ more_balance:

no_move:
	if (!ld_moved) {
		if (!(env.flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST)))
		if (!(env.flags & LBF_HMP_ACTIVE_BALANCE))
			schedstat_inc(sd, lb_failed[idle]);

		/*
@@ -7327,7 +7377,7 @@ no_move:
		 * excessive cache_hot migrations and active balances.
		 */
		if (idle != CPU_NEWLY_IDLE &&
		    !(env.flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST)))
		    !(env.flags & LBF_HMP_ACTIVE_BALANCE))
			sd->nr_balance_failed++;

		if (need_active_balance(&env)) {