Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 95169e2d authored by Linux Build Service Account, committed by Gerrit — the friendly Code Review server
Browse files

Merge "sched: actively migrate big tasks on power CPU to idle performance CPU"

parents d576be9b ed177c32
Loading
Loading
Loading
Loading
+67 −17
Original line number Original line Diff line number Diff line
@@ -5772,9 +5772,13 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
#define LBF_NEED_BREAK	0x02
#define LBF_NEED_BREAK	0x02
#define LBF_SOME_PINNED 0x04
#define LBF_SOME_PINNED 0x04
#define LBF_IGNORE_SMALL_TASKS 0x08
#define LBF_IGNORE_SMALL_TASKS 0x08
#define LBF_PWR_ACTIVE_BALANCE 0x10
#define LBF_EA_ACTIVE_BALANCE 0x10
#define LBF_SCHED_BOOST 0x20
#define LBF_SCHED_BOOST_ACTIVE_BALANCE 0x20
#define LBF_IGNORE_BIG_TASKS 0x40
#define LBF_BIG_TASK_ACTIVE_BALANCE 0x40
#define LBF_HMP_ACTIVE_BALANCE (LBF_EA_ACTIVE_BALANCE | \
				LBF_SCHED_BOOST_ACTIVE_BALANCE | \
				LBF_BIG_TASK_ACTIVE_BALANCE)
#define LBF_IGNORE_BIG_TASKS 0x80


struct lb_env {
struct lb_env {
	struct sched_domain	*sd;
	struct sched_domain	*sd;
@@ -6591,6 +6595,37 @@ static inline void update_sg_lb_stats(struct lb_env *env,
		sgs->group_has_capacity = 1;
		sgs->group_has_capacity = 1;
}
}


#ifdef CONFIG_SCHED_HMP
/*
 * Decide whether @sg should be picked as the busiest group for an
 * HMP-style active balance, and record the reason in env->flags.
 * Returns true when an active-balance condition holds, false otherwise.
 */
static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
						  struct sd_lb_stats *sds,
						  struct sched_group *sg,
						  struct sg_lb_stats *sgs)
{
	/*
	 * Active balance only makes sense when the destination CPU is
	 * not busy and has strictly more capacity than the source group.
	 */
	if (env->idle == CPU_NOT_IDLE ||
	    capacity(env->dst_rq) <= group_rq_capacity(sg))
		return false;

	/* Under sched_boost, grab the first group that has runnable tasks. */
	if (sched_boost() && !sds->busiest && sgs->sum_nr_running) {
		env->flags |= LBF_SCHED_BOOST_ACTIVE_BALANCE;
		return true;
	}

	/* Otherwise prefer the group carrying the most big tasks so far. */
	if (sgs->sum_nr_big_tasks > sds->busiest_nr_big_tasks) {
		env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
		return true;
	}

	return false;
}
#else
/* !CONFIG_SCHED_HMP: no HMP active balance, never pick on this basis. */
static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
						  struct sd_lb_stats *sds,
						  struct sched_group *sg,
						  struct sg_lb_stats *sgs)
{
	return false;
}
#endif

/**
/**
 * update_sd_pick_busiest - return 1 on busiest group
 * update_sd_pick_busiest - return 1 on busiest group
 * @env: The load balancing environment.
 * @env: The load balancing environment.
@@ -6609,23 +6644,19 @@ static bool update_sd_pick_busiest(struct lb_env *env,
	int cpu, cpu_busiest;
	int cpu, cpu_busiest;
	unsigned int pc, pc_busiest;
	unsigned int pc, pc_busiest;


	if (sched_boost() && !sds->busiest && sgs->sum_nr_running &&
	if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs))
		(env->idle != CPU_NOT_IDLE) && (capacity(env->dst_rq) >
		group_rq_capacity(sg))) {
		env->flags |= LBF_SCHED_BOOST;
		return true;
		return true;
	}


	if (sgs->avg_load < sds->max_load)
	if (sgs->avg_load < sds->max_load)
		return false;
		return false;


	if (sgs->sum_nr_running > sgs->group_capacity) {
	if (sgs->sum_nr_running > sgs->group_capacity) {
		env->flags &= ~LBF_PWR_ACTIVE_BALANCE;
		env->flags &= ~LBF_EA_ACTIVE_BALANCE;
		return true;
		return true;
	}
	}


	if (sgs->group_imb) {
	if (sgs->group_imb) {
		env->flags &= ~LBF_PWR_ACTIVE_BALANCE;
		env->flags &= ~LBF_EA_ACTIVE_BALANCE;
		return true;
		return true;
	}
	}


@@ -6636,7 +6667,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
	 */
	 */
	cpu = group_first_cpu(sg);
	cpu = group_first_cpu(sg);
	if (sysctl_sched_enable_power_aware &&
	if (sysctl_sched_enable_power_aware &&
	    (!sds->busiest || (env->flags & LBF_PWR_ACTIVE_BALANCE)) &&
	    (!sds->busiest || (env->flags & LBF_EA_ACTIVE_BALANCE)) &&
	    (capacity(env->dst_rq) == group_rq_capacity(sg)) &&
	    (capacity(env->dst_rq) == group_rq_capacity(sg)) &&
	    sgs->sum_nr_running && (env->idle != CPU_NOT_IDLE) &&
	    sgs->sum_nr_running && (env->idle != CPU_NOT_IDLE) &&
	    !is_task_migration_throttled(cpu_rq(cpu)->curr) &&
	    !is_task_migration_throttled(cpu_rq(cpu)->curr) &&
@@ -6649,7 +6680,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
				return true;
				return true;
		} else {
		} else {
			if (power_cost_at_freq(env->dst_cpu, 0) < pc) {
			if (power_cost_at_freq(env->dst_cpu, 0) < pc) {
				env->flags |= LBF_PWR_ACTIVE_BALANCE;
				env->flags |= LBF_EA_ACTIVE_BALANCE;
				return true;
				return true;
			}
			}
		}
		}
@@ -7049,8 +7080,10 @@ ret:
static struct rq *find_busiest_queue_hmp(struct lb_env *env,
static struct rq *find_busiest_queue_hmp(struct lb_env *env,
				     struct sched_group *group)
				     struct sched_group *group)
{
{
	struct rq *busiest = NULL;
	struct rq *busiest = NULL, *busiest_big = NULL;
	u64 max_runnable_avg = 0;
	u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
	int max_nr_big = 0, nr_big;
	bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
	int i;
	int i;


	for_each_cpu(i, sched_group_cpus(group)) {
	for_each_cpu(i, sched_group_cpus(group)) {
@@ -7061,12 +7094,29 @@ static struct rq *find_busiest_queue_hmp(struct lb_env *env,
		if (!cpumask_test_cpu(i, env->cpus))
		if (!cpumask_test_cpu(i, env->cpus))
			continue;
			continue;



		if (find_big) {
			nr_big = nr_big_tasks(rq);
			if (nr_big > max_nr_big ||
			    (nr_big > 0 && nr_big == max_nr_big &&
			     cumulative_runnable_avg > max_runnable_avg_big)) {
				max_runnable_avg_big = cumulative_runnable_avg;
				busiest_big = rq;
				max_nr_big = nr_big;
				continue;
			}
		}

		if (cumulative_runnable_avg > max_runnable_avg) {
		if (cumulative_runnable_avg > max_runnable_avg) {
			max_runnable_avg = cumulative_runnable_avg;
			max_runnable_avg = cumulative_runnable_avg;
			busiest = rq;
			busiest = rq;
		}
		}
	}
	}


	if (busiest_big)
		return busiest_big;

	env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
	return busiest;
	return busiest;
}
}
#else
#else
@@ -7142,7 +7192,7 @@ static int need_active_balance(struct lb_env *env)
{
{
	struct sched_domain *sd = env->sd;
	struct sched_domain *sd = env->sd;


	if (env->flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST))
	if (env->flags & LBF_HMP_ACTIVE_BALANCE)
		return 1;
		return 1;


	if (env->idle == CPU_NEWLY_IDLE) {
	if (env->idle == CPU_NEWLY_IDLE) {
@@ -7317,7 +7367,7 @@ more_balance:


no_move:
no_move:
	if (!ld_moved) {
	if (!ld_moved) {
		if (!(env.flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST)))
		if (!(env.flags & LBF_HMP_ACTIVE_BALANCE))
			schedstat_inc(sd, lb_failed[idle]);
			schedstat_inc(sd, lb_failed[idle]);


		/*
		/*
@@ -7327,7 +7377,7 @@ no_move:
		 * excessive cache_hot migrations and active balances.
		 * excessive cache_hot migrations and active balances.
		 */
		 */
		if (idle != CPU_NEWLY_IDLE &&
		if (idle != CPU_NEWLY_IDLE &&
		    !(env.flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST)))
		    !(env.flags & LBF_HMP_ACTIVE_BALANCE))
			sd->nr_balance_failed++;
			sd->nr_balance_failed++;


		if (need_active_balance(&env)) {
		if (need_active_balance(&env)) {