Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fe82a018 authored by Syed Rameez Mustafa
Browse files

sched/fair: Fix capacity and nr_run comparisons in can_migrate_task()



Kernel version 3.18 and beyond alter the definition of sgs->group_capacity
whereby it reflects the load a group is capable of taking. In previous
kernel versions the term used to refer to the number of effective CPUs
available. This change breaks the comparison of capacity with the number
of running tasks on a group. To fix this convert the capacity metric
before doing the comparison.

Change-Id: I3ebd941273edbcc903a611d9c883773172e86c8e
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent d3846f3c
Loading
Loading
Loading
Loading
+29 −27
Original line number Diff line number Diff line
@@ -7403,7 +7403,7 @@ static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
	int tsk_cache_hot = 0;
	int twf;
	int twf, group_cpus;

	lockdep_assert_held(&env->src_rq->lock);

@@ -7417,32 +7417,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
		return 0;

	if (nr_big_tasks(env->src_rq) &&
			capacity(env->dst_rq) > capacity(env->src_rq) &&
			!is_big_task(p))
		return 0;

	if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
		return 0;

	twf = task_will_fit(p, env->dst_cpu);

	/*
	 * Attempt to not pull tasks that don't fit. We may get lucky and find
	 * one that actually fits.
	 */
	if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
		return 0;

	/*
	 * Group imbalance can sometimes cause work to be pulled across groups
	 * even though the group could have managed the imbalance on its own.
	 * Prevent inter-cluster migrations for big tasks when the number of
	 * tasks is lower than the capacity of the group.
	 */
	if (!twf && env->busiest_nr_running <= env->busiest_grp_capacity)
		return 0;

	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
		int cpu;

@@ -7476,6 +7450,34 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
	/* Record that we found at least one task that could run on dst_cpu */
	env->flags &= ~LBF_ALL_PINNED;

	if (nr_big_tasks(env->src_rq) &&
			capacity(env->dst_rq) > capacity(env->src_rq) &&
			!is_big_task(p))
		return 0;

	if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
		return 0;

	twf = task_will_fit(p, env->dst_cpu);

	/*
	 * Attempt to not pull tasks that don't fit. We may get lucky and find
	 * one that actually fits.
	 */
	if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
		return 0;

	/*
	 * Group imbalance can sometimes cause work to be pulled across groups
	 * even though the group could have managed the imbalance on its own.
	 * Prevent inter-cluster migrations for big tasks when the number of
	 * tasks is lower than the capacity of the group.
	 */
	group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity,
						 SCHED_CAPACITY_SCALE);
	if (!twf && env->busiest_nr_running <= group_cpus)
		return 0;

	if (task_running(env->src_rq, p)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
		return 0;