
Commit 8e45cb54 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Move load-balancing arguments into helper struct



Passing large sets of similar arguments all around the load-balancer
gets tiresome when you want to modify something. Stick them all in a
helper structure and pass the structure around.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: pjt@google.com
Link: http://lkml.kernel.org/n/tip-5slqz0vhsdzewrfk9eza1aon@git.kernel.org


Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3c7d5184
+93 −84
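The commit message above describes a textbook "parameter object" refactoring: a long list of related arguments is replaced by one context struct that callers fill in once and pass by pointer. As a rough illustration only (plain userspace C with hypothetical names, not the scheduler code), the sketch below shows the before/after shape of that change, including the designated-initializer style load_balance() uses for struct lb_env in the diff.

#include <stdio.h>

/* Before: every argument is passed individually, so adding one more
 * means touching every prototype and every caller. */
static int can_move_before(int item, int src, int dst, int budget, int flags)
{
	return item <= budget && src != dst && !(flags & 1);
}

/* After: the shared arguments live in one helper struct, built once by
 * the caller and passed by pointer to every helper that needs it. */
struct move_env {
	int src;
	int dst;
	int budget;
	int flags;
};

static int can_move_after(int item, struct move_env *env)
{
	return item <= env->budget && env->src != env->dst && !(env->flags & 1);
}

int main(void)
{
	struct move_env env = {
		.src	= 0,
		.dst	= 1,
		.budget	= 10,
		.flags	= 0,
	};

	printf("before: %d, after: %d\n",
	       can_move_before(7, 0, 1, 10, 0),
	       can_move_after(7, &env));
	return 0;
}

Adding a new knob then means adding one struct field and setting it where it matters, instead of editing every prototype and call site, which is exactly the motivation stated in the commit message.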
@@ -3135,13 +3135,25 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 #define LBF_HAD_BREAKS	0x0C	/* count HAD_BREAKs overflows into ABORT */
 #define LBF_ABORT	0x10
 
+struct lb_env {
+	struct sched_domain	*sd;
+
+	int			this_cpu;
+	struct rq		*this_rq;
+
+	struct rq		*busiest_rq;
+	struct cfs_rq		*busiest_cfs_rq;
+
+	enum cpu_idle_type	idle;
+	unsigned long		max_load_move;
+	unsigned int		flags;
+};
+
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
-int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
-		     struct sched_domain *sd, enum cpu_idle_type idle,
-		     int *lb_flags)
+int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot = 0;
 	/*
@@ -3150,13 +3162,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
 	 * 3) are cache-hot on their current CPU.
 	 */
-	if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
+	if (!cpumask_test_cpu(env->this_cpu, tsk_cpus_allowed(p))) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
-	*lb_flags &= ~LBF_ALL_PINNED;
+	env->flags &= ~LBF_ALL_PINNED;
 
-	if (task_running(rq, p)) {
+	if (task_running(env->busiest_rq, p)) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
 		return 0;
 	}
@@ -3167,12 +3179,12 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 2) too many balance attempts have failed.
 	 */
 
-	tsk_cache_hot = task_hot(p, rq->clock_task, sd);
+	tsk_cache_hot = task_hot(p, env->busiest_rq->clock_task, env->sd);
 	if (!tsk_cache_hot ||
-		sd->nr_balance_failed > sd->cache_nice_tries) {
+		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 #ifdef CONFIG_SCHEDSTATS
 		if (tsk_cache_hot) {
-			schedstat_inc(sd, lb_hot_gained[idle]);
+			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
 			schedstat_inc(p, se.statistics.nr_forced_migrations);
 		}
 #endif
@@ -3193,31 +3205,27 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
  *
  * Called with both runqueues locked.
  */
-static int
-move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-	      struct sched_domain *sd, enum cpu_idle_type idle)
+static int move_one_task(struct lb_env *env)
 {
 	struct task_struct *p, *n;
 	struct cfs_rq *cfs_rq;
-	int pinned = 0;
 
-	for_each_leaf_cfs_rq(busiest, cfs_rq) {
+	for_each_leaf_cfs_rq(env->busiest_rq, cfs_rq) {
 		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
 			if (throttled_lb_pair(task_group(p),
-					      busiest->cpu, this_cpu))
+					      env->busiest_rq->cpu, env->this_cpu))
 				break;
 
-			if (!can_migrate_task(p, busiest, this_cpu,
-						sd, idle, &pinned))
+			if (!can_migrate_task(p, env))
 				continue;
 
-			pull_task(busiest, p, this_rq, this_cpu);
+			pull_task(env->busiest_rq, p, env->this_rq, env->this_cpu);
 			/*
 			 * Right now, this is only the second place pull_task()
 			 * is called, so we can safely collect pull_task()
 			 * stats here rather than inside pull_task().
 			 */
-			schedstat_inc(sd, lb_gained[idle]);
+			schedstat_inc(env->sd, lb_gained[env->idle]);
 			return 1;
 		}
 	}
@@ -3225,31 +3233,26 @@ move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
-static unsigned long
-balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-	      unsigned long max_load_move, struct sched_domain *sd,
-	      enum cpu_idle_type idle, int *lb_flags,
-	      struct cfs_rq *busiest_cfs_rq)
+static unsigned long balance_tasks(struct lb_env *env)
 {
 	int loops = 0, pulled = 0;
-	long rem_load_move = max_load_move;
+	long rem_load_move = env->max_load_move;
 	struct task_struct *p, *n;
 
-	if (max_load_move == 0)
+	if (env->max_load_move == 0)
 		goto out;
 
-	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
+	list_for_each_entry_safe(p, n, &env->busiest_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate) {
-			*lb_flags |= LBF_NEED_BREAK;
+			env->flags |= LBF_NEED_BREAK;
 			break;
 		}
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
-		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
-				      lb_flags))
+		    !can_migrate_task(p, env))
 			continue;
 
-		pull_task(busiest, p, this_rq, this_cpu);
+		pull_task(env->busiest_rq, p, env->this_rq, env->this_cpu);
 		pulled++;
 		rem_load_move -= p->se.load.weight;
 
@@ -3259,8 +3262,8 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 * kernels will stop after the first task is pulled to minimize
 		 * the critical section.
 		 */
-		if (idle == CPU_NEWLY_IDLE) {
-			*lb_flags |= LBF_ABORT;
+		if (env->idle == CPU_NEWLY_IDLE) {
+			env->flags |= LBF_ABORT;
 			break;
 		}
 #endif
@@ -3278,9 +3281,9 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	 * so we can safely collect pull_task() stats here rather than
 	 * inside pull_task().
 	 */
-	schedstat_add(sd, lb_gained[idle], pulled);
+	schedstat_add(env->sd, lb_gained[env->idle], pulled);
 
-	return max_load_move - rem_load_move;
+	return env->max_load_move - rem_load_move;
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3363,40 +3366,39 @@ static void update_h_load(long cpu)
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
-static unsigned long
-load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *lb_flags)
+static unsigned long load_balance_fair(struct lb_env *env)
 {
-	long rem_load_move = max_load_move;
-	struct cfs_rq *busiest_cfs_rq;
+	unsigned long max_load_move = env->max_load_move;
+	long rem_load_move = env->max_load_move;
 
 	rcu_read_lock();
-	update_h_load(cpu_of(busiest));
+	update_h_load(cpu_of(env->busiest_rq));
 
-	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
-		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
-		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
+	for_each_leaf_cfs_rq(env->busiest_rq, env->busiest_cfs_rq) {
+		unsigned long busiest_h_load = env->busiest_cfs_rq->h_load;
+		unsigned long busiest_weight = env->busiest_cfs_rq->load.weight;
 		u64 rem_load, moved_load;
 
-		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+		if (env->flags & (LBF_NEED_BREAK|LBF_ABORT))
 			break;
 
 		/*
 		 * empty group or part of a throttled hierarchy
 		 */
-		if (!busiest_cfs_rq->task_weight ||
-		    throttled_lb_pair(busiest_cfs_rq->tg, cpu_of(busiest), this_cpu))
+		if (!env->busiest_cfs_rq->task_weight)
+			continue;
+
+		if (throttled_lb_pair(env->busiest_cfs_rq->tg,
+				      cpu_of(env->busiest_rq),
+				      env->this_cpu))
 			continue;
 
 		rem_load = (u64)rem_load_move * busiest_weight;
 		rem_load = div_u64(rem_load, busiest_h_load + 1);
 
-		moved_load = balance_tasks(this_rq, this_cpu, busiest,
-				rem_load, sd, idle, lb_flags,
-				busiest_cfs_rq);
+		env->max_load_move = rem_load;
 
+		moved_load = balance_tasks(env);
 		if (!moved_load)
 			continue;
 
@@ -3416,15 +3418,10 @@ static inline void update_shares(int cpu)
 {
 }
 
-static unsigned long
-load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *lb_flags)
+static unsigned long load_balance_fair(struct lb_env *env)
 {
-	return balance_tasks(this_rq, this_cpu, busiest,
-			max_load_move, sd, idle, lb_flags,
-			&busiest->cfs);
+	env->busiest_cfs_rq = &env->busiest_rq->cfs;
+	return balance_tasks(env);
 }
 #endif
 
@@ -3435,21 +3432,17 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
  *
  * Called with both runqueues locked.
  */
-static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		      unsigned long max_load_move,
-		      struct sched_domain *sd, enum cpu_idle_type idle,
-		      int *lb_flags)
+static int move_tasks(struct lb_env *env)
 {
+	unsigned long max_load_move = env->max_load_move;
 	unsigned long total_load_moved = 0, load_moved;
 
 	do {
-		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
-				max_load_move - total_load_moved,
-				sd, idle, lb_flags);
-
+		env->max_load_move = max_load_move - total_load_moved;
+		load_moved = load_balance_fair(env);
 		total_load_moved += load_moved;
 
-		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+		if (env->flags & (LBF_NEED_BREAK|LBF_ABORT))
 			break;
 
 #ifdef CONFIG_PREEMPT
@@ -3458,8 +3451,8 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 * kernels will stop after the first task is pulled to minimize
 		 * the critical section.
 		 */
-		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) {
-			*lb_flags |= LBF_ABORT;
+		if (env->idle == CPU_NEWLY_IDLE && env->this_rq->nr_running) {
+			env->flags |= LBF_ABORT;
 			break;
 		}
 #endif
@@ -4459,13 +4452,20 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *balance)
 {
-	int ld_moved, lb_flags = 0, active_balance = 0;
+	int ld_moved, active_balance = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
+	struct lb_env env = {
+		.sd		= sd,
+		.this_cpu	= this_cpu,
+		.this_rq	= this_rq,
+		.idle		= idle,
+	};
+
 	cpumask_copy(cpus, cpu_active_mask);
 
 	schedstat_inc(sd, lb_count[idle]);
@@ -4500,11 +4500,13 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		 * still unbalanced. ld_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
-		lb_flags |= LBF_ALL_PINNED;
+		env.flags |= LBF_ALL_PINNED;
+		env.max_load_move = imbalance;
+		env.busiest_rq = busiest;
+
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
-		ld_moved = move_tasks(this_rq, this_cpu, busiest,
-				      imbalance, sd, idle, &lb_flags);
+		ld_moved = move_tasks(&env);
 		double_rq_unlock(this_rq, busiest);
 		local_irq_restore(flags);
 
@@ -4514,18 +4516,18 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		if (ld_moved && this_cpu != smp_processor_id())
 			resched_cpu(this_cpu);
 
-		if (lb_flags & LBF_ABORT)
+		if (env.flags & LBF_ABORT)
 			goto out_balanced;
 
-		if (lb_flags & LBF_NEED_BREAK) {
-			lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
-			if (lb_flags & LBF_ABORT)
+		if (env.flags & LBF_NEED_BREAK) {
+			env.flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
+			if (env.flags & LBF_ABORT)
 				goto out_balanced;
 			goto redo;
 		}
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
-		if (unlikely(lb_flags & LBF_ALL_PINNED)) {
+		if (unlikely(env.flags & LBF_ALL_PINNED)) {
 			cpumask_clear_cpu(cpu_of(busiest), cpus);
 			if (!cpumask_empty(cpus))
 				goto redo;
@@ -4555,7 +4557,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 					tsk_cpus_allowed(busiest->curr))) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
 							    flags);
-				lb_flags |= LBF_ALL_PINNED;
+				env.flags |= LBF_ALL_PINNED;
 				goto out_one_pinned;
 			}
 
@@ -4608,7 +4610,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 out_one_pinned:
 	/* tune up the balancing interval */
-	if (((lb_flags & LBF_ALL_PINNED) &&
+	if (((env.flags & LBF_ALL_PINNED) &&
 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
 			(sd->balance_interval < sd->max_interval))
 		sd->balance_interval *= 2;
@@ -4718,10 +4720,17 @@ static int active_load_balance_cpu_stop(void *data)
 	}
 
 	if (likely(sd)) {
+		struct lb_env env = {
+			.sd		= sd,
+			.this_cpu	= target_cpu,
+			.this_rq	= target_rq,
+			.busiest_rq	= busiest_rq,
+			.idle		= CPU_IDLE,
+		};
+
 		schedstat_inc(sd, alb_count);
 
-		if (move_one_task(target_rq, target_cpu, busiest_rq,
-				  sd, CPU_IDLE))
+		if (move_one_task(&env))
 			schedstat_inc(sd, alb_pushed);
 		else
 			schedstat_inc(sd, alb_failed);