
Commit fcc1d2a9 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Fixes and two late cleanups"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cleanups: Add load balance cpumask pointer to 'struct lb_env'
  sched: Fix comment about PREEMPT_ACTIVE bit location
  sched: Fix minor code style issues
  sched: Use task_rq_unlock() in __sched_setscheduler()
  sched/numa: Add SD_PERFER_SIBLING to CPU domain
parents bd463a06 b9403130
+1 −1
@@ -22,7 +22,7 @@
 *
 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
 * - bit 26 is the NMI_MASK
- * - bit 28 is the PREEMPT_ACTIVE flag
+ * - bit 27 is the PREEMPT_ACTIVE flag
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
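The one-line change above only corrects the comment: with the hardirq count in bits 16-25 and NMI_MASK at bit 26, the next free bit is 27, not 28. A quick standalone sketch, recomputing the masks from the layout the comment describes (the _BITS/_SHIFT names mirror the kernel's convention, but everything here is derived independently for illustration):

	#include <stdio.h>

	/* Bit layout from the comment: preempt count 0-7, softirq 8-15,
	 * hardirq 16-25, NMI 26 -- so PREEMPT_ACTIVE lands on bit 27. */
	#define PREEMPT_BITS	8
	#define SOFTIRQ_BITS	8
	#define HARDIRQ_BITS	10
	#define NMI_BITS	1

	#define PREEMPT_SHIFT	0
	#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
	#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
	#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

	int main(void)
	{
		printf("PREEMPT_MASK:   0x%08x\n", ((1u << PREEMPT_BITS) - 1) << PREEMPT_SHIFT);
		printf("SOFTIRQ_MASK:   0x%08x\n", ((1u << SOFTIRQ_BITS) - 1) << SOFTIRQ_SHIFT);
		printf("HARDIRQ_MASK:   0x%08x\n", ((1u << HARDIRQ_BITS) - 1) << HARDIRQ_SHIFT);
		printf("NMI_MASK:       0x%08x\n", ((1u << NMI_BITS) - 1) << NMI_SHIFT);
		printf("PREEMPT_ACTIVE: bit %d\n", NMI_SHIFT + NMI_BITS);
		return 0;
	}

Running it prints PREEMPT_MASK 0x000000ff and SOFTIRQ_MASK 0x0000ff00, matching the comment's values, and places PREEMPT_ACTIVE at bit 27 as the fix states.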
+1 −0
@@ -164,6 +164,7 @@ int arch_update_cpu_topology(void);
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
+				| 1*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
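The initializer above uses the kernel's 0*FLAG / 1*FLAG idiom: every flag stays listed in the table, so enabling one, as this patch does for SD_PREFER_SIBLING, is a one-character change and the full flag set stays greppable. A minimal sketch of the same idiom with made-up flag values (illustrative only, not the real SD_* constants):

	#include <stdio.h>

	#define SD_LOAD_BALANCE		0x0001	/* illustrative values */
	#define SD_BALANCE_NEWIDLE	0x0002
	#define SD_SERIALIZE		0x0004
	#define SD_PREFER_SIBLING	0x0008

	/* Same idiom as the CPU-domain initializer: every flag appears,
	 * multiplied by 0 (disabled) or 1 (enabled). */
	static const unsigned int domain_flags =
		  1 * SD_LOAD_BALANCE
		| 1 * SD_BALANCE_NEWIDLE
		| 0 * SD_SERIALIZE
		| 1 * SD_PREFER_SIBLING;

	int main(void)
	{
		printf("domain_flags = 0x%04x\n", domain_flags);
		return 0;
	}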
+1 −3
@@ -4340,9 +4340,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
	 */
	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
 			param->sched_priority == p->rt_priority))) {
-
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
		return 0;
	}
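This hunk replaces the open-coded unlock pair, __task_rq_unlock() followed by raw_spin_unlock_irqrestore() on p->pi_lock, with the task_rq_unlock() helper that wraps exactly that sequence, so behavior is unchanged. A minimal userspace sketch of the same cleanup pattern, assuming a two-level lock order (the pthread-based struct and names are illustrative, not scheduler code):

	#include <pthread.h>
	#include <stdio.h>

	struct task {
		pthread_mutex_t pi_lock;
		pthread_mutex_t rq_lock;	/* stand-in for the runqueue lock */
	};

	/* Lock order: pi_lock outside, rq_lock inside (mirroring the
	 * scheduler's p->pi_lock then rq->lock ordering). */
	static void task_rq_lock(struct task *p)
	{
		pthread_mutex_lock(&p->pi_lock);
		pthread_mutex_lock(&p->rq_lock);
	}

	/* The cleanup in the hunk above: one helper releases both locks
	 * in the right order, so callers cannot get the sequence wrong. */
	static void task_rq_unlock(struct task *p)
	{
		pthread_mutex_unlock(&p->rq_lock);
		pthread_mutex_unlock(&p->pi_lock);
	}

	int main(void)
	{
		struct task t = {
			.pi_lock = PTHREAD_MUTEX_INITIALIZER,
			.rq_lock = PTHREAD_MUTEX_INITIALIZER,
		};

		task_rq_lock(&t);
		puts("both locks held");
		task_rq_unlock(&t);
		return 0;
	}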

+14 −15
@@ -3069,6 +3069,9 @@ struct lb_env {
	int			new_dst_cpu;
	enum cpu_idle_type	idle;
	long			imbalance;
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask		*cpus;
+
	unsigned int		flags;

	unsigned int		loop;
@@ -3653,8 +3656,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 */
static inline void update_sg_lb_stats(struct lb_env *env,
			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
{
	unsigned long nr_running, max_nr_running, min_nr_running;
	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
	max_nr_running = 0;
	min_nr_running = ~0UL;

-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
		struct rq *rq = cpu_rq(i);

		nr_running = rq->nr_running;
@@ -3800,7 +3802,6 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 * @sds: variable to hold the statistics for this sched_domain.
 */
static inline void update_sd_lb_stats(struct lb_env *env,
-				      const struct cpumask *cpus,
					int *balance, struct sd_lb_stats *sds)
{
	struct sched_domain *child = env->sd->child;
@@ -3818,8 +3819,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,

		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-				   cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);

		if (local_group && !(*balance))
			return;
@@ -4055,7 +4055,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 * to restore balance.
 *
 * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
 * @balance: Pointer to a variable indicating if this_cpu
 *	is the appropriate cpu to perform load balancing at this_level.
 *
@@ -4065,7 +4064,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 *		   put to idle by rebalancing its tasks onto our group.
 */
static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
{
	struct sd_lb_stats sds;

@@ -4075,7 +4074,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
	 * Compute the various statistics relavent for load balancing at
	 * this level.
	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);

	/*
	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */
static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
{
	struct rq *busiest = NULL, *rq;
	unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
		if (!capacity)
			capacity = fix_small_capacity(env->sd, group);

-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
			continue;

		rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
		.dst_grpmask    = sched_group_cpus(sd->groups),
		.idle		= idle,
		.loop_break	= sched_nr_migrate_break,
+		.cpus		= cpus,
	};

	cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
	schedstat_inc(sd, lb_count[idle]);

redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);

	if (*balance == 0)
		goto out_balanced;
@@ -4270,7 +4269,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
		goto out_balanced;
	}

-	busiest = find_busiest_queue(&env, group, cpus);
+	busiest = find_busiest_queue(&env, group);
	if (!busiest) {
		schedstat_inc(sd, lb_nobusyq[idle]);
		goto out_balanced;
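Every hunk in this file follows from the first one: once struct lb_env carries the cpumask pointer, find_busiest_group(), update_sd_lb_stats(), update_sg_lb_stats() and find_busiest_queue() stop threading 'cpus' through their parameter lists and read env->cpus instead. A minimal sketch of that refactoring pattern outside the kernel (the struct and function names here are illustrative, not the real scheduler API):

	#include <stdio.h>

	/* Before the refactoring, every helper took 'mask' as an extra
	 * parameter. After (shown here), shared call-chain state lives in
	 * one environment struct, mirroring the lb_env change above. */
	struct balance_env {
		unsigned long cpus;	/* toy stand-in for struct cpumask * */
		long imbalance;
	};

	static int busiest_in_group(struct balance_env *env, unsigned long group)
	{
		/* Consults env->cpus instead of a passed-in mask
		 * (__builtin_ctzl is a GCC/Clang builtin). */
		unsigned long candidates = group & env->cpus;
		return candidates ? __builtin_ctzl(candidates) : -1;
	}

	int main(void)
	{
		struct balance_env env = { .cpus = 0xf0, .imbalance = 0 };

		printf("busiest cpu: %d\n", busiest_in_group(&env, 0x30));
		return 0;
	}

The win is the same as in the patch: one fewer parameter on four call signatures, with the mask set up once in load_balance() where the env struct is initialized.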
+5 −5

File changed; contains only whitespace changes.