Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit daae677f authored by Linus Torvalds
Browse files

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar.

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix OOPS when build_sched_domains() percpu allocation fails
  sched: Fix more load-balancing fallout
parents 06fc5d3d fb2cf2c6
Changed files (3):
+16 −6
kernel/sched/core.c
@@ -6405,16 +6405,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgp)
				kfree(*per_cpu_ptr(sdd->sgp, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgp);
		sdd->sgp = NULL;
	}
}

+10 −8
kernel/sched/fair.c
@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se))
		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
#endif
	cfs_rq->nr_running++;
}
@@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env)

static unsigned long task_h_load(struct task_struct *p);

static const unsigned int sched_nr_migrate_break = 32;

/*
 * move_tasks tries to move up to load_move weighted load from busiest to
 * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env)

		/* take a breather every nr_migrate tasks */
		if (env->loop > env->loop_break) {
			env->loop_break += sysctl_sched_nr_migrate;
			env->loop_break += sched_nr_migrate_break;
			env->flags |= LBF_NEED_BREAK;
			break;
		}
@@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env)

		load = task_h_load(p);

		if (load < 16 && !env->sd->nr_balance_failed)
		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
			goto next;

		if ((load / 2) > env->load_move)
@@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
		.dst_cpu	= this_cpu,
		.dst_rq		= this_rq,
		.idle		= idle,
		.loop_break	= sysctl_sched_nr_migrate,
		.loop_break	= sched_nr_migrate_break,
	};

	cpumask_copy(cpus, cpu_active_mask);
@@ -4448,7 +4450,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
		env.load_move	= imbalance;
		env.src_cpu	= busiest->cpu;
		env.src_rq	= busiest;
		env.loop_max = busiest->nr_running;
		env.loop_max	= min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);

more_balance:
		local_irq_save(flags);
+1 −0
kernel/sched/features.h
@@ -68,3 +68,4 @@ SCHED_FEAT(TTWU_QUEUE, true)

SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)