Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 65314ed0 authored by Linus Torvalds
Browse files

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two fixes related to rq-clock warnings, plus a fix for a cgroups-related crash"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cgroup: Move sched_online_group() back into css_online() to fix crash
  sched/fair: Update rq clock before changing a task's CPU affinity
  sched/core: Fix update_rq_clock() splat on hotplug (and suspend/resume)
parents 3f26b0c8 96b77745
Loading
Loading
Loading
Loading
+17 −12
Original line number Diff line number Diff line
@@ -1090,6 +1090,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
	int ret = 0;

	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	if (p->flags & PF_KTHREAD) {
		/*
@@ -5560,7 +5561,7 @@ static void migrate_tasks(struct rq *dead_rq)
{
	struct rq *rq = dead_rq;
	struct task_struct *next, *stop = rq->stop;
	struct rq_flags rf, old_rf;
	struct rq_flags rf;
	int dest_cpu;

	/*
@@ -5579,7 +5580,9 @@ static void migrate_tasks(struct rq *dead_rq)
	 * class method both need to have an up-to-date
	 * value of rq->clock[_task]
	 */
	rq_pin_lock(rq, &rf);
	update_rq_clock(rq);
	rq_unpin_lock(rq, &rf);

	for (;;) {
		/*
@@ -5592,7 +5595,7 @@ static void migrate_tasks(struct rq *dead_rq)
		/*
		 * pick_next_task() assumes pinned rq->lock:
		 */
		rq_pin_lock(rq, &rf);
		rq_repin_lock(rq, &rf);
		next = pick_next_task(rq, &fake_task, &rf);
		BUG_ON(!next);
		next->sched_class->put_prev_task(rq, next);
@@ -5621,13 +5624,6 @@ static void migrate_tasks(struct rq *dead_rq)
			continue;
		}

		/*
		 * __migrate_task() may return with a different
		 * rq->lock held and a new cookie in 'rf', but we need
		 * to preserve rf::clock_update_flags for 'dead_rq'.
		 */
		old_rf = rf;

		/* Find suitable destination for @next, with force if needed. */
		dest_cpu = select_fallback_rq(dead_rq->cpu, next);

@@ -5636,7 +5632,6 @@ static void migrate_tasks(struct rq *dead_rq)
			raw_spin_unlock(&rq->lock);
			rq = dead_rq;
			raw_spin_lock(&rq->lock);
			rf = old_rf;
		}
		raw_spin_unlock(&next->pi_lock);
	}
@@ -6819,11 +6814,20 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	sched_online_group(tg, parent);

	return &tg->css;
}

/* Expose task group only after completing cgroup initialization */
static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	struct task_group *parent = css_tg(css->parent);

	/*
	 * sched_online_group() was moved here from css_alloc() (see this
	 * commit's summary) so the task group is published to the scheduler
	 * only once cgroup core has finished initializing the css — doing
	 * it earlier could race with lookups and crash.
	 *
	 * NOTE(review): css->parent is presumably NULL for the root css,
	 * so the root task group is never onlined here — confirm against
	 * css_tg()/cgroup core.
	 */
	if (parent)
		sched_online_group(tg, parent);
	return 0;
}

static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
@@ -7229,6 +7233,7 @@ static struct cftype cpu_files[] = {

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_online	= cpu_cgroup_css_online,
	.css_released	= cpu_cgroup_css_released,
	.css_free	= cpu_cgroup_css_free,
	.fork		= cpu_cgroup_fork,