Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ae3e10ab authored by Linus Torvalds
Browse files

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes:

   - a deadline scheduler related bug fix which triggered a kernel
     warning

   - an RT_RUNTIME_SHARE fix

   - a stop_machine preemption fix

   - a potential NULL dereference fix in sched_domain_debug_one()"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/rt: Restore rt_runtime after disabling RT_RUNTIME_SHARE
  sched/deadline: Update rq_clock of later_rq when pushing a task
  stop_machine: Disable preemption after queueing stopper threads
  sched/topology: Check variable group before dereferencing it
parents 0634922a f3d133ee
Loading
Loading
Loading
Loading
+7 −1
Original line number Diff line number Diff line
@@ -2090,8 +2090,14 @@ static int push_dl_task(struct rq *rq)
	sub_rq_bw(&next_task->dl, &rq->dl);
	set_task_cpu(next_task, later_rq->cpu);
	add_rq_bw(&next_task->dl, &later_rq->dl);

	/*
	 * Update the later_rq clock here, because the clock is used
	 * by the cpufreq_update_util() inside __add_running_bw().
	 */
	update_rq_clock(later_rq);
	add_running_bw(&next_task->dl, &later_rq->dl);
	activate_task(later_rq, next_task, 0);
	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
	ret = 1;

	resched_curr(later_rq);
+2 −0
Original line number Diff line number Diff line
@@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
+1 −1
Original line number Diff line number Diff line
@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

+9 −1
Original line number Diff line number Diff line
@@ -260,6 +260,15 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);
	/*
	 * The waking up of stopper threads has to happen
	 * in the same scheduling context as the queueing.
	 * Otherwise, there is a possibility of one of the
	 * above stoppers being woken up by another CPU,
	 * and preempting us. This will cause us to not
	 * wake up the other stopper forever.
	 */
	preempt_disable();
unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
	}

	if (!err) {
		preempt_disable();
		wake_up_q(&wakeq);
		preempt_enable();
	}