Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1e5a7405 authored by Peter Zijlstra; committed by Ingo Molnar
Browse files

sched: Fix cross-sched-class wakeup preemption



Instead of dealing with sched classes inside each check_preempt_curr()
implementation, pull out this logic into the generic wakeup preemption
path.

This fixes a hang in KVM (and others) where we are waiting for the
stop machine thread to run ...

Reported-by: Markus Trippelsdorf <markus@trippelsdorf.de>
Tested-by: Marcelo Tosatti <mtosatti@redhat.com>
Tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1288891946.2039.31.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2d467090
Loading
Loading
Loading
Loading
+26 −11
Original line number Original line Diff line number Diff line
@@ -560,18 +560,8 @@ struct rq {


static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);


Removed (old inline definition):

static inline
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	rq->curr->sched_class->check_preempt_curr(rq, p, flags);

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (test_tsk_need_resched(p))
		rq->skip_clock_update = 1;
}

Added (forward declaration; the definition moves below check_class_changed):

static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);


static inline int cpu_of(struct rq *rq)
static inline int cpu_of(struct rq *rq)
{
{
@@ -2118,6 +2108,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
		p->sched_class->prio_changed(rq, p, oldprio, running);
		p->sched_class->prio_changed(rq, p, oldprio, running);
}
}


/*
 * Decide whether the newly woken/queued task @p should preempt the task
 * currently running on @rq, handling the cross-sched-class case here so
 * individual classes' check_preempt_curr() hooks don't have to.
 */
static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		/* Same class: delegate the preemption decision to the class. */
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		/*
		 * Cross-class wakeup: walk the class list in priority order.
		 * Hitting curr's class first means @p's class is lower
		 * priority and cannot preempt; hitting @p's class first
		 * means it outranks curr's class, so force a reschedule.
		 * (NOTE(review): relies on for_each_class() iterating from
		 * the highest-priority class downward.)
		 */
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_task(rq->curr);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}

#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
/*
/*
 * Is this task likely cache-hot:
 * Is this task likely cache-hot:
+0 −6
Original line number Original line Diff line number Diff line
@@ -1654,12 +1654,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int scale = cfs_rq->nr_running >= sched_nr_latency;
	int scale = cfs_rq->nr_running >= sched_nr_latency;


	if (unlikely(rt_prio(p->prio)))
		goto preempt;

	if (unlikely(p->sched_class != &fair_sched_class))
		return;

	if (unlikely(se == pse))
	if (unlikely(se == pse))
		return;
		return;


+1 −1
Original line number Original line Diff line number Diff line
@@ -19,7 +19,7 @@ select_task_rq_stop(struct rq *rq, struct task_struct *p,
static void
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
{
	resched_task(rq->curr); /* we preempt everything */
	/* we're never preempted */
}
}


static struct task_struct *pick_next_task_stop(struct rq *rq)
static struct task_struct *pick_next_task_stop(struct rq *rq)