
Commit d8ac8971 authored by Matt Fleming, committed by Ingo Molnar

sched/core: Add wrappers for lockdep_(un)pin_lock()



In preparation for adding diagnostic checks to catch missing calls to
update_rq_clock(), provide wrappers for (re)pinning and unpinning
rq->lock.

Because the pending diagnostic checks allow state to be maintained in
rq_flags across pin contexts, swap the 'struct pin_cookie' arguments
for 'struct rq_flags *'.

Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luca Abeni <luca.abeni@unitn.it>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Cc: Yuyang Du <yuyang.du@intel.com>
Link: http://lkml.kernel.org/r/20160921133813.31976-5-matt@codeblueprint.co.uk


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c8d7dabf
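
For reference, the wrappers themselves are added to kernel/sched/sched.h (that hunk is not expanded in the view below). They are thin shims around the lockdep calls that stash the pin cookie in the 'struct rq_flags' the caller already holds; roughly the following sketch, based on the upstream commit:

/* Sketch of the new wrappers (kernel/sched/sched.h); 'struct rq_flags'
 * predates this commit and already carries the lockdep pin cookie. */
struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);
}

Funnelling every pin/unpin through 'struct rq_flags *' gives the follow-up diagnostic patch a single place to keep per-pin state (such as a clock-update flag) without touching each call site again.
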
kernel/sched/core.c  +40 −40
@@ -185,7 +185,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			rf->cookie = lockdep_pin_lock(&rq->lock);
+			rq_pin_lock(rq, rf);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -225,7 +225,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 		 * pair with the WMB to ensure we must then also see migrating.
 		 */
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			rf->cookie = lockdep_pin_lock(&rq->lock);
+			rq_pin_lock(rq, rf);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -1195,9 +1195,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		 * OK, since we're going to drop the lock immediately
 		 * afterwards anyway.
 		 */
-		lockdep_unpin_lock(&rq->lock, rf.cookie);
+		rq_unpin_lock(rq, &rf);
 		rq = move_queued_task(rq, p, dest_cpu);
-		lockdep_repin_lock(&rq->lock, rf.cookie);
+		rq_repin_lock(rq, &rf);
 	}
 out:
 	task_rq_unlock(rq, p, &rf);
@@ -1690,7 +1690,7 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
  * Mark the task runnable and perform wakeup-preemption.
  */
 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
-			   struct pin_cookie cookie)
+			   struct rq_flags *rf)
 {
 	check_preempt_curr(rq, p, wake_flags);
 	p->state = TASK_RUNNING;
@@ -1702,9 +1702,9 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
 		 * Our task @p is fully woken up and running; so its safe to
 		 * drop the rq->lock, hereafter rq is only used for statistics.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		p->sched_class->task_woken(rq, p);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 	}
 
 	if (rq->idle_stamp) {
@@ -1723,7 +1723,7 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
 
 static void
 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
-		 struct pin_cookie cookie)
+		 struct rq_flags *rf)
 {
 	int en_flags = ENQUEUE_WAKEUP;
 
@@ -1738,7 +1738,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 #endif
 
 	ttwu_activate(rq, p, en_flags);
-	ttwu_do_wakeup(rq, p, wake_flags, cookie);
+	ttwu_do_wakeup(rq, p, wake_flags, rf);
 }
 
 /*
@@ -1757,7 +1757,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
-		ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
+		ttwu_do_wakeup(rq, p, wake_flags, &rf);
 		ret = 1;
 	}
 	__task_rq_unlock(rq, &rf);
@@ -1770,15 +1770,15 @@ void sched_ttwu_pending(void)
 {
 	struct rq *rq = this_rq();
 	struct llist_node *llist = llist_del_all(&rq->wake_list);
-	struct pin_cookie cookie;
 	struct task_struct *p;
 	unsigned long flags;
+	struct rq_flags rf;
 
 	if (!llist)
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	cookie = lockdep_pin_lock(&rq->lock);
+	rq_pin_lock(rq, &rf);
 
 	while (llist) {
 		int wake_flags = 0;
@@ -1789,10 +1789,10 @@ void sched_ttwu_pending(void)
 		if (p->sched_remote_wakeup)
 			wake_flags = WF_MIGRATED;
 
-		ttwu_do_activate(rq, p, wake_flags, cookie);
+		ttwu_do_activate(rq, p, wake_flags, &rf);
 	}
 
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_unpin_lock(rq, &rf);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1881,7 +1881,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct pin_cookie cookie;
+	struct rq_flags rf;
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
@@ -1892,9 +1892,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 #endif
 
 	raw_spin_lock(&rq->lock);
-	cookie = lockdep_pin_lock(&rq->lock);
-	ttwu_do_activate(rq, p, wake_flags, cookie);
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_pin_lock(rq, &rf);
+	ttwu_do_activate(rq, p, wake_flags, &rf);
+	rq_unpin_lock(rq, &rf);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -2111,7 +2111,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.
  */
-static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
+static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
 {
 	struct rq *rq = task_rq(p);
 
@@ -2128,11 +2128,11 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie
 		 * disabled avoiding further scheduler activity on it and we've
 		 * not yet picked a replacement task.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&p->pi_lock);
 		raw_spin_lock(&rq->lock);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 	}
 
 	if (!(p->state & TASK_NORMAL))
@@ -2143,7 +2143,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie
 	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
-	ttwu_do_wakeup(rq, p, 0, cookie);
+	ttwu_do_wakeup(rq, p, 0, rf);
 	ttwu_stat(p, smp_processor_id(), 0);
 out:
 	raw_spin_unlock(&p->pi_lock);
@@ -2590,9 +2590,9 @@ void wake_up_new_task(struct task_struct *p)
 		 * Nothing relies on rq->lock after this, so its fine to
 		 * drop it.
 		 */
-		lockdep_unpin_lock(&rq->lock, rf.cookie);
+		rq_unpin_lock(rq, &rf);
 		p->sched_class->task_woken(rq, p);
-		lockdep_repin_lock(&rq->lock, rf.cookie);
+		rq_repin_lock(rq, &rf);
 	}
 #endif
 	task_rq_unlock(rq, p, &rf);
@@ -2861,7 +2861,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
  */
 static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
-	       struct task_struct *next, struct pin_cookie cookie)
+	       struct task_struct *next, struct rq_flags *rf)
 {
 	struct mm_struct *mm, *oldmm;
 
@@ -2893,7 +2893,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * of the scheduler it's an obvious special-case), so we
 	 * do an early lockdep release here:
 	 */
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_unpin_lock(rq, rf);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	/* Here we just switch the register state and the stack. */
@@ -3257,7 +3257,7 @@ static inline void schedule_debug(struct task_struct *prev)
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	const struct sched_class *class = &fair_sched_class;
 	struct task_struct *p;
@@ -3268,20 +3268,20 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie
 	 */
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq, prev, cookie);
+		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
 
 		/* assumes fair_sched_class->next == idle_sched_class */
 		if (unlikely(!p))
-			p = idle_sched_class.pick_next_task(rq, prev, cookie);
+			p = idle_sched_class.pick_next_task(rq, prev, rf);
 
 		return p;
 	}
 
 again:
 	for_each_class(class) {
-		p = class->pick_next_task(rq, prev, cookie);
+		p = class->pick_next_task(rq, prev, rf);
 		if (p) {
 			if (unlikely(p == RETRY_TASK))
 				goto again;
@@ -3335,7 +3335,7 @@ static void __sched notrace __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
-	struct pin_cookie cookie;
+	struct rq_flags rf;
 	struct rq *rq;
 	int cpu;
 
@@ -3358,7 +3358,7 @@ static void __sched notrace __schedule(bool preempt)
 	 */
 	smp_mb__before_spinlock();
 	raw_spin_lock(&rq->lock);
-	cookie = lockdep_pin_lock(&rq->lock);
+	rq_pin_lock(rq, &rf);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
@@ -3380,7 +3380,7 @@ static void __sched notrace __schedule(bool preempt)
 
 				to_wakeup = wq_worker_sleeping(prev);
 				if (to_wakeup)
-					try_to_wake_up_local(to_wakeup, cookie);
+					try_to_wake_up_local(to_wakeup, &rf);
 			}
 		}
 		switch_count = &prev->nvcsw;
@@ -3389,7 +3389,7 @@ static void __sched notrace __schedule(bool preempt)
 	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
-	next = pick_next_task(rq, prev, cookie);
+	next = pick_next_task(rq, prev, &rf);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
@@ -3400,9 +3400,9 @@ static void __sched notrace __schedule(bool preempt)
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
-		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
+		rq = context_switch(rq, prev, next, &rf); /* unlocks the rq */
 	} else {
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, &rf);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
@@ -5521,7 +5521,7 @@ static void migrate_tasks(struct rq *dead_rq)
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
-	struct pin_cookie cookie;
+	struct rq_flags rf;
 	int dest_cpu;
 
 	/*
@@ -5553,8 +5553,8 @@ static void migrate_tasks(struct rq *dead_rq)
 		/*
 		 * pick_next_task assumes pinned rq->lock.
 		 */
-		cookie = lockdep_pin_lock(&rq->lock);
-		next = pick_next_task(rq, &fake_task, cookie);
+		rq_pin_lock(rq, &rf);
+		next = pick_next_task(rq, &fake_task, &rf);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
@@ -5567,7 +5567,7 @@ static void migrate_tasks(struct rq *dead_rq)
 		 * because !cpu_active at this point, which means load-balance
 		 * will not interfere. Also, stop-machine.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, &rf);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&next->pi_lock);
 		raw_spin_lock(&rq->lock);
kernel/sched/deadline.c  +5 −5
@@ -663,9 +663,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		 * Nothing relies on rq->lock after this, so its safe to drop
 		 * rq->lock.
 		 */
-		lockdep_unpin_lock(&rq->lock, rf.cookie);
+		rq_unpin_lock(rq, &rf);
 		push_dl_task(rq);
-		lockdep_repin_lock(&rq->lock, rf.cookie);
+		rq_repin_lock(rq, &rf);
 	}
 #endif
 
@@ -1118,7 +1118,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
 }
 
 struct task_struct *
-pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct sched_dl_entity *dl_se;
 	struct task_struct *p;
@@ -1133,9 +1133,9 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie coo
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		pull_dl_task(rq);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 		/*
 		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
 		 * means a stop task can slip in, in which case we need to
kernel/sched/fair.c  +3 −3
@@ -6213,7 +6213,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 }
 
 static struct task_struct *
-pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
@@ -6326,9 +6326,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie c
 	 * further scheduler activity on it and we're being very careful to
 	 * re-start the picking loop.
 	 */
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_unpin_lock(rq, rf);
 	new_tasks = idle_balance(rq);
-	lockdep_repin_lock(&rq->lock, cookie);
+	rq_repin_lock(rq, rf);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
kernel/sched/idle_task.c  +1 −1
@@ -24,7 +24,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 }
 
 static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	put_prev_task(rq, prev);
 	update_idle_core(rq);
kernel/sched/rt.c  +3 −3
@@ -1523,7 +1523,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 }
 
 static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -1535,9 +1535,9 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie coo
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		pull_rt_task(rq);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a dl or stop task can slip in, in which case we need