
Commit f52be570 authored by Peter Zijlstra, committed by Ingo Molnar

locking/lockdep: Untangle xhlock history save/restore from task independence



Where XHLOCK_{SOFT,HARD} are save/restore points in the xhlocks[] to
ensure the temporal IRQ events don't interact with task state, the
XHLOCK_PROC is a fundamentally different beast that just happens to share
the interface.

The purpose of XHLOCK_PROC is to annotate independent execution inside
one task. For example workqueues, where each work should appear to run in its
own 'pristine' 'task'.

Remove XHLOCK_PROC in favour of its own interface to avoid confusion.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: david@fromorbit.com
Cc: johannes@sipsolutions.net
Cc: kernel-team@lge.com
Cc: oleg@redhat.com
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/20170829085939.ggmb6xiohw67micb@hirez.programming.kicks-ass.net


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7b3d61cc
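The shape of the change at the call sites can be sketched in a stand-alone way. The snippet below is purely illustrative and not kernel code: the lockdep annotations are stubbed out with printf(), and irq_handler()/run_one_work() are made-up placeholders; only the names XHLOCK_HARD, crossrelease_hist_start/end() and lockdep_invariant_state() come from the patch. IRQ entry/exit keeps the paired save/restore annotation, while a work item gets a single "wipe the slate" call with no matching end.

#include <stdio.h>
#include <stdbool.h>

/* Userspace stand-ins for the kernel annotations touched by this patch;
 * the real implementations live in kernel/locking/lockdep.c. */
enum xhlock_context_t { XHLOCK_HARD, XHLOCK_SOFT, XHLOCK_CTX_NR };

static void crossrelease_hist_start(enum xhlock_context_t c) { printf("save history, ctx %d\n", c); }
static void crossrelease_hist_end(enum xhlock_context_t c)   { printf("restore history, ctx %d\n", c); }
static void lockdep_invariant_state(bool force)              { printf("wipe history (force=%d)\n", force); }

static void irq_handler(void)  { printf("  IRQ takes temporary locks\n"); }
static void run_one_work(void) { printf("  work runs as if in a pristine task\n"); }

int main(void)
{
	/* IRQs nest inside the interrupted task: save on entry, restore on exit. */
	crossrelease_hist_start(XHLOCK_HARD);
	irq_handler();
	crossrelease_hist_end(XHLOCK_HARD);

	/* A work item is independent of prior task state: no save/restore pair,
	 * just an invariant point before it starts (cf. process_one_work()). */
	lockdep_invariant_state(true);
	run_one_work();
	return 0;
}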
+2 −2
@@ -26,7 +26,7 @@
 # define trace_hardirq_enter()			\
 do {						\
 	current->hardirq_context++;		\
-	crossrelease_hist_start(XHLOCK_HARD, 0);\
+	crossrelease_hist_start(XHLOCK_HARD);	\
 } while (0)
 # define trace_hardirq_exit()			\
 do {						\
@@ -36,7 +36,7 @@ do { \
 # define lockdep_softirq_enter()		\
 do {						\
 	current->softirq_context++;		\
-	crossrelease_hist_start(XHLOCK_SOFT, 0);\
+	crossrelease_hist_start(XHLOCK_SOFT);	\
 } while (0)
 # define lockdep_softirq_exit()			\
 do {						\
+4 −3
@@ -551,7 +551,6 @@ struct pin_cookie { };
 enum xhlock_context_t {
 	XHLOCK_HARD,
 	XHLOCK_SOFT,
-	XHLOCK_PROC,
 	XHLOCK_CTX_NR,
 };
 
@@ -580,8 +579,9 @@ extern void lock_commit_crosslock(struct lockdep_map *lock);
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
 	{ .name = (_name), .key = (void *)(_key), .cross = 0, }
 
-extern void crossrelease_hist_start(enum xhlock_context_t c, bool force);
+extern void crossrelease_hist_start(enum xhlock_context_t c);
 extern void crossrelease_hist_end(enum xhlock_context_t c);
+extern void lockdep_invariant_state(bool force);
 extern void lockdep_init_task(struct task_struct *task);
 extern void lockdep_free_task(struct task_struct *task);
 #else /* !CROSSRELEASE */
@@ -593,8 +593,9 @@ extern void lockdep_free_task(struct task_struct *task);
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
 	{ .name = (_name), .key = (void *)(_key), }
 
-static inline void crossrelease_hist_start(enum xhlock_context_t c, bool force) {}
+static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
 static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
+static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
 #endif /* CROSSRELEASE */
+38 −41
@@ -4623,13 +4623,8 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	/*
 	 * The lock history for each syscall should be independent. So wipe the
 	 * slate clean on return to userspace.
-	 *
-	 * crossrelease_hist_end() works well here even when getting here
-	 * without starting (i.e. just after forking), because it rolls back
-	 * the index to point to the last entry, which is already invalid.
 	 */
-	crossrelease_hist_end(XHLOCK_PROC);
-	crossrelease_hist_start(XHLOCK_PROC, false);
+	lockdep_invariant_state(false);
 }
 
 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
@@ -4723,19 +4718,47 @@ static inline void invalidate_xhlock(struct hist_lock *xhlock)
 }
 
 /*
- * Lock history stacks; we have 3 nested lock history stacks:
+ * Lock history stacks; we have 2 nested lock history stacks:
  *
  *   HARD(IRQ)
  *   SOFT(IRQ)
- *   PROC(ess)
 *
 * The thing is that once we complete a HARD/SOFT IRQ the future task locks
 * should not depend on any of the locks observed while running the IRQ.  So
 * what we do is rewind the history buffer and erase all our knowledge of that
 * temporal event.
- *
- * The PROCess one is special though; it is used to annotate independence
- * inside a task.
+ */
+
+void crossrelease_hist_start(enum xhlock_context_t c)
+{
+	struct task_struct *cur = current;
+
+	if (!cur->xhlocks)
+		return;
+
+	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
+	cur->hist_id_save[c]    = cur->hist_id;
+}
+
+void crossrelease_hist_end(enum xhlock_context_t c)
+{
+	struct task_struct *cur = current;
+
+	if (cur->xhlocks) {
+		unsigned int idx = cur->xhlock_idx_hist[c];
+		struct hist_lock *h = &xhlock(idx);
+
+		cur->xhlock_idx = idx;
+
+		/* Check if the ring was overwritten. */
+		if (h->hist_id != cur->hist_id_save[c])
+			invalidate_xhlock(h);
+	}
+}
+
+/*
+ * lockdep_invariant_state() is used to annotate independence inside a task, to
+ * make one task look like multiple independent 'tasks'.
 *
 * Take for instance workqueues; each work is independent of the last. The
 * completion of a future work does not depend on the completion of a past work
@@ -4758,40 +4781,14 @@ static inline void invalidate_xhlock(struct hist_lock *xhlock)
 * entry. Similarly, independence per-definition means it does not depend on
 * prior state.
 */
-void crossrelease_hist_start(enum xhlock_context_t c, bool force)
+void lockdep_invariant_state(bool force)
 {
-	struct task_struct *cur = current;
-
-	if (!cur->xhlocks)
-		return;
-
 	/*
 	 * We call this at an invariant point, no current state, no history.
+	 * Verify the former, enforce the latter.
 	 */
-	if (c == XHLOCK_PROC) {
-		/* verified the former, ensure the latter */
-		WARN_ON_ONCE(!force && cur->lockdep_depth);
-		invalidate_xhlock(&xhlock(cur->xhlock_idx));
-	}
-
-	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-	cur->hist_id_save[c]    = cur->hist_id;
-}
-
-void crossrelease_hist_end(enum xhlock_context_t c)
-{
-	struct task_struct *cur = current;
-
-	if (cur->xhlocks) {
-		unsigned int idx = cur->xhlock_idx_hist[c];
-		struct hist_lock *h = &xhlock(idx);
-
-		cur->xhlock_idx = idx;
-
-		/* Check if the ring was overwritten. */
-		if (h->hist_id != cur->hist_id_save[c])
-			invalidate_xhlock(h);
-	}
 }
 
 static int cross_lock(struct lockdep_map *lock)
+4 −5
@@ -2094,8 +2094,8 @@ __acquires(&pool->lock)
 	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	/*
-	 * Strictly speaking we should do start(PROC) without holding any
-	 * locks, that is, before these two lock_map_acquire()'s.
+	 * Strictly speaking we should mark the invariant state without holding
+	 * any locks, that is, before these two lock_map_acquire()'s.
 	 *
 	 * However, that would result in:
 	 *
@@ -2107,14 +2107,14 @@ __acquires(&pool->lock)
 	 * Which would create W1->C->W1 dependencies, even though there is no
 	 * actual deadlock possible. There are two solutions, using a
 	 * read-recursive acquire on the work(queue) 'locks', but this will then
-	 * hit the lockdep limitation on recursive locks, or simly discard
+	 * hit the lockdep limitation on recursive locks, or simply discard
 	 * these locks.
 	 *
 	 * AFAICT there is no possible deadlock scenario between the
 	 * flush_work() and complete() primitives (except for single-threaded
 	 * workqueues), so hiding them isn't a problem.
 	 */
-	crossrelease_hist_start(XHLOCK_PROC, true);
+	lockdep_invariant_state(true);
 	trace_workqueue_execute_start(work);
 	worker->current_func(work);
 	/*
@@ -2122,7 +2122,6 @@ __acquires(&pool->lock)
 	 * point will only record its address.
 	 */
 	trace_workqueue_execute_end(work);
-	crossrelease_hist_end(XHLOCK_PROC);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);
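For reference, here is a minimal userspace model of the xhlocks[] ring mechanics that the two interfaces act on. It is purely illustrative: the ring size, the helper names take_lock()/hist_start()/hist_end()/invariant_state(), and the CTX_* constants are invented for the sketch; only the overall rewind and invalidation logic mirrors crossrelease_hist_start()/crossrelease_hist_end() and lockdep_invariant_state() as shown in the diff above.

#include <stdio.h>
#include <stdbool.h>

#define XHLOCKS_NR 8	/* tiny ring, just for illustration */

enum ctx { CTX_HARD, CTX_SOFT, CTX_NR };

struct hist_lock { unsigned int hist_id; bool valid; };

static struct hist_lock ring[XHLOCKS_NR];
static unsigned int xhlock_idx, hist_id;
static unsigned int idx_save[CTX_NR], id_save[CTX_NR];

#define xhlock(i) (ring[(i) % XHLOCKS_NR])

/* Record a lock acquisition in the ring (stand-in for add_xhlock()). */
static void take_lock(void)
{
	struct hist_lock *h = &xhlock(++xhlock_idx);
	h->hist_id = ++hist_id;
	h->valid = true;
}

/* IRQ entry: remember where the temporary history begins. */
static void hist_start(enum ctx c)
{
	idx_save[c] = xhlock_idx;
	id_save[c]  = hist_id;
}

/* IRQ exit: rewind; if the ring wrapped meanwhile, poison the saved entry. */
static void hist_end(enum ctx c)
{
	struct hist_lock *h = &xhlock(idx_save[c]);

	xhlock_idx = idx_save[c];
	if (h->hist_id != id_save[c])
		h->valid = false;
}

/* Invariant point: nothing that follows may depend on prior history. */
static void invariant_state(void)
{
	xhlock(xhlock_idx).valid = false;
}

int main(void)
{
	take_lock();			/* task takes a lock */

	hist_start(CTX_HARD);		/* hardirq entry */
	take_lock();			/* lock taken inside the handler */
	hist_end(CTX_HARD);		/* hardirq exit: handler locks forgotten */

	invariant_state();		/* e.g. before running the next work item */

	printf("idx=%u, current entry valid=%d\n",
	       xhlock_idx, xhlock(xhlock_idx).valid);
	return 0;
}

The model only covers the rewind and invalidation bookkeeping; the real code additionally records lockdep keys and stack traces and detects ring overflow via hist_id.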