Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8c2c2b44 authored by Yuyang Du, committed by Ingo Molnar
Browse files

locking/lockdep: Refactorize check_noncircular and check_redundant



These two functions now handle different check results themselves. A new
check_path function is added to check whether there is a path in the
dependency graph. No functional change.

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-20-duyuyang@gmail.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b4adfe8e
Loading
Loading
Loading
Loading
+74 −44
Original line number Diff line number Diff line
@@ -1683,33 +1683,90 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
}

/*
 * Prove that the dependency graph starting at <entry> can not
 * lead to <target>. Print an error and return 0 if it does.
 * Check that the dependency graph starting at <src> can lead to
 * <target> or not. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
check_path(struct lock_class *target, struct lock_list *src_entry,
	   struct lock_list **target_entry)
{
	int result;
	int ret;

	ret = __bfs_forwards(src_entry, (void *)target, class_equal,
			     target_entry);

	if (unlikely(ret < 0))
		print_bfs_bug(ret);

	return ret;
}

/*
 * Prove that the dependency graph starting at <src> can not
 * lead to <target>. If it can, there is a circle when adding
 * <target> -> <src> dependency.
 *
 * Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct held_lock *src, struct held_lock *target,
		  struct lock_trace *trace)
{
	int ret;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list src_entry = {
		.class = hlock_class(src),
		.parent = NULL,
	};

	debug_atomic_inc(nr_cyclic_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);
	ret = check_path(hlock_class(target), &src_entry, &target_entry);

	return result;
	if (unlikely(!ret)) {
		if (!trace->nr_entries) {
			/*
			 * If save_trace fails here, the printing might
			 * trigger a WARN but because of the !nr_entries it
			 * should not do bad things.
			 */
			save_trace(trace);
		}

		print_circular_bug(&src_entry, target_entry, src, target);
	}

	return ret;
}

/*
 * Check that the dependency graph starting at <src> can lead to
 * <target> or not. If it can, <src> -> <target> dependency is already
 * in the graph.
 *
 * Print an error and return 2 if it does or 1 if it does not.
 */
static noinline int
check_redundant(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
check_redundant(struct held_lock *src, struct held_lock *target)
{
	int result;
	int ret;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list src_entry = {
		.class = hlock_class(src),
		.parent = NULL,
	};

	debug_atomic_inc(nr_redundant_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);
	ret = check_path(hlock_class(target), &src_entry, &target_entry);

	return result;
	if (!ret) {
		debug_atomic_inc(nr_redundant);
		ret = 2;
	} else if (ret < 0)
		ret = 0;

	return ret;
}

#ifdef CONFIG_TRACE_IRQFLAGS
@@ -2307,9 +2364,7 @@ static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, struct lock_trace *trace)
{
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *entry;
	struct lock_list this;
	int ret;

	if (!hlock_class(prev)->key || !hlock_class(next)->key) {
@@ -2340,25 +2395,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
	 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
	 * in the graph whose neighbours are to be checked.
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret)) {
		if (!trace->nr_entries) {
			/*
			 * If save_trace fails here, the printing might
			 * trigger a WARN but because of the !nr_entries it
			 * should not do bad things.
			 */
			save_trace(trace);
		}
		print_circular_bug(&this, target_entry, next, prev);
		return 0;
	}
	else if (unlikely(ret < 0)) {
		print_bfs_bug(ret);
	ret = check_noncircular(next, prev, trace);
	if (unlikely(ret <= 0))
		return 0;
	}

	if (!check_irq_usage(curr, prev, next))
		return 0;
@@ -2392,18 +2431,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
	/*
	 * Is the <prev> -> <next> link redundant?
	 */
	this.class = hlock_class(prev);
	this.parent = NULL;
	ret = check_redundant(&this, hlock_class(next), &target_entry);
	if (!ret) {
		debug_atomic_inc(nr_redundant);
		return 2;
	}
	if (ret < 0) {
		print_bfs_bug(ret);
		return 0;
	}

	ret = check_redundant(prev, next);
	if (ret != 1)
		return ret;

	if (!trace->nr_entries && !save_trace(trace))
		return 0;