Commit 280481d0 authored by Kent Overstreet

bcache: Debug code improvements

A couple of changes:
 * Consolidate bch_check_keys() and bch_check_key_order(), and move the
   checks that only check_key_order() could do to bch_btree_iter_next().

 * Get rid of CONFIG_BCACHE_EDEBUG - now, all that code is compiled in
   when CONFIG_BCACHE_DEBUG is enabled, and there's now a sysfs file to
   flip on the EDEBUG checks at runtime.

 * Dropped an old, not terribly useful check in rw_unlock(), and
   refactored/improved some of the other debug code.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent e58ff155
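Note on the mechanism: the EDEBUG-to-runtime conversion keeps its zero cost in production builds because the new predicate collapses to a compile-time constant when CONFIG_BCACHE_DEBUG is off, so the compiler discards the guarded blocks as dead code. The accessors live in the debug headers, outside this excerpt; a minimal sketch of the assumed pattern, named after the cache_set bits this commit touches:

/* Sketch (assumed, not shown in this excerpt): debug header accessors. */
#ifdef CONFIG_BCACHE_DEBUG
/* real bits in struct cache_set; flipped at runtime via sysfs */
#define expensive_debug_checks(c)	((c)->expensive_debug_checks)
#define key_merging_disabled(c)		((c)->key_merging_disabled)
#else
/* constant 0: "if (expensive_debug_checks(c)) { ... }" is dead code,
 * matching the zero cost of the old #ifdef CONFIG_BCACHE_EDEBUG */
#define expensive_debug_checks(c)	0
#define key_merging_disabled(c)		0
#endif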

drivers/md/bcache/Kconfig +2 −9
@@ -13,15 +13,8 @@ config BCACHE_DEBUG
 	---help---
 	Don't select this option unless you're a developer
 
-	Enables extra debugging tools (primarily a fuzz tester)
-
-config BCACHE_EDEBUG
-	bool "Extended runtime checks"
-	depends on BCACHE
-	---help---
-	Don't select this option unless you're a developer
-
-	Enables extra runtime checks which significantly affect performance
+	Enables extra debugging tools, allows expensive runtime checks to be
+	turned on.
 
 config BCACHE_CLOSURES_DEBUG
 	bool "Debug closures"
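The sysfs file mentioned in the commit message is wired up in sysfs.c, which this excerpt does not include. A rough sketch using bcache's SHOW/STORE sysfs helpers; the attribute name follows the new cache_set bit, but its placement on the cache set's internal kobject is an assumption:

/* sysfs.c sketch (abridged, assumed): expose the new bit so the
 * expensive checks can be toggled on a running cache set. */
rw_attribute(expensive_debug_checks);

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	return 0;
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	sysfs_strtoul(expensive_debug_checks,
		      c->expensive_debug_checks);
	return size;
}

With that wiring, writing 1 to /sys/fs/bcache/<set-uuid>/internal/expensive_debug_checks (path assumed from bcache's usual sysfs layout) turns the checks on at runtime.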

drivers/md/bcache/alloc.c +2 −3
@@ -398,8 +398,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 out:
 	wake_up_process(ca->alloc_thread);
 
-#ifdef CONFIG_BCACHE_EDEBUG
-	{
+	if (expensive_debug_checks(ca->set)) {
 		size_t iter;
 		long i;
 
@@ -413,7 +412,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 		fifo_for_each(i, &ca->unused, iter)
 			BUG_ON(i == r);
 	}
-#endif
+
 	b = ca->buckets + r;
 
 	BUG_ON(atomic_read(&b->pin) != 1);

drivers/md/bcache/bcache.h +1 −9
@@ -690,6 +690,7 @@ struct cache_set {
 	unsigned short		journal_delay_ms;
 	unsigned		verify:1;
 	unsigned		key_merging_disabled:1;
+	unsigned		expensive_debug_checks:1;
 	unsigned		gc_always_rewrite:1;
 	unsigned		shrinker_disabled:1;
 	unsigned		copy_gc_enabled:1;
@@ -698,15 +699,6 @@ struct cache_set {
 	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
 };
 
-static inline bool key_merging_disabled(struct cache_set *c)
-{
-#ifdef CONFIG_BCACHE_DEBUG
-	return c->key_merging_disabled;
-#else
-	return 0;
-#endif
-}
-
 struct bbio {
 	unsigned		submit_time_us;
 	union {

drivers/md/bcache/bset.c +60 −52
@@ -106,6 +106,43 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
 	return true;
 }
 
+static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
+				     unsigned ptr)
+{
+	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+	char buf[80];
+
+	if (mutex_trylock(&b->c->bucket_lock)) {
+		if (b->level) {
+			if (KEY_DIRTY(k) ||
+			    g->prio != BTREE_PRIO ||
+			    (b->c->gc_mark_valid &&
+			     GC_MARK(g) != GC_MARK_METADATA))
+				goto err;
+
+		} else {
+			if (g->prio == BTREE_PRIO)
+				goto err;
+
+			if (KEY_DIRTY(k) &&
+			    b->c->gc_mark_valid &&
+			    GC_MARK(g) != GC_MARK_DIRTY)
+				goto err;
+		}
+		mutex_unlock(&b->c->bucket_lock);
+	}
+
+	return false;
+err:
+	mutex_unlock(&b->c->bucket_lock);
+	bch_bkey_to_text(buf, sizeof(buf), k);
+	btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+	return true;
+}
+
 bool bch_ptr_bad(struct btree *b, const struct bkey *k)
 {
 	struct bucket *g;
@@ -133,46 +170,12 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
 		if (stale)
 			return true;
 
-#ifdef CONFIG_BCACHE_EDEBUG
-		if (!mutex_trylock(&b->c->bucket_lock))
-			continue;
-
-		if (b->level) {
-			if (KEY_DIRTY(k) ||
-			    g->prio != BTREE_PRIO ||
-			    (b->c->gc_mark_valid &&
-			     GC_MARK(g) != GC_MARK_METADATA))
-				goto bug;
-
-		} else {
-			if (g->prio == BTREE_PRIO)
-				goto bug;
-
-			if (KEY_DIRTY(k) &&
-			    b->c->gc_mark_valid &&
-			    GC_MARK(g) != GC_MARK_DIRTY)
-				goto bug;
-		}
-		mutex_unlock(&b->c->bucket_lock);
-#endif
+		if (expensive_debug_checks(b->c) &&
+		    ptr_bad_expensive_checks(b, k, i))
+			return true;
 	}
 
 	return false;
-#ifdef CONFIG_BCACHE_EDEBUG
-bug:
-	mutex_unlock(&b->c->bucket_lock);
-
-	{
-		char buf[80];
-
-		bch_bkey_to_text(buf, sizeof(buf), k);
-		btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
-			  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
-			  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
-	}
-	return true;
-#endif
 }
 
 /* Key/pointer manipulation */
@@ -821,7 +824,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 	} else
 		i = bset_search_write_set(b, t, search);
 
-#ifdef CONFIG_BCACHE_EDEBUG
+	if (expensive_debug_checks(b->c)) {
 		BUG_ON(bset_written(b, t) &&
 		       i.l != t->data->start &&
 		       bkey_cmp(tree_to_prev_bkey(t,
@@ -830,7 +833,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 		BUG_ON(i.r != end(t->data) &&
 		       bkey_cmp(i.r, search) <= 0);
-#endif
+	}
 
 	while (likely(i.l != i.r) &&
 	       bkey_cmp(i.l, search) <= 0)
@@ -877,6 +880,10 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
 	iter->size = ARRAY_SIZE(iter->data);
 	iter->used = 0;
 
+#ifdef CONFIG_BCACHE_DEBUG
+	iter->b = b;
+#endif
+
 	for (; start <= &b->sets[b->nsets]; start++) {
 		ret = bch_bset_search(b, start, search);
 		bch_btree_iter_push(iter, ret, end(start->data));
@@ -891,6 +898,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
 	struct bkey *ret = NULL;
 
 	if (!btree_iter_end(iter)) {
+		bch_btree_iter_next_check(iter);
+
 		ret = iter->data->k;
 		iter->data->k = bkey_next(iter->data->k);
 
@@ -1002,7 +1011,6 @@ static void btree_mergesort(struct btree *b, struct bset *out,
 	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
 
 	pr_debug("sorted %i keys", out->keys);
-	bch_check_key_order(b, out);
 }
 
 static void __btree_sort(struct btree *b, struct btree_iter *iter,
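With ordering validated inside the iterator, btree_mergesort() no longer needs its own bch_check_key_order() pass: every key the sort consumes has already gone through bch_btree_iter_next(). The check itself lives in the debug code, outside this excerpt; presumably something along these lines, using the iterator's new debug-only b back-pointer:

/* debug sketch (assumed, not shown in this diff): assert the iterator
 * never yields keys out of order. On leaf nodes keys are extents
 * sorted by their end offset, so the current key must not reach past
 * the start of the next one, hence the START_KEY() comparison. */
void bch_btree_iter_next_check(struct btree_iter *iter)
{
	struct bkey *k = iter->data->k, *next = bkey_next(k);

	if (next < iter->data->end &&
	    bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0)
		panic("key skipped backwards\n");
}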
@@ -1063,15 +1071,15 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 
 void bch_btree_sort_partial(struct btree *b, unsigned start)
 {
-	size_t oldsize = 0, order = b->page_order, keys = 0;
+	size_t order = b->page_order, keys = 0;
 	struct btree_iter iter;
+	int oldsize = bch_count_data(b);
 
 	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
 
 	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
 	       (b->sets[b->nsets].size || b->nsets));
 
-	if (b->written)
-		oldsize = bch_count_data(b);
-
 	if (start) {
 		unsigned i;
@@ -1087,7 +1095,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start)
 
 	__btree_sort(b, &iter, start, order, false);
 
-	EBUG_ON(b->written && bch_count_data(b) != oldsize);
+	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
 }
 
 void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
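The bch_btree_sort_partial() hunks imply that bch_count_data() now returns a negative sentinel when the expensive walk is skipped, which is why the EBUG_ON gained the oldsize >= 0 guard. The wrapper lives in the debug headers, outside this excerpt; a plausible shape, with the helper name assumed:

/* debug header sketch (assumed): only count keys when the runtime flag
 * is on; return -1 otherwise so callers can tell "checks disabled"
 * apart from a real count of zero. */
int __bch_count_data(struct btree *);

#define bch_count_data(b)						\
	(expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)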

drivers/md/bcache/bset.h +3 −0
@@ -148,6 +148,9 @@
 
 struct btree_iter {
 	size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+	struct btree *b;
+#endif
 	struct btree_iter_set {
 		struct bkey *k, *end;
 	} data[MAX_BSETS];