
Commit 1b207d80 authored by Kent Overstreet

bcache: Kill op->replace



This is prep work for converting bch_btree_insert to
bch_btree_map_leaf_nodes() - we have to convert all its arguments to
actual arguments. Bunch of churn, but should be straightforward.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent faadf0c9
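
In short, the replace key no longer rides along inside struct btree_op (op->type == BTREE_REPLACE plus the padded op->replace field); it is passed down the whole insert path as an explicit struct bkey *replace_key argument, with NULL meaning an ordinary insert. A condensed before/after sketch of the caller side, pieced together from the movinggc.c and request.c hunks below (not a literal excerpt; surrounding code abbreviated):

	/* Before: flag the op as a replace and stash the key inside the op. */
	s->op.type = BTREE_REPLACE;
	bkey_copy(&s->op.replace, &io->w->key);
	bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref);

	/* After: keep the key in the search struct and hand it to
	 * bch_btree_insert() explicitly; a NULL replace_key means a plain insert. */
	bkey_copy(&s->replace_key, &io->w->key);
	s->replace = true;
	bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref,
			 s->replace ? &s->replace_key : NULL);
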
drivers/md/bcache/btree.c +52 −50
@@ -89,15 +89,6 @@
 * Test module load/unload
 */

-static const char * const op_types[] = {
-	"insert", "replace"
-};
-
-static const char *op_type(struct btree_op *op)
-{
-	return op_types[op->type];
-}
-
enum {
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
@@ -1699,10 +1690,9 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
	bch_bset_fix_lookup_table(b, where);
}

-static bool fix_overlapping_extents(struct btree *b,
-				    struct bkey *insert,
+static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
				    struct btree_iter *iter,
-				    struct btree_op *op)
+				    struct bkey *replace_key)
{
	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
	{
@@ -1730,39 +1720,38 @@ static bool fix_overlapping_extents(struct btree *b,
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
-		 * inserting. But we don't want to check them for BTREE_REPLACE
+		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

-		if (op->type == BTREE_REPLACE &&
-		    KEY_SIZE(k)) {
+		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
-				KEY_START(&op->replace);
+				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
-			if (KEY_START(k) < KEY_START(&op->replace) ||
-			    KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+			if (KEY_START(k) < KEY_START(replace_key) ||
+			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

-			if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+			if (KEY_PTRS(replace_key) != KEY_PTRS(k))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

-			BUG_ON(!KEY_PTRS(&op->replace));
+			BUG_ON(!KEY_PTRS(replace_key));

-			for (i = 0; i < KEY_PTRS(&op->replace); i++)
-				if (k->ptr[i] != op->replace.ptr[i] + offset)
+			for (i = 0; i < KEY_PTRS(replace_key); i++)
+				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
@@ -1833,9 +1822,8 @@ static bool fix_overlapping_extents(struct btree *b,
	}

check_failed:
-	if (op->type == BTREE_REPLACE) {
+	if (replace_key) {
		if (!sectors_found) {
-			op->insert_collision = true;
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
@@ -1848,7 +1836,7 @@ static bool fix_overlapping_extents(struct btree *b,
}

static bool btree_insert_key(struct btree *b, struct btree_op *op,
-			     struct bkey *k)
+			     struct bkey *k, struct bkey *replace_key)
{
	struct bset *i = b->sets[b->nsets].data;
	struct bkey *m, *prev;
@@ -1874,8 +1862,10 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
		prev = NULL;
		m = bch_btree_iter_init(b, &iter, &search);

-		if (fix_overlapping_extents(b, k, &iter, op))
+		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
+			op->insert_collision = true;
			return false;
+		}

		if (KEY_DIRTY(k))
			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
@@ -1903,24 +1893,28 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
		if (m != end(i) &&
		    bch_bkey_try_merge(b, k, m))
			goto copy;
-	} else
+	} else {
+		BUG_ON(replace_key);
		m = bch_bset_search(b, &b->sets[b->nsets], k);
+	}

insert:	shift_keys(b, m, k);
copy:	bkey_copy(m, k);
merged:
-	bch_check_keys(b, "%u for %s", status, op_type(op));
+	bch_check_keys(b, "%u for %s", status,
+		       replace_key ? "replace" : "insert");

	if (b->level && !KEY_OFFSET(k))
		btree_current_write(b)->prio_blocked++;

-	trace_bcache_btree_insert_key(b, k, op->type, status);
+	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);

	return true;
}

static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
-				  struct keylist *insert_keys)
+				  struct keylist *insert_keys,
+				  struct bkey *replace_key)
{
	bool ret = false;
	unsigned oldsize = bch_count_data(b);
@@ -1936,11 +1930,11 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
		if (bkey_cmp(k, &b->key) <= 0) {
			bkey_put(b->c, k, b->level);

-			ret |= btree_insert_key(b, op, k);
+			ret |= btree_insert_key(b, op, k, replace_key);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
#if 0
-			if (op->type == BTREE_REPLACE) {
+			if (replace_key) {
				bkey_put(b->c, k, b->level);
				bch_keylist_pop_front(insert_keys);
				op->insert_collision = true;
@@ -1953,7 +1947,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

-			ret |= btree_insert_key(b, op, &temp.key);
+			ret |= btree_insert_key(b, op, &temp.key, replace_key);
			break;
		} else {
			break;
@@ -1968,7 +1962,8 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,

static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
-		       struct keylist *parent_keys)
+		       struct keylist *parent_keys,
+		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
@@ -1998,7 +1993,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
				goto err_free2;
		}

-		bch_btree_insert_keys(n1, op, insert_keys);
+		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an auxiliary
@@ -2026,7 +2021,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
	} else {
		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);

-		bch_btree_insert_keys(n1, op, insert_keys);
+		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(parent_keys, &n1->key);
@@ -2036,7 +2031,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
		/* Depth increases, make a new root */

		bkey_copy_key(&n3->key, &MAX_KEY);
-		bch_btree_insert_keys(n3, op, parent_keys);
+		bch_btree_insert_keys(n3, op, parent_keys, NULL);
		bch_btree_node_write(n3, &cl);

		closure_sync(&cl);
@@ -2091,7 +2086,8 @@ static int btree_split(struct btree *b, struct btree_op *op,

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
-				 atomic_t *journal_ref)
+				 atomic_t *journal_ref,
+				 struct bkey *replace_key)
{
	int ret = 0;
	struct keylist split_keys;
@@ -2101,6 +2097,8 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
	BUG_ON(b->level);

	do {
+		BUG_ON(b->level && replace_key);
+
		if (should_split(b)) {
			if (current->bio_list) {
				op->lock = b->c->root->level + 1;
@@ -2112,8 +2110,9 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				struct btree *parent = b->parent;

				ret = btree_split(b, op, insert_keys,
-						  &split_keys);
+						  &split_keys, replace_key);
				insert_keys = &split_keys;
+				replace_key = NULL;
				b = parent;
				if (!ret)
					ret = -EINTR;
@@ -2121,7 +2120,8 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
		} else {
			BUG_ON(write_block(b) != b->sets[b->nsets].data);

-			if (bch_btree_insert_keys(b, op, insert_keys)) {
+			if (bch_btree_insert_keys(b, op, insert_keys,
+						  replace_key)) {
				if (!b->level) {
					bch_btree_leaf_dirty(b, journal_ref);
				} else {
@@ -2165,9 +2165,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,

	bch_keylist_add(&insert, check_key);

-	BUG_ON(op->type != BTREE_INSERT);
-
-	ret = bch_btree_insert_node(b, op, &insert, NULL);
+	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
@@ -2177,7 +2175,8 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
}

static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
-				    struct keylist *keys, atomic_t *journal_ref)
+				    struct keylist *keys, atomic_t *journal_ref,
+				    struct bkey *replace_key)
{
	if (bch_keylist_empty(keys))
		return 0;
@@ -2194,14 +2193,17 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
			return -EIO;
		}

-		return btree(insert_recurse, k, b, op, keys, journal_ref);
+		return btree(insert_recurse, k, b, op, keys,
+			     journal_ref, replace_key);
	} else {
-		return bch_btree_insert_node(b, op, keys, journal_ref);
+		return bch_btree_insert_node(b, op, keys,
+					     journal_ref, replace_key);
	}
}

int bch_btree_insert(struct btree_op *op, struct cache_set *c,
-		     struct keylist *keys, atomic_t *journal_ref)
+		     struct keylist *keys, atomic_t *journal_ref,
+		     struct bkey *replace_key)
{
	int ret = 0;

@@ -2209,7 +2211,8 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,

	while (!bch_keylist_empty(keys)) {
		op->lock = 0;
-		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
+		ret = btree_root(insert_recurse, c, op, keys,
+				 journal_ref, replace_key);

		if (ret == -EAGAIN) {
			BUG();
@@ -2217,8 +2220,7 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
		} else if (ret) {
			struct bkey *k;

-			pr_err("error %i trying to insert key for %s",
-			       ret, op_type(op));
+			pr_err("error %i", ret);

			while ((k = bch_keylist_pop(keys)))
				bkey_put(c, k, 0);
drivers/md/bcache/btree.h +1 −9
@@ -240,15 +240,7 @@ struct btree_op {
	/* Btree level at which we start taking write locks */
	short			lock;

-	/* Btree insertion type */
-	enum {
-		BTREE_INSERT,
-		BTREE_REPLACE
-	} type:8;
-
	unsigned		insert_collision:1;
-
-	BKEY_PADDED(replace);
};

static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
@@ -290,7 +282,7 @@ struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
int bch_btree_insert_check_key(struct btree *, struct btree_op *,
			       struct bkey *);
int bch_btree_insert(struct btree_op *, struct cache_set *,
-		     struct keylist *, atomic_t *);
+		     struct keylist *, atomic_t *, struct bkey *);

int bch_gc_thread_start(struct cache_set *);
size_t bch_btree_gc_finish(struct cache_set *);
drivers/md/bcache/journal.c +1 −1
@@ -322,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
			bkey_copy(keylist.top, k);
			bch_keylist_push(&keylist);

-			ret = bch_btree_insert(&op, s, &keylist, i->pin);
+			ret = bch_btree_insert(&op, s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

drivers/md/bcache/movinggc.c +2 −2
@@ -105,8 +105,8 @@ static void write_moving(struct closure *cl)
		s->writeback		= KEY_DIRTY(&io->w->key);
		s->csum			= KEY_CSUM(&io->w->key);

-		s->op.type = BTREE_REPLACE;
-		bkey_copy(&s->op.replace, &io->w->key);
+		bkey_copy(&s->replace_key, &io->w->key);
+		s->replace = true;

		closure_init(&s->btree, cl);
		bch_data_insert(&s->btree);
drivers/md/bcache/request.c +9 −5
@@ -217,6 +217,7 @@ static void bch_data_insert_keys(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, btree);
	atomic_t *journal_ref = NULL;
+	struct bkey *replace_key = s->replace ? &s->replace_key : NULL;

	/*
	 * If we're looping, might already be waiting on
@@ -235,7 +236,8 @@ static void bch_data_insert_keys(struct closure *cl)
					  s->flush_journal
					  ? &s->cl : NULL);

-	if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) {
+	if (bch_btree_insert(&s->op, s->c, &s->insert_keys,
+			     journal_ref, replace_key)) {
		s->error		= -ENOMEM;
		s->insert_data_done	= true;
	}
@@ -1056,7 +1058,7 @@ static void cached_dev_read_done(struct closure *cl)

	if (s->cache_bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
-		s->op.type = BTREE_REPLACE;
+		BUG_ON(!s->replace);
		closure_call(&s->btree, bch_data_insert, NULL, cl);
	}

@@ -1101,13 +1103,15 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,

	s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);

-	s->op.replace = KEY(s->inode, bio->bi_sector +
+	s->replace_key = KEY(s->inode, bio->bi_sector +
			     s->cache_bio_sectors, s->cache_bio_sectors);

-	ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace);
+	ret = bch_btree_insert_check_key(b, &s->op, &s->replace_key);
	if (ret)
		return ret;

+	s->replace = true;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */