
Commit c18536a7 authored by Kent Overstreet

bcache: Prune struct btree_op



Eventual goal is for struct btree_op to contain only what is necessary
for traversing the btree.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent cc231966
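
The pattern throughout is the same: per-insertion state (here, the journal pin) moves out of the long-lived struct btree_op and becomes an explicit function parameter, while code that only needs an op to traverse the btree allocates one on its own stack. A minimal before/after sketch of the caller side, condensed from the bch_journal_replay hunk below (surrounding replay loop and error handling elided):

	/* Before: the journal pin rode along inside the op */
	op->journal = i->pin;
	ret = bch_btree_insert(op, s, &keylist);

	/* After: the op is a stack-local traversal context and the
	 * journal ref is handed directly to the function that needs it
	 */
	struct btree_op op;

	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	ret = bch_btree_insert(&op, s, &keylist, i->pin);
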
drivers/md/bcache/bset.c +0 −1
@@ -1197,7 +1197,6 @@ int bch_bset_print_stats(struct cache_set *c, char *buf)
 
 	memset(&t, 0, sizeof(struct bset_stats));
 	bch_btree_op_init_stack(&t.op);
-	t.op.c = c;
 
 	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
 	if (ret < 0)
drivers/md/bcache/btree.c +18 −14
@@ -503,7 +503,7 @@ static void btree_node_write_work(struct work_struct *w)
 	rw_unlock(true, b);
 }
 
-static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct btree_write *w = btree_current_write(b);
@@ -516,15 +516,15 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
 
 	set_btree_node_dirty(b);
 
-	if (op->journal) {
+	if (journal_ref) {
 		if (w->journal &&
-		    journal_pin_cmp(b->c, w, op)) {
+		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
 			atomic_dec_bug(w->journal);
 			w->journal = NULL;
 		}
 
 		if (!w->journal) {
-			w->journal = op->journal;
+			w->journal = journal_ref;
 			atomic_inc(w->journal);
 		}
 	}
@@ -1663,13 +1663,16 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
 	return 0;
 }
 
-int bch_btree_check(struct cache_set *c, struct btree_op *op)
+int bch_btree_check(struct cache_set *c)
 {
 	int ret = -ENOMEM;
 	unsigned i;
 	unsigned long *seen[MAX_CACHES_PER_SET];
+	struct btree_op op;
 
 	memset(seen, 0, sizeof(seen));
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
 
 	for (i = 0; c->cache[i]; i++) {
 		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1681,7 +1684,7 @@ int bch_btree_check(struct cache_set *c, struct btree_op *op)
 		memset(seen[i], 0xFF, n);
 	}
 
-	ret = btree_root(check_recurse, c, op, seen);
+	ret = btree_root(check_recurse, c, &op, seen);
 err:
 	for (i = 0; i < MAX_CACHES_PER_SET; i++)
 		kfree(seen[i]);
@@ -2091,7 +2094,8 @@ static int btree_split(struct btree *b, struct btree_op *op,
 }
 
 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
-				 struct keylist *insert_keys)
+				 struct keylist *insert_keys,
+				 atomic_t *journal_ref)
 {
 	int ret = 0;
 	struct keylist split_keys;
@@ -2123,7 +2127,7 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 
 			if (bch_btree_insert_keys(b, op, insert_keys)) {
 				if (!b->level)
-					bch_btree_leaf_dirty(b, op);
+					bch_btree_leaf_dirty(b, journal_ref);
 				else
 					bch_btree_node_write(b, &op->cl);
 			}
@@ -2162,7 +2166,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 
 	BUG_ON(op->type != BTREE_INSERT);
 
-	ret = bch_btree_insert_node(b, op, &insert);
+	ret = bch_btree_insert_node(b, op, &insert, NULL);
 
 	BUG_ON(!ret && !bch_keylist_empty(&insert));
 out:
@@ -2172,7 +2176,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 }
 
 static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
-				    struct keylist *keys)
+				    struct keylist *keys, atomic_t *journal_ref)
 {
 	if (bch_keylist_empty(keys))
 		return 0;
@@ -2189,14 +2193,14 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
 			return -EIO;
 		}
 
-		return btree(insert_recurse, k, b, op, keys);
+		return btree(insert_recurse, k, b, op, keys, journal_ref);
 	} else {
-		return bch_btree_insert_node(b, op, keys);
+		return bch_btree_insert_node(b, op, keys, journal_ref);
 	}
 }
 
 int bch_btree_insert(struct btree_op *op, struct cache_set *c,
-		     struct keylist *keys)
+		     struct keylist *keys, atomic_t *journal_ref)
 {
 	int ret = 0;
 
@@ -2210,7 +2214,7 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 
 	while (!bch_keylist_empty(keys)) {
 		op->lock = 0;
-		ret = btree_root(insert_recurse, c, op, keys);
+		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
 
 		if (ret == -EAGAIN) {
 			ret = 0;
drivers/md/bcache/btree.h +3 −18
@@ -238,17 +238,6 @@ void __bkey_put(struct cache_set *c, struct bkey *k);
 
 struct btree_op {
 	struct closure		cl;
-	struct cache_set	*c;
-
-	/* Journal entry we have a refcount on */
-	atomic_t		*journal;
-
-	/* Bio to be inserted into the cache */
-	struct bio		*cache_bio;
-
-	unsigned		inode;
-
-	uint16_t		write_prio;
 
 	/* Btree level at which we start taking write locks */
 	short			lock;
@@ -259,11 +248,6 @@ struct btree_op {
 		BTREE_REPLACE
 	} type:8;
 
-	unsigned		csum:1;
-	unsigned		bypass:1;
-	unsigned		flush_journal:1;
-
-	unsigned		insert_data_done:1;
 	unsigned		insert_collision:1;
 
 	BKEY_PADDED(replace);
@@ -303,12 +287,13 @@ struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
 int bch_btree_insert_check_key(struct btree *, struct btree_op *,
 			       struct bkey *);
-int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);
+int bch_btree_insert(struct btree_op *, struct cache_set *,
+		     struct keylist *, atomic_t *);
 
 int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
 void bch_moving_gc(struct cache_set *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
+int bch_btree_check(struct cache_set *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
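
For reference, this is struct btree_op as the two struct hunks above leave it. The members between lock and BTREE_REPLACE sit in the diff's hidden context, so the enum head below (the insertion-type comment and BTREE_INSERT) is an assumption filled in from the surrounding kernel source:

	struct btree_op {
		struct closure		cl;

		/* Btree level at which we start taking write locks */
		short			lock;

		/* Btree insertion type -- this comment and BTREE_INSERT are
		 * assumed from context, not shown in the hunks above */
		enum {
			BTREE_INSERT,
			BTREE_REPLACE
		} type:8;

		unsigned		insert_collision:1;

		BKEY_PADDED(replace);
	};
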
drivers/md/bcache/journal.c +17 −15
@@ -30,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error)
 }
 
 static int journal_read_bucket(struct cache *ca, struct list_head *list,
-			       struct btree_op *op, unsigned bucket_index)
+			       unsigned bucket_index)
 {
 	struct journal_device *ja = &ca->journal;
 	struct bio *bio = &ja->bio;
 
 	struct journal_replay *i;
 	struct jset *j, *data = ca->set->journal.w[0].data;
+	struct closure cl;
 	unsigned len, left, offset = 0;
 	int ret = 0;
 	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
 
+	closure_init_stack(&cl);
+
 	pr_debug("reading %llu", (uint64_t) bucket);
 
 	while (offset < ca->sb.bucket_size) {
@@ -54,11 +57,11 @@ reread:		left = ca->sb.bucket_size - offset;
 		bio->bi_size	= len << 9;
 
 		bio->bi_end_io	= journal_read_endio;
-		bio->bi_private = &op->cl;
+		bio->bi_private = &cl;
 		bch_bio_map(bio, data);
 
-		closure_bio_submit(bio, &op->cl, ca);
-		closure_sync(&op->cl);
+		closure_bio_submit(bio, &cl, ca);
+		closure_sync(&cl);
 
 		/* This function could be simpler now since we no longer write
 		 * journal entries that overlap bucket boundaries; this means
@@ -128,12 +131,11 @@ reread:		left = ca->sb.bucket_size - offset;
 	return ret;
 }
 
-int bch_journal_read(struct cache_set *c, struct list_head *list,
-			struct btree_op *op)
+int bch_journal_read(struct cache_set *c, struct list_head *list)
 {
 #define read_bucket(b)							\
 	({								\
-		int ret = journal_read_bucket(ca, list, op, b);		\
+		int ret = journal_read_bucket(ca, list, b);		\
 		__set_bit(b, bitmap);					\
 		if (ret < 0)						\
 			return ret;					\
@@ -291,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 	}
 }
 
-int bch_journal_replay(struct cache_set *s, struct list_head *list,
-			  struct btree_op *op)
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
 {
 	int ret = 0, keys = 0, entries = 0;
 	struct bkey *k;
@@ -301,8 +302,11 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 
 	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
 	struct keylist keylist;
+	struct btree_op op;
 
 	bch_keylist_init(&keylist);
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
 
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -319,9 +323,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 			bkey_copy(keylist.top, k);
 			bch_keylist_push(&keylist);
 
-			op->journal = i->pin;
-
-			ret = bch_btree_insert(op, s, &keylist);
+			ret = bch_btree_insert(&op, s, &keylist, i->pin);
 			if (ret)
 				goto err;
 
@@ -346,7 +348,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 		kfree(i);
 	}
 err:
-	closure_sync(&op->cl);
+	closure_sync(&op.cl);
 	return ret;
 }
 
@@ -368,8 +370,8 @@ static void btree_flush_write(struct cache_set *c)
 			if (!best)
 				best = b;
 			else if (journal_pin_cmp(c,
-						 btree_current_write(best),
-						 btree_current_write(b))) {
+					btree_current_write(best)->journal,
+					btree_current_write(b)->journal)) {
 				best = b;
 			}
 		}
drivers/md/bcache/journal.h +3 −6
@@ -189,8 +189,7 @@ struct journal_device {
 };
 
 #define journal_pin_cmp(c, l, r)				\
-	(fifo_idx(&(c)->journal.pin, (l)->journal) >		\
-	 fifo_idx(&(c)->journal.pin, (r)->journal))
+	(fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
 
 #define JOURNAL_PIN	20000
 
@@ -206,10 +205,8 @@ atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
 void bch_journal_next(struct journal *);
 void bch_journal_mark(struct cache_set *, struct list_head *);
 void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *,
-			struct btree_op *);
-int bch_journal_replay(struct cache_set *, struct list_head *,
-			  struct btree_op *);
+int bch_journal_read(struct cache_set *, struct list_head *);
+int bch_journal_replay(struct cache_set *, struct list_head *);
 
 void bch_journal_free(struct cache_set *);
 int bch_journal_alloc(struct cache_set *);