
Commit cb7a583e authored by Kent Overstreet

bcache: kill closure locking usage

parent a5ae4300
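The pattern throughout the diff: each closure_with_waitlist that was doubling as a lock becomes a plain closure paired with a semaphore. The semaphore is taken with down() before the asynchronous work is started and released with up() from the closure's destructor; code that only needs to wait for an in-flight operation does a down()/up() pair. Below is a rough userspace sketch of that scheme, with POSIX semaphores and a thread standing in for the kernel primitives and the async I/O; the names are illustrative and not taken from the bcache sources.

/*
 * Userspace sketch of the locking scheme this commit moves to: a plain
 * semaphore guards the in-flight write, and the completion path releases it.
 * Illustrative only; sem_t and pthreads stand in for the kernel primitives.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

struct node {
	sem_t io_mutex;		/* plays the role of b->io_mutex */
	int dirty;
};

/* completion path, like btree_node_write_unlock(): drop the lock last */
static void write_done(struct node *b)
{
	b->dirty = 0;
	sem_post(&b->io_mutex);			/* up(&b->io_mutex) */
}

static void *async_write(void *arg)
{
	usleep(10000);				/* pretend the I/O takes a while */
	write_done(arg);
	return NULL;
}

/* like bch_btree_node_write(): take the lock, then start the async write */
static void node_write(struct node *b, pthread_t *tid)
{
	sem_wait(&b->io_mutex);			/* down(&b->io_mutex) */
	pthread_create(tid, NULL, async_write, b);
}

/* the "wait for any in flight write" idiom from mca_reap()/bch_btree_verify() */
static void wait_for_write(struct node *b)
{
	sem_wait(&b->io_mutex);
	sem_post(&b->io_mutex);
}

int main(void)
{
	struct node b = { .dirty = 1 };
	pthread_t tid;

	sem_init(&b.io_mutex, 0, 1);
	node_write(&b, &tid);
	wait_for_write(&b);			/* returns only after write_done() ran */
	printf("dirty = %d\n", b.dirty);	/* prints 0 */
	pthread_join(tid, NULL);
	return 0;
}

Built with cc -pthread, main() blocks in wait_for_write() until the completion thread has posted the semaphore, which is the same down()/up() idiom mca_reap() and bch_btree_verify() use to wait out an in-flight btree write.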
drivers/md/bcache/bcache.h +6 −3
@@ -309,7 +309,8 @@ struct cached_dev {
 	struct cache_sb		sb;
 	struct bio		sb_bio;
 	struct bio_vec		sb_bv[1];
-	struct closure_with_waitlist sb_write;
+	struct closure		sb_write;
+	struct semaphore	sb_write_mutex;

 	/* Refcount on the cache set. Always nonzero when we're caching. */
 	atomic_t		count;
@@ -514,7 +515,8 @@ struct cache_set {
 	uint64_t		cached_dev_sectors;
 	struct closure		caching;

-	struct closure_with_waitlist sb_write;
+	struct closure		sb_write;
+	struct semaphore	sb_write_mutex;

 	mempool_t		*search;
 	mempool_t		*bio_meta;
@@ -635,7 +637,8 @@ struct cache_set {
 	unsigned		nr_uuids;
 	struct uuid_entry	*uuids;
 	BKEY_PADDED(uuid_bucket);
-	struct closure_with_waitlist uuid_write;
+	struct closure		uuid_write;
+	struct semaphore	uuid_write_mutex;

 	/*
 	 * A btree node on disk could have too many bsets for an iterator to fit
drivers/md/bcache/btree.c +34 −18
@@ -340,9 +340,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
 	w->journal	= NULL;
 }

+static void btree_node_write_unlock(struct closure *cl)
+{
+	struct btree *b = container_of(cl, struct btree, io);
+
+	up(&b->io_mutex);
+}
+
 static void __btree_node_write_done(struct closure *cl)
 {
-	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct btree *b = container_of(cl, struct btree, io);
 	struct btree_write *w = btree_prev_write(b);

 	bch_bbio_free(b->bio, b->c);
@@ -353,12 +360,12 @@ static void __btree_node_write_done(struct closure *cl)
 		queue_delayed_work(btree_io_wq, &b->work,
 				   msecs_to_jiffies(30000));

-	closure_return(cl);
+	closure_return_with_destructor(cl, btree_node_write_unlock);
 }

 static void btree_node_write_done(struct closure *cl)
 {
-	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct btree *b = container_of(cl, struct btree, io);
 	struct bio_vec *bv;
 	int n;
@@ -371,7 +378,7 @@ static void btree_node_write_done(struct closure *cl)
 static void btree_node_write_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct btree *b = container_of(cl, struct btree, io);

 	if (error)
 		set_btree_node_io_error(b);
@@ -382,7 +389,7 @@ static void btree_node_write_endio(struct bio *bio, int error)

 static void do_btree_node_write(struct btree *b)
 {
-	struct closure *cl = &b->io.cl;
+	struct closure *cl = &b->io;
 	struct bset *i = b->sets[b->nsets].data;
 	BKEY_PADDED(key) k;

@@ -435,7 +442,7 @@ static void do_btree_node_write(struct btree *b)
 		bch_submit_bbio(b->bio, b->c, &k.key, 0);

 		closure_sync(cl);
-		__btree_node_write_done(cl);
+		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
 	}
 }

@@ -454,7 +461,8 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
 	cancel_delayed_work(&b->work);

 	/* If caller isn't waiting for write, parent refcount is cache set */
-	closure_lock(&b->io, parent ?: &b->c->cl);
+	down(&b->io_mutex);
+	closure_init(&b->io, parent ?: &b->c->cl);

 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
 	change_bit(BTREE_NODE_write_idx, &b->flags);
@@ -554,7 +562,8 @@ static void mca_reinit(struct btree *b)
 static void mca_data_free(struct btree *b)
 {
 	struct bset_tree *t = b->sets;
-	BUG_ON(!closure_is_unlocked(&b->io.cl));
+
+	BUG_ON(b->io_mutex.count != 1);

 	if (bset_prev_bytes(b) < PAGE_SIZE)
 		kfree(t->prev);
@@ -635,7 +644,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
 	INIT_LIST_HEAD(&b->list);
 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
 	b->c = c;
-	closure_init_unlocked(&b->io);
+	sema_init(&b->io_mutex, 1);

 	mca_data_alloc(b, k, gfp);
 	return b;
@@ -653,22 +662,29 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)

 	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);

-	if (b->page_order < min_order ||
-	    (!flush &&
-	     (btree_node_dirty(b) ||
-	      atomic_read(&b->io.cl.remaining) != -1))) {
-		rw_unlock(true, b);
-		return -ENOMEM;
+	if (b->page_order < min_order)
+		goto out_unlock;
+
+	if (!flush) {
+		if (btree_node_dirty(b))
+			goto out_unlock;
+
+		if (down_trylock(&b->io_mutex))
+			goto out_unlock;
+		up(&b->io_mutex);
 	}

 	if (btree_node_dirty(b))
 		bch_btree_node_write_sync(b);

 	/* wait for any in flight btree write */
-	closure_wait_event(&b->io.wait, &cl,
-			   atomic_read(&b->io.cl.remaining) == -1);
+	down(&b->io_mutex);
+	up(&b->io_mutex);

 	return 0;
+out_unlock:
+	rw_unlock(true, b);
+	return -ENOMEM;
 }

 static unsigned long bch_mca_scan(struct shrinker *shrink,
@@ -918,7 +934,7 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
 	if (!b->sets->data)
 		goto err;
 out:
-	BUG_ON(!closure_is_unlocked(&b->io.cl));
+	BUG_ON(b->io_mutex.count != 1);

 	bkey_copy(&b->key, k);
 	list_move(&b->list, &c->btree_cache);
drivers/md/bcache/btree.h +2 −1
@@ -143,7 +143,8 @@ struct btree {
 	struct bset_tree	sets[MAX_BSETS];

 	/* For outstanding btree writes, used as a lock - protects write_idx */
-	struct closure_with_waitlist	io;
+	struct closure		io;
+	struct semaphore	io_mutex;

 	struct list_head	list;
 	struct delayed_work	work;
drivers/md/bcache/debug.c +2 −5
@@ -127,9 +127,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
 	if (!b->c->verify)
 		return;

-	closure_wait_event(&b->io.wait, &cl,
-			   atomic_read(&b->io.cl.remaining) == -1);
-
+	down(&b->io_mutex);
 	mutex_lock(&b->c->verify_lock);

 	bkey_copy(&v->key, &b->key);
@@ -137,8 +135,6 @@ void bch_btree_verify(struct btree *b, struct bset *new)
 	v->level = b->level;

 	bch_btree_node_read(v);
-	closure_wait_event(&v->io.wait, &cl,
-			   atomic_read(&b->io.cl.remaining) == -1);

 	if (new->keys != v->sets[0].data->keys ||
 	    memcmp(new->start,
@@ -167,6 +163,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
 	}

 	mutex_unlock(&b->c->verify_lock);
+	up(&b->io_mutex);
 }

 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
drivers/md/bcache/journal.c +14 −13
@@ -564,6 +564,14 @@ static void journal_write_done(struct closure *cl)
 	continue_at_nobarrier(cl, journal_write, system_wq);
 }

+static void journal_write_unlock(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+	c->journal.io_in_flight = 0;
+	spin_unlock(&c->journal.lock);
+}
+
 static void journal_write_unlocked(struct closure *cl)
 	__releases(c->journal.lock)
 {
@@ -578,15 +586,7 @@ static void journal_write_unlocked(struct closure *cl)
 	bio_list_init(&list);

 	if (!w->need_write) {
-		/*
-		 * XXX: have to unlock closure before we unlock journal lock,
-		 * else we race with bch_journal(). But this way we race
-		 * against cache set unregister. Doh.
-		 */
-		set_closure_fn(cl, NULL, NULL);
-		closure_sub(cl, CLOSURE_RUNNING + 1);
-		spin_unlock(&c->journal.lock);
-		return;
+		closure_return_with_destructor(cl, journal_write_unlock);
 	} else if (journal_full(&c->journal)) {
 		journal_reclaim(c);
 		spin_unlock(&c->journal.lock);
@@ -662,11 +662,13 @@ static void journal_try_write(struct cache_set *c)

 	w->need_write = true;

-	if (closure_trylock(cl, &c->cl))
-		journal_write_unlocked(cl);
-	else
+	if (!c->journal.io_in_flight) {
+		c->journal.io_in_flight = 1;
+		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+	} else {
 		spin_unlock(&c->journal.lock);
+	}
 }

 static struct journal_write *journal_wait_for_write(struct cache_set *c,
 						    unsigned nkeys)
@@ -793,7 +795,6 @@ int bch_journal_alloc(struct cache_set *c)
 {
 	struct journal *j = &c->journal;

-	closure_init_unlocked(&j->io);
 	spin_lock_init(&j->lock);
 	INIT_DELAYED_WORK(&j->work, journal_write_work);

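The journal side replaces closure_trylock() differently: an io_in_flight flag kept under the existing journal spinlock records whether a write closure is already running, journal_try_write() only issues closure_call() when the flag is clear, and the new journal_write_unlock() destructor clears the flag again before dropping the lock. A compressed userspace illustration of that control flow, with a pthread mutex and direct calls standing in for the spinlock and closures; the function names mirror the patch but the code is not from it.

/*
 * Userspace sketch of the journal_try_write() scheme after this commit:
 * a flag under the journal lock says whether a write is already in flight.
 * Illustrative only; a pthread mutex stands in for the spinlock + closures.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;
static bool io_in_flight;

/* like journal_write_unlock(): clear the flag, then drop the lock */
static void journal_write_unlock(void)
{
	io_in_flight = false;
	pthread_mutex_unlock(&journal_lock);
}

/* like journal_write_unlocked(): entered with the lock held, releases it */
static void journal_write_unlocked(void)
{
	printf("writing journal entry\n");
	journal_write_unlock();
}

/* like journal_try_write(): called with journal_lock held */
static void journal_try_write(void)
{
	if (!io_in_flight) {
		io_in_flight = true;
		journal_write_unlocked();		/* stands in for closure_call() */
	} else {
		pthread_mutex_unlock(&journal_lock);	/* a write is already running */
	}
}

int main(void)
{
	pthread_mutex_lock(&journal_lock);
	journal_try_write();
	return 0;
}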