Commit 8e51e414 authored by Kent Overstreet, committed by Kent Overstreet

bcache: Use standard utility code

Some of bcache's utility code has made it into the rest of the kernel,
so drop the bcache versions.

Bcache used to have a workaround for allocating from a bio set under
generic_make_request() (if you allocated more than once, the bios you
already allocated would get stuck on current->bio_list when you
submitted, and you'd risk deadlock): bcache would mask out __GFP_WAIT
when allocating bios under generic_make_request() so that the
allocation could fail, and it would retry from a workqueue. But
bio_alloc_bioset() has a workaround now, so we can drop this hack and
the associated error handling.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
parent 47cd2eb0
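
For context, the workaround being dropped followed the pattern below. This is a
minimal sketch reconstructed from the code removed in io.c further down; the
wrapper function name is hypothetical:

	#include <linux/bio.h>
	#include <linux/sched.h>

	/*
	 * Old bcache pattern: under generic_make_request(),
	 * current->bio_list is non-NULL, and a second mempool-backed
	 * allocation from the same bio_set could deadlock. Mask out
	 * __GFP_WAIT so the allocation fails instead of blocking, and
	 * let the caller retry from a safe context (e.g. a workqueue).
	 */
	static struct bio *alloc_bio_safely(struct bio_set *bs, gfp_t gfp)
	{
		if (current->bio_list)	/* under generic_make_request()? */
			gfp &= ~__GFP_WAIT;

		return bio_alloc_bioset(gfp, 0, bs);	/* may be NULL now */
	}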
drivers/md/bcache/btree.c +2 −5
@@ -350,7 +350,7 @@ static void do_btree_node_write(struct btree *b)
	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));

-	if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) {
+	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -1865,7 +1865,7 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
	    should_split(b))
		goto out;

-	op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio));
+	op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));

	SET_KEY_PTRS(&op->replace, 1);
	get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
@@ -2194,9 +2194,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
					 KEY_OFFSET(k) - bio->bi_sector);

		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-		if (!n)
-			return -EAGAIN;
-
		if (n == bio)
			op->lookup_done = true;

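A note on the rename above, which recurs throughout this patch: bio_end() was a
private bcache helper; bio_end_sector() is the equivalent the block layer now
provides, defined in this era roughly as:

	/* First sector past the end of the bio (include/linux/bio.h) */
	#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors(bio))
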
drivers/md/bcache/debug.c +1 −1
@@ -199,7 +199,7 @@ void bch_data_verify(struct search *s)
	if (!check)
		return;

-	if (bch_bio_alloc_pages(check, GFP_NOIO))
+	if (bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	check->bi_rw		= READ_SYNC;
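
bio_alloc_pages() is the generic replacement for bch_bio_alloc_pages(): it
backs every bvec of an already-laid-out bio with a freshly allocated page and
returns 0 on success. A sketch of the allocate/free round trip, pairing it with
the standard iterator used in the movinggc.c hunk below (the function name is
hypothetical):

	#include <linux/bio.h>
	#include <linux/gfp.h>

	/* Allocate one page per segment of @bio, then free them all. */
	static int fill_then_release(struct bio *bio)
	{
		struct bio_vec *bv;
		int i;

		if (bio_alloc_pages(bio, __GFP_NOWARN|GFP_NOIO))
			return -ENOMEM;

		/* ... do I/O with the bio here ... */

		bio_for_each_segment_all(bv, bio, i)
			__free_page(bv->bv_page);

		return 0;
	}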
drivers/md/bcache/io.c +22 −42
@@ -68,13 +68,6 @@ static void bch_generic_make_request_hack(struct bio *bio)
 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
 * bvec boundry; it is the caller's responsibility to ensure that @bio is not
 * freed before the split.
- *
- * If bch_bio_split() is running under generic_make_request(), it's not safe to
- * allocate more than one bio from the same bio set. Therefore, if it is running
- * under generic_make_request() it masks out __GFP_WAIT when doing the
- * allocation. The caller must check for failure if there's any possibility of
- * it being called from under generic_make_request(); it is then the caller's
- * responsibility to retry from a safe context (by e.g. punting to workqueue).
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
			  gfp_t gfp, struct bio_set *bs)
@@ -85,15 +78,6 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,

	BUG_ON(sectors <= 0);

-	/*
-	 * If we're being called from underneath generic_make_request() and we
-	 * already allocated any bios from this bio set, we risk deadlock if we
-	 * use the mempool. So instead, we possibly fail and let the caller punt
-	 * to workqueue or somesuch and retry in a safe context.
-	 */
-	if (current->bio_list)
-		gfp &= ~__GFP_WAIT;
-
	if (sectors >= bio_sectors(bio))
		return bio;

@@ -164,17 +148,18 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
				      queue_max_segments(q));
-	struct bio_vec *bv, *end = bio_iovec(bio) +
-		min_t(int, bio_segments(bio), max_segments);

	if (bio->bi_rw & REQ_DISCARD)
		return min(ret, q->limits.max_discard_sectors);

	if (bio_segments(bio) > max_segments ||
	    q->merge_bvec_fn) {
+		struct bio_vec *bv;
+		int i, seg = 0;
+
		ret = 0;

-		for (bv = bio_iovec(bio); bv < end; bv++) {
+		bio_for_each_segment(bv, bio, i) {
			struct bvec_merge_data bvm = {
				.bi_bdev	= bio->bi_bdev,
				.bi_sector	= bio->bi_sector,
@@ -182,10 +167,14 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
				.bi_rw		= bio->bi_rw,
			};

+			if (seg == max_segments)
+				break;
+
			if (q->merge_bvec_fn &&
			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
				break;

+			seg++;
			ret += bv->bv_len >> 9;
		}
	}
@@ -222,30 +211,10 @@ static void bch_bio_submit_split_endio(struct bio *bio, int error)
	closure_put(cl);
}

-static void __bch_bio_submit_split(struct closure *cl)
-{
-	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-	struct bio *bio = s->bio, *n;
-
-	do {
-		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-				  GFP_NOIO, s->p->bio_split);
-		if (!n)
-			continue_at(cl, __bch_bio_submit_split, system_wq);
-
-		n->bi_end_io	= bch_bio_submit_split_endio;
-		n->bi_private	= cl;
-
-		closure_get(cl);
-		bch_generic_make_request_hack(n);
-	} while (n != bio);
-
-	continue_at(cl, bch_bio_submit_split_done, NULL);
-}
-
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
+	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;
@@ -254,6 +223,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
+	closure_init(&s->cl, NULL);

	s->bio		= bio;
	s->p		= p;
@@ -261,8 +231,18 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
	s->bi_private	= bio->bi_private;
	bio_get(bio);

-	closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
-	return;
+	do {
+		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
+				  GFP_NOIO, s->p->bio_split);
+
+		n->bi_end_io	= bch_bio_submit_split_endio;
+		n->bi_private	= &s->cl;
+
+		closure_get(&s->cl);
+		bch_generic_make_request_hack(n);
+	} while (n != bio);
+
+	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
	bch_generic_make_request_hack(bio);
}
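
The bch_bio_max_sectors() rework above shows the recurring conversion in this
patch: open-coded bio_vec pointer walks become the standard
bio_for_each_segment() iterator. A minimal sketch of the pattern as it works in
this era, where the iterator visits each bio_vec from bi_idx onward (the
function itself is hypothetical):

	#include <linux/bio.h>

	/* Total up the sectors in @bio's remaining segments. */
	static unsigned count_segment_sectors(struct bio *bio)
	{
		struct bio_vec *bv;
		unsigned sectors = 0;
		int i;

		bio_for_each_segment(bv, bio, i)
			sectors += bv->bv_len >> 9;

		return sectors;
	}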
drivers/md/bcache/movinggc.c +4 −3
@@ -46,9 +46,10 @@ static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
	struct bio *bio = &io->bio.bio;
-	struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt);
+	struct bio_vec *bv;
+	int i;

-	while (bv-- != bio->bi_io_vec)
+	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);

	if (io->s.op.insert_collision)
@@ -158,7 +159,7 @@ static void read_moving(struct closure *cl)
		bio->bi_rw	= READ;
		bio->bi_end_io	= read_moving_endio;

-		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
+		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);
drivers/md/bcache/request.c +18 −69
@@ -509,10 +509,6 @@ static void bch_insert_data_loop(struct closure *cl)
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
-		if (!n) {
-			__bkey_put(op->c, k);
-			continue_at(cl, bch_insert_data_loop, bcache_wq);
-		}

		n->bi_end_io	= bch_insert_data_endio;
		n->bi_private	= cl;
@@ -821,53 +817,13 @@ static void request_read_done(struct closure *cl)
	 */

	if (s->op.cache_bio) {
-		struct bio_vec *src, *dst;
-		unsigned src_offset, dst_offset, bytes;
-		void *dst_ptr;
-
		bio_reset(s->op.cache_bio);
		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
		bch_bio_map(s->op.cache_bio, NULL);

-		src = bio_iovec(s->op.cache_bio);
-		dst = bio_iovec(s->cache_miss);
-		src_offset = src->bv_offset;
-		dst_offset = dst->bv_offset;
-		dst_ptr = kmap(dst->bv_page);
-
-		while (1) {
-			if (dst_offset == dst->bv_offset + dst->bv_len) {
-				kunmap(dst->bv_page);
-				dst++;
-				if (dst == bio_iovec_idx(s->cache_miss,
-						s->cache_miss->bi_vcnt))
-					break;
-
-				dst_offset = dst->bv_offset;
-				dst_ptr = kmap(dst->bv_page);
-			}
-
-			if (src_offset == src->bv_offset + src->bv_len) {
-				src++;
-				if (src == bio_iovec_idx(s->op.cache_bio,
-						 s->op.cache_bio->bi_vcnt))
-					BUG();
-
-				src_offset = src->bv_offset;
-			}
-
-			bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
-				    src->bv_offset + src->bv_len - src_offset);
-
-			memcpy(dst_ptr + dst_offset,
-			       page_address(src->bv_page) + src_offset,
-			       bytes);
-
-			src_offset	+= bytes;
-			dst_offset	+= bytes;
-		}
+		bio_copy_data(s->cache_miss, s->op.cache_bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
@@ -912,9 +868,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
	struct bio *miss;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (!miss)
-		return -EAGAIN;
-
	if (miss == bio)
		s->op.lookup_done = true;

@@ -933,8 +886,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
		reada = min(dc->readahead >> 9,
			    sectors - bio_sectors(miss));

-		if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
-			reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
+		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
+			reada = bdev_sectors(miss->bi_bdev) -
+				bio_end_sector(miss);
	}

	s->cache_bio_sectors = bio_sectors(miss) + reada;
@@ -958,7 +912,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
		goto out_put;

	bch_bio_map(s->op.cache_bio, NULL);
-	if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss = miss;
@@ -1002,7 +956,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
	struct bio *bio = &s->bio.bio;
	struct bkey start, end;
	start = KEY(dc->disk.id, bio->bi_sector, 0);
-	end = KEY(dc->disk.id, bio_end(bio), 0);
+	end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

@@ -1176,7 +1130,7 @@ found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

-		i->last			 = bio_end(bio);
+		i->last			 = bio_end_sector(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	 = i->sequential;

@@ -1294,29 +1248,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
+	struct bio_vec *bv;
+	int i;
+
	/* Zero fill bio */

-	while (bio->bi_idx != bio->bi_vcnt) {
-		struct bio_vec *bv = bio_iovec(bio);
+	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

-		bv->bv_len	-= j << 9;
-		bv->bv_offset	+= j << 9;
-
-		if (bv->bv_len)
-			return 0;
-
-		bio->bi_sector	+= j;
-		bio->bi_size	-= j << 9;
-
-		bio->bi_idx++;
		sectors	-= j;
	}

-	s->op.lookup_done = true;
+	bio_advance(bio, min(sectors << 9, bio->bi_size));
+
+	if (!bio->bi_size)
+		s->op.lookup_done = true;

	return 0;
@@ -1345,7 +1294,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
	} else if (bio_has_data(bio) || s->op.skip) {
		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
-					     &KEY(d->id, bio_end(bio), 0));
+					     &KEY(d->id, bio_end_sector(bio), 0));

		s->writeback	= true;
		s->op.cache_bio	= bio;
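
Two more standard helpers carry the request.c conversion above: bio_copy_data(),
which replaces the open-coded kmap()/memcpy() loop in request_read_done(), and
bio_advance(), which replaces the manual bi_sector/bi_size/bi_idx bookkeeping in
flash_dev_cache_miss(). A hedged sketch combining the two (the function and its
bios are hypothetical; the helpers themselves are the real block-layer API):

	#include <linux/bio.h>

	/* Copy @src's data into @dst page by page, then consume the
	 * first @bytes bytes of @dst so its iterator points past them. */
	static void copy_then_advance(struct bio *dst, struct bio *src,
				      unsigned bytes)
	{
		bio_copy_data(dst, src);
		bio_advance(dst, bytes);
	}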