
Commit 9082e87b authored by Christoph Hellwig, committed by Jens Axboe

block: remove struct bio_batch



It can be replaced with a combination of bio_chain and submit_bio_wait.
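For illustration, a condensed sketch of the new scheme (kernel context assumed; it is distilled from the blkdev_issue_discard() hunks below, with the granularity/alignment handling, plugging and cond_resched() omitted, and the function name issue_chained() invented for the example):

static int issue_chained(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int type)
{
	struct bio *bio = NULL;
	int ret = 0;

	while (nr_sects) {
		/* Cap each bio so bi_size (in bytes) cannot overflow. */
		unsigned int req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * next_bio() chains the previously built bio to the freshly
		 * allocated one and submits it; only the last bio in the
		 * chain is left unsubmitted.
		 */
		bio = next_bio(bio, type, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_size = req_sects << 9;

		nr_sects -= req_sects;
		sector += req_sects;
	}

	/*
	 * One submit_bio_wait() on the tail covers the whole chain:
	 * bio_chain() propagates completion and errors to the parent bio,
	 * so the per-bio bookkeeping of struct bio_batch is not needed.
	 */
	if (bio)
		ret = submit_bio_wait(type, bio);
	return ret != -EOPNOTSUPP ? ret : 0;
}

The explicit -ENOMEM checks also go away: bio_alloc() calls that are allowed to sleep are backed by a mempool and do not return NULL. As before, -EOPNOTSUPP is filtered out so a device that does not support the request type is not reported as an error.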

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Sagi Grimberg <sagig@grimberg.me>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 1fcbcc33
block/blk-lib.c: +27 −91
@@ -9,21 +9,17 @@
 
 #include "blk.h"
 
-struct bio_batch {
-	atomic_t		done;
-	int			error;
-	struct completion	*wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+		gfp_t gfp)
 {
-	struct bio_batch *bb = bio->bi_private;
+	struct bio *new = bio_alloc(gfp, nr_pages);
+
+	if (bio) {
+		bio_chain(bio, new);
+		submit_bio(rw, bio);
+	}
 
-	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-		bb->error = bio->bi_error;
-	if (atomic_dec_and_test(&bb->done))
-		complete(bb->wait);
-	bio_put(bio);
+	return new;
 }
 
 /**
@@ -40,13 +36,11 @@ static void bio_batch_end_io(struct bio *bio)
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
 	unsigned int granularity;
 	int alignment;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;
 	struct blk_plug plug;
 
@@ -66,25 +60,15 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		type |= REQ_SECURE;
 	}
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
 		sector_t end_sect, tmp;
 
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
 		/* Make sure bi_size doesn't overflow */
 		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
 
-		/*
+		/**
 		 * If splitting a request, and the next starting sector would be
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
@@ -98,18 +82,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}
 
+		bio = next_bio(bio, type, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 
 		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;
 
-		atomic_inc(&bb.done);
-		submit_bio(type, bio);
-
 		/*
 		 * We can loop for a long time in here, if someone does
 		 * full device discards (like mkfs). Be nice and allow
@@ -118,15 +98,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 */
 		cond_resched();
 	}
+	if (bio)
+		ret = submit_bio_wait(type, bio);
 	blk_finish_plug(&plug);
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
@@ -145,11 +121,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			    sector_t nr_sects, gfp_t gfp_mask,
 			    struct page *page)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int max_write_same_sectors;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;
 
 	if (!q)
@@ -158,21 +132,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	while (nr_sects) {
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +149,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
-
-		atomic_inc(&bb.done);
-		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
 
@@ -216,28 +172,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 				  sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
-	struct bio *bio;
-	struct bio_batch bb;
+	struct bio *bio = NULL;
 	unsigned int sz;
-	DECLARE_COMPLETION_ONSTACK(wait);
-
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
 
-	ret = 0;
 	while (nr_sects != 0) {
-		bio = bio_alloc(gfp_mask,
-				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, WRITE,
+				min(nr_sects, (sector_t)BIO_MAX_PAGES),
+				gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev   = bdev;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_private = &bb;
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +190,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			if (ret < (sz << 9))
 				break;
 		}
-		ret = 0;
-		atomic_inc(&bb.done);
-		submit_bio(WRITE, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		return submit_bio_wait(WRITE, bio);
+	return 0;
 }
 
 /**
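For context, the exported interface is unchanged by this commit. A hypothetical caller (function name, range and GFP choice are illustrative only, not part of the patch) would still do:

/* Discard the first 1 MiB of a block device. */
static int discard_first_mib(struct block_device *bdev)
{
	sector_t nr_sects = (1024 * 1024) >> 9;	/* 1 MiB in 512-byte sectors */

	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_NOFS, 0);
}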