
Commit 7ef6b12a authored by Ming Lin, committed by Jens Axboe

md/raid5: split bio for chunk_aligned_read



If a read request fits entirely in a chunk, it will be passed directly to the
underlying device (provided it hasn't failed, of course).  If it doesn't fit,
it takes the slightly less efficient path through the stripe_cache.
Requests that reach the stripe_cache are always completely split up as
necessary.
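
For illustration only, a hypothetical helper (not the driver's
in_chunk_boundary()) showing what "fits entirely in a chunk" means, assuming
a power-of-two chunk size in sectors (the same assumption behind the
'& (chunk_sects - 1)' arithmetic in the patch below):

#include <stdbool.h>

/*
 * Hypothetical helper, not the driver's in_chunk_boundary(): a read of
 * 'len' sectors starting at 'sector' fits in a single chunk when its
 * offset into that chunk plus its length does not cross the chunk
 * boundary.  Assumes 'chunk_sects' is a power of two.
 */
static inline bool read_fits_one_chunk(unsigned long long sector,
				       unsigned int len,
				       unsigned int chunk_sects)
{
	unsigned int offset = sector & (chunk_sects - 1);

	return offset + len <= chunk_sects;
}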

So with RAID5, ripping out the merge_bvec_fn doesn't cause it to stop
working, but could cause it to take the less efficient path more often.

All that is needed to manage this is for 'chunk_aligned_read' to do some
bio splitting, much like the RAID0 code does.
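
As a rough sketch of that splitting (stand-alone user-space C with made-up
example numbers, not the kernel code), each piece is simply the distance
from the current sector to the next chunk boundary:

#include <stdio.h>

/*
 * Stand-alone sketch of the split arithmetic: with a power-of-two chunk
 * size, 'sector & (chunk_sects - 1)' is the offset into the current
 * chunk, so 'chunk_sects - offset' is how many sectors remain before the
 * next chunk boundary.  A request longer than that is carved into
 * chunk-aligned pieces, one per iteration.  All values are examples.
 */
int main(void)
{
	unsigned long long sector = 1000;	/* example start sector */
	unsigned int chunk_sects = 128;		/* example chunk size in sectors */
	unsigned int remaining = 300;		/* example request length in sectors */

	while (remaining) {
		unsigned int to_boundary = chunk_sects - (sector & (chunk_sects - 1));
		unsigned int piece = remaining < to_boundary ? remaining : to_boundary;

		printf("piece: sector %llu, %u sectors\n", sector, piece);
		sector += piece;
		remaining -= piece;
	}
	return 0;
}

With these numbers the request is carved into pieces of 24, 128, 128 and 20
sectors; the real code instead splits off one chunk-aligned piece per loop
iteration with bio_split() and chains it to the parent with bio_chain().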

Cc: Neil Brown <neilb@suse.de>
Cc: linux-raid@vger.kernel.org
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent b49a0871
+32 −5
@@ -4800,7 +4800,7 @@ static int bio_fits_rdev(struct bio *bi)
 	return 1;
 }
 
-static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 {
 	struct r5conf *conf = mddev->private;
 	int dd_idx;
@@ -4809,7 +4809,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	sector_t end_sector;
 
 	if (!in_chunk_boundary(mddev, raid_bio)) {
-		pr_debug("chunk_aligned_read : non aligned\n");
+		pr_debug("%s: non aligned\n", __func__);
 		return 0;
 	}
 	/*
@@ -4886,6 +4886,31 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	}
 }
 
+static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
+{
+	struct bio *split;
+
+	do {
+		sector_t sector = raid_bio->bi_iter.bi_sector;
+		unsigned chunk_sects = mddev->chunk_sectors;
+		unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
+
+		if (sectors < bio_sectors(raid_bio)) {
+			split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
+			bio_chain(split, raid_bio);
+		} else
+			split = raid_bio;
+
+		if (!raid5_read_one_chunk(mddev, split)) {
+			if (split != raid_bio)
+				generic_make_request(raid_bio);
+			return split;
+		}
+	} while (split != raid_bio);
+
+	return NULL;
+}
+
 /* __get_priority_stripe - get the next stripe to process
  *
  * Full stripe writes are allowed to pass preread active stripes up until
@@ -5163,9 +5188,11 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	 * data on failed drives.
 	 */
 	if (rw == READ && mddev->degraded == 0 &&
-	     mddev->reshape_position == MaxSector &&
-	     chunk_aligned_read(mddev,bi))
-		return;
+	    mddev->reshape_position == MaxSector) {
+		bi = chunk_aligned_read(mddev, bi);
+		if (!bi)
+			return;
+	}
 
 	if (unlikely(bi->bi_rw & REQ_DISCARD)) {
 		make_discard_request(mddev, bi);