Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e4c93811 authored by Alasdair G Kergon
Browse files

dm: refactor bio cloning



Refactor part of the bio splitting and cloning code to try to make it
easier to understand.

Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 14fe594d
Loading
Loading
Loading
Loading
+96 −68
Original line number Diff line number Diff line
@@ -1087,7 +1087,7 @@ static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
 */
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
		      sector_t sector, unsigned short idx,
		      unsigned short bv_count, unsigned int len)
		      unsigned short bv_count, unsigned len)
{
	struct bio *clone = &tio->clone;
	unsigned trim = 0;
@@ -1159,17 +1159,23 @@ static int __send_empty_flush(struct clone_info *ci)
	return 0;
}

/*
 * Allocate a tio, clone the relevant portion of ci->bio into it and
 * map the clone to the target @ti.
 *
 * If @split_bvec is set, the region @offset + @len lies inside the
 * single bvec @idx and clone_split_bio() is used; otherwise @bv_count
 * whole bvecs starting at @idx are cloned with clone_bio().
 *
 * NOTE(review): the removed-side diff lines interleaved here have been
 * dropped; this is the post-commit version of the function.  Unlike
 * the old version, it no longer zeroes ci->sector_count - the caller
 * is now responsible for updating clone_info progress fields.
 */
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				     sector_t sector, int nr_iovecs,
				     unsigned short idx, unsigned short bv_count,
				     unsigned offset, unsigned len,
				     unsigned split_bvec)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;

	tio = alloc_tio(ci, ti, nr_iovecs, 0);

	if (split_bvec)
		clone_split_bio(tio, bio, sector, idx, offset, len);
	else
		clone_bio(tio, bio, sector, idx, bv_count, len);

	__map_bio(tio);
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
@@ -1238,65 +1244,35 @@ static int __send_write_same(struct clone_info *ci)
	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}

static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __send_discard(ci);
	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
		return __send_write_same(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->sector, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		__clone_and_map_data_bio(ci, ti);

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
/*
 * Find maximum number of sectors / bvecs we can process with a single bio.
 *
 * Walks ci->bio's bvecs starting at ci->idx, accumulating whole bvecs
 * for as long as they fit within @max sectors.  On return, *idx is the
 * index of the first bvec that did not fit (or bi_vcnt if all fitted)
 * and the return value is the accumulated length in sectors.
 *
 * NOTE(review): removed-side diff lines interleaved in this hunk have
 * been dropped; this is the post-commit version of the helper.
 */
static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
{
	struct bio *bio = ci->bio;
	sector_t bv_len, total_len = 0;

	for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
		bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);

		/* Stop at the first bvec that will not fit entirely. */
		if (bv_len > max)
			break;

		max -= bv_len;
		total_len += bv_len;
	}

	return total_len;
}

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
static int __split_bvec_across_targets(struct clone_info *ci,
				       struct dm_target *ti, sector_t max)
{
	struct bio *bio = ci->bio;
	struct bio_vec *bv = bio->bi_io_vec + ci->idx;
	sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;
	unsigned offset = 0;
	sector_t len;

	do {
		if (offset) {
@@ -1309,11 +1285,8 @@ static int __split_and_process_non_flush(struct clone_info *ci)

		len = min(remaining, max);

			tio = alloc_tio(ci, ti, 1, 0);
			clone_split_bio(tio, bio, ci->sector, ci->idx,
					bv->bv_offset + offset, len);

			__map_bio(tio);
		__clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
					 bv->bv_offset + offset, len, 1);

		ci->sector += len;
		ci->sector_count -= len;
@@ -1321,11 +1294,66 @@ static int __split_and_process_non_flush(struct clone_info *ci)
	} while (remaining -= len);

	ci->idx++;

	return 0;
}

/*
 * Select the correct strategy for processing a non-flush bio.
 *
 * Returns 0 on success or a negative errno (-EIO if ci->sector maps to
 * no valid target).
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	sector_t remaining, max_sectors;
	int split_idx;

	/* Discard and write-same bios have dedicated handlers. */
	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __send_discard(ci);
	if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
		return __send_write_same(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max_sectors = max_io_len(ci->sector, ti);

	if (ci->sector_count <= max_sectors) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
					 ci->idx, bio->bi_vcnt - ci->idx, 0,
					 ci->sector_count, 0);
		ci->sector_count = 0;
		return 0;
	}

	if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max_sectors) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		remaining = __len_within_target(ci, max_sectors, &split_idx);

		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
					 ci->idx, split_idx - ci->idx, 0,
					 remaining, 0);

		ci->sector += remaining;
		ci->sector_count -= remaining;
		ci->idx = split_idx;

		return 0;
	}

	/*
	 * The current bvec straddles a target boundary and must be
	 * split between two or more targets.
	 */
	return __split_bvec_across_targets(ci, ti, max_sectors);
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */