Commit b4b6cb61 authored by Ming Lei, committed by Jens Axboe

Revert "block: blk-merge: try to make front segments in full size"



This reverts commit a2d37968.

If the max segment size isn't 512-aligned, this patch won't work well.
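
One way the misalignment shows up (a minimal user-space sketch, not kernel code; the constants are made up for illustration): the reverted logic advanced the front segment by advance = queue_max_segment_size(q) - seg_size bytes, but credited the split accounting with only advance >> 9 sectors, so any advance that is not a multiple of 512 leaves bytes unaccounted for.

#include <stdio.h>

int main(void)
{
	unsigned max_seg  = 65280;	/* example limit, NOT a multiple of 512 */
	unsigned seg_size = 4096;	/* bytes already merged into the segment */

	/* The reverted patch front-filled the segment by this many bytes... */
	unsigned advance = max_seg - seg_size;		/* 61184 */

	/* ...but only credited whole sectors, as in `sectors += advance >> 9`. */
	unsigned accounted = (advance >> 9) << 9;	/* 119 sectors = 60928 bytes */

	printf("advance   = %u bytes\n", advance);
	printf("accounted = %u bytes\n", accounted);
	printf("lost      = %u bytes\n", advance - accounted);	/* 256 */
	return 0;
}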

Also, once multipage bvec is enabled, adjacent bvecs won't be physically
contiguous if pages are added via bio_add_page(), so we don't need this
kind of complicated logic.
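
A rough sketch of the rule that second point alludes to (illustrative names only; this is not the kernel's multipage-bvec implementation): a page added via bio_add_page() that is physically contiguous with the current bvec is absorbed into it, so only a physical discontinuity starts a new bvec, and two adjacent bvecs are by construction never contiguous.

#include <stdio.h>

struct mp_bvec {
	unsigned long phys;	/* physical address of first byte */
	unsigned int  len;	/* length in bytes */
};

/* Contiguous pages are merged into the current bvec; hence there is
 * nothing left for a "fill the front segment" pass to merge across
 * adjacent bvecs. */
static int try_merge_page(struct mp_bvec *bv, unsigned long page_phys,
			  unsigned int len)
{
	if (bv->phys + bv->len == page_phys) {
		bv->len += len;		/* contiguous: grow current bvec */
		return 1;
	}
	return 0;			/* gap: caller opens a new bvec */
}

int main(void)
{
	struct mp_bvec bv = { .phys = 0x1000, .len = 4096 };

	/* 0x2000 directly follows 0x1000 + 4096: merged into bv. */
	printf("contiguous: %d\n", try_merge_page(&bv, 0x2000, 4096));
	/* 0x8000 does not: a new bvec would start here. */
	printf("gap:        %d\n", try_merge_page(&bv, 0x8000, 4096));
	return 0;
}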

Reported-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5448aca4
block/blk-merge.c  +5 −49
@@ -109,7 +109,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
-	unsigned advance = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
 		/*
@@ -133,32 +132,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		}
 
 		if (bvprvp && blk_queue_cluster(q)) {
+			if (seg_size + bv.bv_len > queue_max_segment_size(q))
+				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
 				goto new_segment;
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;
-			if (seg_size + bv.bv_len > queue_max_segment_size(q)) {
-				/*
-				 * One assumption is that initial value of
-				 * @seg_size(equals to bv.bv_len) won't be
-				 * bigger than max segment size, but this
-				 * becomes false after multipage bvecs.
-				 */
-				advance = queue_max_segment_size(q) - seg_size;
-
-				if (advance > 0) {
-					seg_size += advance;
-					sectors += advance >> 9;
-					bv.bv_len -= advance;
-					bv.bv_offset += advance;
-				}
-
-				/*
-				 * Still need to put remainder of current
-				 * bvec into a new segment.
-				 */
-				goto new_segment;
-			}
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
@@ -180,12 +159,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		seg_size = bv.bv_len;
 		sectors += bv.bv_len >> 9;
 
-		/* restore the bvec for iterator */
-		if (advance) {
-			bv.bv_len += advance;
-			bv.bv_offset -= advance;
-			advance = 0;
-		}
 	}
 
 	do_split = false;
@@ -386,29 +359,16 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 {
 
 	int nbytes = bvec->bv_len;
-	unsigned advance = 0;
 
 	if (*sg && *cluster) {
+		if ((*sg)->length + nbytes > queue_max_segment_size(q))
+			goto new_segment;
+
 		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
 			goto new_segment;
 		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 			goto new_segment;
 
-		/*
-		 * try best to merge part of the bvec into previous
-		 * segment and follow same policy with
-		 * blk_bio_segment_split()
-		 */
-		if ((*sg)->length + nbytes > queue_max_segment_size(q)) {
-			advance = queue_max_segment_size(q) - (*sg)->length;
-			if (advance) {
-				(*sg)->length += advance;
-				bvec->bv_offset += advance;
-				bvec->bv_len -= advance;
-			}
-			goto new_segment;
-		}
-
 		(*sg)->length += nbytes;
 	} else {
 new_segment:
@@ -431,10 +391,6 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 
 		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
 		(*nsegs)++;
-
-		/* for making iterator happy */
-		bvec->bv_offset -= advance;
-		bvec->bv_len += advance;
 	}
 	*bvprv = *bvec;
 }
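
For reference, the two split policies side by side, as a hedged user-space toy (seg_size, bv_len, and max_seg stand in for the split state and queue limit; this mirrors the shape of the diff above, not the kernel functions themselves). The restored code starts a new segment as soon as adding the whole bvec would exceed the segment-size limit; the reverted code first topped the previous segment up to the limit and only put the remainder into a new one.

#include <stdio.h>

/* Toy split decision: given bytes already in the current segment and
 * an incoming bvec, report how many bvec bytes stay in the current
 * segment versus start a new one. */
static void split(const char *name, unsigned seg_size, unsigned bv_len,
		  unsigned max_seg, int front_fill)
{
	unsigned front = 0;

	if (seg_size + bv_len > max_seg) {
		if (front_fill && max_seg > seg_size)
			front = max_seg - seg_size;	/* reverted behavior */
		/* restored behavior: front stays 0, whole bvec moves on */
	} else {
		front = bv_len;				/* fits entirely */
	}
	printf("%s: %u bytes stay, %u bytes start a new segment\n",
	       name, front, bv_len - front);
}

int main(void)
{
	/* 60 KB already merged, a 16 KB bvec arrives, 64 KB segment limit. */
	split("restored", 61440, 16384, 65536, 0);	/* 0 stay, 16384 move */
	split("reverted", 61440, 16384, 65536, 1);	/* 4096 stay, 12288 move */
	return 0;
}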