
Commit 07388549 authored by Ming Lei, committed by Jens Axboe

blk-merge: fix blk_recount_segments

QUEUE_FLAG_NO_SG_MERGE is set by default for blk-mq devices, so the
computed bio->bi_phys_segments may be bigger than queue_max_segments(q)
for those devices, and drivers then fail to handle that case; for
example, the BUG_ON() in virtio_queue_rq() can be triggered for
virtio-blk:

	https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1359146
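
As a rough illustration of the overshoot, here is a minimal user-space
sketch (the struct, the helper, and the numbers are made up, not kernel
code): with SG merging disabled every bvec counts as its own physical
segment, so a bio with more bvecs than the queue's segment limit reports
a count the driver cannot accept.

/* Toy user-space model: contrast merged vs. unmerged segment counts.
 * The struct, the addresses, and max_segments are illustrative only. */
#include <stdio.h>

struct toy_bvec { unsigned long addr; unsigned int len; };

/* With SG merging, physically contiguous bvecs collapse into one segment. */
static unsigned int count_merged(const struct toy_bvec *bv, unsigned int n)
{
	unsigned int i, segs = 0;

	for (i = 0; i < n; i++) {
		if (i && bv[i].addr == bv[i - 1].addr + bv[i - 1].len)
			continue;	/* contiguous with previous segment */
		segs++;
	}
	return segs;
}

int main(void)
{
	/* Four physically contiguous 4K pages. */
	struct toy_bvec bv[] = {
		{ 0x10000, 4096 }, { 0x11000, 4096 },
		{ 0x12000, 4096 }, { 0x13000, 4096 },
	};
	unsigned int n = sizeof(bv) / sizeof(bv[0]);
	unsigned int max_segments = 2;	/* stand-in for queue_max_segments(q) */
	unsigned int merged = count_merged(bv, n);
	unsigned int unmerged = n;	/* NO_SG_MERGE: one segment per bvec */

	printf("merged=%u unmerged=%u max=%u\n", merged, unmerged, max_segments);
	if (unmerged > max_segments)
		printf("unmerged count exceeds the driver limit\n");
	return 0;
}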

This patch fixes the issue by ignoring the QUEUE_FLAG_NO_SG_MERGE
flag when the computed bio->bi_phys_segments would be bigger than
queue_max_segments(q). The regression was introduced by commit
05f1dd53 ("block: add queue flag for disabling SG merging").
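
As a rough sketch of the decision this change introduces (again a
user-space model with made-up names; recalc_segments() merely stands in
for __blk_recalc_rq_segments(q, bio, false)): the NO_SG_MERGE fast path
that takes bi_vcnt directly is used only while that count stays below
queue_max_segments(q); otherwise the full recalculation runs with SG
merging allowed.

/* Toy user-space model of the fixed blk_recount_segments() decision.
 * toy_queue/toy_bio and recalc_segments() are made up; the latter only
 * stands in for __blk_recalc_rq_segments(q, bio, false). */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue { unsigned int max_segments; bool no_sg_merge; };
struct toy_bio   { unsigned int bi_vcnt; unsigned int bi_phys_segments; };

/* Placeholder for the full recalculation with SG merging allowed; here
 * we simply assume the bvecs merge down to (at most) the queue limit. */
static unsigned int recalc_segments(const struct toy_queue *q,
				    const struct toy_bio *bio)
{
	return bio->bi_vcnt < q->max_segments ? bio->bi_vcnt : q->max_segments;
}

static void recount_segments(const struct toy_queue *q, struct toy_bio *bio)
{
	/* Fast path only when it cannot exceed the driver's segment limit. */
	if (q->no_sg_merge && bio->bi_vcnt < q->max_segments)
		bio->bi_phys_segments = bio->bi_vcnt;
	else
		bio->bi_phys_segments = recalc_segments(q, bio);
}

int main(void)
{
	struct toy_queue q = { .max_segments = 2, .no_sg_merge = true };
	struct toy_bio bio = { .bi_vcnt = 4, .bi_phys_segments = 0 };

	recount_segments(&q, &bio);
	printf("bi_vcnt=%u -> bi_phys_segments=%u (limit %u)\n",
	       bio.bi_vcnt, bio.bi_phys_segments, q.max_segments);
	return 0;
}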

Reported-by: Kick In <pierre-andre.morey@canonical.com>
Tested-by: Chris J Arges <chris.j.arges@canonical.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 7505ceaf
block/blk-merge.c: +11 −6
@@ -10,10 +10,11 @@
 #include "blk.h"
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-					     struct bio *bio)
+					     struct bio *bio,
+					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1, no_sg_merge;
+	int cluster, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
-	no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
@@ -88,18 +88,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
+			&rq->q->queue_flags);
+
+	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
+			no_sg_merge);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+			bio->bi_vcnt < queue_max_segments(q))
 		bio->bi_phys_segments = bio->bi_vcnt;
 	else {
 		struct bio *nxt = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
 		bio->bi_next = nxt;
 	}