
Commit 0c2a6fe4 authored by Jens Axboe

blk-mq: don't special case flush inserts for blk-mq-sched



The current request insertion machinery works just fine for
directly inserting flushes, so no need to special case
this anymore.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
parent c7a571b4
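
For orientation, the flush/FUA handling in blk_mq_make_request() after this patch reads roughly as below. This is a simplified excerpt stitched together from the diff that follows, not the complete function; the plug and merge handling in between is elided, and blk_sq_make_request() changes identically. With a scheduler attached, the request jumps to the existing elv_insert path and is queued through blk_mq_sched_insert_request(); without one, it is inserted via blk_insert_flush() and jumps to the existing run_queue label, so the hardware-queue run and the final blk_mq_put_ctx() are shared with the normal path instead of being duplicated in the flush branch.

	if (unlikely(is_flush_fua)) {
		if (q->elevator)
			goto elv_insert;	/* scheduler present: use the normal insert path below */
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;		/* reuse the hw-queue run at the end of the function */
	}

	/* ... plug and merge handling, unchanged by this patch ... */

	if (q->elevator) {
elv_insert:
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_sched_insert_request(rq, false, true, ...);	/* remaining args as in the tree */
	}

	/* ... */
run_queue:
	blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	/* falls through to the existing blk_mq_put_ctx(data.ctx) */

In short, a flush no longer grabs a driver tag and runs the hardware queue by hand; it is inserted through the same machinery as any other request.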
block/blk-mq.c  +10 −8
@@ -1434,12 +1434,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	if (unlikely(is_flush_fua)) {
-		blk_mq_put_ctx(data.ctx);
+		if (q->elevator)
+			goto elv_insert;
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_get_driver_tag(rq, NULL, true);
 		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(data.hctx, true);
-		goto done;
+		goto run_queue;
 	}
 
 	plug = current->plug;
@@ -1489,6 +1488,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	if (q->elevator) {
+elv_insert:
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
@@ -1502,6 +1502,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
+run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
@@ -1557,12 +1558,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	if (unlikely(is_flush_fua)) {
-		blk_mq_put_ctx(data.ctx);
+		if (q->elevator)
+			goto elv_insert;
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_get_driver_tag(rq, NULL, true);
 		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(data.hctx, true);
-		goto done;
+		goto run_queue;
 	}
 
 	/*
@@ -1600,6 +1600,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	if (q->elevator) {
+elv_insert:
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
@@ -1613,6 +1614,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
+run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}