Commit 43a5e4e2 authored by Ming Lei, committed by Jens Axboe

block: blk-mq: support draining mq queue



blk_mq_drain_queue() is introduced so that we can drain the
mq queue inside blk_cleanup_queue().

Also, don't accept new requests any more once the queue is
marked as dying.

Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b28bc9b3
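
Before the hunks, it helps to see the protocol as a whole: every mq submitter takes a reference on q->mq_usage_counter through blk_mq_queue_enter() and drops it in blk_mq_queue_exit(); teardown marks the queue dying, which makes blk_mq_queue_enter() fail with -ENODEV instead of sleeping forever on the bypass wait, and then polls until the counter sums to zero. Below is a minimal user-space sketch of that idea, assuming nothing from the kernel tree: the names are mine, C11 atomics plus a re-check stand in for the kernel's queue_lock and per-cpu counter, and plain usleep() stands in for blk_mq_run_queues() + msleep(10). Build with: gcc -pthread drain.c

/*
 * Minimal user-space analogue of the drain protocol (illustrative only;
 * all names here are mine, not the kernel's).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct mq {
	atomic_long usage;	/* stands in for q->mq_usage_counter */
	atomic_bool dying;	/* stands in for blk_queue_dying(q) */
};

/* Like blk_mq_queue_enter(): refuse new work once the queue is dying. */
static int mq_enter(struct mq *q)
{
	if (atomic_load(&q->dying))
		return -1;			/* kernel returns -ENODEV */
	atomic_fetch_add(&q->usage, 1);
	/* re-check; the kernel does this atomically under q->queue_lock */
	if (atomic_load(&q->dying)) {
		atomic_fetch_sub(&q->usage, 1);
		return -1;
	}
	return 0;
}

/* Like blk_mq_queue_exit(). */
static void mq_exit(struct mq *q)
{
	atomic_fetch_sub(&q->usage, 1);
}

/* Like __blk_mq_drain_queue(): poll until no request is in use. */
static void mq_drain(struct mq *q)
{
	atomic_store(&q->dying, true);
	while (atomic_load(&q->usage) != 0)
		usleep(10 * 1000);	/* kernel also kicks the hw queues here */
}

static void *submitter(void *arg)
{
	struct mq *q = arg;

	while (mq_enter(q) == 0) {	/* stop once the queue is dying */
		usleep(100);		/* "request in flight" */
		mq_exit(q);
	}
	return NULL;
}

int main(void)
{
	struct mq q = { 0 };
	pthread_t t[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, submitter, &q);
	usleep(5 * 1000);
	mq_drain(&q);	/* after this, no work is in flight and none can start */
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("drained, usage now %ld\n", (long)atomic_load(&q.usage));
	return 0;
}
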
block/blk-core.c +8 −2

@@ -38,6 +38,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -497,8 +498,13 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * Drain all requests queued before DYING marking. Set DEAD flag to
 	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
-	spin_lock_irq(lock);
-	__blk_drain_queue(q, true);
+	if (q->mq_ops) {
+		blk_mq_drain_queue(q);
+		spin_lock_irq(lock);
+	} else {
+		spin_lock_irq(lock);
+		__blk_drain_queue(q, true);
+	}
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
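A note on the asymmetry in the hunk above: __blk_mq_drain_queue() sleeps (msleep() in the loop added in blk-mq.c below), so the mq branch has to finish draining before taking q->queue_lock with spin_lock_irq(); sleeping under a spinlock with interrupts disabled would be a bug. The legacy branch keeps the old behavior, where __blk_drain_queue() is entered with the lock held and drops/retakes it internally around its own sleeps.
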
block/blk-exec.c +4 −0

@@ -60,6 +60,10 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
 
+	/*
+	 * don't check dying flag for MQ because the request won't
+	 * be resued after dying flag is set
+	 */
 	if (q->mq_ops) {
 		blk_mq_insert_request(q, rq, true);
 		return;
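
The new comment carries the reasoning for this file: on an mq queue the dying check happens at request allocation time, because blk_mq_queue_enter() (changed below) fails with -ENODEV once the queue is dying; any request that reaches blk_execute_rq_nowait() was therefore allocated before the flag was set and will not be reused afterwards. A hedged caller-side sketch of that contract; the function and its shape are illustrative, not a quote from the tree:

/* Hypothetical caller (illustrative only): on an mq queue no dying
 * check is needed before blk_execute_rq_nowait(), because allocation
 * already fails once the queue is dying. */
static void submit_one(struct request_queue *q, rq_end_io_fn *done)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
	if (!rq)
		return;		/* queue is dying, or allocation failed */

	blk_execute_rq_nowait(q, NULL, rq, 0, done);
}
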
block/blk-mq.c +27 −16

@@ -106,10 +106,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
 	spin_lock_irq(q->queue_lock);
 	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-		!blk_queue_bypass(q), *q->queue_lock);
+		!blk_queue_bypass(q) || blk_queue_dying(q),
+		*q->queue_lock);
 	/* inc usage with lock hold to avoid freeze_queue runs here */
-	if (!ret)
+	if (!ret && !blk_queue_dying(q))
 		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+	else if (blk_queue_dying(q))
+		ret = -ENODEV;
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -120,6 +123,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+	while (true) {
+		s64 count;
+
+		spin_lock_irq(q->queue_lock);
+		count = percpu_counter_sum(&q->mq_usage_counter);
+		spin_unlock_irq(q->queue_lock);
+
+		if (count == 0)
+			break;
+		blk_mq_run_queues(q, false);
+		msleep(10);
+	}
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +152,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (!drain)
-		return;
+	if (drain)
+		__blk_mq_drain_queue(q);
+}
 
-	while (true) {
-		s64 count;
-
-		spin_lock_irq(q->queue_lock);
-		count = percpu_counter_sum(&q->mq_usage_counter);
-		spin_unlock_irq(q->queue_lock);
-
-		if (count == 0)
-			break;
-		blk_mq_run_queues(q, false);
-		msleep(10);
-	}
+void blk_mq_drain_queue(struct request_queue *q)
+{
+	__blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
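
Assembled from the hunks above, blk_mq_freeze_queue() now reads as follows; the bool drain / bypass_depth lines are unchanged context that does not appear in the diff and are reconstructed here from that era's blk-mq.c, so treat this as a sketch rather than a quote:

static void blk_mq_freeze_queue(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;	/* reconstructed context line */
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain)
		__blk_mq_drain_queue(q);
}

The factored-out loop runs the hardware queues (blk_mq_run_queues(q, false)) before each msleep(10) so that requests still parked in software queues are pushed to the driver and can complete, which is what lets mq_usage_counter eventually sum to zero.
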
block/blk-mq.h +1 −0

@@ -27,6 +27,7 @@ void blk_mq_complete_request(struct request *rq, int error);
 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
 
 /*
  * CPU hotplug helpers