
Commit 7587a5ae authored by Bart Van Assche, committed by Jens Axboe

blk-mq: Introduce blk_mq_delay_run_hw_queue()



Introduce a function that runs a hardware queue unconditionally
after a delay. Note: there is already a function that stops and
restarts a hardware queue after a delay, namely blk_mq_delay_queue().

This function will be used in the next patch in this series.
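
For illustration only (this example is not part of the patch): a typical caller would be a block driver's ->queue_rq() handler that temporarily runs out of device resources. The names my_dev and my_device_busy() and the 100 ms delay below are invented for this sketch, and the BLK_MQ_RQ_QUEUE_* return codes are the ones in use at the time of this commit:

	#include <linux/blk-mq.h>

	/* Hypothetical driver state and availability check. */
	struct my_dev;
	static bool my_device_busy(struct my_dev *dev);

	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
			       const struct blk_mq_queue_data *bd)
	{
		struct my_dev *dev = hctx->queue->queuedata;

		if (my_device_busy(dev)) {
			/*
			 * Retry dispatch after 100 ms. Unlike blk_mq_delay_queue(),
			 * this does not stop the queue first, so the queue can still
			 * be run earlier from another context.
			 */
			blk_mq_delay_run_hw_queue(hctx, 100);
			return BLK_MQ_RQ_QUEUE_BUSY;
		}

		/* Normal dispatch path would go here. */
		return BLK_MQ_RQ_QUEUE_OK;
	}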

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Long Li <longli@microsoft.com>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent ebe8bddb
block/blk-mq.c +30 −2
@@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	return hctx->next_cpu;
 }
 
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+					unsigned long msecs)
 {
 	if (unlikely(blk_mq_hctx_stopped(hctx) ||
 		     !blk_mq_hw_queue_mapped(hctx)))
@@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		put_cpu();
 	}
 
-	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+	if (msecs == 0)
+		kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+					 &hctx->run_work);
+	else
+		kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+						 &hctx->delayed_run_work,
+						 msecs_to_jiffies(msecs));
+}
+
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+	__blk_mq_delay_run_hw_queue(hctx, async, 0);
 }
 
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 	__blk_mq_run_hw_queue(hctx);
 }
 
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+	__blk_mq_run_hw_queue(hctx);
+}
+
 static void blk_mq_delay_work_fn(struct work_struct *work)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -1962,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		node = hctx->numa_node = set->numa_node;
 
 	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+	INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
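
After the blk-mq.c changes above, both entry points reduce to thin wrappers around the new helper. Summarizing the resulting call paths (derived from the diff, shown here only for reference):

	blk_mq_run_hw_queue(hctx, async)        -> __blk_mq_delay_run_hw_queue(hctx, async, 0);
	blk_mq_delay_run_hw_queue(hctx, msecs)  -> __blk_mq_delay_run_hw_queue(hctx, true, msecs);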
include/linux/blk-mq.h +2 −0
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
 
 	atomic_t		nr_active;
 
+	struct delayed_work	delayed_run_work;
 	struct delayed_work	delay_work;
 
 	struct hlist_node	cpuhp_dead;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,