Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0bf6cd5b authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe
Browse files

blk-mq: factor out a helper to iterate all tags for a request_queue



And replace blk_mq_tag_busy_iter with it - the driver use was
replaced with a new helper a while ago, and internal to the block
layer we only need the new version.

Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarJens Axboe <axboe@fb.com>
parent f4829a9b
Loading
Loading
Loading
Loading
+20 −7
Original line number Original line Diff line number Diff line
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
}
}
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);


/*
 * Iterate over all busy (in-flight) tags on every hardware queue of a
 * request_queue, invoking @fn for each busy request.
 *
 * @q:    the request_queue whose hardware contexts are walked
 * @fn:   callback invoked for every busy tag (busy_iter_fn)
 * @priv: opaque cookie passed through to @fn
 *
 * Reserved tags (if any) are visited before the regular tag bitmap so
 * that callers see every in-flight request exactly once.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
		      false);
	}
}


static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
{
+2 −0
Original line number Original line Diff line number Diff line
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv);


enum {
enum {
	BLK_MQ_TAG_CACHE_MIN	= 1,
	BLK_MQ_TAG_CACHE_MIN	= 1,
+3 −11
Original line number Original line Diff line number Diff line
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv)
		.next		= 0,
		.next		= 0,
		.next_set	= 0,
		.next_set	= 0,
	};
	};
	struct blk_mq_hw_ctx *hctx;
	int i;
	int i;


	queue_for_each_hw_ctx(q, hctx, i) {
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
		/*
		 * If not software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
	}


	if (data.next_set) {
	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
		mod_timer(&q->timeout, data.next);
	} else {
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
			if (blk_mq_hw_queue_mapped(hctx))
+0 −2
Original line number Original line Diff line number Diff line
@@ -223,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);
		void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_freeze_queue(struct request_queue *q);