Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 08e98fc6 authored by Ming Lei, committed by Jens Axboe
Browse files

blk-mq: handle failure path for initializing hctx



Failure to initialize one hctx isn't handled, so this patch
introduces blk_mq_init_hctx() and its teardown counterpart to handle
it explicitly. This also makes the code cleaner.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent fe052529
Loading
Loading
Loading
Loading
+69 −45
Original line number Diff line number Diff line
@@ -1509,26 +1509,31 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
	return NOTIFY_OK;
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;

	blk_mq_tag_idle(hctx);

	if (set->ops->exit_hctx)
			set->ops->exit_hctx(hctx, i);
		set->ops->exit_hctx(hctx, hctx_idx);

	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
	kfree(hctx->ctxs);
	blk_mq_free_bitmap(&hctx->ctx_map);
}

/*
 * Tear down the first nr_queue hardware queue contexts of @q,
 * delegating the per-hctx cleanup to blk_mq_exit_hctx().
 */
static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int idx;

	queue_for_each_hw_ctx(q, hctx, idx) {
		if (idx >= (unsigned int)nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, idx);
	}
}

static void blk_mq_free_hw_queues(struct request_queue *q,
@@ -1543,16 +1548,10 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
	}
}

static int blk_mq_init_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/*
	 * Initialize hardware queues
	 */
	queue_for_each_hw_ctx(q, hctx, i) {
	int node;

	node = hctx->numa_node;
@@ -1564,7 +1563,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
		hctx->queue_num = i;
	hctx->queue_num = hctx_idx;
	hctx->flags = set->flags;
	hctx->cmd_size = set->cmd_size;

@@ -1572,7 +1571,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
					blk_mq_hctx_notify, hctx);
	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

		hctx->tags = set->tags[i];
	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
@@ -1581,15 +1580,40 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
			break;
		goto unregister_cpu_notifier;

	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
			break;
		goto free_ctxs;

	hctx->nr_ctx = 0;

	if (set->ops->init_hctx &&
		    set->ops->init_hctx(hctx, set->driver_data, i))
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	return 0;

 free_bitmap:
	blk_mq_free_bitmap(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);

	return -1;
}

static int blk_mq_init_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/*
	 * Initialize hardware queues
	 */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_init_hctx(q, set, hctx, i))
			break;
	}