
Commit d858e5f0 authored by Sagi Grimberg

nvme: move queue_count to the nvme_ctrl



All transports use queue_count in exactly the same way, so move it to
the generic struct nvme_ctrl. In the future it will also be maintained by
the core.
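
The pattern behind the mechanical rewrite below: every transport-specific
controller struct embeds the generic struct nvme_ctrl, so a field that all
transports maintain identically can live once in the embedded struct and be
reached through it. A minimal compilable sketch of the idea (reduced,
illustrative struct definitions and a hypothetical for_each_io_queue helper,
not the actual kernel code):

#include <stdint.h>

typedef uint32_t u32;

/* Generic controller state shared by all transports. queue_count counts
 * the admin queue plus the I/O queues, i.e. nr_io_queues + 1. */
struct nvme_ctrl {
	u32 queue_count;
};

/* A transport controller embeds the generic one; code that used to say
 * ctrl->queue_count now says ctrl->ctrl.queue_count. */
struct nvme_fc_ctrl {
	struct nvme_ctrl ctrl;
	/* u32 queue_count;  -- the duplicated field this commit removes */
};

/* The shape of the loops rewritten throughout the diff: queue 0 is the
 * admin queue, so I/O queue iteration starts at 1. */
static void for_each_io_queue(struct nvme_fc_ctrl *ctrl)
{
	for (u32 i = 1; i < ctrl->ctrl.queue_count; i++)
		; /* operate on I/O queue i */
}

The nvme.h hunk below adds the shared field; every other hunk is this
one-line substitution in the fc, pci, rdma, and loop transports.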

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
parent d554b5e1
drivers/nvme/host/fc.c  +17 −18
@@ -148,7 +148,6 @@ struct nvme_fc_ctrl {
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
-	u32			queue_count;
	u32			cnum;

	u64			association_id;
@@ -1614,7 +1613,7 @@ nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

-	for (i = 1; i < ctrl->queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

@@ -1635,10 +1634,10 @@ __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
-	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
+	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

-	for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
+	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

@@ -1648,7 +1647,7 @@ nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

-	for (i = 1; i < ctrl->queue_count; i++, queue++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
@@ -1667,7 +1666,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

-	for (i = 1; i < ctrl->queue_count; i++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
@@ -1685,7 +1684,7 @@ nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

-	for (i = 1; i < ctrl->queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
}

@@ -2187,7 +2186,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
		return ret;
	}

-	ctrl->queue_count = opts->nr_io_queues + 1;
+	ctrl->ctrl.queue_count = opts->nr_io_queues + 1;
	if (!opts->nr_io_queues)
		return 0;

@@ -2204,7 +2203,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
@@ -2258,7 +2257,7 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
	}

	/* check for io queues existing */
-	if (ctrl->queue_count == 1)
+	if (ctrl->ctrl.queue_count == 1)
		return 0;

	nvme_fc_init_io_queues(ctrl);
@@ -2381,7 +2380,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
	 * Create the io queues
	 */

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		if (ctrl->ctrl.state == NVME_CTRL_NEW)
			ret = nvme_fc_create_io_queues(ctrl);
		else
@@ -2395,7 +2394,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)

	ctrl->ctrl.nr_reconnects = 0;

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		nvme_start_queues(&ctrl->ctrl);
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
@@ -2447,7 +2446,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2702,18 +2701,18 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	spin_lock_init(&ctrl->lock);

	/* io queue count */
-	ctrl->queue_count = min_t(unsigned int,
+	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
-	opts->nr_io_queues = ctrl->queue_count;	/* so opts has valid value */
-	ctrl->queue_count++;	/* +1 for admin queue */
+	opts->nr_io_queues = ctrl->ctrl.queue_count;	/* so opts has valid value */
+	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
-	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
-				GFP_KERNEL);
+	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
+				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

drivers/nvme/host/nvme.h  +1 −0
@@ -142,6 +142,7 @@ struct nvme_ctrl {
	u16 cntlid;

	u32 ctrl_config;
+	u32 queue_count;

	u32 page_size;
	u32 max_hw_sectors;
drivers/nvme/host/pci.c  +7 −8
@@ -74,7 +74,6 @@ struct nvme_dev {
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
-	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
@@ -1099,9 +1098,9 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

-	for (i = dev->queue_count - 1; i >= lowest; i--) {
+	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
-		dev->queue_count--;
+		dev->ctrl.queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
@@ -1221,7 +1220,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
	nvmeq->qid = qid;
	nvmeq->cq_vector = -1;
	dev->queues[qid] = nvmeq;
-	dev->queue_count++;
+	dev->ctrl.queue_count++;

	return nvmeq;

@@ -1441,7 +1440,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
	unsigned i, max;
	int ret = 0;

-	for (i = dev->queue_count; i <= dev->max_qid; i++) {
+	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		/* vector == qid - 1, match nvme_create_queue */
		if (!nvme_alloc_queue(dev, i, dev->q_depth,
		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
@@ -1450,7 +1449,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
		}
	}

-	max = min(dev->max_qid, dev->queue_count - 1);
+	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	for (i = dev->online_queues; i <= max; i++) {
		ret = nvme_create_queue(dev->queues[i], i);
		if (ret)
@@ -2001,7 +2000,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
	nvme_stop_queues(&dev->ctrl);

	queues = dev->online_queues - 1;
-	for (i = dev->queue_count - 1; i > 0; i--)
+	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(dev->queues[i]);

	if (dead) {
@@ -2009,7 +2008,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
		 * probe, before the admin queue is configured. Thus,
		 * queue_count can be 0 here.
		 */
-		if (dev->queue_count)
+		if (dev->ctrl.queue_count)
			nvme_suspend_queue(dev->queues[0]);
	} else {
		nvme_disable_io_queues(dev, queues);
drivers/nvme/host/rdma.c  +19 −20
@@ -103,7 +103,6 @@ struct nvme_rdma_queue {
struct nvme_rdma_ctrl {
	/* read only in the hot path */
	struct nvme_rdma_queue	*queues;
-	u32			queue_count;

	/* other member variables */
	struct blk_mq_tag_set	tag_set;
@@ -349,7 +348,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];

-	BUG_ON(hctx_idx >= ctrl->queue_count);
+	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
@@ -587,7 +586,7 @@ static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i;

-	for (i = 1; i < ctrl->queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
}

@@ -595,7 +594,7 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i, ret = 0;

-	for (i = 1; i < ctrl->queue_count; i++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret) {
			dev_info(ctrl->ctrl.device,
@@ -623,14 +622,14 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
	if (ret)
		return ret;

-	ctrl->queue_count = nr_io_queues + 1;
-	if (ctrl->queue_count < 2)
+	ctrl->ctrl.queue_count = nr_io_queues + 1;
+	if (ctrl->ctrl.queue_count < 2)
		return 0;

	dev_info(ctrl->ctrl.device,
		"creating %d I/O queues.\n", nr_io_queues);

-	for (i = 1; i < ctrl->queue_count; i++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_rdma_init_queue(ctrl, i,
					   ctrl->ctrl.opts->queue_size);
		if (ret) {
@@ -705,7 +704,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)

	++ctrl->ctrl.nr_reconnects;

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		nvme_rdma_free_io_queues(ctrl);

		ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@@ -735,7 +734,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)

	nvme_start_keep_alive(&ctrl->ctrl);

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		ret = nvme_rdma_init_io_queues(ctrl);
		if (ret)
			goto requeue;
@@ -749,7 +748,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
	WARN_ON_ONCE(!changed);
	ctrl->ctrl.nr_reconnects = 0;

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}
@@ -772,15 +771,15 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)

	nvme_stop_keep_alive(&ctrl->ctrl);

-	for (i = 0; i < ctrl->queue_count; i++)
+	for (i = 0; i < ctrl->ctrl.queue_count; i++)
		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);

-	if (ctrl->queue_count > 1)
+	if (ctrl->ctrl.queue_count > 1)
		nvme_stop_queues(&ctrl->ctrl);
	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);

	/* We must take care of fastfail/requeue all our inflight requests */
-	if (ctrl->queue_count > 1)
+	if (ctrl->ctrl.queue_count > 1)
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
@@ -1624,7 +1623,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->reconnect_work);

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
@@ -1716,7 +1715,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
		goto del_dead_ctrl;
	}

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		ret = blk_mq_reinit_tagset(&ctrl->tag_set);
		if (ret)
			goto del_dead_ctrl;
@@ -1733,7 +1732,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		nvme_start_queues(&ctrl->ctrl);
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
@@ -1785,7 +1784,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
	ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
@@ -1863,12 +1862,12 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
	INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);

-	ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
+	ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
-	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues),
+	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;
@@ -1925,7 +1924,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

-	if (opts->nr_io_queues) {
+	if (ctrl->ctrl.queue_count > 1) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}
drivers/nvme/target/loop.c  +7 −8
@@ -44,7 +44,6 @@ struct nvme_loop_iod {

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;
-	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

@@ -241,7 +240,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

-	BUG_ON(hctx_idx >= ctrl->queue_count);
+	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
@@ -307,7 +306,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

-	for (i = 1; i < ctrl->queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

@@ -330,7 +329,7 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
		if (ret)
			goto out_destroy_queues;

-		ctrl->queue_count++;
+		ctrl->ctrl.queue_count++;
	}

	return 0;
@@ -344,7 +343,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

-	for (i = 1; i < ctrl->queue_count; i++) {
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
@@ -372,7 +371,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
-	ctrl->queue_count = 1;
+	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
@@ -426,7 +425,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

-	if (ctrl->queue_count > 1) {
+	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
@@ -559,7 +558,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;