Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c66e2998 authored by Sagi Grimberg, committed by Christoph Hellwig
Browse files

nvme-rdma: centralize controller setup sequence



Centralize the controller setup sequence into a single routine that correctly
cleans up after failures, instead of having multiple appearances in several
flows (create, reset, reconnect).

One thing we also gain here is that the sanity/boundary checks are now
applied when connecting back to a dynamic controller as well.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 90140624
Loading
Loading
Loading
Loading
+53 −77
Original line number Original line Diff line number Diff line
@@ -917,24 +917,44 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
	}
	}
}
}


static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
{
	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
	int ret = -EINVAL;
			struct nvme_rdma_ctrl, reconnect_work);
	bool changed;
	bool changed;
	int ret;


	++ctrl->ctrl.nr_reconnects;
	ret = nvme_rdma_configure_admin_queue(ctrl, new);

	ret = nvme_rdma_configure_admin_queue(ctrl, false);
	if (ret)
	if (ret)
		goto requeue;
		return ret;

	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
		goto destroy_admin;
	}

	if (!(ctrl->ctrl.sgls & (1 << 2))) {
		dev_err(ctrl->ctrl.device,
			"Mandatory keyed sgls are not supported!\n");
		goto destroy_admin;
	}

	if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
	}

	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
		dev_warn(ctrl->ctrl.device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
		ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
	}


	if (ctrl->ctrl.sgls & (1 << 20))
	if (ctrl->ctrl.sgls & (1 << 20))
		ctrl->use_inline_data = true;
		ctrl->use_inline_data = true;


	if (ctrl->ctrl.queue_count > 1) {
	if (ctrl->ctrl.queue_count > 1) {
		ret = nvme_rdma_configure_io_queues(ctrl, false);
		ret = nvme_rdma_configure_io_queues(ctrl, new);
		if (ret)
		if (ret)
			goto destroy_admin;
			goto destroy_admin;
	}
	}
@@ -943,10 +963,31 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
	if (!changed) {
	if (!changed) {
		/* state change failure is ok if we're in DELETING state */
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
		WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
		return;
		ret = -EINVAL;
		goto destroy_io;
	}
	}


	nvme_start_ctrl(&ctrl->ctrl);
	nvme_start_ctrl(&ctrl->ctrl);
	return 0;

destroy_io:
	if (ctrl->ctrl.queue_count > 1)
		nvme_rdma_destroy_io_queues(ctrl, new);
destroy_admin:
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_rdma_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_rdma_ctrl, reconnect_work);

	++ctrl->ctrl.nr_reconnects;

	if (nvme_rdma_setup_ctrl(ctrl, false))
		goto requeue;


	dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
	dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
			ctrl->ctrl.nr_reconnects);
			ctrl->ctrl.nr_reconnects);
@@ -955,9 +996,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)


	return;
	return;


destroy_admin:
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_rdma_destroy_admin_queue(ctrl, false);
requeue:
requeue:
	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
			ctrl->ctrl.nr_reconnects);
			ctrl->ctrl.nr_reconnects);
@@ -1786,8 +1824,6 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
{
	struct nvme_rdma_ctrl *ctrl =
	struct nvme_rdma_ctrl *ctrl =
		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
	int ret;
	bool changed;


	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_rdma_shutdown_ctrl(ctrl, false);
	nvme_rdma_shutdown_ctrl(ctrl, false);
@@ -1798,25 +1834,9 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
		return;
		return;
	}
	}


	ret = nvme_rdma_configure_admin_queue(ctrl, false);
	if (nvme_rdma_setup_ctrl(ctrl, false))
	if (ret)
		goto out_fail;
		goto out_fail;


	if (ctrl->ctrl.queue_count > 1) {
		ret = nvme_rdma_configure_io_queues(ctrl, false);
		if (ret)
			goto out_fail;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	if (!changed) {
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
		return;
	}

	nvme_start_ctrl(&ctrl->ctrl);

	return;
	return;


out_fail:
out_fail:
@@ -1979,49 +1999,10 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
	WARN_ON_ONCE(!changed);
	WARN_ON_ONCE(!changed);


	ret = nvme_rdma_configure_admin_queue(ctrl, true);
	ret = nvme_rdma_setup_ctrl(ctrl, true);
	if (ret)
	if (ret)
		goto out_uninit_ctrl;
		goto out_uninit_ctrl;


	/* sanity check icdoff */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	/* sanity check keyed sgls */
	if (!(ctrl->ctrl.sgls & (1 << 2))) {
		dev_err(ctrl->ctrl.device,
			"Mandatory keyed sgls are not supported!\n");
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	/* only warn if argument is too large here, will clamp later */
	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
	}

	/* warn if maxcmd is lower than sqsize+1 */
	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
		dev_warn(ctrl->ctrl.device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
		ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
	}

	if (opts->nr_io_queues) {
		ret = nvme_rdma_configure_io_queues(ctrl, true);
		if (ret)
			goto out_remove_admin_queue;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);


@@ -2031,13 +2012,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);
	mutex_unlock(&nvme_rdma_ctrl_mutex);


	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;
	return &ctrl->ctrl;


out_remove_admin_queue:
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_rdma_destroy_admin_queue(ctrl, true);
out_uninit_ctrl:
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);