Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c913a8b0 authored by James Smart, committed by Christoph Hellwig
Browse files

nvme_fc: Move LS's to rport



Link LS's on the remoteport rather than the controller. LS's are
between nport's. Makes more sense, especially on async teardown where
the controller is torn down regardless of the LS (LS is more of a notifier
to the target of the teardown), to have them on the remoteport.

While revising ls send/done routines, issues were seen relative to
refcounting and cleanup, especially in async path. Reworked these code
paths.

Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
parent 568ad51e
Loading
Loading
Loading
Loading
+65 −54
Original line number Original line Diff line number Diff line
@@ -64,13 +64,13 @@ struct nvme_fc_queue {
struct nvmefc_ls_req_op {
struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;
	struct nvmefc_ls_req	ls_req;


	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	struct request		*rq;


	int			ls_error;
	int			ls_error;
	struct completion	ls_done;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* ctrl->ls_req_list */
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
	bool			req_queued;
};
};


@@ -120,6 +120,9 @@ struct nvme_fc_rport {


	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	spinlock_t			lock;
	struct kref			ref;
	struct kref			ref;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
@@ -144,7 +147,6 @@ struct nvme_fc_ctrl {
	u64			cap;
	u64			cap;


	struct list_head	ctrl_list;	/* rport->ctrl_list */
	struct list_head	ctrl_list;	/* rport->ctrl_list */
	struct list_head	ls_req_list;


	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;
	struct blk_mq_tag_set	tag_set;
@@ -419,9 +421,12 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,


	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	kref_init(&newrec->ref);
	kref_init(&newrec->ref);
	spin_lock_init(&newrec->lock);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.node_name = pinfo->node_name;
@@ -444,7 +449,6 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
out_reghost_failed:
out_reghost_failed:
	*portptr = NULL;
	*portptr = NULL;
	return ret;
	return ret;

}
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);


@@ -624,16 +628,16 @@ static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);




static void
static void
__nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
		struct nvmefc_ls_req_op *lsop)
{
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	unsigned long flags;


	spin_lock_irqsave(&ctrl->lock, flags);
	spin_lock_irqsave(&rport->lock, flags);


	if (!lsop->req_queued) {
	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&ctrl->lock, flags);
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
		return;
	}
	}


@@ -641,56 +645,71 @@ __nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,


	lsop->req_queued = false;
	lsop->req_queued = false;


	spin_unlock_irqrestore(&ctrl->lock, flags);
	spin_unlock_irqrestore(&rport->lock, flags);


	fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma,
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
				  DMA_BIDIRECTIONAL);


	nvme_fc_ctrl_put(ctrl);
	nvme_fc_rport_put(rport);
}
}


static int
static int
__nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl,
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
		void (*done)(struct nvmefc_ls_req *req, int status))
{
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	unsigned long flags;
	int ret;
	int ret = 0;


	if (!nvme_fc_ctrl_get(ctrl))
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;
		return -ESHUTDOWN;


	lsreq->done = done;
	lsreq->done = done;
	lsop->ctrl = ctrl;
	lsop->rport = rport;
	lsop->req_queued = false;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);
	init_completion(&lsop->ls_done);


	lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr,
	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) {
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		nvme_fc_ctrl_put(ctrl);
		ret = -EFAULT;
		dev_err(ctrl->dev,
		goto out_putrport;
			"els request command failed EFAULT.\n");
		return -EFAULT;
	}
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;


	spin_lock_irqsave(&ctrl->lock, flags);
	spin_lock_irqsave(&rport->lock, flags);


	list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list);
	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);


	lsop->req_queued = true;
	lsop->req_queued = true;


	spin_unlock_irqrestore(&ctrl->lock, flags);
	spin_unlock_irqrestore(&rport->lock, flags);


	ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport,
	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&ctrl->rport->remoteport, lsreq);
					&rport->remoteport, lsreq);
	if (ret)
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);


	return ret;
	return ret;
}
}
@@ -705,15 +724,15 @@ nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
}
}


static int
static int
nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;
	int ret;


	ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done);
	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);


	if (!ret)
	if (!ret) {
		/*
		/*
		 * No timeout/not interruptible as we need the struct
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * to exist until the lldd calls us back. Thus mandate
@@ -722,14 +741,14 @@ nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
		 */
		 */
		wait_for_completion(&lsop->ls_done);
		wait_for_completion(&lsop->ls_done);


	__nvme_fc_finish_ls_req(ctrl, lsop);
		__nvme_fc_finish_ls_req(lsop);


	if (ret) {
		ret = lsop->ls_error;
		dev_err(ctrl->dev,
			"ls request command failed (%d).\n", ret);
		return ret;
	}
	}


	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;
		return -ENXIO;
@@ -737,19 +756,14 @@ nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
	return 0;
	return 0;
}
}


static void
static int
nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl,
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
		void (*done)(struct nvmefc_ls_req *req, int status))
{
{
	int ret;

	ret = __nvme_fc_send_ls_req(ctrl, lsop, done);

	/* don't wait for completion */
	/* don't wait for completion */


	if (ret)
	return __nvme_fc_send_ls_req(rport, lsop, done);
		done(&lsop->ls_req, ret);
}
}


/* Validation Error indexes into the string table below */
/* Validation Error indexes into the string table below */
@@ -839,7 +853,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;


	ret = nvme_fc_send_ls_req(ctrl, lsop);
	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
	if (ret)
		goto out_free_buffer;
		goto out_free_buffer;


@@ -947,7 +961,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;


	ret = nvme_fc_send_ls_req(ctrl, lsop);
	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
	if (ret)
		goto out_free_buffer;
		goto out_free_buffer;


@@ -998,14 +1012,8 @@ static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
	struct nvme_fc_ctrl *ctrl = lsop->ctrl;


	__nvme_fc_finish_ls_req(ctrl, lsop);
	__nvme_fc_finish_ls_req(lsop);

	if (status)
		dev_err(ctrl->dev,
			"disconnect assoc ls request command failed (%d).\n",
			status);


	/* fc-nvme iniator doesn't care about success or failure of cmd */
	/* fc-nvme iniator doesn't care about success or failure of cmd */


@@ -1036,6 +1044,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct nvmefc_ls_req *lsreq;
	int ret;


	lsop = kzalloc((sizeof(*lsop) +
	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 ctrl->lport->ops->lsrqst_priv_sz +
@@ -1078,7 +1087,10 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;


	nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done);
	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);


	/* only meaningful part to terminating the association */
	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
	ctrl->association_id = 0;
@@ -2302,7 +2314,6 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,


	ctrl->ctrl.opts = opts;
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	INIT_LIST_HEAD(&ctrl->ls_req_list);
	ctrl->lport = lport;
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->dev = lport->dev;