
Commit 3bc32bb1 authored by Christoph Hellwig

nvme-fabrics: refactor queue ready check



Move the is_connected check to the fibre channel transport, as it has no
meaning for other transports.  To facilitate this, split out a new
nvmf_fail_nonready_command helper that is called by the transport when
it is asked to handle a command on a queue that is not ready.

Also avoid a function call for the queue live fast path by inlining
the check.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: James Smart <james.smart@broadcom.com>
parent e6c3456a
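
Before the per-file hunks, here is the shape the change gives each transport's ->queue_rq() handler. This is an illustration only, not code from the patch: "foo", struct nvme_foo_queue and NVME_FOO_Q_LIVE are hypothetical stand-ins for a transport's own types and flags (the real fc, rdma and loop hunks follow below).

/*
 * Illustration only: the pattern each transport adopts after this patch.
 * The transport samples its own live-queue flag, asks nvmf_check_ready()
 * whether the command may be sent, and otherwise lets
 * nvmf_fail_nonready_command() pick between busy-retry and hard failure.
 */
static blk_status_t nvme_foo_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_foo_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	bool queue_ready = test_bit(NVME_FOO_Q_LIVE, &queue->flags);

	/* Inlined fast path: one state compare when the controller is LIVE. */
	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(rq);

	/* ... transport-specific command setup and submission continue ... */
	return BLK_STS_OK;
}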
drivers/nvme/host/fabrics.c  +24 −35
@@ -536,30 +536,32 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 	return NULL;
 }
 
-blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
-		bool queue_live, bool is_connected)
+/*
+ * For something we're not in a state to send to the device the default action
+ * is to busy it and retry it after the controller state is recovered.  However,
+ * anything marked for failfast or nvme multipath is immediately failed.
+ *
+ * Note: commands used to initialize the controller will be marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+blk_status_t nvmf_fail_nonready_command(struct request *rq)
 {
-	struct nvme_command *cmd = nvme_req(rq)->cmd;
+	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+		return BLK_STS_RESOURCE;
+	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+	return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
 
-	if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected))
-		return BLK_STS_OK;
+bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+		bool queue_live)
+{
+	struct nvme_command *cmd = nvme_req(rq)->cmd;
 
 	switch (ctrl->state) {
 	case NVME_CTRL_NEW:
 	case NVME_CTRL_CONNECTING:
 	case NVME_CTRL_DELETING:
-		/*
-		 * This is the case of starting a new or deleting an association
-		 * but connectivity was lost before it was fully created or torn
-		 * down. We need to error the commands used to initialize the
-		 * controller so the reconnect can go into a retry attempt.  The
-		 * commands should all be marked REQ_FAILFAST_DRIVER, which will
-		 * hit the reject path below. Anything else will be queued while
-		 * the state settles.
-		 */
-		if (!is_connected)
-			break;
-
 		/*
 		 * If queue is live, allow only commands that are internally
 		 * generated pass through.  These are commands on the admin
@@ -567,7 +569,7 @@ blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		 * ioctl admin cmds received while initializing.
 		 */
 		if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
-			return BLK_STS_OK;
+			return true;
 
 		/*
 		 * If the queue is not live, allow only a connect command.  This
@@ -577,26 +579,13 @@ blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		if (!queue_live && blk_rq_is_passthrough(rq) &&
 		     cmd->common.opcode == nvme_fabrics_command &&
 		     cmd->fabrics.fctype == nvme_fabrics_type_connect)
-			return BLK_STS_OK;
-		break;
+			return true;
+		return false;
 	default:
-		break;
+		return false;
 	}
-
-	/*
-	 * Any other new io is something we're not in a state to send to the
-	 * device.  Default action is to busy it and retry it after the
-	 * controller state is recovered. However, anything marked for failfast
-	 * or nvme multipath is immediately failed.  Note: commands used to
-	 * initialize the controller will be marked for failfast.
-	 * Note: nvme cli/ioctl commands are marked for failfast.
-	 */
-	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
-		return BLK_STS_RESOURCE;
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-	return BLK_STS_IOERR;
 }
-EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
+EXPORT_SYMBOL_GPL(__nvmf_check_ready);
 
 static const match_table_t opt_tokens = {
 	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
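
For reference, an annotated restatement (not part of the patch) of the policy nvmf_fail_nonready_command() implements; the function name below is a hypothetical placeholder. BLK_STS_RESOURCE makes blk-mq hold the request and retry it later, while BLK_STS_IOERR completes it immediately with NVME_SC_ABORT_REQ.

/* Hypothetical restatement of nvmf_fail_nonready_command(), for reference. */
static blk_status_t fail_nonready_policy(struct request *rq)
{
	/*
	 * Ordinary I/O with no failfast hint and no multipath involvement:
	 * BLK_STS_RESOURCE tells blk-mq to busy the request and retry it
	 * once the controller state has recovered.
	 */
	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;

	/*
	 * Failfast requests (which include controller-initialization and
	 * nvme cli/ioctl commands) and multipath requests fail immediately,
	 * so reconnect logic and path failover can make progress instead of
	 * waiting on a queue that is not ready.
	 */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
	return BLK_STS_IOERR;
}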
drivers/nvme/host/fabrics.h  +11 −2
@@ -162,7 +162,16 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl,
-	struct request *rq, bool queue_live, bool is_connected);
+blk_status_t nvmf_fail_nonready_command(struct request *rq);
+bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+		bool queue_live);
+
+static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+		bool queue_live)
+{
+	if (likely(ctrl->state == NVME_CTRL_LIVE))
+		return true;
+	return __nvmf_check_ready(ctrl, rq, queue_live);
+}
 
 #endif /* _NVME_FABRICS_H */
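
This header split is what delivers the fast path promised in the commit message: nvmf_check_ready() is a static inline, so the common NVME_CTRL_LIVE case is decided at the call site without calling into fabrics.c. Thanks to short-circuit evaluation, a call like the ones in the transport hunks below expands roughly to the following (illustration, not generated code):

/* Roughly equivalent expansion of a call site after inlining: */
if (!(likely(ctrl->state == NVME_CTRL_LIVE) ||
      __nvmf_check_ready(ctrl, rq, queue_ready)))
	return nvmf_fail_nonready_command(rq);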
drivers/nvme/host/fc.c  +4 −5
@@ -2266,14 +2266,13 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
 	enum nvmefc_fcp_datadir	io_dir;
+	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
 	u32 data_len;
 	blk_status_t ret;
 
-	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
-		test_bit(NVME_FC_Q_LIVE, &queue->flags),
-		ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
-	if (unlikely(ret))
-		return ret;
+	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+		return nvmf_fail_nonready_command(rq);
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
drivers/nvme/host/rdma.c  +3 −4
@@ -1630,15 +1630,14 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_rdma_qe *sqe = &req->sqe;
 	struct nvme_command *c = sqe->data;
 	struct ib_device *dev;
+	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 	int err;
 
 	WARN_ON_ONCE(rq->tag < 0);
 
-	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
-		test_bit(NVME_RDMA_Q_LIVE, &queue->flags), true);
-	if (unlikely(ret))
-		return ret;
+	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+		return nvmf_fail_nonready_command(rq);
 
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
drivers/nvme/target/loop.c  +3 −4
@@ -158,12 +158,11 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_loop_queue *queue = hctx->driver_data;
 	struct request *req = bd->rq;
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 
-	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
-		test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
-	if (unlikely(ret))
-		return ret;
+	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+		return nvmf_fail_nonready_command(req);
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)
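
Taken together, the three transports now share the same two-line prologue and differ only in which live-queue flag feeds nvmf_check_ready(): NVME_FC_Q_LIVE, NVME_RDMA_Q_LIVE and NVME_LOOP_Q_LIVE respectively. Only fc keeps an extra connectivity test (remoteport.port_state == FC_OBJSTATE_ONLINE), since that notion has meaning only there; rdma and loop previously had to pass a hard-coded is_connected = true just to satisfy the old combined interface.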