Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fc6c9730 authored by Max Gurtovoy, committed by Christoph Hellwig
Browse files

nvmet: rename nvme_completion instances from rsp to cqe



Use NVMe namings for improving code readability.

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 8dc2ed3f
Loading
Loading
Loading
Loading
+11 −11
Original line number Diff line number Diff line
@@ -647,7 +647,7 @@ static void nvmet_update_sq_head(struct nvmet_req *req)
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
					old_sqhd);
	}
	req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
@@ -656,7 +656,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->rsp->status = cpu_to_le16(status << 1);
	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;
@@ -676,15 +676,15 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->rsp->status |= cpu_to_le16(1 << 14);
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);
@@ -841,8 +841,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
	req->sg = NULL;
	req->sg_cnt = 0;
	req->transfer_len = 0;
	req->rsp->status = 0;
	req->rsp->sq_head = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;
@@ -1069,7 +1069,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

@@ -1090,7 +1090,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
@@ -1188,7 +1188,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

@@ -1197,7 +1197,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
+8 −8
Original line number Diff line number Diff line
@@ -72,7 +72,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
			offsetof(struct nvmf_property_get_command, attrib);
	}

	req->rsp->result.u64 = cpu_to_le64(val);
	req->cqe->result.u64 = cpu_to_le64(val);
	nvmet_req_complete(req, status);
}

@@ -124,7 +124,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)

	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
		req->sq->sqhd_disabled = true;
		req->rsp->sq_head = cpu_to_le16(0xffff);
		req->cqe->sq_head = cpu_to_le16(0xffff);
	}

	if (ctrl->ops->install_queue) {
@@ -158,7 +158,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
@@ -172,7 +172,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
		pr_warn("connect attempt for invalid controller ID %#x\n",
			d->cntlid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		goto out;
	}

@@ -195,7 +195,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)

	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
	kfree(d);
@@ -222,7 +222,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
@@ -240,14 +240,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		/* pass back cntlid that had the issue of installing queue */
		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
		req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
		goto out_ctrl_put;
	}

+1 −1
Original line number Diff line number Diff line
@@ -2184,7 +2184,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.rsp = &fod->rspiubuf.cqe;
	fod->req.cqe = &fod->rspiubuf.cqe;
	fod->req.port = tgtport->pe->port;

	/* clear any response payload */
+3 −3
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
@@ -94,7 +94,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
@@ -207,7 +207,7 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
+2 −2
Original line number Diff line number Diff line
@@ -284,7 +284,7 @@ struct nvmet_fabrics_ops {

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*rsp;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
@@ -322,7 +322,7 @@ extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->rsp->result.u32 = cpu_to_le32(result);
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
Loading