
Commit 0141af18 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes for the nvme over fabrics code"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvme-rdma: Get rid of redundant defines
  nvme-rdma: Get rid of duplicate variable
  nvme: fabrics drivers don't need the nvme-pci driver
  nvme-fabrics: get a reference when reusing a nvme_host structure
  nvme-fabrics: change NQN UUID to big-endian format
  nvme-loop: set sqsize to 0-based value, per spec
  nvme-rdma: fix sqsize/hsqsize per spec
  fabrics: define admin sqsize min default, per spec
  nvmet-rdma: +1 to *queue_size from hsqsize/hrqsize
  nvmet-rdma: Fix use after free
  nvme-rdma: initialize ret to zero to avoid returning garbage
Parents: 0ddc9e5a d8d8d9d7
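Several of the fixes above come down to one convention: the Fabrics spec encodes sqsize as a 0-based value, while the user-supplied queue_size (and the block layer's queue depth) is 1-based, and hrqsize must be at least one larger than hsqsize. Below is a minimal standalone sketch of the arithmetic these patches converge on; the helper names are hypothetical, the kernel open-codes this:

	#include <stdint.h>

	#define NVMF_AQ_DEPTH	32	/* spec minimum admin queue depth */

	/* queue_size is 1-based (a depth of 128 means 128 entries);
	 * sqsize is the same quantity encoded 0-based, per the spec. */
	static inline uint16_t depth_to_sqsize(uint16_t queue_size)
	{
		return queue_size - 1;
	}

	/* hrqsize stays 1-based; the minimum is sqsize + 1. */
	static inline uint16_t hrqsize_for(uint16_t sqsize)
	{
		return sqsize + 1;
	}

Read this way, an admin queue always connects with hsqsize = NVMF_AQ_DEPTH - 1 = 31 and hrqsize = NVMF_AQ_DEPTH = 32, which is exactly what the fabrics.c and rdma.c hunks below set.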
drivers/nvme/host/Kconfig  +1 −1
@@ -31,7 +31,7 @@ config NVME_FABRICS
 config NVME_RDMA
 	tristate "NVM Express over Fabrics RDMA host driver"
 	depends on INFINIBAND
-	depends on BLK_DEV_NVME
+	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
 	help
drivers/nvme/host/fabrics.c  +16 −7
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
 	mutex_lock(&nvmf_hosts_mutex);
 	host = __nvmf_host_find(hostnqn);
-	if (host)
+	if (host) {
+		kref_get(&host->ref);
 		goto out_unlock;
+	}
 
 	host = kmalloc(sizeof(*host), GFP_KERNEL);
 	if (!host)
@@ -56,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
 	kref_init(&host->ref);
 	memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-	uuid_le_gen(&host->id);
+	uuid_be_gen(&host->id);
 
 	list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -73,9 +75,9 @@ static struct nvmf_host *nvmf_host_default(void)
 		return NULL;
 
 	kref_init(&host->ref);
-	uuid_le_gen(&host->id);
+	uuid_be_gen(&host->id);
 	snprintf(host->nqn, NVMF_NQN_SIZE,
-		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id);
+		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
 	mutex_lock(&nvmf_hosts_mutex);
 	list_add_tail(&host->list, &nvmf_hosts);
@@ -363,7 +365,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	cmd.connect.opcode = nvme_fabrics_command;
 	cmd.connect.fctype = nvme_fabrics_type_connect;
 	cmd.connect.qid = 0;
-	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+	/*
+	 * fabrics spec sets a minimum of depth 32 for admin queue,
+	 * so set the queue with this depth always until
+	 * justification otherwise.
+	 */
+	cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
 	/*
 	 * Set keep-alive timeout in seconds granularity (ms * 1000)
 	 * and add a grace period for controller kato enforcement
@@ -375,7 +384,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	if (!data)
 		return -ENOMEM;
 
-	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
 	data->cntlid = cpu_to_le16(0xffff);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -434,7 +443,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	if (!data)
 		return -ENOMEM;
 
-	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
 	data->cntlid = cpu_to_le16(ctrl->cntlid);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
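The nvmf_host_add() hunk above is a use-after-free fix: handing out an existing nvmf_host without taking a reference means the first user's final put frees the structure while a second user still points at it. A simplified, compilable userspace model of the pattern the fix restores; the plain counter stands in for struct kref and the one-slot registry for the nvmf_hosts list, both hypothetical:

	#include <stdlib.h>
	#include <string.h>

	struct host {
		int refs;			/* stands in for struct kref */
		char nqn[223];			/* NVMF_NQN_SIZE */
	};

	static struct host *known;		/* one-slot "nvmf_hosts" */

	static struct host *host_add(const char *nqn)
	{
		if (known && !strcmp(known->nqn, nqn)) {
			known->refs++;		/* the missing kref_get() */
			return known;
		}
		known = calloc(1, sizeof(*known));
		if (!known)
			return NULL;
		known->refs = 1;		/* kref_init() */
		strncpy(known->nqn, nqn, sizeof(known->nqn) - 1);
		return known;
	}

Without the increment on the reuse path, two callers share a single reference and the object dies after the first release.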
drivers/nvme/host/fabrics.h  +1 −1
@@ -34,7 +34,7 @@ struct nvmf_host {
 	struct kref		ref;
 	struct list_head	list;
 	char			nqn[NVMF_NQN_SIZE];
-	uuid_le			id;
+	uuid_be			id;
 };
 
 /**
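The uuid_le to uuid_be switch here, together with the %pUl to %pUb format change in fabrics.c above, aligns the host UUID with RFC 4122, which defines UUID fields in big-endian byte order; the little-endian encoding byte-swaps the first three fields. A standalone sketch printing the same 16 bytes both ways, matching my reading of the %pUb/%pUl printk specifiers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t u[16] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
				  0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };

		/* big-endian (RFC 4122) rendering, as %pUb prints it */
		printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
		       "%02x%02x%02x%02x%02x%02x\n",
		       u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
		       u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);

		/* little-endian rendering, as %pUl prints it: the 32-bit and
		 * two 16-bit fields are byte-swapped */
		printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
		       "%02x%02x%02x%02x%02x%02x\n",
		       u[3], u[2], u[1], u[0], u[5], u[4], u[7], u[6],
		       u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
		return 0;
	}

The last two groups are plain byte arrays and render identically in both encodings; only the first three fields differ.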
drivers/nvme/host/rdma.c  +28 −18
@@ -43,10 +43,6 @@
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS	1
 
-#define NVME_RDMA_MAX_PAGES_PER_MR	512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY	20
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -77,7 +73,6 @@ struct nvme_rdma_request {
 	u32			num_sge;
 	int			nents;
 	bool			inline_data;
-	bool			need_inval;
 	struct ib_reg_wr	reg_wr;
 	struct ib_cqe		reg_cqe;
 	struct nvme_rdma_queue  *queue;
@@ -286,7 +281,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	int ret = 0;
 
-	if (!req->need_inval)
+	if (!req->mr->need_inval)
 		goto out;
 
 	ib_dereg_mr(req->mr);
@@ -298,7 +293,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 		req->mr = NULL;
 	}
 
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 out:
 	return ret;
@@ -645,7 +640,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
-		ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+		ret = nvme_rdma_init_queue(ctrl, i,
+					   ctrl->ctrl.opts->queue_size);
 		if (ret) {
 			dev_info(ctrl->ctrl.device,
 				"failed to initialize i/o queue: %d\n", ret);
@@ -849,7 +845,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	if (!blk_rq_bytes(rq))
 		return;
 
-	if (req->need_inval) {
+	if (req->mr->need_inval) {
 		res = nvme_rdma_inv_rkey(queue, req);
 		if (res < 0) {
 			dev_err(ctrl->ctrl.device,
@@ -935,7 +931,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 			     IB_ACCESS_REMOTE_READ |
 			     IB_ACCESS_REMOTE_WRITE;
 
-	req->need_inval = true;
+	req->mr->need_inval = true;
 
 	sg->addr = cpu_to_le64(req->mr->iova);
 	put_unaligned_le24(req->mr->length, sg->length);
@@ -958,7 +954,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	req->num_sge = 1;
 	req->inline_data = false;
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1145,7 +1141,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 
 	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
 	    wc->ex.invalidate_rkey == req->mr->rkey)
-		req->need_inval = false;
+		req->mr->need_inval = false;
 
 	blk_mq_complete_request(rq, status);
 
@@ -1278,8 +1274,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 
 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
 	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
-	priv.hrqsize = cpu_to_le16(queue->queue_size);
-	priv.hsqsize = cpu_to_le16(queue->queue_size);
+	/*
+	 * set the admin queue depth to the minimum size
+	 * specified by the Fabrics standard.
+	 */
+	if (priv.qid == 0) {
+		priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+		priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+	} else {
+		/*
+		 * current interpretation of the fabrics spec
+		 * is at minimum you make hrqsize sqsize+1, or a
+		 * 1's based representation of sqsize.
+		 */
+		priv.hrqsize = cpu_to_le16(queue->queue_size);
+		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+	}
 
 	ret = rdma_connect(queue->cm_id, &param);
 	if (ret) {
@@ -1319,7 +1329,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	int ret;
+	int ret = 0;
 
 	/* Own the controller deletion */
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
@@ -1461,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
 		flush = true;
 	ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-			req->need_inval ? &req->reg_wr.wr : NULL, flush);
+			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
 	if (ret) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;
@@ -1816,7 +1826,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_rdma_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1914,7 +1924,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	spin_lock_init(&ctrl->lock);
 
 	ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
-	ctrl->ctrl.sqsize = opts->queue_size;
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
 	ret = -ENOMEM;
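The int ret to int ret = 0 change in nvme_rdma_device_unplug() above closes a classic C pitfall: on a path that jumps to the common exit before ret is assigned, the function returns whatever garbage occupies that stack slot. A minimal illustration of the bug class, not the actual unplug logic:

	#include <stdbool.h>

	static bool claim_deletion(void)	/* stand-in for the state change */
	{
		return false;
	}

	static int device_unplug(void)
	{
		int ret = 0;	/* without "= 0" the early exit returns garbage */

		if (!claim_deletion())
			goto out;	/* ret is never assigned on this path */

		ret = 1;	/* stand-in for the real teardown work */
	out:
		return ret;
	}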
drivers/nvme/target/Kconfig  +1 −1
@@ -15,8 +15,8 @@ config NVME_TARGET
 
 config NVME_TARGET_LOOP
 	tristate "NVMe loopback device support"
-	depends on BLK_DEV_NVME
 	depends on NVME_TARGET
+	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
 	help