
Commit f8a05a1d authored by Jens Axboe

Merge branch 'nvme-4.12' of git://git.infradead.org/nvme into for-4.12/block

Christoph writes:

This is the current NVMe pile: virtualization extensions, lots of FC
updates and various misc bits.  There are a few more FC bits that didn't
make the cut, but we'd like to get this request out before the merge
window for sure.
parents 95c55ff4 e02ab023
+14 −0
@@ -75,6 +75,20 @@ static int nvme_error_status(struct request *req)
		return -ENOSPC;
	default:
		return -EIO;

	/*
	 * XXX: these errors are a nasty side-band protocol to
	 * drivers/md/dm-mpath.c:noretry_error() that aren't documented
	 * anywhere..
	 */
	case NVME_SC_CMD_SEQ_ERROR:
		return -EILSEQ;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
		return -ENODATA;
	}
}

+98 −54
@@ -61,16 +61,23 @@ struct nvme_fc_queue {
	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_RELEASED	= (1 << 1),
	FCOP_FLAGS_COMPLETE	= (1 << 2),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* ctrl->ls_req_list */
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

@@ -120,6 +127,9 @@ struct nvme_fc_rport {

	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
@@ -144,7 +154,6 @@ struct nvme_fc_ctrl {
	u64			cap;

	struct list_head	ctrl_list;	/* rport->ctrl_list */
	struct list_head	ls_req_list;

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;
@@ -419,9 +428,12 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	kref_init(&newrec->ref);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
@@ -444,7 +456,6 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
out_reghost_failed:
	*portptr = NULL;
	return ret;

}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

@@ -487,6 +498,30 @@ nvme_fc_rport_get(struct nvme_fc_rport *rport)
	return kref_get_unless_zero(&rport->ref);
}

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
@@ -522,6 +557,8 @@ nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	nvme_fc_rport_put(rport);
	return 0;
}
@@ -624,16 +661,16 @@ static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);


static void
__nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
		struct nvmefc_ls_req_op *lsop)
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&ctrl->lock, flags);
	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&ctrl->lock, flags);
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

@@ -641,56 +678,71 @@ __nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,

	lsop->req_queued = false;

	spin_unlock_irqrestore(&ctrl->lock, flags);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma,
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_ctrl_put(ctrl);
	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl,
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret;
	int ret = 0;

	if (!nvme_fc_ctrl_get(ctrl))
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->ctrl = ctrl;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr,
	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) {
		nvme_fc_ctrl_put(ctrl);
		dev_err(ctrl->dev,
			"els request command failed EFAULT.\n");
		return -EFAULT;
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&ctrl->lock, flags);
	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list);
	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&ctrl->lock, flags);
	spin_unlock_irqrestore(&rport->lock, flags);

	ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport,
					&ctrl->rport->remoteport, lsreq);
	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
@@ -705,15 +757,15 @@ nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
}

static int
nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done);
	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret)
	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
@@ -722,14 +774,14 @@ nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
		 */
		wait_for_completion(&lsop->ls_done);

	__nvme_fc_finish_ls_req(ctrl, lsop);
		__nvme_fc_finish_ls_req(lsop);

	if (ret) {
		dev_err(ctrl->dev,
			"ls request command failed (%d).\n", ret);
		return ret;
		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;
@@ -737,19 +789,14 @@ nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
	return 0;
}

static void
nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl,
static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	int ret;

	ret = __nvme_fc_send_ls_req(ctrl, lsop, done);

	/* don't wait for completion */

	if (ret)
		done(&lsop->ls_req, ret);
	return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
@@ -839,7 +886,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl, lsop);
	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

@@ -947,7 +994,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl, lsop);
	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

@@ -998,14 +1045,8 @@ static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
	struct nvme_fc_ctrl *ctrl = lsop->ctrl;

	__nvme_fc_finish_ls_req(ctrl, lsop);

	if (status)
		dev_err(ctrl->dev,
			"disconnect assoc ls request command failed (%d).\n",
			status);
	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme iniator doesn't care about success or failure of cmd */

@@ -1036,6 +1077,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
@@ -1078,7 +1120,10 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done);
	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
@@ -2302,7 +2347,6 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,

	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	INIT_LIST_HEAD(&ctrl->ls_req_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
+160 −6
@@ -103,8 +103,22 @@ struct nvme_dev {
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	struct completion ioq_wait;
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;
};

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}
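
As an aside on the index math just added: sq_idx() and cq_idx() lay the shadow doorbells out as one pair of 32-bit slots per queue, the submission-queue entry first and the completion-queue entry immediately after it, spaced by the controller's doorbell stride. A minimal stand-alone sketch of the same arithmetic, with illustrative qid/stride values that are not taken from this commit:

#include <assert.h>
#include <stdio.h>

/* Same index math as the sq_idx()/cq_idx() helpers above. */
static unsigned int sq_idx(unsigned int qid, unsigned int stride)
{
	return qid * 2 * stride;
}

static unsigned int cq_idx(unsigned int qid, unsigned int stride)
{
	return (qid * 2 + 1) * stride;
}

int main(void)
{
	unsigned int stride = 1;	/* illustrative doorbell stride */

	for (unsigned int qid = 0; qid < 4; qid++)
		printf("qid %u -> SQ slot %u, CQ slot %u\n",
		       qid, sq_idx(qid, stride), cq_idx(qid, stride));

	/* e.g. queue 3 with stride 1 uses slots 6 (SQ) and 7 (CQ) */
	assert(sq_idx(3, 1) == 6 && cq_idx(3, 1) == 7);
	return 0;
}

With a stride of 1 the slots simply interleave (SQ0, CQ0, SQ1, CQ1, ...), mirroring the layout of the regular doorbell registers.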

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
@@ -133,6 +147,10 @@ struct nvme_queue {
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
};

/*
@@ -171,6 +189,112 @@ static inline void _nvme_check_size(void)
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return ((num_possible_cpus() + 1) * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->dev, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}
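
nvme_dbbuf_need_event() above is the familiar event-index test (the same shape as virtio's vring_need_event): using unsigned 16-bit wraparound arithmetic, it returns true only when moving the doorbell from old to new_idx crosses the event index the device last advertised. A small self-contained check of that behaviour, with made-up sample values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same comparison as nvme_dbbuf_need_event() in the hunk above. */
static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* Device asked to be notified once the index moves past 10. */
	assert(!need_event(10, 10, 8));	/* not yet past the event index */
	assert(need_event(10, 11, 8));	/* crossed it: ring the doorbell */
	assert(need_event(10, 12, 9));	/* still counts as crossing */

	/* The u16 casts keep the test correct across wraparound at 65535. */
	assert(need_event(65534, 2, 65530));

	puts("event-index check behaves as expected");
	return 0;
}

Only when this check fires does nvme_dbbuf_update_and_check_event() return true and let the caller issue the MMIO doorbell write.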

/*
@@ -297,6 +421,8 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,

	if (++tail == nvmeq->q_depth)
		tail = 0;
	if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
					      nvmeq->dbbuf_sq_ei))
		writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}
@@ -686,6 +812,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
		return;

	if (likely(nvmeq->cq_vector >= 0))
		if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
						      nvmeq->dbbuf_cq_ei))
			writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;
@@ -718,10 +846,8 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
	return IRQ_NONE;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
		spin_lock_irq(&nvmeq->q_lock);
		__nvme_process_cq(nvmeq, &tag);
@@ -734,6 +860,13 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
	return 0;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	return __nvme_poll(nvmeq, tag);
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
@@ -785,7 +918,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Note: we (ab)use the fact the the prp fields survive if no data
@@ -831,6 +964,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
	struct request *abort_req;
	struct nvme_command cmd;

	/*
	 * Did we miss an interrupt?
	 */
	if (__nvme_poll(nvmeq, req->tag)) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
		return BLK_EH_HANDLED;
	}

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
@@ -1070,6 +1213,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;
	spin_unlock_irq(&nvmeq->q_lock);
}
@@ -1542,6 +1686,8 @@ static int nvme_dev_add(struct nvme_dev *dev)
		if (blk_mq_alloc_tag_set(&dev->tagset))
			return 0;
		dev->ctrl.tagset = &dev->tagset;

		nvme_dbbuf_set(dev);
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

@@ -1728,6 +1874,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	nvme_dbbuf_dma_free(dev);
	put_device(dev->dev);
	if (dev->tagset.tags)
		blk_mq_free_tag_set(&dev->tagset);
@@ -1795,6 +1942,13 @@ static void nvme_reset_work(struct work_struct *work)
		dev->ctrl.opal_dev = NULL;
	}

	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
		result = nvme_dbbuf_dma_alloc(dev);
		if (result)
			dev_warn(dev->dev,
				 "unable to allocate dma for dbbuf\n");
	}

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out;
+25 −7
@@ -122,7 +122,15 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
	struct nvmet_ctrl *ctrl = NULL;
	u16 status = 0;

	d = kmap(sg_page(req->sg)) + req->sg->offset;
	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;
@@ -158,7 +166,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
	kunmap(sg_page(req->sg));
	kfree(d);
complete:
	nvmet_req_complete(req, status);
}

@@ -170,7 +179,15 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
	u16 qid = le16_to_cpu(c->qid);
	u16 status = 0;

	d = kmap(sg_page(req->sg)) + req->sg->offset;
	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;
@@ -205,7 +222,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
	pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
	kunmap(sg_page(req->sg));
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

+193 −60

File changed. Preview size limit exceeded, changes collapsed.
