Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b7405176 authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge branch 'nvme-4.18-2' of git://git.infradead.org/nvme into for-4.18/block

Pull NVMe changes from Christoph:

"Here is the current batch of nvme updates for 4.18, we have a few more
 patches in the queue, but I'd like to get this pile into your tree
 and linux-next ASAP.

 The biggest item is support for file-backed namespaces in the NVMe
 target from Chaitanya, in addition to that we mostly small fixes from
 all the usual suspects."

* 'nvme-4.18-2' of git://git.infradead.org/nvme:
  nvme: fixup memory leak in nvme_init_identify()
  nvme: fix KASAN warning when parsing host nqn
  nvmet-loop: use nr_phys_segments when map rq to sgl
  nvmet-fc: increase LS buffer count per fc port
  nvmet: add simple file backed ns support
  nvmet: remove duplicate NULL initialization for req->ns
  nvmet: make a few error messages more generic
  nvme-fabrics: allow duplicate connections to the discovery controller
  nvme-fabrics: centralize discovery controller defaults
  nvme-fabrics: remove unnecessary controller subnqn validation
  nvme-fc: remove setting DNR on exception conditions
  nvme-rdma: stop admin queue before freeing it
  nvme-pci: Fix AER reset handling
  nvme-pci: set nvmeq->cq_vector after alloc cq/sq
  nvme: host: core: fix precedence of ternary operator
  nvme: fix lockdep warning in nvme_mpath_clear_current_path
parents 5afb7835 75c8b19a
Loading
Loading
Loading
Loading
+5 −4
Original line number Original line Diff line number Diff line
@@ -1578,7 +1578,7 @@ static int nvme_pr_reserve(struct block_device *bdev, u64 key,
/*
 * Issue an NVMe Reservation Acquire command with the "preempt" (or
 * "preempt and abort") action for the block-layer Persistent
 * Reservation API.
 *
 * cdw10 layout as built here: reservation type in bits 15:8, acquire
 * action in the low bits (1 = preempt, 2 = preempt-and-abort).
 */
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	/*
	 * The ternary must be parenthesized: "?:" has lower precedence
	 * than "<<" and "|", so without the parentheses the whole of
	 * (nvme_pr_type(type) << 8 | abort) becomes the condition and
	 * cdw10 collapses to just 2 or 1, losing the reservation type.
	 */
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}


@@ -1590,7 +1590,7 @@ static int nvme_pr_clear(struct block_device *bdev, u64 key)


/*
 * Issue an NVMe Reservation Release command for the block-layer
 * Persistent Reservation API.
 *
 * cdw10 layout as built here: reservation type in bits 15:8; bit 3
 * (presumably the ignore-existing-key flag — verify against the NVMe
 * spec) is set only when a reservation key was supplied.
 */
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	/*
	 * Parenthesize the ternary: "?:" binds looser than "<<" and "|",
	 * so the unparenthesized form would evaluate
	 * (nvme_pr_type(type) << 8 | key) as the condition and reduce
	 * cdw10 to either (1 << 3) or 0, discarding the type field.
	 */
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}


@@ -2184,7 +2184,8 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
		 * Verify that the subsystem actually supports multiple
		 * Verify that the subsystem actually supports multiple
		 * controllers, else bail out.
		 * controllers, else bail out.
		 */
		 */
		if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
		if (!ctrl->opts->discovery_nqn &&
		    nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
			dev_err(ctrl->device,
			dev_err(ctrl->device,
				"ignoring ctrl due to duplicate subnqn (%s).\n",
				"ignoring ctrl due to duplicate subnqn (%s).\n",
				found->subnqn);
				found->subnqn);
@@ -2315,7 +2316,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl);
		ret = nvme_get_effects_log(ctrl);
		if (ret < 0)
		if (ret < 0)
			return ret;
			goto out_free;
	}
	}


	if (!ctrl->identified) {
	if (!ctrl->identified) {
+6 −15
Original line number Original line Diff line number Diff line
@@ -57,7 +57,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
		goto out_unlock;
		goto out_unlock;


	kref_init(&host->ref);
	kref_init(&host->ref);
	memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
	strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);


	list_add_tail(&host->list, &nvmf_hosts);
	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
out_unlock:
@@ -689,10 +689,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
			opts->discovery_nqn =
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
					 NVME_DISC_SUBSYS_NAME));
			if (opts->discovery_nqn) {
				opts->kato = 0;
				opts->nr_io_queues = 0;
			}
			break;
			break;
		case NVMF_OPT_TRADDR:
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			p = match_strdup(args);
@@ -851,6 +847,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		}
		}
	}
	}


	if (opts->discovery_nqn) {
		opts->kato = 0;
		opts->nr_io_queues = 0;
		opts->duplicate_connect = true;
	}
	if (ctrl_loss_tmo < 0)
	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
		opts->max_reconnects = -1;
	else
	else
@@ -983,16 +984,6 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
		goto out_module_put;
		goto out_module_put;
	}
	}


	if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
		dev_warn(ctrl->device,
			"controller returned incorrect NQN: \"%s\".\n",
			ctrl->subsys->subnqn);
		module_put(ops->module);
		up_read(&nvmf_transports_rwsem);
		nvme_delete_ctrl_sync(ctrl);
		return ERR_PTR(-EINVAL);
	}

	module_put(ops->module);
	module_put(ops->module);
	up_read(&nvmf_transports_rwsem);
	up_read(&nvmf_transports_rwsem);
	return ctrl;
	return ctrl;
+0 −10
Original line number Original line Diff line number Diff line
@@ -1686,16 +1686,6 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
		goto check_error;
		goto check_error;
	}
	}


	/*
	 * Force failures of commands if we're killing the controller
	 * or have an error on a command used to create an new association
	 */
	if (status &&
	    (blk_queue_dying(rq->q) ||
	     ctrl->ctrl.state == NVME_CTRL_NEW ||
	     ctrl->ctrl.state == NVME_CTRL_CONNECTING))
		status |= cpu_to_le16(NVME_SC_DNR << 1);

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	nvme_end_request(rq, status, result);
	nvme_end_request(rq, status, result);


+2 −1
Original line number Original line Diff line number Diff line
@@ -22,6 +22,7 @@
#include <linux/lightnvm.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>


extern unsigned int nvme_io_timeout;
extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
@@ -449,7 +450,7 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
{
	struct nvme_ns_head *head = ns->head;
	struct nvme_ns_head *head = ns->head;


	if (head && ns == srcu_dereference(head->current_path, &head->srcu))
	if (head && ns == rcu_access_pointer(head->current_path))
		rcu_assign_pointer(head->current_path, NULL);
		rcu_assign_pointer(head->current_path, NULL);
}
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+21 −18
Original line number Original line Diff line number Diff line
@@ -1076,7 +1076,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
}
}


static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
		struct nvme_queue *nvmeq, s16 vector)
{
{
	struct nvme_command c;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
@@ -1091,7 +1091,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
	c.create_cq.irq_vector = cpu_to_le16(vector);


	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
}
@@ -1462,6 +1462,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
{
	struct nvme_dev *dev = nvmeq->dev;
	struct nvme_dev *dev = nvmeq->dev;
	int result;
	int result;
	s16 vector;


	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
@@ -1474,15 +1475,21 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
	 * A queue's vector matches the queue identifier unless the controller
	 * A queue's vector matches the queue identifier unless the controller
	 * has only one vector available.
	 * has only one vector available.
	 */
	 */
	nvmeq->cq_vector = dev->num_vecs == 1 ? 0 : qid;
	vector = dev->num_vecs == 1 ? 0 : qid;
	result = adapter_alloc_cq(dev, qid, nvmeq);
	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
	if (result < 0)
	if (result < 0)
		goto release_vector;
		goto out;


	result = adapter_alloc_sq(dev, qid, nvmeq);
	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
	if (result < 0)
		goto release_cq;
		goto release_cq;


	/*
	 * Set cq_vector after alloc cq/sq, otherwise nvme_suspend_queue will
	 * invoke free_irq for it and cause a 'Trying to free already-free IRQ
	 * xxx' warning if the create CQ/SQ command times out.
	 */
	nvmeq->cq_vector = vector;
	nvme_init_queue(nvmeq, qid);
	nvme_init_queue(nvmeq, qid);
	result = queue_request_irq(nvmeq);
	result = queue_request_irq(nvmeq);
	if (result < 0)
	if (result < 0)
@@ -1491,12 +1498,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
	return result;
	return result;


release_sq:
release_sq:
	nvmeq->cq_vector = -1;
	dev->online_queues--;
	dev->online_queues--;
	adapter_delete_sq(dev, qid);
	adapter_delete_sq(dev, qid);
release_cq:
release_cq:
	adapter_delete_cq(dev, qid);
	adapter_delete_cq(dev, qid);
 release_vector:
out:
	nvmeq->cq_vector = -1;
	return result;
	return result;
}
}


@@ -2695,19 +2702,15 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)


	dev_info(dev->ctrl.device, "restart after slot reset\n");
	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	pci_restore_state(pdev);
	nvme_reset_ctrl_sync(&dev->ctrl);
	nvme_reset_ctrl(&dev->ctrl);

	switch (dev->ctrl.state) {
	case NVME_CTRL_LIVE:
	case NVME_CTRL_ADMIN_ONLY:
	return PCI_ERS_RESULT_RECOVERED;
	return PCI_ERS_RESULT_RECOVERED;
	default:
		return PCI_ERS_RESULT_DISCONNECT;
	}
}
}


static void nvme_error_resume(struct pci_dev *pdev)
static void nvme_error_resume(struct pci_dev *pdev)
{
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	flush_work(&dev->ctrl.reset_work);
	pci_cleanup_aer_uncorrect_error_status(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);
}
}


Loading