Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 47ca23c1 authored by Jens Axboe
Browse files

Merge branch 'nvme-5.2' of git://git.infradead.org/nvme into for-5.2/block-post

Pull NVMe fixes from Christoph

* 'nvme-5.2' of git://git.infradead.org/nvme:
  nvme: validate cntlid during controller initialisation
  nvme: change locking for the per-subsystem controller list
  nvme: trace all async notice events
  nvme: fix typos in nvme status code values
  nvme-fabrics: remove unused argument
  nvme-multipath: avoid crash on invalid subsystem cntlid enumeration
  nvme-fc: use separate work queue to avoid warning
  nvme-rdma: remove redundant reference between ib_device and tagset
  nvme-pci: mark expected switch fall-through
  nvme-pci: add known admin effects to augment admin effects log page
  nvme-pci: init shadow doorbell after each reset
parents 936b33f7 1b1031ca
Loading
Loading
Loading
Loading
+40 −39
Original line number Diff line number Diff line
@@ -1257,10 +1257,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		return 0;
	}

	effects |= nvme_known_admin_effects(opcode);
	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	else
		effects = nvme_known_admin_effects(opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
@@ -2342,20 +2341,35 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	NULL,
};

static int nvme_active_ctrls(struct nvme_subsystem *subsys)
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int count = 0;
	struct nvme_ctrl *ctrl;
	struct nvme_ctrl *tmp;

	lockdep_assert_held(&nvme_subsystems_lock);

	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
		if (ctrl->state == NVME_CTRL_DELETING ||
		    ctrl->state == NVME_CTRL_DEAD)
			continue;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->state != NVME_CTRL_DELETING &&
		    ctrl->state != NVME_CTRL_DEAD)
			count++;
		if (tmp->cntlid == ctrl->cntlid) {
			dev_err(ctrl->device,
				"Duplicate cntlid %u with %s, rejecting\n",
				ctrl->cntlid, dev_name(tmp->device));
			return false;
		}
	mutex_unlock(&subsys->lock);

	return count;
		if ((id->cmic & (1 << 1)) ||
		    (ctrl->opts && ctrl->opts->discovery_nqn))
			continue;

		dev_err(ctrl->device,
			"Subsystem does not support multiple controllers\n");
		return false;
	}

	return true;
}

static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
@@ -2395,22 +2409,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
		/*
		 * Verify that the subsystem actually supports multiple
		 * controllers, else bail out.
		 */
		if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
		    nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
			dev_err(ctrl->device,
				"ignoring ctrl due to duplicate subnqn (%s).\n",
				found->subnqn);
			nvme_put_subsystem(found);
			ret = -EINVAL;
			goto out_unlock;
		}

		__nvme_release_subsystem(subsys);
		subsys = found;

		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
			ret = -EINVAL;
			goto out_put_subsystem;
		}
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
@@ -2422,23 +2427,20 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

	ctrl->subsys = subsys;
	mutex_unlock(&nvme_subsystems_lock);

	if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
			dev_name(ctrl->device))) {
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
		/* the transport driver will eventually put the subsystem */
		return -EINVAL;
		goto out_put_subsystem;
	}

	mutex_lock(&subsys->lock);
	ctrl->subsys = subsys;
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	mutex_unlock(&nvme_subsystems_lock);
	return 0;

out_put_subsystem:
	nvme_put_subsystem(subsys);
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	put_device(&subsys->dev);
@@ -3605,19 +3607,18 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = (result & 0xff00) >> 8;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		trace_nvme_async_event(ctrl, aer_notice_type);
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		trace_nvme_async_event(ctrl, aer_notice_type);
		queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		trace_nvme_async_event(ctrl, aer_notice_type);
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
@@ -3696,10 +3697,10 @@ static void nvme_free_ctrl(struct device *dev)
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&subsys->lock);
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		mutex_unlock(&subsys->lock);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);
+2 −2
Original line number Diff line number Diff line
@@ -978,7 +978,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
				 NVMF_OPT_DISABLE_SQFLOW)

static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
@@ -1073,7 +1073,7 @@ static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
+11 −3
Original line number Diff line number Diff line
@@ -202,7 +202,7 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);


static struct workqueue_struct *nvme_fc_wq;

/*
 * These items are short-term. They will eventually be moved into
@@ -2054,7 +2054,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
	 */
	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		active = atomic_xchg(&ctrl->err_work_active, 1);
		if (!active && !schedule_work(&ctrl->err_work)) {
		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
			atomic_set(&ctrl->err_work_active, 0);
			WARN_ON(1);
		}
@@ -3399,6 +3399,10 @@ static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvme_fc_wq)
		return -ENOMEM;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
@@ -3416,7 +3420,7 @@ static int __init nvme_fc_init_module(void)
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		return ret;
		goto out_destroy_wq;
	}

	/*
@@ -3440,6 +3444,9 @@ static int __init nvme_fc_init_module(void)
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
out_destroy_wq:
	destroy_workqueue(nvme_fc_wq);

	return ret;
}

@@ -3456,6 +3463,7 @@ static void __exit nvme_fc_exit_module(void)

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
	destroy_workqueue(nvme_fc_wq);
}

module_init(nvme_fc_init_module);
+1 −1
Original line number Diff line number Diff line
@@ -31,7 +31,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->cntlid, ns->head->instance);
				ctrl->instance, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+2 −2
Original line number Diff line number Diff line
@@ -1296,6 +1296,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
	switch (dev->ctrl.state) {
	case NVME_CTRL_DELETING:
		shutdown = true;
		/* fall through */
	case NVME_CTRL_CONNECTING:
	case NVME_CTRL_RESETTING:
		dev_warn_ratelimited(dev->ctrl.device,
@@ -2280,8 +2281,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
			return ret;
		}
		dev->ctrl.tagset = &dev->tagset;

		nvme_dbbuf_set(dev);
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

@@ -2289,6 +2288,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
		nvme_free_queues(dev, dev->online_queues);
	}

	nvme_dbbuf_set(dev);
	return 0;
}

Loading