
Commit 74413084 authored by Linus Torvalds

Merge tag 'nvme-for-4.18' of git://git.infradead.org/nvme

Pull NVMe fixes from Christoph Hellwig:

 - fix a regression in 4.18 that causes a memory leak on probe failure
   (Keith Busch)

 - fix a deadlock in the passthrough ioctl code (Scott Bauer)

 - don't enable AENs if not supported (Weiping Zhang)

 - fix an old regression in metadata handling in the passthrough ioctl
   code (Roland Dreier)

* tag 'nvme-for-4.18' of git://git.infradead.org/nvme:
  nvme: fix handling of metadata_len for NVME_IOCTL_IO_CMD
  nvme: don't enable AEN if not supported
  nvme: ensure forward progress during Admin passthru
  nvme-pci: fix memory leak on probe failure
parents 165ea0d1 9b382768
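
A note on the "ensure forward progress during Admin passthru" fix: as the core.c hunks below show, nvme_update_formats() no longer removes namespaces while holding namespaces_rwsem for write. It now takes the lock shared, marks any namespace that fails revalidation as dead through the new nvme_set_queue_dying() helper, and leaves the actual removal to nvme_remove_invalid_namespaces(), which takes the lock exclusive only briefly. The following is a minimal userspace sketch of that mark-then-reap pattern; every name in it is hypothetical and none of the NVMe specifics are modelled.

/*
 * Mark-then-reap under a reader/writer lock: flag entries while holding the
 * lock shared, remove them in a separate pass that takes it exclusive.
 * Hypothetical sketch, not driver code; build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NENTRIES 4

struct entry {
	int id;
	bool valid;	/* result of a (possibly slow) revalidation */
	bool dead;	/* marked for later removal */
};

static struct entry entries[NENTRIES] = {
	{ 1, true,  false },
	{ 2, false, false },
	{ 3, true,  false },
	{ 4, false, false },
};
static pthread_rwlock_t entries_lock = PTHREAD_RWLOCK_INITIALIZER;

static void mark_invalid_entries(void)
{
	pthread_rwlock_rdlock(&entries_lock);	/* shared: readers keep going */
	for (int i = 0; i < NENTRIES; i++)
		if (!entries[i].valid)
			entries[i].dead = true;	/* flag only, no removal here;
						 * the driver uses an atomic
						 * test_and_set_bit() for this,
						 * a plain store is fine in this
						 * single-threaded demo */
	pthread_rwlock_unlock(&entries_lock);
}

static void reap_dead_entries(void)
{
	pthread_rwlock_wrlock(&entries_lock);	/* exclusive, held only briefly */
	for (int i = 0; i < NENTRIES; i++)
		if (entries[i].dead)
			printf("removing entry %d\n", entries[i].id);
	pthread_rwlock_unlock(&entries_lock);
}

int main(void)
{
	mark_invalid_entries();
	reap_dead_entries();
	return 0;
}

The long-running work (deciding which entries are invalid) happens under the shared lock, so other users of the list keep making progress; the exclusive lock is held only for the short reap pass.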
drivers/nvme/host/core.c  +34 −29
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+					   unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+	/*
+	 * Revalidating a dead namespace sets capacity to 0. This will end
+	 * buffered writers dirtying pages that can't be synced.
+	 */
+	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+		return;
+	revalidate_disk(ns->disk);
+	blk_set_queue_dying(ns->queue);
+	/* Forcibly unquiesce queues to avoid blocking dispatch */
+	blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-	u32 result;
+	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
 	int status;
 
-	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-			ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+	if (!supported_aens)
+		return;
+
+	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+			NULL, 0, &result);
 	if (status)
 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-			 ctrl->oaes & NVME_AEN_SUPPORTED);
+			 supported_aens);
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-	struct nvme_ns *ns, *next;
-	LIST_HEAD(rm_list);
+	struct nvme_ns *ns;
 
-	down_write(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-			list_move_tail(&ns->list, &rm_list);
-		}
-	}
-	up_write(&ctrl->namespaces_rwsem);
+	down_read(&ctrl->namespaces_rwsem);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		if (ns->disk && nvme_revalidate_disk(ns->disk))
+			nvme_set_queue_dying(ns);
+	up_read(&ctrl->namespaces_rwsem);
 
-	list_for_each_entry_safe(ns, next, &rm_list, list)
-		nvme_ns_remove(ns);
+	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
 			0, &cmd.result, timeout);
 	nvme_passthru_end(ctrl, effects);

@@ -3138,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
 	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-		if (ns->head->ns_id > nsid)
+		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
 			list_move_tail(&ns->list, &rm_list);
 	}
 	up_write(&ctrl->namespaces_rwsem);
@@ -3542,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	if (ctrl->admin_q)
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		/*
-		 * Revalidating a dead namespace sets capacity to 0. This will
-		 * end buffered writers dirtying pages that can't be synced.
-		 */
-		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-			continue;
-		revalidate_disk(ns->disk);
-		blk_set_queue_dying(ns->queue);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		nvme_set_queue_dying(ns);
 
-		/* Forcibly unquiesce queues to avoid blocking dispatch */
-		blk_mq_unquiesce_queue(ns->queue);
-	}
 	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
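
The pci.c hunks below address the 4.18 probe-failure leak: nvme_init_ctrl() is moved after the iod mempool allocation so every resource acquired before a failing step has a matching unwind label, and a new release_mempool label frees the mempool when controller initialization fails. Here is a short, self-contained sketch of the same goto-unwind ordering; the resources and the failure point are hypothetical, not the driver's.

/*
 * Reverse-order error unwinding: each allocation gets its own label, and a
 * failure jumps to the label that frees everything acquired so far.
 * Hypothetical sketch, not nvme_probe() itself.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static int probe(bool init_fails)
{
	void *pools = NULL, *iod_pool = NULL;

	pools = malloc(64);		/* earliest allocation */
	if (!pools)
		goto out;

	iod_pool = malloc(64);		/* allocated before controller init */
	if (!iod_pool)
		goto release_pools;

	if (init_fails)			/* controller init is the last fallible step */
		goto release_mempool;	/* unwind everything allocated so far */

	return 0;

release_mempool:
	free(iod_pool);
release_pools:
	free(pools);
out:
	return -1;
}

int main(void)
{
	printf("probe: %d\n", probe(true));
	return 0;
}

Freeing in the reverse order of allocation keeps the error path symmetric with the eventual teardown, which is the point of reordering the calls in the hunks that follow.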
drivers/nvme/host/pci.c  +7 −5
@@ -2556,11 +2556,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	quirks |= check_vendor_combination_bug(pdev);
 
-	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-			quirks);
-	if (result)
-		goto release_pools;
-
 	/*
 	 * Double check that our mempool alloc size will cover the biggest
 	 * command we support.
@@ -2578,6 +2573,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto release_pools;
 	}
 
+	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+			quirks);
+	if (result)
+		goto release_mempool;
+
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
 	nvme_get_ctrl(&dev->ctrl);
@@ -2585,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	return 0;
 
+ release_mempool:
+	mempool_destroy(dev->iod_mempool);
  release_pools:
 	nvme_release_prp_pools(dev);
  unmap:
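
On the AEN change ("don't enable AEN if not supported"): nvme_enable_aen() in the core.c hunk above now masks the events the controller advertises (ctrl->oaes) against the set the host handles (NVME_AEN_SUPPORTED) and returns early when nothing overlaps, rather than always issuing the Set Features command. A small sketch of that mask-and-skip pattern follows; the mask value and the stub standing in for the real command are made up for illustration.

/*
 * Mask-and-skip: configure only the asynchronous events both sides support,
 * and send nothing when the intersection is empty. Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define AEN_SUPPORTED_MASK 0x0300u	/* assumed host-supported event bits */

struct ctrl {
	uint32_t oaes;			/* events the controller advertises */
};

static int set_async_event_config(struct ctrl *c, uint32_t cfg)
{
	/* stand-in for the real Set Features (Async Event Config) command */
	printf("configuring AEN mask 0x%x\n", (unsigned)cfg);
	return 0;
}

static void enable_aen(struct ctrl *c)
{
	uint32_t supported = c->oaes & AEN_SUPPORTED_MASK;

	if (!supported)			/* nothing in common: skip the command */
		return;

	if (set_async_event_config(c, supported))
		fprintf(stderr, "failed to configure AEN (cfg %x)\n",
			(unsigned)supported);
}

int main(void)
{
	struct ctrl c = { .oaes = 0x0100 };

	enable_aen(&c);
	return 0;
}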