Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2e1d8448 authored by Keith Busch
Browse files

NVMe: Asynchronous controller probe



This performs the longest parts of nvme device probe in scheduled work.
This speeds up probe significantly when multiple devices are in use.

Signed-off-by: Keith Busch <keith.busch@intel.com>
parent b3fffdef
Loading
Loading
Loading
Loading
+31 −17
Original line number Diff line number Diff line
@@ -2800,6 +2800,10 @@ static int nvme_dev_open(struct inode *inode, struct file *f)
	spin_lock(&dev_list_lock);
	list_for_each_entry(dev, &dev_list, node) {
		if (dev->instance == instance) {
			if (!dev->admin_q) {
				ret = -EWOULDBLOCK;
				break;
			}
			if (!kref_get_unless_zero(&dev->kref))
				break;
			f->private_data = dev;
@@ -2982,6 +2986,7 @@ static void nvme_reset_workfn(struct work_struct *work)
	dev->reset_workfn(work);
}

static void nvme_async_probe(struct work_struct *work);
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int node, result = -ENOMEM;
@@ -3017,34 +3022,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		goto release;

	kref_init(&dev->kref);
	result = nvme_dev_start(dev);
	if (result)
		goto release_pools;

	dev->device = device_create(nvme_class, &pdev->dev,
				MKDEV(nvme_char_major, dev->instance),
				dev, "nvme%d", dev->instance);
	if (IS_ERR(dev->device)) {
		result = PTR_ERR(dev->device);
		goto shutdown;
		goto release_pools;
	}
	get_device(dev->device);

	if (dev->online_queues > 1)
		result = nvme_dev_add(dev);
	if (result)
		goto device_del;

	nvme_set_irq_hints(dev);
	dev->initialized = 1;
	INIT_WORK(&dev->probe_work, nvme_async_probe);
	schedule_work(&dev->probe_work);
	return 0;

 device_del:
	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
 shutdown:
	nvme_dev_shutdown(dev);
 release_pools:
	nvme_free_queues(dev, 0);
	nvme_release_prp_pools(dev);
 release:
	nvme_release_instance(dev);
@@ -3057,6 +3048,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	return result;
}

/*
 * Deferred half of controller probe, run from the probe_work item that
 * nvme_probe() initializes and schedules.  Doing the slow bring-up here
 * lets nvme_probe() return quickly, so multiple devices probe in
 * parallel (the point of this commit).
 */
static void nvme_async_probe(struct work_struct *work)
{
	/* Recover the owning device from the embedded work item. */
	struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
	int result;

	result = nvme_dev_start(dev);
	if (result)
		goto reset;

	/*
	 * Only add the device when I/O queues exist beyond the admin
	 * queue.  If online_queues <= 1, result is still 0 from the
	 * successful nvme_dev_start() above, so the check below passes.
	 */
	if (dev->online_queues > 1)
		result = nvme_dev_add(dev);
	if (result)
		goto reset;

	nvme_set_irq_hints(dev);
	dev->initialized = 1;
	return;
 reset:
	/*
	 * Hand failures to the reset machinery instead of failing probe:
	 * point the shared reset_work at the failure handler, then queue
	 * it.  Order matters — reset_workfn must be set before the work
	 * can run.  NOTE(review): assumes nothing else can queue
	 * reset_work concurrently during probe — confirm against the
	 * driver's reset paths.
	 */
	dev->reset_workfn = nvme_reset_failed_dev;
	queue_work(nvme_workq, &dev->reset_work);
}

static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -3082,6 +3095,7 @@ static void nvme_remove(struct pci_dev *pdev)
	spin_unlock(&dev_list_lock);

	pci_set_drvdata(pdev, NULL);
	flush_work(&dev->probe_work);
	flush_work(&dev->reset_work);
	nvme_dev_shutdown(dev);
	nvme_dev_remove(dev);
+1 −0
Original line number Diff line number Diff line
@@ -91,6 +91,7 @@ struct nvme_dev {
	struct device *device;
	work_func_t reset_workfn;
	struct work_struct reset_work;
	struct work_struct probe_work;
	char name[12];
	char serial[20];
	char model[40];