Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fa08a396 authored by Ramachandra Rao Gajula, committed by Matthew Wilcox
Browse files

NVMe: Add MSI support



Some devices only have support for MSI, not MSI-X.  While MSI is more
limited, it still provides better performance than line-based interrupts.

Signed-off-by: Ramachandra Gajula <rama@fastorsystems.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
parent cf9f123b
Loading
Loading
Loading
Loading
+32 −8
Original line number Diff line number Diff line
@@ -1637,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
	struct pci_dev *pdev = dev->pci_dev;
	int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
@@ -1646,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
	if (result < nr_io_queues)
		nr_io_queues = result;

	q_count = nr_io_queues;
	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
	if (db_bar_size > 8192) {
		iounmap(dev->bar);
		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
								db_bar_size);
		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		dev->queues[0]->q_db = dev->dbs;
	}
@@ -1661,18 +1662,35 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 0;
			break;
		}
	}

	if (nr_io_queues == 0) {
		nr_io_queues = q_count;
		for (;;) {
			result = pci_enable_msi_block(pdev, nr_io_queues);
			if (result == 0) {
				for (i = 0; i < nr_io_queues; i++)
					dev->entry[i].vector = i + pdev->irq;
				break;
			} else if (result > 0) {
				nr_io_queues = result;
				continue;
			} else {
				nr_io_queues = 1;
				break;
			}
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */
@@ -1854,6 +1872,9 @@ static void nvme_free_dev(struct kref *kref)
{
	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
	nvme_dev_remove(dev);
	if (dev->pci_dev->msi_enabled)
		pci_disable_msi(dev->pci_dev);
	else if (dev->pci_dev->msix_enabled)
		pci_disable_msix(dev->pci_dev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
@@ -1987,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	if (dev->pci_dev->msi_enabled)
		pci_disable_msi(dev->pci_dev);
	else if (dev->pci_dev->msix_enabled)
		pci_disable_msix(dev->pci_dev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable: