
Commit 53a788a7 authored by Alexander Gordeev, committed by Jon Mason

ntb: Split ntb_setup_msix() into separate BWD/SNB routines



This is a cleanup effort to make ntb_setup_msix() more
readable - use ntb_setup_bwd_msix() to init MSI-Xs on
BWD hardware and ntb_setup_snb_msix() on SNB hardware.

Function ntb_setup_bwd_msix() also initializes MSI-Xs the
way it should have been done - looping pci_enable_msix()
until success or failure.

Signed-off-by: Alexander Gordeev <agordeev@redhat.com>
Signed-off-by: Jon Mason <jon.mason@intel.com>
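
For reference, a minimal, self-contained sketch of the "loop pci_enable_msix() until success or failure" pattern the message describes; example_enable_msix() and its parameters are illustrative names, not part of the patch:

#include <linux/pci.h>

/*
 * Illustrative helper (not from the patch): call pci_enable_msix() until it
 * either succeeds (returns 0) or fails hard (returns a negative errno).  A
 * positive return value means only that many vectors are available, so retry
 * with the reduced count, as ntb_setup_bwd_msix() does in the diff below.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int nvec)
{
	int rc;

	while (nvec > 0) {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc == 0)
			return nvec;	/* all requested vectors granted */
		if (rc < 0)
			return rc;	/* hard failure, e.g. -EINVAL */
		nvec = rc;		/* only 'rc' vectors available: retry */
	}

	return -ENOSPC;
}

Later kernels provide pci_enable_msix_range() for exactly this retry pattern; the open-coded loop above mirrors what the retry: label in the new BWD routine does.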
parent 77733513
+102 −65
@@ -1080,104 +1080,141 @@ static irqreturn_t ntb_interrupt(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
-static int ntb_setup_msix(struct ntb_device *ndev)
+static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
 {
 	struct pci_dev *pdev = ndev->pdev;
 	struct msix_entry *msix;
-	int msix_entries;
 	int rc, i;
 
-	msix_entries = pci_msix_vec_count(pdev);
-	if (msix_entries < 0) {
-		rc = msix_entries;
-		goto err;
-	} else if (msix_entries > ndev->limits.msix_cnt) {
-		rc = -EINVAL;
-		goto err;
-	}
-
-	ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
-				     GFP_KERNEL);
-	if (!ndev->msix_entries) {
-		rc = -ENOMEM;
-		goto err;
-	}
-
-	for (i = 0; i < msix_entries; i++)
-		ndev->msix_entries[i].entry = i;
+	if (msix_entries < ndev->limits.msix_cnt)
+		return -ENOSPC;
 
 	rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
 	if (rc < 0)
-		goto err1;
-	if (rc > 0) {
-		/* On SNB, the link interrupt is always tied to 4th vector.  If
-		 * we can't get all 4, then we can't use MSI-X.
-		 */
-		if (ndev->hw_type != BWD_HW) {
-			rc = -EIO;
-			goto err1;
-		}
-
-		dev_warn(&pdev->dev,
-			 "Only %d MSI-X vectors.  Limiting the number of queues to that number.\n",
-			 rc);
-		msix_entries = rc;
-
-		rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
-		if (rc)
-			goto err1;
-	}
+		return rc;
+	else if (rc > 0)
+		return -ENOSPC;
 
 	for (i = 0; i < msix_entries; i++) {
 		msix = &ndev->msix_entries[i];
 		WARN_ON(!msix->vector);
 
-		/* Use the last MSI-X vector for Link status */
-		if (ndev->hw_type == BWD_HW) {
-			rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
-					 "ntb-callback-msix", &ndev->db_cb[i]);
-			if (rc)
-				goto err2;
-		} else {
 		if (i == msix_entries - 1) {
 			rc = request_irq(msix->vector,
 					 xeon_event_msix_irq, 0,
 					 "ntb-event-msix", ndev);
 			if (rc)
-				goto err2;
+				goto err;
 		} else {
 			rc = request_irq(msix->vector,
 					 xeon_callback_msix_irq, 0,
 					 "ntb-callback-msix",
 					 &ndev->db_cb[i]);
 			if (rc)
-				goto err2;
-			}
+				goto err;
 		}
 	}
 
 	ndev->num_msix = msix_entries;
-	if (ndev->hw_type == BWD_HW)
-		ndev->max_cbs = msix_entries;
-	else
 	ndev->max_cbs = msix_entries - 1;
 
 	return 0;
 
-err2:
+err:
 	while (--i >= 0) {
+		/* Code never reaches here for entry nr 'ndev->num_msix - 1' */
 		msix = &ndev->msix_entries[i];
-		if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
-			free_irq(msix->vector, ndev);
-		else
 		free_irq(msix->vector, &ndev->db_cb[i]);
 	}
 
 	pci_disable_msix(pdev);
+	ndev->num_msix = 0;
+
+	return rc;
+}
+
+static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
+{
+	struct pci_dev *pdev = ndev->pdev;
+	struct msix_entry *msix;
+	int rc, i;
+
+retry:
+	rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
+	if (rc < 0)
+		return rc;
+	else if (rc > 0) {
+		dev_warn(&pdev->dev,
+			 "Only %d MSI-X vectors. "
+			 "Limiting the number of queues to that number.\n",
+			 rc);
+		msix_entries = rc;
+		goto retry;
+	}
+
+	for (i = 0; i < msix_entries; i++) {
+		msix = &ndev->msix_entries[i];
+		WARN_ON(!msix->vector);
+
+		rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+				 "ntb-callback-msix", &ndev->db_cb[i]);
+		if (rc)
+			goto err;
+	}
+
+	ndev->num_msix = msix_entries;
+	ndev->max_cbs = msix_entries;
+
+	return 0;
+
+err:
+	while (--i >= 0)
+		free_irq(msix->vector, &ndev->db_cb[i]);
+
+	pci_disable_msix(pdev);
+	ndev->num_msix = 0;
+
+	return rc;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+	struct pci_dev *pdev = ndev->pdev;
+	int msix_entries;
+	int rc, i;
+
+	msix_entries = pci_msix_vec_count(pdev);
+	if (msix_entries < 0) {
+		rc = msix_entries;
+		goto err;
+	} else if (msix_entries > ndev->limits.msix_cnt) {
+		rc = -EINVAL;
+		goto err;
+	}
+
+	ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
+				     GFP_KERNEL);
+	if (!ndev->msix_entries) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < msix_entries; i++)
+		ndev->msix_entries[i].entry = i;
+
+	if (ndev->hw_type == BWD_HW)
+		rc = ntb_setup_bwd_msix(ndev, msix_entries);
+	else
+		rc = ntb_setup_snb_msix(ndev, msix_entries);
+	if (rc)
+		goto err1;
+
+	return 0;
+
 err1:
 	kfree(ndev->msix_entries);
-	dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
 err:
-	ndev->num_msix = 0;
+	dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
 	return rc;
 }
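
As a usage note, MSI-X setup like this usually sits at the top of a fallback chain (MSI-X, then MSI, then legacy INTx). The sketch below is illustrative only: example_setup_interrupts() and its fallback order are hypothetical, while ntb_setup_msix(), ntb_interrupt() and struct ntb_device are taken from the driver code above.

#include <linux/interrupt.h>
#include <linux/pci.h>

/*
 * Hypothetical caller, not copied from ntb_hw.c: try MSI-X first via the
 * routine split in this patch, then fall back to MSI, then to a shared
 * legacy INTx line serviced by ntb_interrupt().
 */
static int example_setup_interrupts(struct ntb_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int rc;

	rc = ntb_setup_msix(ndev);	/* dispatches to the BWD or SNB variant */
	if (!rc)
		return 0;

	rc = pci_enable_msi(pdev);
	if (!rc)
		return request_irq(pdev->irq, ntb_interrupt, 0,
				   "ntb-msi", ndev);

	return request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED,
			   "ntb-intx", ndev);
}

Error handling for the MSI/INTx request_irq() calls is elided; the point is only where the split ntb_setup_msix() slots in.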