Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 104d9c7f authored by Christoph Hellwig, committed by Martin K. Petersen
Browse files

scsi: csiostor: switch to pci_alloc_irq_vectors



And get automatic MSI-X affinity for free.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Varun Prakash <varun@chelsio.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 75106523
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -95,7 +95,6 @@ enum {
};

/*
 * Per-vector MSI-X bookkeeping for the csiostor driver; one entry per
 * allocated vector (non-data/firmware-event vectors plus one per SCSI qset).
 */
struct csio_msix_entries {
	unsigned short	vector;		/* Assigned MSI-X vector */
	void		*dev_id;	/* Priv object associated w/ this msix*/
	char		desc[24];	/* Description of this vector */
};
+47 −81
Original line number Diff line number Diff line
@@ -383,17 +383,15 @@ csio_request_irqs(struct csio_hw *hw)
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;
	struct pci_dev *pdev = hw->pdev;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
					(hw->intr_mode == CSIO_IM_MSI) ?
							0 : IRQF_SHARED,
		rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
				hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
				KBUILD_MODNAME, hw);
		if (rv) {
			if (hw->intr_mode == CSIO_IM_MSI)
				pci_disable_msi(hw->pdev);
			csio_err(hw, "Failed to allocate interrupt line.\n");
			return -EINVAL;
			goto out_free_irqs;
		}

		goto out;
@@ -402,22 +400,22 @@ csio_request_irqs(struct csio_hw *hw)
	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
	rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = (void *)hw;
	entryp[k++].dev_id = hw;

	rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
	rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = (void *)hw;
@@ -429,51 +427,31 @@ csio_request_irqs(struct csio_hw *hw)
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
			rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				       "IRQ request failed for vec %d err:%d\n",
				       entryp[k].vector, rv);
				goto err;
				       pci_irq_vector(pdev, k), rv);
				goto out_free_irqs;
			}

			entryp[k].dev_id = (void *)q;
			entryp[k].dev_id = q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;

	return 0;

err:
	for (i = 0; i < k; i++) {
		entryp = &hw->msix_entries[i];
		free_irq(entryp->vector, entryp->dev_id);
	}
	pci_disable_msix(hw->pdev);

out_free_irqs:
	for (i = 0; i < k; i++)
		free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
	pci_free_irq_vectors(hw->pdev);
	return -EINVAL;
}

/*
 * csio_disable_msix - Tear down MSI-X interrupt state for the adapter.
 * @hw:   Hardware context.
 * @free: When true, also release every requested IRQ line (the
 *        CSIO_EXTRA_VECS control vectors plus one per SCSI qset)
 *        before disabling MSI-X on the PCI device.
 */
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
	int i;
	struct csio_msix_entries *entryp;
	int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;	/* total vectors in use */

	if (free) {
		for (i = 0; i < cnt; i++) {
			entryp = &hw->msix_entries[i];
			free_irq(entryp->vector, entryp->dev_id);
		}
	}
	pci_disable_msix(hw->pdev);
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
@@ -500,10 +478,9 @@ static int
csio_enable_msix(struct csio_hw *hw)
{
	int i, j, k, n, min, cnt;
	struct csio_msix_entries *entryp;
	struct msix_entry *entries;
	int extra = CSIO_EXTRA_VECS;
	struct csio_scsi_cpu_info *info;
	struct irq_affinity desc = { .pre_vectors = 2 };

	min = hw->num_pports + extra;
	cnt = hw->num_sqsets + extra;
@@ -512,50 +489,35 @@ csio_enable_msix(struct csio_hw *hw)
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
		cnt = min_t(uint8_t, hw->cfg_niq, cnt);

	entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < cnt; i++)
		entries[i].entry = (uint16_t)i;

	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

	cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
	if (cnt < 0) {
		kfree(entries);
	cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
	if (cnt < 0)
		return cnt;
	}

	if (cnt < (hw->num_sqsets + extra)) {
		csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
		csio_reduce_sqsets(hw, cnt - extra);
	}

	/* Save off vectors */
	for (i = 0; i < cnt; i++) {
		entryp = &hw->msix_entries[i];
		entryp->vector = entries[i].vector;
	}

	/* Distribute vectors */
	k = 0;
	csio_set_nondata_intr_idx(hw, entries[k].entry);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
	csio_set_fwevt_intr_idx(hw, entries[k++].entry);
	csio_set_nondata_intr_idx(hw, k);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
	csio_set_fwevt_intr_idx(hw, k++);

	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) +  k;
			hw->sqset[i][j].intr_idx = entries[n].entry;
			hw->sqset[i][j].intr_idx = n;
		}

		k += info->max_cpus;
	}

	kfree(entries);
	return 0;
}

@@ -597,22 +559,26 @@ csio_intr_disable(struct csio_hw *hw, bool free)
{
	csio_hw_intr_disable(hw);

	if (free) {
		int i;

		switch (hw->intr_mode) {
		case CSIO_IM_MSIX:
		csio_disable_msix(hw, free);
			for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
				free_irq(pci_irq_vector(hw->pdev, i),
					 hw->msix_entries[i].dev_id);
			}
			break;
		case CSIO_IM_MSI:
		if (free)
			free_irq(hw->pdev->irq, hw);
		pci_disable_msi(hw->pdev);
		break;
		case CSIO_IM_INTX:
		if (free)
			free_irq(hw->pdev->irq, hw);
			free_irq(pci_irq_vector(hw->pdev, 0), hw);
			break;
		default:
			break;
		}
	}

	pci_free_irq_vectors(hw->pdev);
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}