Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 23bd51e0 authored by David S. Miller
Browse files

Merge branch 'qed-fixes'



Manish Chopra says:

====================
qed: Bug fixes

Please consider applying this series to net.

V2:
- Use available helpers for declaring bitmap
  and bitmap operations.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4675390a 8f16bc97
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -299,6 +299,7 @@ struct qed_hwfn {

	/* Flag indicating whether interrupts are enabled or not*/
	bool				b_int_enabled;
	bool				b_int_requested;

	struct qed_mcp_info		*mcp_info;

@@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);

int qed_slowpath_irq_req(struct qed_hwfn *hwfn);

#define QED_ETH_INTERFACE_VERSION       300

#endif /* _QED_H */
+32 −21
Original line number Diff line number Diff line
@@ -1385,52 +1385,63 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
	return rc;
}

static u32 qed_hw_bar_size(struct qed_dev *cdev,
static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
			   u8			bar_id)
{
	u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
	u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
		       : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);

	return size / cdev->num_hwfns;
	/* Get the BAR size(in KB) from hardware given val */
	return 1 << (val + 15);
}

int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	int rc, i;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = cdev->hwfns[0].hw_info.personality;
	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	for (i = 1; i < cdev->num_hwfns; i++) {
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
		p_doorbell = addr;

		p_regview =  cdev->regview +
			     i * qed_hw_bar_size(cdev, 0);
		p_doorbell = cdev->doorbells +
			     i * qed_hw_bar_size(cdev, 1);
		rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
	 * initialized hwfn 0.
		 */
		if (rc) {
			/* Cleanup previously initialized hwfns */
			while (--i >= 0) {
				qed_init_free(&cdev->hwfns[i]);
				qed_mcp_free(&cdev->hwfns[i]);
				qed_hw_hwfn_free(&cdev->hwfns[i]);
			}
			return rc;
			qed_init_free(p_hwfn);
			qed_mcp_free(p_hwfn);
			qed_hw_hwfn_free(p_hwfn);
		}
	}

	return 0;
	return rc;
}

void qed_hw_remove(struct qed_dev *cdev)
+24 −9
Original line number Diff line number Diff line
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       enum qed_int_mode int_mode)
{
	int i;

	p_hwfn->b_int_enabled = 1;
	int rc, i;

	/* Mask non-link attentions */
	for (i = 0; i < 9; i++)
		qed_wr(p_hwfn, p_ptt,
		       MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);

	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);

	/* Configure AEU signal change to produce attentions for link */
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn,

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc != 0) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}
	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}

void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,

	return info->igu_sb_cnt;
}

void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}
+10 −5
Original line number Diff line number Diff line
@@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			int *p_iov_blks);

/**
 * @file
 * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
 *        release. The API needs to be called after releasing all slowpath IRQs
 *        of the device.
 *
 * @param cdev
 *
 * @brief Interrupt handler
 */
void qed_int_disable_post_isr_release(struct qed_dev *cdev);

#define QED_CAU_DEF_RX_TIMER_RES 0
#define QED_CAU_DEF_TX_TIMER_RES 0
@@ -366,9 +370,10 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
 * @param p_hwfn
 * @param p_ptt
 * @param int_mode
 *
 * @return int
 */
void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       enum qed_int_mode int_mode);

/**
+18 −38
Original line number Diff line number Diff line
@@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
	return rc;
}

static int qed_slowpath_irq_req(struct qed_dev *cdev)
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	int i = 0, rc = 0;
	struct qed_dev *cdev = hwfn->cdev;
	int rc = 0;
	u8 id;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		/* Request all the slowpath MSI-X vectors */
		for (i = 0; i < cdev->num_hwfns; i++) {
			snprintf(cdev->hwfns[i].name, NAME_SIZE,
				 "sp-%d-%02x:%02x.%02x",
				 i, cdev->pdev->bus->number,
				 PCI_SLOT(cdev->pdev->devfn),
				 cdev->hwfns[i].abs_pf_id);

			rc = request_irq(cdev->int_params.msix_table[i].vector,
					 qed_msix_sp_int, 0,
					 cdev->hwfns[i].name,
					 cdev->hwfns[i].sp_dpc);
			if (rc)
				break;

			DP_VERBOSE(&cdev->hwfns[i],
				   (NETIF_MSG_INTR | QED_MSG_SP),
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
		if (!rc)
			DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
				   "Requested slowpath MSI-X\n");
		}

		if (i != cdev->num_hwfns) {
			/* Free already request MSI-X vectors */
			for (i--; i >= 0; i--) {
				unsigned int vec =
					cdev->int_params.msix_table[i].vector;
				synchronize_irq(vec);
				free_irq(cdev->int_params.msix_table[i].vector,
					 cdev->hwfns[i].sp_dpc);
			}
		}
	} else {
		unsigned long flags = 0;

@@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
@@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev,
	if (rc)
		goto err1;

	/* Request the slowpath IRQ */
	rc = qed_slowpath_irq_req(cdev);
	if (rc)
		goto err2;

	/* Allocate stream for unzipping */
	rc = qed_alloc_stream_mem(cdev);
	if (rc) {
		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
		goto err3;
		goto err2;
	}

	/* Start the slowpath */
Loading