
Commit 432599d7 authored by David S. Miller

Merge branch 'bnxt_en-fixes'



Michael Chan says:

====================
bnxt_en: Bug fixes.

Miscellaneous small bug fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 49a496c9 4bb6cdce
+21 −7
@@ -1292,8 +1292,6 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 	return TX_CMP_VALID(txcmp, raw_cons);
 }
 
-#define CAG_LEGACY_INT_STATUS	0x2014
-
 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
 {
 	struct bnxt_napi *bnapi = dev_instance;
@@ -1305,7 +1303,7 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance)
 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
 
 	if (!bnxt_has_work(bp, cpr)) {
-		int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS);
+		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
 		/* return if erroneous interrupt */
 		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
 			return IRQ_NONE;
@@ -4527,10 +4525,25 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
 	return rc;
 }
 
+/* Common routine to pre-map certain register block to different GRC window.
+ * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
+ * in PF and 3 windows in VF that can be customized to map in different
+ * register blocks.
+ */
+static void bnxt_preset_reg_win(struct bnxt *bp)
+{
+	if (BNXT_PF(bp)) {
+		/* CAG registers map to GRC window #4 */
+		writel(BNXT_CAG_REG_BASE,
+		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
+	}
+}
+
 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 {
 	int rc = 0;
 
+	bnxt_preset_reg_win(bp);
 	netif_carrier_off(bp->dev);
 	if (irq_re_init) {
 		rc = bnxt_setup_int_mode(bp);
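
A note on the window arithmetic in bnxt_preset_reg_win(), reasoning not stated in the patch itself: assuming each GRC window spans 4K of BAR0, window #N covers BAR0 offsets N * 0x1000 through N * 0x1000 + 0xfff, and the per-window base registers are consecutive 32-bit registers starting at BNXT_GRCPF_REG_WINDOW_BASE_OUT, then window #4's base register sits at offset +12 and writing BNXT_CAG_REG_BASE (0x300000) there exposes the CAG block at BAR0 0x4000..0x4fff. That is consistent with the new BNXT_CAG_REG_LEGACY_INT_STATUS value of 0x4014 replacing the old hard-coded 0x2014 (which, under the same assumptions, corresponds to window #2). A standalone sketch of the arithmetic, not driver code:

#include <stdint.h>
#include <stdio.h>

#define GRC_WINDOW_SIZE		0x1000		/* assumed: each window maps 4K of BAR0 */
#define BNXT_CAG_REG_BASE	0x300000	/* from the patch */

/* Where a GRC register shows up in BAR0 once its block is mapped into a
 * window, assuming window #N covers BAR0 offsets N * 0x1000 onward.
 */
static unsigned int grc_to_bar0(uint32_t grc_reg, uint32_t win_base, unsigned int window)
{
	return window * GRC_WINDOW_SIZE + (grc_reg - win_base);
}

int main(void)
{
	/* Legacy interrupt status register, assumed to sit at CAG base + 0x14 */
	unsigned int off = grc_to_bar0(BNXT_CAG_REG_BASE + 0x14, BNXT_CAG_REG_BASE, 4);

	printf("BAR0 offset: 0x%x\n", off);	/* prints 0x4014 */
	return 0;
}
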
@@ -5294,7 +5307,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	struct bnxt_ntuple_filter *fltr, *new_fltr;
 	struct flow_keys *fkeys;
 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
-	int rc = 0, idx;
+	int rc = 0, idx, bit_id;
 	struct hlist_head *head;
 
 	if (skb->encapsulation)
@@ -5332,14 +5345,15 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	rcu_read_unlock();
 
 	spin_lock_bh(&bp->ntp_fltr_lock);
-	new_fltr->sw_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
 					 BNXT_NTP_FLTR_MAX_FLTR, 0);
-	if (new_fltr->sw_id < 0) {
+	if (bit_id < 0) {
 		spin_unlock_bh(&bp->ntp_fltr_lock);
 		rc = -ENOMEM;
 		goto err_free;
 	}
 
+	new_fltr->sw_id = (u16)bit_id;
 	new_fltr->flow_id = flow_id;
 	new_fltr->rxq = rxq_index;
 	hlist_add_head_rcu(&new_fltr->hash, head);
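
The last hunk above is a sign-truncation fix: bitmap_find_free_region() returns a negative errno on failure, but new_fltr->sw_id is evidently an unsigned 16-bit field (note the (u16) cast), so storing the return value straight into it and then testing it with < 0 could never catch the failure. The fix keeps the result in a signed local, checks it, and only then assigns it. A minimal standalone illustration of the truncation, plain C rather than driver code:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bit_id = -ENOMEM;		/* what a failed bitmap allocation returns */
	uint16_t sw_id = bit_id;	/* mimics storing it in the u16 sw_id field */

	/* Buggy pattern: by the time the check runs, the value has already been
	 * truncated to an unsigned 16-bit quantity, so it is never negative
	 * (compilers typically warn that this comparison is always false).
	 */
	if (sw_id < 0)
		printf("error detected\n");			/* never reached */
	else
		printf("sw_id = %u, error lost\n", (unsigned)sw_id);

	/* Fixed pattern: test the signed value first, then assign. */
	if (bit_id < 0)
		printf("error detected: %d\n", bit_id);
	return 0;
}
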
+16 −10
@@ -166,9 +166,11 @@ struct rx_cmp {
 #define RX_CMP_HASH_VALID(rxcmp)				\
 	((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
 
+#define RSS_PROFILE_ID_MASK	0x1f
+
 #define RX_CMP_HASH_TYPE(rxcmp)					\
-	((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
-	 RX_CMP_RSS_HASH_TYPE_SHIFT)
+	(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+	  RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
 
 struct rx_cmp_ext {
 	__le32 rx_cmp_flags2;
@@ -282,9 +284,9 @@ struct rx_tpa_start_cmp {
 	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
 
 #define TPA_START_HASH_TYPE(rx_tpa_start)				\
-	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
-	  RX_TPA_START_CMP_RSS_HASH_TYPE) >>				\
-	 RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT)
+	(((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
+	   RX_TPA_START_CMP_RSS_HASH_TYPE) >>				\
+	  RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
 
 #define TPA_START_AGG_ID(rx_tpa_start)					\
 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
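
Both hash-type macros gain a final & RSS_PROFILE_ID_MASK. The implication is that the completion-ring field the driver shifts out is wider than the 5-bit RSS profile ID it wants (the exact layout is firmware-defined; the 5-bit width is simply what the new 0x1f mask encodes), so without the mask stray upper bits would leak into the value the driver compares against its known profile IDs. An illustrative extraction with made-up masks and shifts, not the real register layout:

#include <stdio.h>

#define RSS_PROFILE_ID_MASK	0x1f		/* low 5 bits, from the patch */

/* Made-up layout: a 9-bit hash-type field starting at bit 7, of which only
 * the low 5 bits are the profile ID.  The real definitions are the
 * RX_CMP_* / RX_TPA_START_CMP_* masks and shifts in bnxt.h.
 */
#define HASH_TYPE_MASK		(0x1ffu << 7)
#define HASH_TYPE_SHIFT		7

int main(void)
{
	unsigned int misc = 0x29u << HASH_TYPE_SHIFT;	/* field value 0x29 */
	unsigned int raw = (misc & HASH_TYPE_MASK) >> HASH_TYPE_SHIFT;

	printf("raw field:  %u\n", raw);			/* 41 */
	printf("profile id: %u\n", raw & RSS_PROFILE_ID_MASK);	/* 9 */
	return 0;
}
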
@@ -839,6 +841,10 @@ struct bnxt_queue_info {
 	u8	queue_profile;
 };
 
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT	0x400
+#define BNXT_CAG_REG_LEGACY_INT_STATUS	0x4014
+#define BNXT_CAG_REG_BASE		0x300000
+
 struct bnxt {
 	void __iomem		*bar0;
 	void __iomem		*bar1;
@@ -959,11 +965,11 @@ struct bnxt {
 #define BNXT_RX_MASK_SP_EVENT		0
 #define BNXT_RX_NTP_FLTR_SP_EVENT	1
 #define BNXT_LINK_CHNG_SP_EVENT		2
-#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT	4
-#define BNXT_VXLAN_ADD_PORT_SP_EVENT	8
-#define BNXT_VXLAN_DEL_PORT_SP_EVENT	16
-#define BNXT_RESET_TASK_SP_EVENT	32
-#define BNXT_RST_RING_SP_EVENT		64
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT	3
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT	4
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT	5
+#define BNXT_RESET_TASK_SP_EVENT	6
+#define BNXT_RST_RING_SP_EVENT		7
 
 	struct bnxt_pf_info	pf;
 #ifdef CONFIG_BNXT_SRIOV
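
The renumbering above turns the *_SP_EVENT constants into consecutive bit positions (3, 4, 5, 6, 7) in place of the mask-style values (4, 8, 16, 32, 64) they had before. The first three entries were already bit numbers, which suggests these constants feed the atomic bit helpers (set_bit(), test_and_clear_bit() and friends take a bit number, not a mask); with the old values, BNXT_RST_RING_SP_EVENT would have asked for bit 64, which does not fit in a single unsigned long on any architecture. A small userspace illustration of the two conventions, assuming, as the 0/1/2 entries imply, that bp->sp_event is an unsigned long bitmap:

#include <limits.h>
#include <stdio.h>

#define BNXT_RESET_TASK_SP_EVENT	6	/* new style: a bit number */
#define OLD_RESET_TASK_SP_EVENT		32	/* old style: reads like a mask */
#define OLD_RST_RING_SP_EVENT		64

int main(void)
{
	unsigned long sp_event = 0;
	unsigned int width = sizeof(sp_event) * CHAR_BIT;

	/* Bit-number convention: the mask is derived by shifting. */
	sp_event |= 1UL << BNXT_RESET_TASK_SP_EVENT;
	printf("sp_event = %#lx (bit %d set)\n", sp_event, BNXT_RESET_TASK_SP_EVENT);

	/* Feeding the old mask-style values where a bit number is expected
	 * targets far higher bits - and 64 is not a valid bit index even in
	 * a 64-bit word.
	 */
	printf("old value 32 as a bit number: %s\n",
	       OLD_RESET_TASK_SP_EVENT < width ? "in range" : "out of range");
	printf("old value 64 as a bit number: %s\n",
	       OLD_RST_RING_SP_EVENT < width ? "in range" : "out of range");
	return 0;
}
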
+27 −13
@@ -258,7 +258,7 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
 	return 0;
 }
 
-static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
 {
 	int i, rc = 0;
 	struct bnxt_pf_info *pf = &bp->pf;
@@ -267,7 +267,7 @@ static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
 
 	mutex_lock(&bp->hwrm_cmd_lock);
-	for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) {
+	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
 		req.vf_id = cpu_to_le16(i);
 		rc = _hwrm_send_message(bp, &req, sizeof(req),
 					HWRM_CMD_TIMEOUT);
@@ -509,7 +509,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 
 err_out2:
 	/* Free the resources reserved for various VF's */
-	bnxt_hwrm_func_vf_resource_free(bp);
+	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
 
 err_out1:
 	bnxt_free_vf_resources(bp);
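
bnxt_hwrm_func_vf_resource_free() now takes the VF count from its caller instead of looping over pf->active_vfs. That matters on the bnxt_sriov_enable() error path above (err_out2): presumably active_vfs is only updated once enabling has fully succeeded, so a cleanup loop keyed on it would free nothing for VFs that had already been handed firmware resources. The same idea drives the bnxt_sriov_disable() change in the next hunk, which sizes the cleanup with pci_num_vf() rather than driver state. A toy illustration of the difference, plain C with invented names, not driver code:

#include <stdio.h>

struct pf_info {
	int first_vf_id;
	int active_vfs;		/* only set once enabling succeeds */
};

/* Hypothetical cleanup helper: releases one firmware resource per VF id. */
static void vf_resource_free(const struct pf_info *pf, int num_vfs)
{
	int i;

	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++)
		printf("  freeing resources for VF id %d\n", i);
}

int main(void)
{
	struct pf_info pf = { .first_vf_id = 16, .active_vfs = 0 };
	int requested_vfs = 4;	/* enabling 4 VFs failed part-way through */

	printf("cleanup keyed on active_vfs:\n");
	vf_resource_free(&pf, pf.active_vfs);	/* frees nothing */

	printf("cleanup keyed on the attempted count:\n");
	vf_resource_free(&pf, requested_vfs);	/* frees VF ids 16..19 */
	return 0;
}
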
@@ -519,13 +519,19 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 
 void bnxt_sriov_disable(struct bnxt *bp)
 {
-	if (!bp->pf.active_vfs)
+	u16 num_vfs = pci_num_vf(bp->pdev);
+
+	if (!num_vfs)
 		return;
 
-	pci_disable_sriov(bp->pdev);
-
-	/* Free the resources reserved for various VF's */
-	bnxt_hwrm_func_vf_resource_free(bp);
+	if (pci_vfs_assigned(bp->pdev)) {
+		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
+			    num_vfs);
+	} else {
+		pci_disable_sriov(bp->pdev);
+		/* Free the HW resources reserved for various VF's */
+		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
+	}
 
 	bnxt_free_vf_resources(bp);
 
@@ -552,17 +558,25 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	}
 	bp->sriov_cfg = true;
 	rtnl_unlock();
-	if (!num_vfs) {
-		bnxt_sriov_disable(bp);
-		return 0;
+
+	if (pci_vfs_assigned(bp->pdev)) {
+		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
+		num_vfs = 0;
+		goto sriov_cfg_exit;
 	}
 
 	/* Check if enabled VFs is same as requested */
-	if (num_vfs == bp->pf.active_vfs)
-		return 0;
+	if (num_vfs && num_vfs == bp->pf.active_vfs)
+		goto sriov_cfg_exit;
+
+	/* if there are previous existing VFs, clean them up */
+	bnxt_sriov_disable(bp);
+	if (!num_vfs)
+		goto sriov_cfg_exit;
 
 	bnxt_sriov_enable(bp, &num_vfs);
 
 sriov_cfg_exit:
 	bp->sriov_cfg = false;
 	wake_up(&bp->sriov_cfg_wait);
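
The reworked bnxt_sriov_configure() sends every outcome, including the "VFs still assigned to VMs" and "nothing to change" cases, through the sriov_cfg_exit label, so bp->sriov_cfg is always cleared and anything sleeping on sriov_cfg_wait gets woken. The earlier return 0 exits skipped both steps, which could presumably leave the flag stuck and a waiter blocked. The shape of the fix is the classic single-exit goto pattern; a stripped-down sketch with invented names, not driver code:

#include <stdbool.h>
#include <stdio.h>

static bool cfg_in_progress;

/* Every branch that leaves the function funnels through one label, so the
 * "configuration in progress" flag can never be left set by accident.
 */
static int configure(int requested, int active, bool vfs_assigned)
{
	cfg_in_progress = true;

	if (vfs_assigned) {
		printf("VFs assigned to VMs, refusing to reconfigure\n");
		requested = 0;
		goto cfg_exit;
	}

	if (requested && requested == active)
		goto cfg_exit;		/* nothing to change */

	printf("reconfiguring from %d to %d VFs\n", active, requested);

cfg_exit:
	cfg_in_progress = false;	/* never skipped, unlike an early return */
	return requested;
}

int main(void)
{
	configure(4, 4, false);
	configure(8, 4, true);
	printf("cfg_in_progress = %d\n", cfg_in_progress);
	return 0;
}
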