
Commit 26a4d063 authored by David S. Miller

Merge branch 'bnxt_en-next'



Michael Chan says:

====================
bnxt_en: Patches for net-next.

Mainly clean-ups, optimizations, and updating to the latest firmware
interface spec.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents f4568828 fbc9a523
drivers/net/ethernet/broadcom/bnxt/bnxt.c  +177 −153
@@ -72,8 +72,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_TX_PUSH_THRESH 92

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57304_VF,
@@ -84,17 +86,21 @@ enum board_idx {
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
#ifdef CONFIG_BNXT_SRIOV
@@ -856,8 +862,13 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
	struct tcphdr *th;
	int payload_off, tcp_opt_len = 0;
	int len, nw_off;
	u16 segs;

	NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
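
For context: TPA_END_TPA_SEGS() reports how many packets the hardware aggregated, and when it is 1 there is nothing for GRO to coalesce, so the new early return skips the GSO metadata setup entirely. A minimal sketch of the pattern with toy types (stand-ins, not the driver's real structures):

    /* toy_gro.c -- illustrative only */
    #include <stdio.h>

    struct toy_skb {
        int gso_segs;   /* analogous to NAPI_GRO_CB(skb)->count */
        int gso_size;   /* analogous to skb_shinfo(skb)->gso_size */
    };

    static struct toy_skb *toy_gro_setup(struct toy_skb *skb, int segs,
                                         int seg_len)
    {
        if (segs == 1)          /* nothing was aggregated: skip GSO setup */
            return skb;

        skb->gso_segs = segs;
        skb->gso_size = seg_len;
        return skb;
    }

    int main(void)
    {
        struct toy_skb skb = { 0 };

        toy_gro_setup(&skb, 1, 1448);   /* single segment: left untouched */
        printf("gso_segs=%d gso_size=%d\n", skb.gso_segs, skb.gso_size);
        return 0;
    }
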
@@ -1187,9 +1198,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	skb_record_rx_queue(skb, bnapi->index);
	skb_mark_napi_id(skb, &bnapi->napi);
@@ -1969,11 +1982,12 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
	rxr->rx_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	ring = &rxr->rx_agg_ring_struct;

	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

@@ -1989,7 +2003,6 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (bp->flags & BNXT_FLAG_TPA) {
		if (rxr->rx_tpa) {
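
The reshuffle above is subtle but deliberate: the aggregation ring's fw_ring_id is now set to INVALID_HW_RING_ID before the early return taken when aggregation rings are disabled, so the free path later in this file (which skips any ring whose id is still INVALID_HW_RING_ID) can loop over all rings unconditionally. A toy model of that invariant, with made-up names:

    #include <assert.h>
    #include <stdbool.h>

    #define INVALID_HW_RING_ID 0xffff

    struct toy_ring { unsigned int fw_ring_id; };

    static void toy_init(struct toy_ring *agg, bool agg_enabled)
    {
        agg->fw_ring_id = INVALID_HW_RING_ID;   /* always mark "never allocated" */
        if (!agg_enabled)
            return;                             /* early return is now safe */
        agg->fw_ring_id = 7;                    /* pretend firmware handed out id 7 */
    }

    static void toy_free(struct toy_ring *agg)
    {
        if (agg->fw_ring_id == INVALID_HW_RING_ID)
            return;                             /* nothing to tell firmware about */
        /* a real driver would send a ring-free command here */
        agg->fw_ring_id = INVALID_HW_RING_ID;
    }

    int main(void)
    {
        struct toy_ring agg = { .fw_ring_id = 3 };  /* stale garbage */

        toy_init(&agg, false);
        toy_free(&agg);         /* correctly a no-op */
        assert(agg.fw_ring_id == INVALID_HW_RING_ID);
        return 0;
    }
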
@@ -2710,6 +2723,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	u32 rc = 0;
@@ -2772,7 +2793,7 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
@@ -2805,7 +2826,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
@@ -2824,7 +2845,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ipaddr_type = 4;
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	req.src_ipaddr[0] = keys->addrs.v4addrs.src;
@@ -2837,7 +2858,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
@@ -2857,10 +2878,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
				CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
@@ -2930,7 +2951,8 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* Number of segs are log2 units, and first packet is not
		 * included as part of this units.
@@ -2948,6 +2970,8 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
		segs = ilog2(nsegs);
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
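
The log2 encoding above is easy to misread, so a worked example may help: per the comment, the field counts segments in log2 units, excluding the first packet, so sending ilog2(32) = 5 asks the firmware to aggregate at most 2^5 = 32 segments on top of the first. A small sketch of the conversion (toy_ilog2 stands in for the kernel's ilog2()):

    #include <stdio.h>

    /* integer log2, matching the kernel's ilog2() for the values used here */
    static unsigned int toy_ilog2(unsigned int v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned int nsegs = 32;    /* example MTU-derived segment budget */
        unsigned int field = toy_ilog2(nsegs);

        printf("nsegs=%u -> log2 field=%u (up to 2^%u segs after the first)\n",
               nsegs, field, field);
        return 0;
    }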

@@ -3293,14 +3317,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;

	if (bp->cp_nr_rings) {
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_CMPL, i,
		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
					      INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
@@ -3308,32 +3330,26 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
	}
	}

	if (bp->tx_nr_rings) {
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_TX, i,
		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, i,
					      fw_stats_ctx);
		if (rc)
			goto err_out;
		txr->tx_doorbell = bp->bar1 + i * 0x80;
	}
	}

	if (bp->rx_nr_rings) {
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_RX, i,
		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, i,
					      INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
@@ -3341,7 +3357,6 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
	}
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -3408,14 +3423,13 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
	return 0;
}

static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	int i, rc = 0;
	int i;

	if (!bp->bnapi)
		return 0;
		return;

	if (bp->tx_nr_rings) {
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
@@ -3423,17 +3437,14 @@ static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
		u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
				hwrm_ring_free_send_msg(
					bp, ring,
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}
	}

	if (bp->rx_nr_rings) {
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
@@ -3441,58 +3452,44 @@ static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
		u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
				hwrm_ring_free_send_msg(
					bp, ring,
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
				bp->grp_info[i].rx_fw_ring_id =
					INVALID_HW_RING_ID;
			}
			bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	if (bp->rx_agg_nr_pages) {
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
				hwrm_ring_free_send_msg(
					bp, ring,
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
				bp->grp_info[i].agg_fw_ring_id =
					INVALID_HW_RING_ID;
			}
			bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	if (bp->cp_nr_rings) {
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
				hwrm_ring_free_send_msg(
					bp, ring,
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_CMPL,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
				bp->grp_info[i].cp_fw_ring_id =
							INVALID_HW_RING_ID;
			}
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	return rc;
}
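
The pattern being cleaned up throughout this function (and in bnxt_hwrm_ring_alloc() above) is the redundant count guard around a counted loop: a for loop with a zero bound never runs its body, so the guard buys nothing, and once no path could fail the int return became dead weight too, hence the switch to void. The equivalence in miniature:

    #include <stdio.h>

    int main(void)
    {
        int n = 0;

        if (n) {                            /* old shape: guard + loop */
            for (int i = 0; i < n; i++)
                printf("%d\n", i);
        }
        for (int i = 0; i < n; i++)         /* new shape: same behavior */
            printf("%d\n", i);
        return 0;
    }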

int bnxt_hwrm_set_coal(struct bnxt *bp)
@@ -3604,7 +3601,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
	return 0;
}

static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
@@ -3628,9 +3625,10 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		pf->max_pf_tx_rings = pf->max_tx_rings;
		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		pf->max_pf_rx_rings = pf->max_rx_rings;
		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!pf->max_hw_ring_grps)
			pf->max_hw_ring_grps = pf->max_tx_rings;
		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		pf->max_vnics = le16_to_cpu(resp->max_vnics);
		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
@@ -3658,6 +3656,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!vf->max_hw_ring_grps)
			vf->max_hw_ring_grps = vf->max_tx_rings;
		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		vf->max_vnics = le16_to_cpu(resp->max_vnics);
		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
@@ -3734,14 +3735,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
	    req.hwrm_intf_min != resp->hwrm_intf_min ||
	    req.hwrm_intf_upd != resp->hwrm_intf_upd) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
	if (resp->hwrm_intf_maj < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
			    resp->hwrm_intf_upd, req.hwrm_intf_maj,
			    req.hwrm_intf_min, req.hwrm_intf_upd);
		netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
			    resp->hwrm_intf_upd);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}
	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
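
The handshake is also relaxed here: rather than warning whenever the driver's compiled-in HWRM interface triple differs from the firmware's, the driver now accepts anything at major version 1 or newer and only complains about pre-1.0.0 firmware. A minimal illustration of the new acceptance test (made-up version values):

    #include <stdio.h>

    int main(void)
    {
        int maj = 0, min = 9, upd = 3;  /* firmware-reported interface version */

        if (maj < 1)    /* only the major number gates compatibility now */
            printf("HWRM interface %d.%d.%d is older than 1.0.0\n",
                   maj, min, upd);
        return 0;
    }
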
@@ -3944,8 +3942,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
	}
	bp->vnic_info[0].uc_filter_count = 1;

	bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
				   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
	bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
		bp->vnic_info[0].rx_mask |=
@@ -4026,10 +4023,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (bp->rx_nr_rings)
	if (bp->flags & BNXT_FLAG_RFS)
		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
	if (!dev->rx_cpu_rmap)
		rc = -ENOMEM;
#endif

	return rc;
@@ -4353,7 +4348,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
	link_info->auto_mode = resp->auto_mode;
	link_info->auto_pause_setting = resp->auto_pause;
	link_info->force_pause_setting = resp->force_pause;
	link_info->duplex_setting = resp->duplex_setting;
	link_info->duplex_setting = resp->duplex;
	if (link_info->phy_link_status == BNXT_LINK_LINK)
		link_info->link_speed = le16_to_cpu(resp->link_speed);
	else
@@ -4930,9 +4925,32 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
	return rc;
}

static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	struct bnxt_pf_info *pf = &bp->pf;
	int vnics;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
		return false;

	vnics = 1 + bp->rx_nr_rings;
	if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
		return false;

	return true;
#else
	return false;
#endif
}

static netdev_features_t bnxt_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!bnxt_rfs_capable(bp))
		features &= ~NETIF_F_NTUPLE;
	return features;
}
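
bnxt_fix_features() plugs into the stack's feature negotiation: whenever the core recomputes features (the bnxt_ethtool.c hunk further down adds a netdev_update_features() call after a channel-count change), the driver gets a chance to mask off NETIF_F_NTUPLE if it no longer has a spare VNIC and RSS context per RX ring. A toy model of that contract (simplified flags, not the real netdev API):

    #include <stdio.h>

    #define F_NTUPLE  (1u << 0)
    #define F_RXCSUM  (1u << 1)

    struct toy_dev { int rx_rings, max_vnics; };

    /* mirrors bnxt_rfs_capable(): one VNIC per RX ring plus the default */
    static int toy_rfs_capable(const struct toy_dev *d)
    {
        return 1 + d->rx_rings <= d->max_vnics;
    }

    static unsigned int toy_fix_features(const struct toy_dev *d,
                                         unsigned int wanted)
    {
        if (!toy_rfs_capable(d))
            wanted &= ~F_NTUPLE;    /* quietly drop what we cannot honor */
        return wanted;
    }

    int main(void)
    {
        struct toy_dev d = { .rx_rings = 8, .max_vnics = 4 };

        printf("granted=%#x\n", toy_fix_features(&d, F_NTUPLE | F_RXCSUM));
        return 0;
    }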

@@ -4973,7 +4991,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)

		bp->flags = flags;

		if (!netif_running(dev)) {
		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
			if (update_tpa)
				bnxt_set_ring_params(bp);
			return rc;
@@ -5549,6 +5567,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
	cancel_work_sync(&bp->sp_task);
	bp->sp_event = 0;

	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	pci_iounmap(pdev, bp->bar2);
	pci_iounmap(pdev, bp->bar1);
@@ -5610,25 +5629,28 @@ static int bnxt_get_max_irq(struct pci_dev *pdev)

void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
{
	int max_rings = 0;
	int max_rings = 0, max_ring_grps = 0;

	if (BNXT_PF(bp)) {
		*max_tx = bp->pf.max_pf_tx_rings;
		*max_rx = bp->pf.max_pf_rx_rings;
		*max_tx = bp->pf.max_tx_rings;
		*max_rx = bp->pf.max_rx_rings;
		max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
		max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
		max_ring_grps = bp->pf.max_hw_ring_grps;
	} else {
#ifdef CONFIG_BNXT_SRIOV
		*max_tx = bp->vf.max_tx_rings;
		*max_rx = bp->vf.max_rx_rings;
		max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
		max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
		max_ring_grps = bp->vf.max_hw_ring_grps;
#endif
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;

	*max_rx = min_t(int, *max_rx, max_rings);
	*max_rx = min_t(int, *max_rx, max_ring_grps);
	*max_tx = min_t(int, *max_tx, max_rings);
}
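
With the new clamp, hardware ring groups join IRQs, completion rings, and stat contexts as limits on the usable RX channel count. A worked example with made-up capability numbers:

    #include <stdio.h>

    static int min_i(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int max_rx = 64, max_irqs = 16, max_cp = 20, max_stat = 12;
        int max_ring_grps = 32, agg_rings = 1;

        int max_rings = min_i(min_i(max_irqs, max_cp), max_stat);  /* 12 */
        if (agg_rings)
            max_rx >>= 1;   /* each RX channel uses two HW rings: 32 */
        max_rx = min_i(max_rx, max_rings);       /* 12 */
        max_rx = min_i(max_rx, max_ring_grps);   /* ring groups now count too */
        printf("usable rx channels: %d\n", max_rx);
        return 0;
    }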

@@ -5652,11 +5674,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap) {
	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;
		if (BNXT_PF(bp))
			bp->flags |= BNXT_FLAG_RFS;
	}

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
@@ -5675,9 +5694,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;

	if (bp->flags & BNXT_FLAG_RFS)
		dev->hw_features |= NETIF_F_NTUPLE;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
@@ -5736,6 +5752,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (BNXT_PF(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

drivers/net/ethernet/broadcom/bnxt/bnxt.h  +7 −6
@@ -11,11 +11,11 @@
#define BNXT_H

#define DRV_MODULE_NAME		"bnxt_en"
#define DRV_MODULE_VERSION	"0.1.24"
#define DRV_MODULE_VERSION	"1.0.0"

#define DRV_VER_MAJ	0
#define DRV_VER_MIN	1
#define DRV_VER_UPD	24
#define DRV_VER_MAJ	1
#define DRV_VER_MIN	0
#define DRV_VER_UPD	0

struct tx_bd {
	__le32 tx_bd_len_flags_type;
@@ -695,6 +695,7 @@ struct bnxt_vf_info {
	u16	max_cp_rings;
	u16	max_tx_rings;
	u16	max_rx_rings;
	u16	max_hw_ring_grps;
	u16	max_l2_ctxs;
	u16	max_irqs;
	u16	max_vnics;
@@ -722,9 +723,8 @@ struct bnxt_pf_info {
	u16	max_rsscos_ctxs;
	u16	max_cp_rings;
	u16	max_tx_rings; /* HW assigned max tx rings for this PF */
	u16	max_pf_tx_rings; /* runtime max tx rings owned by PF */
	u16	max_rx_rings; /* HW assigned max rx rings for this PF */
	u16	max_pf_rx_rings; /* runtime max rx rings owned by PF */
	u16	max_hw_ring_grps;
	u16	max_irqs;
	u16	max_l2_ctxs;
	u16	max_vnics;
@@ -1084,6 +1084,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
int bnxt_open_nic(struct bnxt *, bool, bool);
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  +5 −0
@@ -266,6 +266,8 @@ static int bnxt_set_channels(struct net_device *dev,
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bp->num_stat_ctxs = bp->cp_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
@@ -818,6 +820,9 @@ static int bnxt_flash_firmware(struct net_device *dev,
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
+320 −545 (diff collapsed: preview size limit exceeded)

drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c  +35 −27
@@ -64,7 +64,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.vf_id = cpu_to_le16(vf->fw_fid);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
@@ -128,7 +128,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.vf_id = cpu_to_le16(vf->fw_fid);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
@@ -159,7 +159,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.vf_id = cpu_to_le16(vf->fw_fid);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
@@ -198,7 +198,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.vf_id = cpu_to_le16(vf->fw_fid);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
@@ -363,10 +363,11 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
}

/* only call by PF to reserve resources for VF */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;

@@ -378,18 +379,18 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
	 * be removed once new HWRM provides HW ring groups capability in
	 * hwrm_func_qcap.
	 */
	vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
	/* TODO: restore this logic below once the WA above is removed */
	/* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
	vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
			      *num_vfs;
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
			      *num_vfs;
	vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
@@ -399,7 +400,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS);
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
@@ -409,6 +411,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

@@ -417,22 +420,24 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < *num_vfs; i++) {
		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
	for (i = 0; i < num_vfs; i++) {
		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->pf.active_vfs = i + 1;
		bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
		if (bp->flags & BNXT_FLAG_AGG_RINGS)
			bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
		else
			bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
		pf->max_tx_rings -= vf_tx_rings * num_vfs;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}
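
Two behavioral points in this hunk are worth spelling out: num_vfs is now passed by value (the function never modified it through the pointer anyway), and on success the per-VF allocations are subtracted from the PF's own maxima instead of being tracked in separate max_pf_* fields, with bnxt_sriov_disable() reclaiming everything via a fresh bnxt_hwrm_func_qcaps() query. The carve-out math with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        int max_tx = 128, tx_used = 8;  /* PF cap, and rings the PF keeps */
        int num_vfs = 4;

        int vf_tx = (max_tx - tx_used) / num_vfs;   /* 30 TX rings per VF */
        max_tx -= vf_tx * num_vfs;                  /* PF max drops to 8 */
        printf("per-VF tx rings=%d, PF max after carve-out=%d\n",
               vf_tx, max_tx);
        return 0;
    }
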
@@ -492,7 +497,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, num_vfs);
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

@@ -536,8 +541,8 @@ void bnxt_sriov_disable(struct bnxt *bp)
	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
	bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
	/* Reclaim all resources for the PF. */
	bnxt_hwrm_func_qcaps(bp);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -595,6 +600,7 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
@@ -629,6 +635,7 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
@@ -660,6 +667,7 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);