
Commit dfa15378 authored by David S. Miller

Merge branch 'bnxt_en-NS2-Nitro'



Michael Chan says:

====================
bnxt_en: Add support for NS2 Nitro.

This series adds support for the embedded version of the Ethernet
controller (Nitro) in the North Star 2 SoC.  A number of features are not
supported, and Nitro A0 requires a software workaround for a hardware rx
bug.  Please review.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents cb924052 fa853dda
+214 −40
@@ -73,6 +73,7 @@ enum board_idx {
 	BCM57301,
 	BCM57302,
 	BCM57304,
+	BCM58700,
 	BCM57311,
 	BCM57312,
 	BCM57402,
@@ -98,6 +99,7 @@ static const struct {
 	{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
 	{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
 	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	{ "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" },
 	{ "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
 	{ "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
 	{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
@@ -120,6 +122,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
 	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
 	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
 	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
@@ -1668,6 +1671,76 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 	return rx_pkts;
 }
 
+static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+{
+	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+	struct tx_cmp *txcmp;
+	struct rx_cmp_ext *rxcmp1;
+	u32 cp_cons, tmp_raw_cons;
+	u32 raw_cons = cpr->cp_raw_cons;
+	u32 rx_pkts = 0;
+	bool agg_event = false;
+
+	while (1) {
+		int rc;
+
+		cp_cons = RING_CMP(raw_cons);
+		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+		if (!TX_CMP_VALID(txcmp, raw_cons))
+			break;
+
+		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
+			cp_cons = RING_CMP(tmp_raw_cons);
+			rxcmp1 = (struct rx_cmp_ext *)
+			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+				break;
+
+			/* force an error to recycle the buffer */
+			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+
+			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+			if (likely(rc == -EIO))
+				rx_pkts++;
+			else if (rc == -EBUSY)	/* partial completion */
+				break;
+		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
+				    CMPL_BASE_TYPE_HWRM_DONE)) {
+			bnxt_hwrm_handler(bp, txcmp);
+		} else {
+			netdev_err(bp->dev,
+				   "Invalid completion received on special ring\n");
+		}
+		raw_cons = NEXT_RAW_CMP(raw_cons);
+
+		if (rx_pkts == budget)
+			break;
+	}
+
+	cpr->cp_raw_cons = raw_cons;
+	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+
+	if (agg_event) {
+		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
+		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
+	}
+
+	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
+		napi_complete(napi);
+		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+	}
+	return rx_pkts;
+}
+
 static int bnxt_poll(struct napi_struct *napi, int budget)
 {
 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
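
The bnxt_poll_nitroa0() handler added above is the software workaround referred to in the cover letter: every completion that arrives on the extra "special" ring is stamped with RX_CMPL_ERRORS_CRC_ERROR before being handed to bnxt_rx_pkt(), so the normal rx path sees an error, recycles the buffer, and never delivers the packet. Below is a minimal userspace sketch of that recycle-by-forced-error idea; the structures, the -5 return value standing in for -EIO, and the consume_one() helper are simplifications for illustration, not the driver's actual code.

/* Sketch: consume completions from a "special" ring by forcing an error
 * bit so the consumer drops the payload and recycles the buffer.
 * All types and names here are illustrative, not taken from bnxt. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE  8
#define ERR_CRC    0x1u

struct cmpl_entry {
	bool valid;            /* producer has written this entry */
	unsigned int errors;   /* error bits, like rx_cmp_cfa_code_errors_v2 */
};

/* Stand-in for bnxt_rx_pkt(): returns an -EIO-like value (-5) when the
 * completion carries an error, meaning "buffer recycled, nothing delivered". */
static int consume_one(struct cmpl_entry *c)
{
	return (c->errors & ERR_CRC) ? -5 : 0;
}

static int poll_special_ring(struct cmpl_entry *ring, int budget)
{
	int polled = 0;

	for (int i = 0; i < RING_SIZE && polled < budget; i++) {
		if (!ring[i].valid)
			break;
		/* force an error to recycle the buffer, as the driver does */
		ring[i].errors |= ERR_CRC;
		if (consume_one(&ring[i]) == -5)
			polled++;
		ring[i].valid = false;
	}
	return polled;
}

int main(void)
{
	struct cmpl_entry ring[RING_SIZE] = {
		{ true, 0 }, { true, 0 }, { true, 0 },
	};

	printf("recycled %d buffers\n", poll_special_ring(ring, 64));
	return 0;
}
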
@@ -2340,6 +2413,9 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
 		num_vnics += bp->rx_nr_rings;
 #endif
 
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+		num_vnics++;
+
 	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
 				GFP_KERNEL);
 	if (!bp->vnic_info)
@@ -2357,7 +2433,8 @@ static void bnxt_init_vnics(struct bnxt *bp)
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
-		vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+		vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
+		vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
 
 		if (bp->vnic_info[i].rss_hash_key) {
@@ -2661,7 +2738,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
 	}
 
-	if (BNXT_PF(bp)) {
+	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
 		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
 					 sizeof(struct tx_port_stats) + 1024;
 
@@ -3200,8 +3277,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
 	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
-	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
-				CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+		req.flags |=
+			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
 	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
 	req.enables =
 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
@@ -3308,7 +3387,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 	struct hwrm_vnic_rss_cfg_input req = {0};
 
-	if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+	if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
 		return 0;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
@@ -3320,10 +3399,14 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 
 		req.hash_type = cpu_to_le32(vnic->hash_type);
 
-		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
-			max_rings = bp->rx_nr_rings;
-		else
+		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
+			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+				max_rings = bp->rx_nr_rings - 1;
+			else
+				max_rings = bp->rx_nr_rings;
+		} else {
 			max_rings = 1;
+		}
 
 		/* Fill the RSS indirection table with ring group ids */
 		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
@@ -3336,7 +3419,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 		req.hash_key_tbl_addr =
 			cpu_to_le64(vnic->rss_hash_key_dma_addr);
 	}
-	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
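The max_rings adjustment above keeps the last rx ring out of the RSS spread on Nitro A0, since that ring is reserved for the special vnic. The indirection table itself is filled by cycling a ring-group index over the usable rings, as the context line "for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++)" suggests. A standalone sketch of that fill follows; TABLE_SIZE stands in for HW_HASH_INDEX_SIZE, plain integers stand in for firmware ring group ids, and the wrap-around detail is an assumption based on the loop header shown here.

/* Sketch: fill an RSS indirection table by cycling over the usable rings.
 * TABLE_SIZE and the id values are placeholders, not driver constants. */
#include <stdio.h>

#define TABLE_SIZE 16

static void fill_indir_table(unsigned short *tbl, int rx_rings, int nitro_a0)
{
	/* On Nitro A0 the last rx ring is reserved, so spread over one less. */
	int max_rings = nitro_a0 ? rx_rings - 1 : rx_rings;

	for (int i = 0, j = 0; i < TABLE_SIZE; i++, j++) {
		if (j == max_rings)
			j = 0;
		tbl[i] = (unsigned short)j;   /* ring group id for entry i */
	}
}

int main(void)
{
	unsigned short tbl[TABLE_SIZE];

	fill_indir_table(tbl, 4, 1);          /* 4 rx rings, Nitro A0 */
	for (int i = 0; i < TABLE_SIZE; i++)
		printf("%u ", tbl[i]);
	printf("\n");
	return 0;
}
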
@@ -3359,32 +3442,35 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
-static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+					u16 ctx_idx)
 {
 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
 	req.rss_cos_lb_ctx_id =
-		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
 
 	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
 }
 
 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
 {
-	int i;
+	int i, j;
 
 	for (i = 0; i < bp->nr_vnics; i++) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
-		if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
-			bnxt_hwrm_vnic_ctx_free_one(bp, i);
+		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
+			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
+				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+		}
 	}
 	bp->rsscos_nr_ctxs = 0;
 }
 
-static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
 {
 	int rc;
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
@@ -3397,7 +3483,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc)
-		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
@@ -3412,16 +3498,31 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 	u16 def_vlan = 0;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+
+	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
 	/* Only RSS support for now TBD: COS & LB */
-	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
-				  VNIC_CFG_REQ_ENABLES_RSS_RULE |
-				  VNIC_CFG_REQ_ENABLES_MRU);
-	req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
-	req.cos_rule = cpu_to_le16(0xffff);
+	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
+		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+					   VNIC_CFG_REQ_ENABLES_MRU);
+	} else {
+		req.rss_rule = cpu_to_le16(0xffff);
+	}
+
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
+	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
+		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
+		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
+	} else {
+		req.cos_rule = cpu_to_le16(0xffff);
+	}
+
 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
 		ring = 0;
 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
 		ring = vnic_id - 1;
+	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+		ring = bp->rx_nr_rings - 1;
 
 	grp_idx = bp->rx_ring[ring].bnapi->index;
 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -3489,7 +3590,8 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
 					bp->grp_info[grp_idx].fw_grp_id;
 	}
 
-	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
+	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
 	if (vnic_id == 0)
 		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
 
@@ -3922,6 +4024,9 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 	if (!bp->bnapi)
 		return 0;
 
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+		return 0;
+
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
 
 	mutex_lock(&bp->hwrm_cmd_lock);
@@ -3950,6 +4055,9 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
 	struct hwrm_stat_ctx_alloc_input req = {0};
 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+		return 0;
+
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
 
 	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
@@ -4163,6 +4271,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
 
 	bp->chip_num = le16_to_cpu(resp->chip_num);
+	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
+	    !resp->chip_metal)
+		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
 
 hwrm_ver_get_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
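
The check added to bnxt_hwrm_ver_get() above is how the driver recognizes the A0 revision: chip number 0x16cd (CHIP_NUM_58700) with both chip_rev and chip_metal reported as zero by firmware. A self-contained sketch of that classification, using stand-in types and an illustrative flag value rather than the real bnxt definitions:

/* Sketch: classify a chip as NS2 Nitro A0 from the version-query response.
 * Field and flag names mirror the diff; the flag value is illustrative. */
#include <stdio.h>

#define CHIP_NUM_58700		0x16cd
#define FLAG_CHIP_NITRO_A0	0x1u

struct ver_resp {
	unsigned short chip_num;
	unsigned char chip_rev;
	unsigned char chip_metal;
};

static unsigned int classify(const struct ver_resp *resp)
{
	unsigned int flags = 0;

	if (resp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		flags |= FLAG_CHIP_NITRO_A0;   /* A0 silicon: enable workarounds */
	return flags;
}

int main(void)
{
	struct ver_resp a0 = { CHIP_NUM_58700, 0, 0 };
	struct ver_resp later = { CHIP_NUM_58700, 1, 0 };

	printf("A0? %u, later rev? %u\n", classify(&a0), classify(&later));
	return 0;
}
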
@@ -4252,7 +4363,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 	int rc;
 
 	/* allocate context for vnic */
-	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
 	if (rc) {
 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
 			   vnic_id, rc);
@@ -4260,6 +4371,16 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 	}
 	bp->rsscos_nr_ctxs++;
 
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
+				   vnic_id, rc);
+			goto vnic_setup_err;
+		}
+		bp->rsscos_nr_ctxs++;
+	}
+
 	/* configure default vnic, ring grp */
 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
 	if (rc) {
@@ -4327,6 +4448,26 @@ static bool bnxt_promisc_ok(struct bnxt *bp)
 	return true;
 }
 
+static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
+{
+	unsigned int rc = 0;
+
+	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+	if (rc) {
+		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
+			   rc);
+		return rc;
+	}
+
+	rc = bnxt_hwrm_vnic_cfg(bp, 1);
+	if (rc) {
+		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
+			   rc);
+		return rc;
+	}
+	return rc;
+}
+
 static int bnxt_cfg_rx_mode(struct bnxt *);
 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
 
@@ -4334,6 +4475,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
 	int rc = 0;
+	unsigned int rx_nr_rings = bp->rx_nr_rings;
 
 	if (irq_re_init) {
 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
@@ -4356,8 +4498,11 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 		goto err_out;
 	}
 
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+		rx_nr_rings--;
+
 	/* default vnic 0 */
-	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
 	if (rc) {
 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
 		goto err_out;
@@ -4414,6 +4559,13 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
 				rc);
 
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+		rc = bnxt_setup_nitroa0_vnic(bp);
+		if (rc)
+			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
+				   rc);
+	}
+
 	if (BNXT_VF(bp)) {
 		bnxt_hwrm_func_qcfg(bp);
 		netdev_update_features(bp->dev);
@@ -4721,14 +4873,23 @@ static void bnxt_del_napi(struct bnxt *bp)
 static void bnxt_init_napi(struct bnxt *bp)
 {
 	int i;
+	unsigned int cp_nr_rings = bp->cp_nr_rings;
 	struct bnxt_napi *bnapi;
 
 	if (bp->flags & BNXT_FLAG_USING_MSIX) {
-		for (i = 0; i < bp->cp_nr_rings; i++) {
+		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+			cp_nr_rings--;
+		for (i = 0; i < cp_nr_rings; i++) {
 			bnapi = bp->bnapi[i];
 			netif_napi_add(bp->dev, &bnapi->napi,
 				       bnxt_poll, 64);
 		}
+		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+			bnapi = bp->bnapi[cp_nr_rings];
+			netif_napi_add(bp->dev, &bnapi->napi,
+				       bnxt_poll_nitroa0, 64);
+			napi_hash_add(&bnapi->napi);
+		}
 	} else {
 		bnapi = bp->bnapi[0];
 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
@@ -5681,7 +5842,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
 	bool update_tpa = false;
 
 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
-	if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+	if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
 		flags |= BNXT_FLAG_GRO;
 	if (features & NETIF_F_LRO)
 		flags |= BNXT_FLAG_LRO;
@@ -6488,7 +6649,10 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
 		max_ring_grps = bp->pf.max_hw_ring_grps;
 	}
-
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
+		*max_cp -= 1;
+		*max_rx -= 2;
+	}
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		*max_rx >>= 1;
 	*max_rx = min_t(int, *max_rx, max_ring_grps);
@@ -6524,6 +6688,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
 	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
 			       bp->tx_nr_rings + bp->rx_nr_rings;
 	bp->num_stat_ctxs = bp->cp_nr_rings;
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+		bp->rx_nr_rings++;
+		bp->cp_nr_rings++;
+	}
 	return rc;
 }
 
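Taken together with the _bnxt_get_max_rings() and bnxt_init_chip() hunks above, the ring accounting for Nitro A0 works out as: trim the advertised maxima (*max_cp -= 1, *max_rx -= 2), grow the default ring counts by one rx and one completion ring to host the special vnic (this hunk), and exclude that ring again when the default vnic is configured (rx_nr_rings - 1 in bnxt_init_chip()). A small arithmetic sketch of that bookkeeping with made-up starting values:

/* Sketch: ring bookkeeping around the Nitro A0 special ring.
 * The starting values are arbitrary examples, not hardware limits. */
#include <stdio.h>

int main(void)
{
	int nitro_a0 = 1;
	int rx_nr_rings = 4;            /* rings chosen for normal rx traffic */
	int cp_nr_rings = 4;

	/* bnxt_set_dflt_rings(): add one ring pair to host the special vnic */
	if (nitro_a0) {
		rx_nr_rings++;
		cp_nr_rings++;
	}

	/* bnxt_init_chip(): the default vnic spans everything but that ring */
	int default_vnic_rings = nitro_a0 ? rx_nr_rings - 1 : rx_nr_rings;

	printf("rx=%d cp=%d default-vnic rings=%d special ring index=%d\n",
	       rx_nr_rings, cp_nr_rings, default_vnic_rings, rx_nr_rings - 1);
	return 0;
}
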
@@ -6550,6 +6718,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct bnxt *bp;
 	int rc, max_irqs;
 
+	if (pdev->device == 0x16cd && pci_is_bridge(pdev))
+		return -ENODEV;
+
 	if (version_printed++ == 0)
 		pr_info("%s", version);
 
@@ -6576,13 +6747,25 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, dev);
 
+	rc = bnxt_alloc_hwrm_resources(bp);
+	if (rc)
+		goto init_err;
+
+	mutex_init(&bp->hwrm_cmd_lock);
+	rc = bnxt_hwrm_ver_get(bp);
+	if (rc)
+		goto init_err;
+
 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 			   NETIF_F_TSO | NETIF_F_TSO6 |
 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
 			   NETIF_F_GSO_IPXIP4 |
 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
-			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+			   NETIF_F_RXCSUM | NETIF_F_GRO;
+
+	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+		dev->hw_features |= NETIF_F_LRO;
 
 	dev->hw_enc_features =
 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
@@ -6601,15 +6784,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef CONFIG_BNXT_SRIOV
 	init_waitqueue_head(&bp->sriov_cfg_wait);
 #endif
-	rc = bnxt_alloc_hwrm_resources(bp);
-	if (rc)
-		goto init_err;
-
-	mutex_init(&bp->hwrm_cmd_lock);
-	rc = bnxt_hwrm_ver_get(bp);
-	if (rc)
-		goto init_err;
-
 	bp->gro_func = bnxt_gro_func_5730x;
 	if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
 		bp->gro_func = bnxt_gro_func_5731x;
@@ -6647,7 +6821,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 	bnxt_set_dflt_rings(bp);
 
-	if (BNXT_PF(bp)) {
+	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
 		dev->hw_features |= NETIF_F_NTUPLE;
 		if (bnxt_rfs_capable(bp)) {
 			bp->flags |= BNXT_FLAG_RFS;
+5 −1
@@ -695,7 +695,8 @@ struct bnxt_ring_grp_info {
 
 struct bnxt_vnic_info {
 	u16		fw_vnic_id; /* returned by Chimp during alloc */
-	u16		fw_rss_cos_lb_ctx;
+#define BNXT_MAX_CTX_PER_VNIC	2
+	u16		fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
 	u16		fw_l2_ctx_id;
 #define BNXT_MAX_UC_ADDRS	4
 	__le64		fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
@@ -893,6 +894,7 @@ struct bnxt {
#define CHIP_NUM_57301		0x16c8
#define CHIP_NUM_57302		0x16c9
#define CHIP_NUM_57304		0x16ca
+#define CHIP_NUM_58700		0x16cd
#define CHIP_NUM_57402		0x16d0
#define CHIP_NUM_57404		0x16d1
#define CHIP_NUM_57406		0x16d2
@@ -954,6 +956,7 @@ struct bnxt {
 	#define BNXT_FLAG_SHARED_RINGS	0x200
 	#define BNXT_FLAG_PORT_STATS	0x400
 	#define BNXT_FLAG_EEE_CAP	0x1000
+	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
 					    BNXT_FLAG_RFS |		\
@@ -963,6 +966,7 @@ struct bnxt {
#define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp)		((bp)->port_partition_type)
#define BNXT_SINGLE_PF(bp)	(BNXT_PF(bp) && !BNXT_NPAR(bp))
+#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
 
 	struct bnxt_napi	**bnapi;
 
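The bnxt.h changes above turn the single RSS/COS context id into a two-entry array (BNXT_MAX_CTX_PER_VNIC) and add the BNXT_FLAG_CHIP_NITRO_A0 flag plus its BNXT_CHIP_TYPE_NITRO_A0() test macro; on A0 the second context is what bnxt_hwrm_vnic_cfg() programs as the COS rule. A compact sketch of the alloc/free pattern implied by those definitions; INVALID_ID, the id counter, and ctx_alloc() are placeholders, not HWRM calls:

/* Sketch: a vnic carrying up to two firmware contexts (RSS rule, plus a
 * COS rule on Nitro A0).  All values here are illustrative. */
#include <stdio.h>

#define MAX_CTX_PER_VNIC 2
#define INVALID_ID       0xffffu

struct vnic {
	unsigned short ctx[MAX_CTX_PER_VNIC];
};

static unsigned short next_id = 1;

static unsigned short ctx_alloc(void)   /* stand-in for the HWRM alloc call */
{
	return next_id++;
}

int main(void)
{
	struct vnic v = { { INVALID_ID, INVALID_ID } };
	int nitro_a0 = 1;

	v.ctx[0] = ctx_alloc();                 /* RSS context, all chips */
	if (nitro_a0)
		v.ctx[1] = ctx_alloc();         /* extra COS context on A0 */

	/* free loop mirrors bnxt_hwrm_vnic_ctx_free(): skip invalid entries */
	for (int j = 0; j < MAX_CTX_PER_VNIC; j++)
		if (v.ctx[j] != INVALID_ID)
			printf("free ctx %u\n", (unsigned)v.ctx[j]);
	return 0;
}
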
+10 −2
@@ -362,11 +362,15 @@ static void bnxt_get_channels(struct net_device *dev,
 	channel->max_other = 0;
 	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
 		channel->combined_count = bp->rx_nr_rings;
+		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+			channel->combined_count--;
 	} else {
-		channel->rx_count = bp->rx_nr_rings;
-		channel->tx_count = bp->tx_nr_rings_per_tc;
+		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+			channel->rx_count = bp->rx_nr_rings;
+			channel->tx_count = bp->tx_nr_rings_per_tc;
+		}
 	}
 }
 
 static int bnxt_set_channels(struct net_device *dev,
 			     struct ethtool_channels *channel)
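
The bnxt_get_channels() change above hides the ring reserved for the A0 workaround from ethtool (combined_count--) and reports no separate rx/tx counts on A0, and the bnxt_set_channels() hunk below rejects requests for separate rx/tx channels on that chip. A small sketch of the reporting side, with an invented struct in place of struct ethtool_channels:

/* Sketch: how reported ethtool channel counts hide the ring reserved for
 * the Nitro A0 workaround.  Struct and field names are illustrative. */
#include <stdio.h>

struct channels {
	int combined_count;
	int rx_count;
	int tx_count;
};

static void get_channels(struct channels *ch, int rx_rings, int tx_per_tc,
			 int shared, int nitro_a0)
{
	ch->combined_count = 0;
	ch->rx_count = 0;
	ch->tx_count = 0;

	if (shared) {
		ch->combined_count = rx_rings;
		if (nitro_a0)
			ch->combined_count--;   /* hide the reserved ring */
	} else if (!nitro_a0) {
		/* A0 supports only combined channels, so report nothing here */
		ch->rx_count = rx_rings;
		ch->tx_count = tx_per_tc;
	}
}

int main(void)
{
	struct channels ch;

	get_channels(&ch, 4, 4, 1, 1);
	printf("combined=%d rx=%d tx=%d\n", ch.combined_count,
	       ch.rx_count, ch.tx_count);
	return 0;
}
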
@@ -387,6 +391,10 @@ static int bnxt_set_channels(struct net_device *dev,
 	    (channel->rx_count || channel->tx_count))
 		return -EINVAL;
 
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
+					    channel->tx_count))
+		return -EINVAL;
+
 	if (channel->combined_count)
 		sh = true;