Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c9a82421 authored by David S. Miller
Browse files

Merge branch 'bnxt_en-next'



Michael Chan says:

====================
bnxt_en: Updates for net-next.

First, we upgrade the firmware interface spec.  Due to a change in
the toolchains, the auto-generated bnxt_hsi.h does not match the
old bnxt_hsi.h and the patch is really big.  This should be just
one-time.  Going forward, changes should be incremental.

The next 10 patches implement a new scheme for the PF and VF drivers
to allocate and reserve resources.  The new scheme is more flexible
and allows dynamic and asymmetric distribution of resources, whereas
the old scheme is static, with resources distributed evenly.

The last few patches add cacheline size setting, a couple of PCI IDs,
better management of VF MAC address, and a better parent switchdev ID
for dual-port devices.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents ee81098e dd4ea1da
Loading
Loading
Loading
Loading
+539 −158

File changed.

Preview size limit exceeded, changes collapsed.

+34 −19
Original line number Diff line number Diff line
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -12,10 +12,10 @@
#define BNXT_H

#define DRV_MODULE_NAME		"bnxt_en"
#define DRV_MODULE_VERSION	"1.8.0"
#define DRV_MODULE_VERSION	"1.9.0"

#define DRV_VER_MAJ	1
#define DRV_VER_MIN	8
#define DRV_VER_MIN	9
#define DRV_VER_UPD	0

#include <linux/interrupt.h>
@@ -776,19 +776,38 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_NEW_RSS_FLAG	0x10
};

#if defined(CONFIG_BNXT_SRIOV)
struct bnxt_vf_info {
	u16	fw_fid;
	u8	mac_addr[ETH_ALEN];
struct bnxt_hw_resc {
	u16	min_rsscos_ctxs;
	u16	max_rsscos_ctxs;
	u16	min_cp_rings;
	u16	max_cp_rings;
	u16	resv_cp_rings;
	u16	min_tx_rings;
	u16	max_tx_rings;
	u16	resv_tx_rings;
	u16	min_rx_rings;
	u16	max_rx_rings;
	u16	resv_rx_rings;
	u16	min_hw_ring_grps;
	u16	max_hw_ring_grps;
	u16	resv_hw_ring_grps;
	u16	min_l2_ctxs;
	u16	max_l2_ctxs;
	u16	max_irqs;
	u16	min_vnics;
	u16	max_vnics;
	u16	resv_vnics;
	u16	min_stat_ctxs;
	u16	max_stat_ctxs;
	u16	max_irqs;
};

#if defined(CONFIG_BNXT_SRIOV)
struct bnxt_vf_info {
	u16	fw_fid;
	u8	mac_addr[ETH_ALEN];	/* PF assigned MAC Address */
	u8	vf_mac_addr[ETH_ALEN];	/* VF assigned MAC address, only
					 * stored by PF.
					 */
	u16	vlan;
	u32	flags;
#define BNXT_VF_QOS		0x1
@@ -809,15 +828,6 @@ struct bnxt_pf_info {
	u16	fw_fid;
	u16	port_id;
	u8	mac_addr[ETH_ALEN];
	u16	max_rsscos_ctxs;
	u16	max_cp_rings;
	u16	max_tx_rings; /* HW assigned max tx rings for this PF */
	u16	max_rx_rings; /* HW assigned max rx rings for this PF */
	u16	max_hw_ring_grps;
	u16	max_irqs;
	u16	max_l2_ctxs;
	u16	max_vnics;
	u16	max_stat_ctxs;
	u32	first_vf_id;
	u16	active_vfs;
	u16	max_vfs;
@@ -829,6 +839,9 @@ struct bnxt_pf_info {
	u32	max_rx_wm_flows;
	unsigned long	*vf_event_bmap;
	u16	hwrm_cmd_req_pages;
	u8	vf_resv_strategy;
#define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
#define BNXT_VF_RESV_STRATEGY_MINIMAL	1
	void			*hwrm_cmd_req_addr[4];
	dma_addr_t		hwrm_cmd_req_dma_addr[4];
	struct bnxt_vf_info	*vf;
@@ -1137,6 +1150,7 @@ struct bnxt {
	#define BNXT_FLAG_FW_DCBX_AGENT	0x800000
	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
	#define BNXT_FLAG_DIM		0x2000000
	#define BNXT_FLAG_NEW_RM	0x8000000

	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
					    BNXT_FLAG_RFS |		\
@@ -1196,7 +1210,6 @@ struct bnxt {
	int			tx_nr_rings;
	int			tx_nr_rings_per_tc;
	int			tx_nr_rings_xdp;
	int			tx_reserved_rings;

	int			tx_wake_thresh;
	int			tx_push_thresh;
@@ -1308,6 +1321,7 @@ struct bnxt {
#define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
#define BNXT_FLOW_STATS_SP_EVENT	15

	struct bnxt_hw_resc	hw_resc;
	struct bnxt_pf_info	pf;
#ifdef CONFIG_BNXT_SRIOV
	int			nr_vfs;
@@ -1357,6 +1371,7 @@ struct bnxt {
	enum devlink_eswitch_mode eswitch_mode;
	struct bnxt_vf_rep	**vf_reps; /* array of vf-rep ptrs */
	u16			*cfa_code_map; /* cfa_code -> vf_idx map */
	u8			switch_id[8];
	struct bnxt_tc_info	*tc_info;
};

@@ -1432,7 +1447,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
		     int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
void bnxt_dim_work(struct work_struct *work);
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
+5841 −6095

File changed.

Preview size limit exceeded, changes collapsed.

+189 −32
Original line number Diff line number Diff line
@@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
@@ -416,29 +419,126 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
	vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.min_rsscos_ctx = cpu_to_le16(1);
	req.max_rsscos_ctx = cpu_to_le16(1);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
		req.min_cmpl_rings = cpu_to_le16(1);
		req.min_tx_rings = cpu_to_le16(1);
		req.min_rx_rings = cpu_to_le16(1);
		req.min_l2_ctxs = cpu_to_le16(1);
		req.min_vnics = cpu_to_le16(1);
		req.min_stat_ctx = cpu_to_le16(1);
		req.min_hw_ring_grps = cpu_to_le16(1);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(4);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(4);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc) {
			rc = -ENOMEM;
			break;
		}
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = 1;

		if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
			n = pf->active_vfs;

		hw_resc->max_tx_rings -= vf_tx_rings * n;
		hw_resc->max_rx_rings -= vf_rx_rings * n;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
		hw_resc->max_cp_rings -= vf_cp_rings * n;
		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
		hw_resc->max_vnics -= vf_vnics * n;

		rc = pf->active_vfs;
	}
	return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_ring_grps, max_stat_ctxs;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	max_stat_ctxs = hw_resc->max_stat_ctxs;

	/* Remaining rings are distributed equally amongs VF's for now */
	vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
	vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
@@ -485,22 +585,34 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= total_vf_tx_rings;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	if (rc)
		rc = -ENOMEM;
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
	if (bp->flags & BNXT_FLAG_NEW_RM)
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

@@ -510,8 +622,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
	 */
	vfs_supported = *num_vfs;

	avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
	avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
	avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
	avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
@@ -520,23 +632,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
@@ -561,9 +674,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
	rc = bnxt_func_cfg(bp, *num_vfs);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			goto err_out2;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
		*num_vfs = rc;
	}

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -766,16 +886,50 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
	return rc;
}

static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Only allow VF to set a valid MAC address if the PF assigned MAC
	 * address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    !is_valid_ether_addr(vf->mac_addr)) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
	/* VF MAC address must first match PF MAC address, if it is valid.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else if (bp->hwrm_spec_code < 0x10202) {
		mac_ok = true;
	} else {
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	else
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

@@ -838,6 +992,9 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_store_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
+1 −1
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
	}

	/* Is dev a VF-rep? */
	if (dev != pf_bp->dev)
	if (bnxt_dev_is_vf_rep(dev))
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
Loading