
Commit c595be94 authored by David S. Miller

Merge branch 'bnxt_en-Updates-for-net-next'



Michael Chan says:

====================
bnxt_en: Updates for net-next.

This series includes the usual firmware spec update.  The driver has
added an external PHY loopback test and PHY setup retry logic that is
needed during hotplug.  In the SRIOV space, the driver has added a
new VF resource allocation mode that requires the VF driver to
reserve resources during IFUP.  IF state changes are now propagated
to firmware so that firmware can release some resources during IFDOWN.

An ethtool method to get the firmware core dump and a hwmon temperature
reading have been added.  DSCP-to-user-priority mapping support has been
added to the driver's DCBNL interface, and the CoS queue logic has been
refined to make sure that the special RDMA Congestion Notification
hardware CoS queue will not be used for networking traffic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 074fb880 aabfc016
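
A recurring change across the bnxt.c and bnxt.h hunks below is that bits describing what the firmware supports move out of bp->flags into a dedicated bp->fw_cap word (BNXT_FLAG_SHORT_CMD becomes BNXT_FW_CAP_SHORT_CMD, BNXT_FLAG_NEW_RM becomes BNXT_FW_CAP_NEW_RM behind the BNXT_NEW_RM() macro, and so on).  A condensed sketch of the pattern, using only names that appear in the diff (surrounding structures and locking elided):

	u32 fw_cap;				/* firmware-advertised capabilities */
	#define BNXT_FW_CAP_SHORT_CMD	0x00000001
	#define BNXT_FW_CAP_IF_CHANGE	0x00000010

	/* set while parsing the HWRM_FUNC_DRV_RGTR response ... */
	if (resp->flags &
	    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

	/* ... and tested before issuing the dependent HWRM command */
	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
		return 0;
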
+8 −0
@@ -230,4 +230,12 @@ config BNXT_DCB

	  If unsure, say N.

config BNXT_HWMON
	bool "Broadcom NetXtreme-C/E HWMON support"
	default y
	depends on BNXT && HWMON && !(BNXT=y && HWMON=m)
	---help---
	  Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
	  devices, via the hwmon sysfs interface.

endif # NET_VENDOR_BROADCOM
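
The help text above points at the hwmon sysfs interface.  As a rough userspace illustration (not part of the patch; it assumes the sensor is registered under the driver name "bnxt_en" and reports temp1_input in millidegrees, as the bnxt.c hunk below does), the temperature could be read like this:

	#include <dirent.h>
	#include <stdio.h>
	#include <string.h>

	/* Scan /sys/class/hwmon for entries named "bnxt_en" and print
	 * temp1_input, converting millidegrees to degrees Celsius. */
	int main(void)
	{
		DIR *d = opendir("/sys/class/hwmon");
		struct dirent *de;
		char path[128], name[32];
		FILE *f;

		if (!d)
			return 1;
		while ((de = readdir(d)) != NULL) {
			if (de->d_name[0] == '.')
				continue;
			snprintf(path, sizeof(path),
				 "/sys/class/hwmon/%s/name", de->d_name);
			f = fopen(path, "r");
			if (!f)
				continue;
			if (!fgets(name, sizeof(name), f) ||
			    strncmp(name, "bnxt_en", 7)) {
				fclose(f);
				continue;
			}
			fclose(f);
			snprintf(path, sizeof(path),
				 "/sys/class/hwmon/%s/temp1_input", de->d_name);
			f = fopen(path, "r");
			if (f) {
				long mdeg;

				if (fscanf(f, "%ld", &mdeg) == 1)
					printf("%s: %ld.%03ld C\n", de->d_name,
					       mdeg / 1000, mdeg % 1000);
				fclose(f);
			}
		}
		closedir(d);
		return 0;
	}
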
+183 −33
@@ -51,6 +51,8 @@
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -1115,7 +1117,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -3445,7 +3447,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
	cp_ring_id = le16_to_cpu(req->cmpl_ring);
	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
	if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memcpy(short_cmd_req, req, msg_len);
@@ -3638,7 +3640,9 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,

static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_drv_rgtr_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

@@ -3676,7 +3680,15 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	else if (resp->flags &
		 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
@@ -3981,6 +3993,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
@@ -4578,7 +4591,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	if (bp->flags & BNXT_FLAG_NEW_RM) {
	if (BNXT_NEW_RM(bp)) {
		u16 cp, stats;

		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
@@ -4624,7 +4637,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
	req->fid = cpu_to_le16(0xffff);
	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(tx_rings);
	if (bp->flags & BNXT_FLAG_NEW_RM) {
	if (BNXT_NEW_RM(bp)) {
		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
		enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
@@ -4697,7 +4710,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
	if (!BNXT_NEW_RM(bp)) {
		bp->hw_resc.resv_tx_rings = tx_rings;
		return 0;
	}
@@ -4757,7 +4770,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
	if (BNXT_NEW_RM(bp) &&
	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
	     hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
		return true;
@@ -4793,7 +4806,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
		return rc;

	tx = hw_resc->resv_tx_rings;
	if (bp->flags & BNXT_FLAG_NEW_RM) {
	if (BNXT_NEW_RM(bp)) {
		rx = hw_resc->resv_rx_rings;
		cp = hw_resc->resv_cp_rings;
		grp = hw_resc->resv_hw_ring_grps;
@@ -4837,7 +4850,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
	u32 flags;
	int rc;

	if (!(bp->flags & BNXT_FLAG_NEW_RM))
	if (!BNXT_NEW_RM(bp))
		return 0;

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
@@ -4866,7 +4879,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
	if (bp->flags & BNXT_FLAG_NEW_RM)
	if (BNXT_NEW_RM(bp))
		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
@@ -5088,9 +5101,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;
@@ -5162,7 +5175,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)

		pf->vf_resv_strategy =
			le16_to_cpu(resp->vf_reservation_strategy);
		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
	}
hwrm_func_resc_qcaps_exit:
@@ -5248,7 +5261,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->flags |= BNXT_FLAG_NEW_RM;
			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
	}
	return 0;
}
@@ -5268,7 +5281,8 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, *qptr;
	u8 i, j, *qptr;
	bool no_rdma;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

@@ -5286,19 +5300,24 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
	qptr = &resp->queue_id0;
	for (i = 0, j = 0; i < bp->max_tc; i++) {
		bp->q_info[j].queue_id = *qptr++;
		bp->q_info[j].queue_profile = *qptr++;
		bp->tc_to_qidx[j] = j;
		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
		    (no_rdma && BNXT_PF(bp)))
			j++;
	}
	bp->max_tc = max_t(u8, j, 1);

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

	qptr = &resp->queue_id0;
	for (i = 0; i < bp->max_tc; i++) {
		bp->q_info[i].queue_id = *qptr++;
		bp->q_info[i].queue_profile = *qptr++;
		bp->tc_to_qidx[i] = i;
	}

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
@@ -5351,7 +5370,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bp->flags |= BNXT_FLAG_SHORT_CMD;
		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5920,7 +5939,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)

	max_idx = min_t(int, bp->total_irqs, max_cp);
	avail_msix = max_idx - bp->cp_nr_rings;
	if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
		return avail_msix;

	if (max_irq < total_req) {
@@ -5933,7 +5952,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)

static int bnxt_get_num_msix(struct bnxt *bp)
{
	if (!(bp->flags & BNXT_FLAG_NEW_RM))
	if (!BNXT_NEW_RM(bp))
		return bnxt_get_max_func_irqs(bp);

	return bnxt_cp_rings_in_use(bp);
@@ -6056,8 +6075,7 @@ int bnxt_reserve_rings(struct bnxt *bp)
		netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
		return rc;
	}
	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
	    (bnxt_get_num_msix(bp) != bp->total_irqs)) {
	if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		rc = bnxt_init_int_mode(bp);
@@ -6337,6 +6355,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
	}
	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
		if (bp->test_info)
			bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
	}
	if (resp->supported_speeds_auto_mode)
		link_info->support_auto_speeds =
			le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -6633,6 +6655,39 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_drv_if_change_input req = {0};
	bool resc_reinit = false;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
	if (up)
		req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc && (resp->flags &
		    cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
		resc_reinit = true;
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (up && resc_reinit && BNXT_NEW_RM(bp)) {
		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		hw_resc->resv_cp_rings = 0;
		hw_resc->resv_tx_rings = 0;
		hw_resc->resv_rx_rings = 0;
		hw_resc->resv_hw_ring_grps = 0;
		hw_resc->resv_vnics = 0;
	}
	return rc;
}

static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -6742,6 +6797,62 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
	} while (handle && handle != 0xffff);
}

#ifdef CONFIG_BNXT_HWMON
static ssize_t bnxt_show_temp(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	struct hwrm_temp_monitor_query_input req = {0};
	struct hwrm_temp_monitor_query_output *resp;
	struct bnxt *bp = dev_get_drvdata(dev);
	u32 temp = 0;

	resp = bp->hwrm_cmd_resp_addr;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		temp = resp->temp * 1000; /* display millidegree */
	mutex_unlock(&bp->hwrm_cmd_lock);

	return sprintf(buf, "%u\n", temp);
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);

static struct attribute *bnxt_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(bnxt);

static void bnxt_hwmon_close(struct bnxt *bp)
{
	if (bp->hwmon_dev) {
		hwmon_device_unregister(bp->hwmon_dev);
		bp->hwmon_dev = NULL;
	}
}

static void bnxt_hwmon_open(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
							  DRV_MODULE_NAME, bp,
							  bnxt_groups);
	if (IS_ERR(bp->hwmon_dev)) {
		bp->hwmon_dev = NULL;
		dev_warn(&pdev->dev, "Cannot register hwmon device\n");
	}
}
#else
static void bnxt_hwmon_close(struct bnxt *bp)
{
}

static void bnxt_hwmon_open(struct bnxt *bp)
{
}
#endif

static bool bnxt_eee_config_ok(struct bnxt *bp)
{
	struct ethtool_eee *eee = &bp->eee;
@@ -6894,8 +7005,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
		mutex_lock(&bp->link_lock);
		rc = bnxt_update_phy_setting(bp);
		mutex_unlock(&bp->link_lock);
		if (rc)
		if (rc) {
			netdev_warn(bp->dev, "failed to update phy settings\n");
			if (BNXT_SINGLE_PF(bp)) {
				bp->link_info.phy_retry = true;
				bp->link_info.phy_retry_expires =
					jiffies + 5 * HZ;
			}
		}
	}

	if (irq_re_init)
@@ -6981,8 +7098,16 @@ void bnxt_half_close_nic(struct bnxt *bp)
static int bnxt_open(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	bnxt_hwrm_if_change(bp, true);
	rc = __bnxt_open_nic(bp, true, true);
	if (rc)
		bnxt_hwrm_if_change(bp, false);

	bnxt_hwmon_open(bp);

	return __bnxt_open_nic(bp, true, true);
	return rc;
}

static bool bnxt_drv_busy(struct bnxt *bp)
@@ -7044,8 +7169,10 @@ static int bnxt_close(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	bnxt_hwmon_close(bp);
	bnxt_close_nic(bp, true, true);
	bnxt_hwrm_shutdown_link(bp);
	bnxt_hwrm_if_change(bp, false);
	return 0;
}

@@ -7295,7 +7422,7 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
static bool bnxt_can_reserve_rings(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

		/* No minimum rings were provisioned by the PF.  Don't
@@ -7345,7 +7472,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
		return false;
	}

	if (!(bp->flags & BNXT_FLAG_NEW_RM))
	if (!BNXT_NEW_RM(bp))
		return true;

	if (vnics == bp->hw_resc.resv_vnics)
@@ -7579,6 +7706,16 @@ static void bnxt_timer(struct timer_list *t)
		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}

	if (bp->link_info.phy_retry) {
		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
			bp->link_info.phy_retry = 0;
			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
		} else {
			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
			bnxt_queue_sp_work(bp);
		}
	}
bnxt_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -7666,6 +7803,19 @@ static void bnxt_sp_task(struct work_struct *work)
			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
				   rc);
	}
	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
		int rc;

		mutex_lock(&bp->link_lock);
		rc = bnxt_update_phy_setting(bp);
		mutex_unlock(&bp->link_lock);
		if (rc) {
			netdev_warn(bp->dev, "update phy settings retry failed\n");
		} else {
			bp->link_info.phy_retry = false;
			netdev_info(bp->dev, "update phy settings retry succeeded\n");
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
		mutex_lock(&bp->link_lock);
		bnxt_get_port_module_status(bp);
@@ -7718,7 +7868,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx_rings <<= 1;
	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
	if (bp->flags & BNXT_FLAG_NEW_RM)
	if (BNXT_NEW_RM(bp))
		cp += bnxt_get_ulp_msix_num(bp);
	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
				     vnics);
@@ -8727,7 +8877,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (rc)
		goto init_err_pci_clean;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
	if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
		if (rc)
			goto init_err_pci_clean;
+23 −7
@@ -12,11 +12,11 @@
#define BNXT_H

#define DRV_MODULE_NAME		"bnxt_en"
#define DRV_MODULE_VERSION	"1.9.1"
#define DRV_MODULE_VERSION	"1.9.2"

#define DRV_VER_MAJ	1
#define DRV_VER_MIN	9
#define DRV_VER_UPD	1
#define DRV_VER_UPD	2

#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -326,6 +326,10 @@ struct rx_tpa_start_cmp_ext {
	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &	\
	 RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)

#define TPA_START_IS_IPV6(rx_tpa_start)				\
	(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 &		\
	    cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))

struct rx_tpa_end_cmp {
	__le32 rx_tpa_end_cmp_len_flags_type;
	#define RX_TPA_END_CMP_TYPE				(0x3f << 0)
@@ -862,6 +866,7 @@ struct bnxt_pf_info {
	u8	vf_resv_strategy;
#define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
#define BNXT_VF_RESV_STRATEGY_MINIMAL	1
#define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC	2
	void			*hwrm_cmd_req_addr[4];
	dma_addr_t		hwrm_cmd_req_dma_addr[4];
	struct bnxt_vf_info	*vf;
@@ -959,6 +964,9 @@ struct bnxt_link_info {
	u16			advertising;	/* user adv setting */
	bool			force_link_chng;

	bool			phy_retry;
	unsigned long		phy_retry_expires;

	/* a copy of phy_qcfg output used to report link
	 * info to VF
	 */
@@ -990,6 +998,8 @@ struct bnxt_led_info {

struct bnxt_test_info {
	u8 offline_mask;
	u8 flags;
#define BNXT_TEST_FL_EXT_LPBK	0x1
	u16 timeout;
	char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
@@ -1134,7 +1144,6 @@ struct bnxt {
	atomic_t		intr_sem;

	u32			flags;
	#define BNXT_FLAG_DCB_ENABLED	0x1
	#define BNXT_FLAG_VF		0x2
	#define BNXT_FLAG_LRO		0x4
#ifdef CONFIG_INET
@@ -1163,15 +1172,11 @@ struct bnxt {
					 BNXT_FLAG_ROCEV2_CAP)
	#define BNXT_FLAG_NO_AGG_RINGS	0x20000
	#define BNXT_FLAG_RX_PAGE_MODE	0x40000
	#define BNXT_FLAG_FW_LLDP_AGENT	0x80000
	#define BNXT_FLAG_MULTI_HOST	0x100000
	#define BNXT_FLAG_SHORT_CMD	0x200000
	#define BNXT_FLAG_DOUBLE_DB	0x400000
	#define BNXT_FLAG_FW_DCBX_AGENT	0x800000
	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
	#define BNXT_FLAG_DIM		0x2000000
	#define BNXT_FLAG_ROCE_MIRROR_CAP	0x4000000
	#define BNXT_FLAG_NEW_RM	0x8000000
	#define BNXT_FLAG_PORT_STATS_EXT	0x10000000

	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
@@ -1276,10 +1281,19 @@ struct bnxt {
	struct ieee_ets		*ieee_ets;
	u8			dcbx_cap;
	u8			default_pri;
	u8			max_dscp_value;
#endif /* CONFIG_BNXT_DCB */

	u32			msg_enable;

	u32			fw_cap;
	#define BNXT_FW_CAP_SHORT_CMD	0x00000001
	#define BNXT_FW_CAP_LLDP_AGENT	0x00000002
	#define BNXT_FW_CAP_DCBX_AGENT	0x00000004
	#define BNXT_FW_CAP_NEW_RM	0x00000008
	#define BNXT_FW_CAP_IF_CHANGE	0x00000010

#define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
	u32			hwrm_spec_code;
	u16			hwrm_cmd_seq;
	u32			hwrm_intr_seq_id;
@@ -1342,6 +1356,7 @@ struct bnxt {
#define BNXT_GENEVE_DEL_PORT_SP_EVENT	13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
#define BNXT_FLOW_STATS_SP_EVENT	15
#define BNXT_UPDATE_PHY_SP_EVENT	16

	struct bnxt_hw_resc	hw_resc;
	struct bnxt_pf_info	pf;
@@ -1397,6 +1412,7 @@ struct bnxt {
	struct bnxt_tc_info	*tc_info;
	struct dentry		*debugfs_pdev;
	struct dentry		*debugfs_dim;
	struct device		*hwmon_dev;
};

#define BNXT_RX_STATS_OFFSET(counter)			\
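
The phy_retry/phy_retry_expires pair added to bnxt_link_info above backs the PHY setup retry logic mentioned in the cover letter: a failed bnxt_update_phy_setting() during open arms a jiffies-based window, and the periodic timer keeps queueing the update until it succeeds or the window expires.  Condensed from the bnxt.c hunks (simplified sketch, not a literal excerpt):

	/* __bnxt_open_nic(): arm a 5 second retry window on failure */
	bp->link_info.phy_retry = true;
	bp->link_info.phy_retry_expires = jiffies + 5 * HZ;

	/* bnxt_timer(): keep retrying until success or expiry */
	if (bp->link_info.phy_retry) {
		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
			bp->link_info.phy_retry = 0;	/* give up */
		} else {
			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
			bnxt_queue_sp_work(bp);
		}
	}

bnxt_sp_task() then performs the actual bnxt_update_phy_setting() call under link_lock and clears phy_retry once it succeeds.
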
+66 −0
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2018 Broadcom Inc
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#ifndef BNXT_COREDUMP_H
#define BNXT_COREDUMP_H

struct bnxt_coredump_segment_hdr {
	__u8 signature[4];
	__le32 component_id;
	__le32 segment_id;
	__le32 flags;
	__u8 low_version;
	__u8 high_version;
	__le16 function_id;
	__le32 offset;
	__le32 length;
	__le32 status;
	__le32 duration;
	__le32 data_offset;
	__le32 instance;
	__le32 rsvd[5];
};

struct bnxt_coredump_record {
	__u8 signature[4];
	__le32 flags;
	__u8 low_version;
	__u8 high_version;
	__u8 asic_state;
	__u8 rsvd0[5];
	char system_name[32];
	__le16 year;
	__le16 month;
	__le16 day;
	__le16 hour;
	__le16 minute;
	__le16 second;
	__le16 utc_bias;
	__le16 rsvd1;
	char commandline[256];
	__le32 total_segments;
	__le32 os_ver_major;
	__le32 os_ver_minor;
	__le32 rsvd2;
	char os_name[32];
	__le16 end_year;
	__le16 end_month;
	__le16 end_day;
	__le16 end_hour;
	__le16 end_minute;
	__le16 end_second;
	__le16 end_utc_bias;
	__le32 asic_id1;
	__le32 asic_id2;
	__le32 coredump_status;
	__u8 ioctl_low_version;
	__u8 ioctl_high_version;
	__le16 rsvd3[313];
};
#endif
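
These structures describe the layout of the firmware core dump that the series exposes through ethtool, per the cover letter.  Assuming the driver wires this up through the standard ethtool dump interface (ETHTOOL_GET_DUMP_FLAG / ETHTOOL_GET_DUMP_DATA), a minimal userspace sketch for fetching the raw dump looks like the following; the interface name is a placeholder, and parsing the buffer against bnxt_coredump_segment_hdr / bnxt_coredump_record is left out:

	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		const char *ifname = argc > 1 ? argv[1] : "eth0"; /* placeholder */
		struct ethtool_dump flag = { .cmd = ETHTOOL_GET_DUMP_FLAG };
		struct ethtool_dump *dump;
		struct ifreq ifr;
		int fd;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

		/* Ask the driver how large the dump is ... */
		ifr.ifr_data = (void *)&flag;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_GET_DUMP_FLAG");
			return 1;
		}

		/* ... then fetch it into a buffer of that size. */
		dump = calloc(1, sizeof(*dump) + flag.len);
		if (!dump)
			return 1;
		dump->cmd = ETHTOOL_GET_DUMP_DATA;
		dump->len = flag.len;
		ifr.ifr_data = (void *)dump;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_GET_DUMP_DATA");
			return 1;
		}
		printf("%s: %u bytes of core dump, version %u\n",
		       ifname, dump->len, dump->version);

		free(dump);
		close(fd);
		return 0;
	}
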
+85 −4
@@ -385,6 +385,61 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
	return rc;
}

static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
	struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_dscp_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
		if (bp->max_dscp_value < 0x3f)
			bp->max_dscp_value = 0;
	}

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
					bool add)
{
	struct hwrm_queue_dscp2pri_cfg_input req = {0};
	struct bnxt_dscp2pri_entry *dscp2pri;
	dma_addr_t mapping;
	int rc;

	if (bp->hwrm_spec_code < 0x10800)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
	dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
				      &mapping, GFP_KERNEL);
	if (!dscp2pri)
		return -ENOMEM;

	req.src_data_addr = cpu_to_le64(mapping);
	dscp2pri->dscp = app->protocol;
	if (add)
		dscp2pri->mask = 0x3f;
	else
		dscp2pri->mask = 0;
	dscp2pri->pri = app->priority;
	req.entry_cnt = cpu_to_le16(1);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
			  mapping);
	return rc;
}

static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
	int total_ets_bw = 0;
@@ -551,15 +606,30 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
	return rc;
}

static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
{
	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
		if (!bp->max_dscp_value)
			return -ENOTSUPP;
		if (app->protocol > bp->max_dscp_value)
			return -EINVAL;
	}
	return 0;
}

static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = -EINVAL;
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
	if (rc)
		return rc;

	rc = dcb_ieee_setapp(dev, app);
	if (rc)
		return rc;
@@ -570,6 +640,9 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, true);

	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);

	return rc;
}

@@ -582,6 +655,10 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
	if (rc)
		return rc;

	rc = dcb_ieee_delapp(dev, app);
	if (rc)
		return rc;
@@ -591,6 +668,9 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, false);

	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);

	return rc;
}

@@ -610,7 +690,7 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
		return 1;

	if (mode & DCB_CAP_DCBX_HOST) {
		if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
		if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
			return 1;

		/* only support IEEE */
@@ -642,10 +722,11 @@ void bnxt_dcb_init(struct bnxt *bp)
	if (bp->hwrm_spec_code < 0x10501)
		return;

	bnxt_hwrm_queue_dscp_qcaps(bp);
	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
	if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
	else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
	else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
	bp->dev->dcbnl_ops = &dcbnl_ops;
}
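
For the DSCP-to-priority path above, the dcbnl core hands the driver a struct dcb_app (include/uapi/linux/dcbnl.h) carrying the DSCP selector; bnxt_dcbnl_ieee_setapp() first validates it in bnxt_dcbnl_ieee_dscp_app_prep() (the feature is only enabled when the full 6-bit range is supported, i.e. max_dscp_value = (1 << num_dscp_bits) - 1 = 63), then programs the mapping with HWRM_QUEUE_DSCP2PRI_CFG.  A purely illustrative entry mapping DSCP 46 (EF) to user priority 5 would be:

	/* example values only -- not taken from the patch */
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.priority = 5,	/* user priority 0-7 */
		.protocol = 46,	/* DSCP value; must not exceed bp->max_dscp_value */
	};

bnxt_hwrm_queue_dscp2pri_cfg() then fills a single bnxt_dscp2pri_entry in DMA memory with dscp = 46, pri = 5, and mask = 0x3f for an add (or 0 for a delete) before sending it to firmware.
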