Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5abe9ead authored by David S. Miller's avatar David S. Miller
Browse files


Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2018-01-26

This series contains updates to i40e and i40evf.

Michal updates the driver to pass critical errors from the firmware to
the caller.

Patryk fixes an issue of creating multiple identical filters with the
same location, by simply moving the functions so that we remove the
existing filter and then add the new filter.

Paweł adds back in the ability to turn off offloads when VLAN is set for
the VF driver.  Fixed an issue where the number of TC queue pairs was
exceeding MSI-X vectors count, causing messages about invalid TC mapping
and wrong selected Tx queue.

Alex cleans up the i40e/i40evf_set_itr_per_queue() by dropping all the
unneeded pointer chases.  Puts to use the reg_idx value, which was going
unused, so that we can avoid having to compute the vector every time
throughout the driver.

Upasana enables the driver to display LLDP information on the vSphere Web
Client by exposing DCB parameters.

Alice converts our flags from 32 to 64 bit size, since we have added
more flags.

Dave implements a private ethtool flag to disable the processing of LLDP
packets by the firmware, so that the firmware will not consume LLDPDUs and
they will instead be sent up the stack.

Alan adds a mechanism for detecting/storing the flag for processing of
LLDP packets by the firmware, so that its current state is persistent
across reboots/reloads of the driver.

Avinash fixes kdump with i40e due to resource constraints.  We were
enabling VMDq and iWARP when we just have a single CPU, which was
starving kdump for the lack of IRQs.

Jake adds support to program the fragmented IPv4 input set PCTYPE.
Fixed the reported masks to properly report that the entire field is
masked, since we had accidentally swapped the mask values for the IPv4
addresses with the L4 port numbers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 457740a9 1563f2d2
Loading
Loading
Loading
Loading
+34 −33
Original line number Diff line number Diff line
@@ -508,39 +508,40 @@ struct i40e_pf {
#define I40E_HW_PORT_ID_VALID			BIT(17)
#define I40E_HW_RESTART_AUTONEG			BIT(18)

	u32 flags;
#define I40E_FLAG_RX_CSUM_ENABLED		BIT(0)
#define I40E_FLAG_MSI_ENABLED			BIT(1)
#define I40E_FLAG_MSIX_ENABLED			BIT(2)
#define I40E_FLAG_RSS_ENABLED			BIT(3)
#define I40E_FLAG_VMDQ_ENABLED			BIT(4)
#define I40E_FLAG_FILTER_SYNC			BIT(5)
#define I40E_FLAG_SRIOV_ENABLED			BIT(6)
#define I40E_FLAG_DCB_CAPABLE			BIT(7)
#define I40E_FLAG_DCB_ENABLED			BIT(8)
#define I40E_FLAG_FD_SB_ENABLED			BIT(9)
#define I40E_FLAG_FD_ATR_ENABLED		BIT(10)
#define I40E_FLAG_FD_SB_AUTO_DISABLED		BIT(11)
#define I40E_FLAG_FD_ATR_AUTO_DISABLED		BIT(12)
#define I40E_FLAG_MFP_ENABLED			BIT(13)
#define I40E_FLAG_UDP_FILTER_SYNC		BIT(14)
#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT(15)
#define I40E_FLAG_VEB_MODE_ENABLED		BIT(16)
#define I40E_FLAG_VEB_STATS_ENABLED		BIT(17)
#define I40E_FLAG_LINK_POLLING_ENABLED		BIT(18)
#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT(19)
#define I40E_FLAG_TEMP_LINK_POLLING		BIT(20)
#define I40E_FLAG_LEGACY_RX			BIT(21)
#define I40E_FLAG_PTP				BIT(22)
#define I40E_FLAG_IWARP_ENABLED			BIT(23)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT(24)
#define I40E_FLAG_CLIENT_L2_CHANGE		BIT(25)
#define I40E_FLAG_CLIENT_RESET			BIT(26)
#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED	BIT(27)
#define I40E_FLAG_SOURCE_PRUNING_DISABLED	BIT(28)
#define I40E_FLAG_TC_MQPRIO			BIT(29)
#define I40E_FLAG_FD_SB_INACTIVE		BIT(30)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER		BIT(31)
	u64 flags;
#define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(0)
#define I40E_FLAG_MSI_ENABLED			BIT_ULL(1)
#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(2)
#define I40E_FLAG_RSS_ENABLED			BIT_ULL(3)
#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(4)
#define I40E_FLAG_FILTER_SYNC			BIT_ULL(5)
#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(6)
#define I40E_FLAG_DCB_CAPABLE			BIT_ULL(7)
#define I40E_FLAG_DCB_ENABLED			BIT_ULL(8)
#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(9)
#define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(10)
#define I40E_FLAG_FD_SB_AUTO_DISABLED		BIT_ULL(11)
#define I40E_FLAG_FD_ATR_AUTO_DISABLED		BIT_ULL(12)
#define I40E_FLAG_MFP_ENABLED			BIT_ULL(13)
#define I40E_FLAG_UDP_FILTER_SYNC		BIT_ULL(14)
#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT_ULL(15)
#define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(16)
#define I40E_FLAG_VEB_STATS_ENABLED		BIT_ULL(17)
#define I40E_FLAG_LINK_POLLING_ENABLED		BIT_ULL(18)
#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT_ULL(19)
#define I40E_FLAG_TEMP_LINK_POLLING		BIT_ULL(20)
#define I40E_FLAG_LEGACY_RX			BIT_ULL(21)
#define I40E_FLAG_PTP				BIT_ULL(22)
#define I40E_FLAG_IWARP_ENABLED			BIT_ULL(23)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT_ULL(24)
#define I40E_FLAG_CLIENT_L2_CHANGE		BIT_ULL(25)
#define I40E_FLAG_CLIENT_RESET			BIT_ULL(26)
#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED	BIT_ULL(27)
#define I40E_FLAG_SOURCE_PRUNING_DISABLED	BIT_ULL(28)
#define I40E_FLAG_TC_MQPRIO			BIT_ULL(29)
#define I40E_FLAG_FD_SB_INACTIVE		BIT_ULL(30)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER		BIT_ULL(31)
#define I40E_FLAG_DISABLE_FW_LLDP		BIT_ULL(32)

	struct i40e_client_instance *cinst;
	bool stat_offsets_loaded;
+10 −5
Original line number Diff line number Diff line
@@ -907,11 +907,16 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
@@ -971,7 +976,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
	}

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+12 −0
Original line number Diff line number Diff line
@@ -205,6 +205,7 @@ enum i40e_admin_queue_opc {
	/* DCB commands */
	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
	i40e_aqc_opc_dcb_updated	= 0x0302,
	i40e_aqc_opc_set_dcb_parameters = 0x0303,

	/* TX scheduler */
	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
@@ -2496,6 +2497,17 @@ struct i40e_aqc_lldp_start {

I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);

/* Set DCB (direct 0x0303)
 * Payload for the i40e_aqc_opc_set_dcb_parameters admin queue command.
 * The layout is part of the firmware interface: 1 + 1 + 14 bytes keeps the
 * structure at the 16-byte direct-command size, which the
 * I40E_CHECK_CMD_LENGTH macro below verifies at compile time.
 */
struct i40e_aqc_set_dcb_parameters {
	u8 command;	/* set to I40E_AQ_DCB_SET_AGENT to apply DCB config */
#define I40E_AQ_DCB_SET_AGENT	0x1
#define I40E_DCB_VALID		0x1
	u8 valid_flags;	/* I40E_DCB_VALID marks the command field as valid */
	u8 reserved[14];	/* pad to the 16-byte direct command size */
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);

/* Get CEE DCBX Oper Config (0x0A07)
 * uses the generic descriptor struct
 * returns below as indirect response
+29 −0
Original line number Diff line number Diff line
@@ -278,6 +278,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -3639,7 +3641,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);

	cmd->command = I40E_AQ_LLDP_AGENT_START;
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_dcb_parameters
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 * @dcb_enable: True if DCB configuration needs to be applied
 *
 **/
enum i40e_status_code
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_dcb_parameters *cmd =
		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_dcb_parameters);

	if (dcb_enable) {
		cmd->valid_flags = I40E_DCB_VALID;
		cmd->command = I40E_AQ_DCB_SET_AGENT;
	}
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
+165 −21
Original line number Diff line number Diff line
@@ -233,6 +233,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
	I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
	I40E_PRIV_FLAG("disable-source-pruning",
		       I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
	I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
};

#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -2305,6 +2306,8 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
				   struct ethtool_coalesce *ec,
				   int queue)
{
	struct i40e_ring *rx_ring = vsi->rx_rings[queue];
	struct i40e_ring *tx_ring = vsi->tx_rings[queue];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_q_vector *q_vector;
@@ -2312,26 +2315,26 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,

	intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);

	vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
	vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
	rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
	tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;

	if (ec->use_adaptive_rx_coalesce)
		vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
		rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
		rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC;

	if (ec->use_adaptive_tx_coalesce)
		vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
		tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
		tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC;

	q_vector = vsi->rx_rings[queue]->q_vector;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
	q_vector = rx_ring->q_vector;
	q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
	vector = vsi->base_vector + q_vector->v_idx;
	wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);

	q_vector = vsi->tx_rings[queue]->q_vector;
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
	q_vector = tx_ring->q_vector;
	q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
	vector = vsi->base_vector + q_vector->v_idx;
	wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);

@@ -2746,16 +2749,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,

no_input_set:
	if (input_set & I40E_L3_SRC_MASK)
		fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
		fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);

	if (input_set & I40E_L3_DST_MASK)
		fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
		fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);

	if (input_set & I40E_L4_SRC_MASK)
		fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
		fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);

	if (input_set & I40E_L4_DST_MASK)
		fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
		fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);

	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -3806,6 +3809,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,

	i40e_write_fd_input_set(pf, index, new_mask);

	/* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
	 * frames. If we're programming the input set for IPv4/Other, we also
	 * need to program the IPv4/Fragmented input set. Since we don't have
	 * separate support, we'll always assume and enforce that the two flow
	 * types must have matching input sets.
	 */
	if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
					new_mask);

	/* Add the new offset and update table, if necessary */
	if (new_flex_offset) {
		err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
@@ -3827,6 +3840,87 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
	return 0;
}

/**
 * i40e_match_fdir_filter - Return true if two filters match
 * @a: pointer to filter struct
 * @b: pointer to filter struct
 *
 * Returns true when both filters match exactly the same criteria, i.e. they
 * have the same flow type and the same parameters. There is no need to
 * compare input sets, because all filters of a given flow type are required
 * to use the same input set.
 **/
static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
				   struct i40e_fdir_filter *b)
{
	/* The filters match only when every one of these fields agrees. */
	return a->dst_ip == b->dst_ip &&
	       a->src_ip == b->src_ip &&
	       a->dst_port == b->dst_port &&
	       a->src_port == b->src_port &&
	       a->flow_type == b->flow_type &&
	       a->ip4_proto == b->ip4_proto;
}

/**
 * i40e_disallow_matching_filters - Check that new filters differ
 * @vsi: pointer to the targeted VSI
 * @input: new filter to check
 *
 * Due to hardware limitations, it is not possible for two filters that match
 * similar criteria to be programmed at the same time. This is true for a few
 * reasons:
 *
 * (a) all filters matching a particular flow type must use the same input
 * set, that is they must match the same criteria.
 * (b) different flow types will never match the same packet, as the flow type
 * is decided by hardware before checking which rules apply.
 * (c) hardware has no way to distinguish which order filters apply in.
 *
 * Due to this, we can't really support using the location data to order
 * filters in the hardware parsing. It is technically possible for the user to
 * request two filters matching the same criteria but which select different
 * queues. In this case, rather than keep both filters in the list, we reject
 * the 2nd filter when the user requests adding it.
 *
 * This avoids needing to track location for programming the filter to
 * hardware, and ensures that we avoid some strange scenarios involving
 * deleting filters which match the same criteria.
 *
 * Returns 0 if @input is distinct from every existing filter, -EINVAL if an
 * existing filter already matches the same flow.
 **/
static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
					  struct i40e_fdir_filter *input)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_fdir_filter *rule;

	/* Loop through every filter, and check that it doesn't match. The
	 * list is never modified inside this loop, so the plain (non-safe)
	 * iterator is sufficient and the extra cursor node is not needed.
	 */
	hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
		/* Don't check the filters match if they share the same fd_id,
		 * since the new filter is actually just updating the target
		 * of the old filter.
		 */
		if (rule->fd_id == input->fd_id)
			continue;

		/* If any filters match, then print a warning message to the
		 * kernel message buffer and bail out.
		 */
		if (i40e_match_fdir_filter(rule, input)) {
			dev_warn(&pf->pdev->dev,
				 "Existing user defined filter %d already matches this flow.\n",
				 rule->fd_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
 * @vsi: pointer to the targeted VSI
@@ -3939,19 +4033,25 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
		input->flex_offset = userdef.flex_offset;
	}

	ret = i40e_add_del_fdir(vsi, input, true);
	/* Avoid programming two filters with identical match criteria. */
	ret = i40e_disallow_matching_filters(vsi, input);
	if (ret)
		goto free_input;
		goto free_filter_memory;

	/* Add the input filter to the fdir_input_list, possibly replacing
	 * a previous filter. Do not free the input structure after adding it
	 * to the list as this would cause a use-after-free bug.
	 */
	i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);

	ret = i40e_add_del_fdir(vsi, input, true);
	if (ret)
		goto remove_sw_rule;
	return 0;

free_input:
remove_sw_rule:
	hlist_del(&input->fdir_node);
	pf->fdir_pf_active_filters--;
free_filter_memory:
	kfree(input);
	return ret;
}
@@ -4264,7 +4364,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u32 orig_flags, new_flags, changed_flags;
	u64 orig_flags, new_flags, changed_flags;
	u32 i, j;

	orig_flags = READ_ONCE(pf->flags);
@@ -4315,13 +4415,32 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
	    !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
		return -EOPNOTSUPP;

	/* Disable FW LLDP not supported if NPAR active or if FW
	 * API version < 1.7
	 */
	if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
		if (pf->hw.func_caps.npar_enable) {
			dev_warn(&pf->pdev->dev,
				 "Unable to stop FW LLDP if NPAR active\n");
			return -EOPNOTSUPP;
		}

		if (pf->hw.aq.api_maj_ver < 1 ||
		    (pf->hw.aq.api_maj_ver == 1 &&
		     pf->hw.aq.api_min_ver < 7)) {
			dev_warn(&pf->pdev->dev,
				 "FW ver does not support stopping FW LLDP\n");
			return -EOPNOTSUPP;
		}
	}

	/* Compare and exchange the new flags into place. If we failed, that
	 * is if cmpxchg returns anything but the old value, this means that
	 * something else has modified the flags variable since we copied it
	 * originally. We'll just punt with an error and log something in the
	 * message buffer.
	 */
	if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
	if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
		dev_warn(&pf->pdev->dev,
			 "Unable to update pf->flags as it was modified by another thread...\n");
		return -EAGAIN;
@@ -4360,12 +4479,37 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
		}
	}

	if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
		if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
			struct i40e_dcbx_config *dcbcfg;
			int i;

			i40e_aq_stop_lldp(&pf->hw, true, NULL);
			i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
			/* reset local_dcbx_config to default */
			dcbcfg = &pf->hw.local_dcbx_config;
			dcbcfg->etscfg.willing = 1;
			dcbcfg->etscfg.maxtcs = 0;
			dcbcfg->etscfg.tcbwtable[0] = 100;
			for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
				dcbcfg->etscfg.tcbwtable[i] = 0;
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
				dcbcfg->etscfg.prioritytable[i] = 0;
			dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
			dcbcfg->pfc.willing = 1;
			dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
		} else {
			i40e_aq_start_lldp(&pf->hw, NULL);
		}
	}

	/* Issue reset to cause things to take effect, as additional bits
	 * are added we will need to create a mask of bits requiring reset
	 */
	if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
			     I40E_FLAG_LEGACY_RX |
			     I40E_FLAG_SOURCE_PRUNING_DISABLED))
			     I40E_FLAG_SOURCE_PRUNING_DISABLED |
			     I40E_FLAG_DISABLE_FW_LLDP))
		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);

	return 0;
Loading