Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 27fa589d authored by David S. Miller
Browse files


Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-06-09

This series contains more updates to i40e and i40evf.

Shannon adds checks for error status bits on the admin event queue and
provides notification if seen.  Cleans up unused variable and memory
allocation which was used earlier in driver development and is no longer
needed.  Also fixes the driver to not complain about removing
non-existent MAC addresses.  Bumps the driver versions for both i40e
and i40evf.

Catherine fixes a function header comment to make sure the comment correctly
reflects the function name.

Mitch adds code to allow for additional VSIs since the number of VSIs that
the firmware reports to us is a guaranteed minimum, not an absolute
maximum.  The hardware actually supports far more than the reported value,
which we often need.  Implements anti-spoofing for VFs for both MAC
addresses and VLANs, as well as enable this feature by default for all VFs.

Anjali changes the interrupt distribution policy to change the way
resources for special features are handled.  Fixes the driver to not fall
back to one queue if the only feature enabled is ATR, since FD_SB
and FD_ATR need to be checked independently in order to decide if we
will support multiple queues or not.  Allows the RSS table entry range
and GPS to be any number, not necessarily a power of 2 because hardware
does not restrict us to use a power of 2 GPS in the case of RSS as long as
we are not sharing the RSS table with another VSI (VMDq).

Frank modifies the driver to keep SR-IOV enabled in the case that RSS,
VMDq, FD_SB and DCB are disabled so that SR-IOV does not get turned off
unnecessarily.

Jesse fixes a bug in receive checksum where the driver was not marking
packets with bad checksums correctly, especially IPv6 packets with a bad
checksum.  To do this correctly, we need a define that may be set by
hardware in rare cases.

Greg fixes the driver to delete all the old and stale MAC filters for the
VF VSI when the host administrator changes the VF MAC address from under
its feet.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents b78370c0 e8607ef5
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -72,6 +72,7 @@
#define I40E_MIN_NUM_DESCRIPTORS      64
#define I40E_MIN_MSIX                 2
#define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
#define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
#define I40E_DEFAULT_QUEUES_PER_VMDQ  2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF    4
#define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
@@ -215,6 +216,7 @@ struct i40e_pf {
	u16 rss_size;              /* num queues in the RSS array */
	u16 rss_size_max;          /* HW defined max RSS queues */
	u16 fdir_pf_filter_count;  /* num of guaranteed filters for this PF */
	u16 num_alloc_vsi;         /* num VSIs this driver supports */
	u8 atr_sample_rate;
	bool wol_en;

@@ -295,7 +297,6 @@ struct i40e_pf {
	u16 pf_seid;
	u16 main_vsi_seid;
	u16 mac_seid;
	struct i40e_aqc_get_switch_config_data *sw_config;
	struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
	struct dentry *i40e_dbg_pf;
+1 −1
Original line number Diff line number Diff line
@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
			      struct i40e_ieee_app_priority_table *app)
{
	int v, err;
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] && pf->vsi[v]->netdev) {
			err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
			if (err)
+3 −3
Original line number Diff line number Diff line
@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
	if (seid < 0)
		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
	else
		for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
		for (i = 0; i < pf->num_alloc_vsi; i++)
			if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
				return pf->vsi[i];

@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
				 i, pf->vsi[i]->seid);
@@ -1526,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
			if (cnt == 0) {
				int i;
				for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
				for (i = 0; i < pf->num_alloc_vsi; i++)
					i40e_vsi_reset_stats(pf->vsi[i]);
				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
			} else if (cnt == 1) {
+1 −0
Original line number Diff line number Diff line
@@ -119,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
	I40E_PF_STAT("tx_timeout", tx_timeout_count),
	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
+122 −55
Original line number Diff line number Diff line
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 4
#define DRV_VERSION_BUILD 5
#define DRV_VERSION_BUILD 7
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -652,7 +652,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
@@ -706,7 +706,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
@@ -1366,7 +1366,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
	num_tc_qps = vsi->alloc_queue_pairs/numtc;
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
@@ -1595,7 +1595,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret)
				if (aq_ret &&
				    pf->hw.aq.asq_last_status !=
							      I40E_AQ_RC_ENOENT)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
@@ -1607,7 +1609,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
						     del_list, num_del, NULL);
			num_del = 0;

			if (aq_ret)
			if (aq_ret &&
			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
@@ -1734,7 +1737,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
@@ -3524,7 +3527,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
	int i;

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
@@ -3614,7 +3617,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
@@ -3628,7 +3631,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
@@ -4069,7 +4072,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
	}

	/* Update each VSI */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

@@ -4592,7 +4595,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4919,7 +4922,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}
@@ -4976,7 +4979,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

@@ -5026,7 +5029,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

@@ -5132,11 +5135,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf)
@@ -5242,7 +5281,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -5272,7 +5311,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
		goto end_reconstitute;

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

@@ -5385,7 +5424,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)

	/* find existing VSI and see if it needs configuring */
	vsi = NULL;
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			vsi = pf->vsi[i];
			break;
@@ -5415,7 +5454,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
	int i;

	i40e_fdir_filter_exit(pf);
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_release(pf->vsi[i]);
			break;
@@ -5444,7 +5483,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}
@@ -5924,15 +5963,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->hw.func_caps.num_vsis) {
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
@@ -6189,6 +6228,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	vec = i40e_reserve_msix_vectors(pf, v_budget);

	if (vec != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		pf->num_vmdq_msix = 0;
	}

	if (vec < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
@@ -6197,27 +6246,25 @@ static int i40e_init_msix(struct i40e_pf *pf)

	} else if (vec == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_vmdq_msix = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (vec != v_budget) {
		/* reserve the misc vector */
		vec--;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		vec--;                    /* reserve the misc vector */
		pf->num_vmdq_vsis = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_vmdq_vsis = 1;
			pf->num_lan_msix = 1;
			break;
		case 3:
			pf->num_vmdq_vsis = 1;
			pf->num_lan_msix = 2;
			break;
		default:
@@ -6229,6 +6276,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
		}
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}
	return err;
}

@@ -6446,7 +6498,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
		return 0;

	queue_count = min_t(int, queue_count, pf->rss_size_max);
	queue_count = rounddown_pow_of_two(queue_count);

	if (queue_count != pf->rss_size) {
		i40e_prep_for_reset(pf);
@@ -6502,7 +6553,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
		pf->rss_size = rounddown_pow_of_two(pf->rss_size);
	} else {
		pf->rss_size = 1;
	}
@@ -6848,6 +6898,7 @@ static const struct net_device_ops i40e_netdev_ops = {
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofck,
#ifdef CONFIG_I40E_VXLAN
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
@@ -7082,6 +7133,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;
@@ -7193,7 +7251,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -7372,7 +7430,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
@@ -7615,7 +7673,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -7667,7 +7725,7 @@ void i40e_veb_release(struct i40e_veb *veb)
	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
@@ -7779,10 +7837,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
@@ -7954,15 +8012,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		if (num_reported) {
			int sz = sizeof(*sw_config) * num_reported;

			kfree(pf->sw_config);
			pf->sw_config = kzalloc(sz, GFP_KERNEL);
			if (pf->sw_config)
				memcpy(pf->sw_config, sw_config, sz);
		}

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];
@@ -8129,9 +8178,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
	    !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
			   I40E_FLAG_DCB_ENABLED))) {
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->rss_size = pf->num_lan_qps = 1;
@@ -8143,6 +8190,19 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_ENABLED))) {
		/* one qp for PF */
		pf->rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
@@ -8448,10 +8508,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	i40e_determine_queue_usage(pf);
	i40e_init_interrupt_scheme(pf);

	/* Set up the *vsi struct based on the number of VSIs in the HW,
	 * and set up our local tracking of the MAIN PF vsi.
	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
	pf->vsi = kzalloc(len, GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
@@ -8464,7 +8532,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
		goto err_vsis;
	}
	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
@@ -8659,7 +8727,7 @@ static void i40e_remove(struct pci_dev *pdev)

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
@@ -8674,7 +8742,6 @@ static void i40e_remove(struct pci_dev *pdev)

	kfree(pf->qp_pile);
	kfree(pf->irq_pile);
	kfree(pf->sw_config);
	kfree(pf->vsi);

	/* force a PF reset to clean anything leftover */
Loading