
Commit 95aef7ce authored by David S. Miller


Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-05-05

This series contains updates to i40e and i40evf.

The theme behind this series is code reduction, yeah!  Jesse provides
most of the changes, starting with a refactor of tunnel interpretation
that lets us start using the hardware's parsing.  He then removed the
packet split receive routine and its ancillary code in preparation for
the Rx refactor.  The refactor of the receive routine aligns it with
the highly optimized one in ixgbe.  The hardware supports a 16 byte
receive descriptor, but the driver never used it in production; since
16 byte descriptors offered no real performance benefit, a whole lot
of complexity is dropped along with the code.  He also fixed a bug
where, while changing the number of descriptors via ethtool, the
driver did not test the limits of system memory before permanently
assuming it would be able to get receive buffer memory (see the
sketch below).

Mitch fixes a memory leak of one page each time the driver is opened
by allocating the correct number of receive buffers, and no longer
fiddles with next_to_use in the VF driver.

Arnd Bergmann fixed an indentation issue by adding the appropriate
curly braces in i40e_vc_config_promiscuous_mode_msg().

Julia Lawall fixed an issue found by Coccinelle: the i40e_client_ops
structure can be const since it is never modified.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents b3b4663c 3949c4ac
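The ethtool descriptor-count fix described in the message (and implemented in the i40e_set_ringparam hunks further down) comes down to one pattern: stage the resized Rx rings and all of their buffers off to the side, and swap them into place only after every allocation has succeeded, so a failed resize leaves the running configuration untouched. Below is a minimal user-space sketch of that allocate-then-swap idea, not the driver code itself; the ring struct and the ring_alloc/ring_resize helpers are illustrative names, not i40e APIs.

#include <stdlib.h>

/* toy stand-in for an Rx ring: just an array of buffer pointers */
struct ring {
	void **bufs;
	int count;
};

/* allocate every buffer up front so we find out *now* whether the
 * system really has the memory for the requested ring size
 */
static int ring_alloc(struct ring *r, int count)
{
	r->count = count;
	r->bufs = calloc(count, sizeof(*r->bufs));
	if (!r->bufs)
		return -1;
	for (int i = 0; i < count; i++) {
		r->bufs[i] = malloc(2048);	/* stand-in for one Rx buffer */
		if (!r->bufs[i])
			return -1;		/* caller unwinds via ring_free() */
	}
	return 0;
}

static void ring_free(struct ring *r)
{
	if (!r->bufs)
		return;
	for (int i = 0; i < r->count; i++)
		free(r->bufs[i]);
	free(r->bufs);
	r->bufs = NULL;
	r->count = 0;
}

/* resize 'live' without ever leaving it half-configured */
static int ring_resize(struct ring *live, int new_count)
{
	struct ring staged = { 0 };

	if (ring_alloc(&staged, new_count)) {
		ring_free(&staged);	/* abort: the old ring is still intact */
		return -1;
	}
	ring_free(live);		/* commit: release the old buffers ... */
	*live = staged;			/* ... and swap in the staged ring */
	return 0;
}

int main(void)
{
	struct ring rx = { 0 };

	if (ring_alloc(&rx, 512)) {
		ring_free(&rx);
		return 1;
	}
	if (ring_resize(&rx, 4096))	/* fails cleanly if memory runs out */
		return 1;
	ring_free(&rx);
	return 0;
}

In the driver this path is exercised from user space with ethtool -G <iface> rx <count>, which the kernel routes to i40e_set_ringparam().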
+1 −1
@@ -1863,7 +1863,7 @@ static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
 }
 
 /* client interface functions */
-static struct i40e_client_ops i40e_ops = {
+static const struct i40e_client_ops i40e_ops = {
 	.open = i40iw_open,
 	.close = i40iw_close,
 	.l2_param_change = i40iw_l2param_change,
+1 −10
@@ -101,7 +101,6 @@
 #define I40E_PRIV_FLAGS_LINKPOLL_FLAG	BIT(1)
 #define I40E_PRIV_FLAGS_FD_ATR		BIT(2)
 #define I40E_PRIV_FLAGS_VEB_STATS	BIT(3)
-#define I40E_PRIV_FLAGS_PS		BIT(4)
 #define I40E_PRIV_FLAGS_HW_ATR_EVICT	BIT(5)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
@@ -123,10 +122,7 @@
 #define XSTRINGIFY(bar) STRINGIFY(bar)
 
 #define I40E_RX_DESC(R, i)			\
-	((ring_is_16byte_desc_enabled(R))	\
-		? (union i40e_32byte_rx_desc *)	\
-			(&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
-		: (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+	(&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
 #define I40E_TX_DESC(R, i)			\
 	(&(((struct i40e_tx_desc *)((R)->desc))[i]))
 #define I40E_TX_CTXTDESC(R, i)			\
@@ -320,8 +316,6 @@ struct i40e_pf {
 #define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
 #define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
 #define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
-#define I40E_FLAG_RX_1BUF_ENABLED		BIT_ULL(4)
-#define I40E_FLAG_RX_PS_ENABLED			BIT_ULL(5)
 #define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
 #define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
 #define I40E_FLAG_FDIR_REQUIRES_REINIT		BIT_ULL(8)
@@ -330,7 +324,6 @@ struct i40e_pf {
 #ifdef I40E_FCOE
 #define I40E_FLAG_FCOE_ENABLED			BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED	BIT_ULL(13)
 #define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
 #define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
 #define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT_ULL(16)
@@ -534,9 +527,7 @@ struct i40e_vsi {
 	u8  *rss_lut_user;  /* User configured lookup table entries */
 
 	u16 max_frame;
-	u16 rx_hdr_len;
 	u16 rx_buf_len;
-	u8  dtype;
 
 	/* List of q_vectors allocated to this VSI */
 	struct i40e_q_vector **q_vectors;
+1 −1
@@ -217,7 +217,7 @@ struct i40e_client {
 #define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE	BIT(0)
 #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS	BIT(2)
 	enum i40e_client_type type;
-	struct i40e_client_ops *ops;	/* client ops provided by the client */
+	const struct i40e_client_ops *ops; /* client ops provided by the client */
 };
 
 static inline bool i40e_client_is_registered(struct i40e_client *client)
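The hunk above is the header side of Julia Lawall's constification: both the ops table and the pointer that stores it become const, so the table can sit in read-only data and cannot be modified after the client registers. Below is a small standalone sketch of the same pattern, with made-up demo_* names rather than the driver's types.

#include <stdio.h>

struct demo_ops {
	int  (*open)(void);
	void (*close)(void);
};

static int  demo_open(void)  { puts("open");  return 0; }
static void demo_close(void) { puts("close"); }

/* const: the compiler rejects any later demo_ops.open = ... assignment,
 * and the table is typically placed in read-only storage
 */
static const struct demo_ops demo_ops = {
	.open  = demo_open,
	.close = demo_close,
};

/* mirrors the i40e_client change: the stored pointer is const-qualified too */
struct demo_client {
	const struct demo_ops *ops;
};

int main(void)
{
	struct demo_client client = { .ops = &demo_ops };

	if (client.ops->open() == 0)
		client.ops->close();
	return 0;
}

Marking function-pointer tables const is a common hardening pattern in the kernel, since a writable ops table is an attractive target for memory corruption.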
+6 −25
@@ -268,13 +268,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 rx_ring->queue_index,
 			 rx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-			 i, rx_ring->rx_hdr_len,
-			 rx_ring->rx_buf_len,
-			 rx_ring->dtype);
+			 "    rx_rings[%i]: rx_buf_len = %d\n",
+			 i, rx_ring->rx_buf_len);
 		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-			 i, ring_is_ps_enabled(rx_ring),
+			 "    rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+			 i,
 			 rx_ring->next_to_use,
 			 rx_ring->next_to_clean,
 			 rx_ring->ring_active);
@@ -325,9 +323,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 i, tx_ring->state,
 			 tx_ring->queue_index,
 			 tx_ring->reg_idx);
-		dev_info(&pf->pdev->dev,
-			 "    tx_rings[%i]: dtype = %d\n",
-			 i, tx_ring->dtype);
 		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
 			 i,
@@ -365,8 +360,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		 "    work_limit = %d\n",
 		 vsi->work_limit);
 	dev_info(&pf->pdev->dev,
-		 "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
-		 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+		 "    max_frame = %d, rx_buf_len = %d dtype = %d\n",
+		 vsi->max_frame, vsi->rx_buf_len, 0);
 	dev_info(&pf->pdev->dev,
 		 "    num_q_vectors = %i, base_vector = %i\n",
 		 vsi->num_q_vectors, vsi->base_vector);
@@ -591,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 					 "   d[%03x] = 0x%016llx 0x%016llx\n",
 					 i, txd->buffer_addr,
 					 txd->cmd_type_offset_bsz);
-			} else if (sizeof(union i40e_rx_desc) ==
-				   sizeof(union i40e_16byte_rx_desc)) {
-				rxd = I40E_RX_DESC(ring, i);
-				dev_info(&pf->pdev->dev,
-					 "   d[%03x] = 0x%016llx 0x%016llx\n",
-					 i, rxd->read.pkt_addr,
-					 rxd->read.hdr_addr);
 			} else {
 				rxd = I40E_RX_DESC(ring, i);
 				dev_info(&pf->pdev->dev,
@@ -619,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 				 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
 				 vsi_seid, ring_id, desc_n,
 				 txd->buffer_addr, txd->cmd_type_offset_bsz);
-		} else if (sizeof(union i40e_rx_desc) ==
-			   sizeof(union i40e_16byte_rx_desc)) {
-			rxd = I40E_RX_DESC(ring, desc_n);
-			dev_info(&pf->pdev->dev,
-				 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
-				 vsi_seid, ring_id, desc_n,
-				 rxd->read.pkt_addr, rxd->read.hdr_addr);
 		} else {
 			rxd = I40E_RX_DESC(ring, desc_n);
 			dev_info(&pf->pdev->dev,
+31 −23
@@ -235,7 +235,6 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
 	"LinkPolling",
 	"flow-director-atr",
 	"veb-stats",
-	"packet-split",
 	"hw-atr-eviction",
 };
 
@@ -1275,6 +1274,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
 		}
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			/* this is to allow wr32 to have something to write to
+			 * during early allocation of Rx buffers
+			 */
+			u32 __iomem faketail = 0;
+			struct i40e_ring *ring;
+			u16 unused;
+
 			/* clone ring and setup updated count */
 			rx_rings[i] = *vsi->rx_rings[i];
 			rx_rings[i].count = new_rx_count;
@@ -1283,12 +1289,22 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			 */
 			rx_rings[i].desc = NULL;
 			rx_rings[i].rx_bi = NULL;
+			rx_rings[i].tail = (u8 __iomem *)&faketail;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
+			if (err)
+				goto rx_unwind;
+
+			/* now allocate the Rx buffers to make sure the OS
+			 * has enough memory, any failure here means abort
+			 */
+			ring = &rx_rings[i];
+			unused = I40E_DESC_UNUSED(ring);
+			err = i40e_alloc_rx_buffers(ring, unused);
+rx_unwind:
 			if (err) {
-				while (i) {
-					i--;
+				do {
 					i40e_free_rx_resources(&rx_rings[i]);
-				}
+				} while (i--);
 				kfree(rx_rings);
 				rx_rings = NULL;
 
@@ -1314,6 +1330,17 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if (rx_rings) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			i40e_free_rx_resources(vsi->rx_rings[i]);
+			/* get the real tail offset */
+			rx_rings[i].tail = vsi->rx_rings[i]->tail;
+			/* this is to fake out the allocation routine
+			 * into thinking it has to realloc everything
+			 * but the recycling logic will let us re-use
+			 * the buffers allocated above
+			 */
+			rx_rings[i].next_to_use = 0;
+			rx_rings[i].next_to_clean = 0;
+			rx_rings[i].next_to_alloc = 0;
+			/* do a struct copy */
 			*vsi->rx_rings[i] = rx_rings[i];
 		}
 		kfree(rx_rings);
@@ -2829,8 +2856,6 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
 		I40E_PRIV_FLAGS_FD_ATR : 0;
 	ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
 		I40E_PRIV_FLAGS_VEB_STATS : 0;
-	ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
-		I40E_PRIV_FLAGS_PS : 0;
 	ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
 		0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
 
@@ -2851,23 +2876,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 
 	/* NOTE: MFP is not settable */
 
-	/* allow the user to control the method of receive
-	 * buffer DMA, whether the packet is split at header
-	 * boundaries into two separate buffers.  In some cases
-	 * one routine or the other will perform better.
-	 */
-	if ((flags & I40E_PRIV_FLAGS_PS) &&
-	    !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
-		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
-		pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
-		reset_required = true;
-	} else if (!(flags & I40E_PRIV_FLAGS_PS) &&
-		   (pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
-		pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
-		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-		reset_required = true;
-	}
-
 	if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
 		pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
 	else