Commit 0492b71c authored by David S. Miller

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-04-08

This series contains updates to i40e and i40evf only.

Mitch fixes an issue where the client driver (i40iw) was attempting to
load on X710 devices (which do not support iWARP), so the driver now
registers with the client only if iWARP is supported.

Jake fixes up error messages to better clarify to the user when adding an
invalid flow type.  Updates the driver to look up the MAC address from
eth_get_platform_mac_address() first before checking what the firmware
provides.  Cleans up the code so that transmit and receive queues are
checked in a single loop rather than in two duplicate loops.  Also removes
the definitions of flags that are never used.

Alex cleans up the private flags handling so that pf->flags is always
updated when a change is made to the private flags.  Adds support for 3K
buffers to the receive path so that we can provide the additional padding
needed in the event of NET_IP_ALIGN being non-zero or a cache line being
larger than 64 bytes.  Adds support for build_skb() to i40e/i40evf.

Maciej adjusts the scope of the rtnl lock held during reset because it
was stopping other PFs from running their reset procedures.

Alan reduces code complexity in i40e_detect_recover_hung_queue().
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 417d978f f8b45b74
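
The client-registration fix from Mitch boils down to gating the i40iw client interface on device capability. A minimal sketch of that gating, reusing the I40E_FLAG_IWARP_ENABLED flag defined in i40e.h below, but with a hypothetical registration helper; the actual hook-up lives in the driver's client code and is not part of this diff:

/* Sketch only: capability-gated client registration.
 * i40e_register_iwarp_client() is a hypothetical helper standing in for
 * the driver's real client interface.
 */
static int i40e_setup_iwarp_client(struct i40e_pf *pf)
{
	/* Devices such as the X710 do not support iWARP, so skip the
	 * i40iw client registration entirely when the capability flag
	 * is not set.
	 */
	if (!(pf->flags & I40E_FLAG_IWARP_ENABLED))
		return 0;

	return i40e_register_iwarp_client(pf);
}
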
+1 −9
@@ -389,13 +389,9 @@ struct i40e_pf {
#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
#define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
#define I40E_FLAG_NEED_LINK_UPDATE		BIT_ULL(9)
#define I40E_FLAG_IWARP_ENABLED			BIT_ULL(10)
#define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT_ULL(16)
#define I40E_FLAG_PROCESS_MDD_EVENT		BIT_ULL(17)
#define I40E_FLAG_PROCESS_VFLR_EVENT		BIT_ULL(18)
#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(19)
#define I40E_FLAG_DCB_ENABLED			BIT_ULL(20)
#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(21)
@@ -617,7 +613,6 @@ struct i40e_vsi {
	u32 tx_busy;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
	u32 rx_buf_failed;
	u32 rx_page_failed;

@@ -703,9 +698,6 @@ struct i40e_q_vector {

	u8 num_ringpairs;	/* total number of ring pairs in vector */

#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */
	unsigned long hung_detected; /* Set/Reset for hung_detection logic */

	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;

@@ -837,7 +829,7 @@ void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
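
The i40e_do_reset() prototype change just above is the visible part of Maciej's rtnl-scope adjustment: callers that already hold rtnl_lock pass lock_acquired = true so the reset path does not take the lock again and block other PFs from resetting. A rough sketch of that convention follows; the real function handles several reset types, so treat this as illustrative only:

/* Illustrative sketch of the "caller already holds rtnl" convention
 * behind the new lock_acquired parameter; not the driver's actual
 * implementation.
 */
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
	if (!(reset_flags & BIT(__I40E_PF_RESET_REQUESTED)))
		return;

	/* ethtool entry points already run under rtnl taken by the core
	 * and pass true (see the callers in the next file); other paths
	 * pass false and let the reset path take the lock itself.
	 */
	if (!lock_acquired)
		rtnl_lock();

	/* ... reset and rebuild the PF here ... */

	if (!lock_acquired)
		rtnl_unlock();
}
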
+3 −4
@@ -89,7 +89,6 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	I40E_VSI_STAT("tx_linearize", tx_linearize),
	I40E_VSI_STAT("tx_force_wb", tx_force_wb),
	I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
	I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
@@ -1852,7 +1851,7 @@ static void i40e_diag_test(struct net_device *netdev,
			 * link then the following link test would have
			 * to be moved to before the reset
			 */
			i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
			i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);

		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1868,7 +1867,7 @@ static void i40e_diag_test(struct net_device *netdev,
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(__I40E_TESTING, &pf->state);
		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);

		if (if_running)
			i40e_open(netdev);
@@ -4099,7 +4098,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
	 */
	if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
	    ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);

	return 0;
}
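
The last hunk above belongs to Alex's private-flags cleanup: the handler updates pf->flags and then uses the set of bits that actually changed to decide whether a reset is needed. A condensed sketch of that changed-flags pattern, using the flag names from the hunk; the real i40e_set_priv_flags() also translates and validates each ethtool private flag, which is omitted here:

/* Condensed sketch of the changed-flags pattern; not the full handler. */
static int i40e_set_priv_flags_sketch(struct net_device *dev, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	u64 changed_flags = pf->flags;

	/* ... map the ethtool private flags into pf->flags here ... */

	changed_flags ^= pf->flags;

	/* Only some flag changes require a PF reset to take effect. */
	if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
	    ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);

	return 0;
}
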
+148 −120
File changed; preview size limit exceeded, changes collapsed.

+84 −32
File changed; preview size limit exceeded, changes collapsed.
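
Jake's MAC-address change presumably lands in one of the collapsed files (i40e_main.c): try a platform-provided address first and only fall back to what the firmware reports when none is available. A hedged sketch of that lookup order, assuming the etherdevice.h helper eth_platform_get_mac_address() is the call the cover letter refers to; the firmware-read helper below is hypothetical:

/* Sketch of the lookup order only; i40e_read_fw_mac() is hypothetical. */
static void i40e_probe_mac_addr(struct i40e_hw *hw, struct device *dev)
{
	u8 mac[ETH_ALEN];

	/* Prefer a MAC address provided by the platform (device tree,
	 * ACPI, etc.) when one exists.
	 */
	if (!eth_platform_get_mac_address(dev, mac)) {
		ether_addr_copy(hw->mac.addr, mac);
		return;
	}

	/* Otherwise keep the address reported by the firmware/NVM. */
	i40e_read_fw_mac(hw, hw->mac.addr);
}
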

+82 −3
@@ -119,6 +119,7 @@ enum i40e_dyn_idx_t {
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -134,6 +135,58 @@ enum i40e_dyn_idx_t {
#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
@@ -275,7 +328,6 @@ struct i40e_tx_queue_stats {
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct i40e_rx_queue_stats {
@@ -342,6 +394,7 @@ struct i40e_ring {

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)

	/* stats structs */
	struct i40e_queue_stats	stats;
@@ -369,6 +422,21 @@ struct i40e_ring {
					 */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
@@ -390,6 +458,17 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -400,7 +479,7 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
u32 i40e_get_tx_pending(struct i40e_ring *ring);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);

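
The headroom macros in the last file pick between the plain 1536-byte layout and the padded 3072-byte one, and i40e_rx_pg_order() switches to order-1 pages once the buffer no longer fits in half a page. As a worked example, assuming 4K pages, 64-byte cache lines, NET_SKB_PAD = 64, NET_IP_ALIGN = 2 and a cache-aligned struct skb_shared_info of 320 bytes (all of these vary with architecture and kernel config): SKB_WITH_OVERHEAD(2048) = 2048 - 320 = 1728, so NET_SKB_PAD + 1536 = 1600 still fits, I40E_2K_TOO_SMALL_WITH_PADDING is false, and i40e_compute_pad(1536 - 2) = 1728 - 1534 = 194 bytes of headroom. A small user-space sketch that mirrors the same arithmetic under those assumptions:

#include <stdio.h>

/* Assumed values; the kernel derives these from the architecture/config. */
#define PAGE_SIZE		4096
#define SMP_CACHE_BYTES		64
#define NET_SKB_PAD		64
#define NET_IP_ALIGN		2
#define SHINFO_SIZE		320	/* assumed sizeof(struct skb_shared_info) */

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)	ALIGN(x, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(x)	((x) - SKB_DATA_ALIGN(SHINFO_SIZE))

#define I40E_RXBUFFER_1536	1536
#define I40E_RXBUFFER_2048	2048
#define I40E_RXBUFFER_3072	3072

/* Same computation as the kernel's i40e_compute_pad() above. */
static int i40e_compute_pad(int rx_buf_len)
{
	int page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);

	return SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
}

int main(void)
{
	int too_small = (NET_SKB_PAD + I40E_RXBUFFER_1536) >
			SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
	int rx_buf_len = too_small ?
			 I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN) :
			 I40E_RXBUFFER_1536;

	/* Make room for NET_IP_ALIGN, as i40e_skb_pad() does. */
	rx_buf_len -= NET_IP_ALIGN;

	printf("2K too small: %d, I40E_SKB_PAD: %d\n",
	       too_small, i40e_compute_pad(rx_buf_len));
	return 0;
}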