
Commit b97526f3 authored by David S. Miller


Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-03-03

This series contains updates to fm10k, i40e and i40evf.

Matthew updates the fm10k driver by cleaning up code comments and whitespace
issues.  He also makes the tunnel header length check more robust by
calculating the inner L4 header length based on whether it is TCP or UDP,
and implements ndo_features_check(), which allows drivers to report their
offload capabilities per-skb.
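
(For context, ndo_features_check() lets a driver veto checksum/GSO offloads on a
per-skb basis at transmit time.  A minimal sketch of how the fm10k hook might be
wired up follows; the handler body is illustrative, and only fm10k_tx_encap_offload(),
which the diff below exports from the Tx path, is taken from this series.)

static netdev_features_t fm10k_features_check(struct sk_buff *skb,
					      struct net_device *dev,
					      netdev_features_t features)
{
	/* keep hardware offloads unless the skb carries tunnel headers the
	 * hardware cannot handle
	 */
	if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
		return features;

	/* otherwise fall back to software checksum and GSO for this skb */
	return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
}

static const struct net_device_ops fm10k_netdev_ops = {
	/* ... other callbacks ... */
	.ndo_features_check	= fm10k_features_check,
};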

Neerav updates the i40e driver to skip priority tagging if DCB is not
enabled.  He fixes an issue where the driver was not flushing the
DCBNL app table entries for applications that are not present in the local
DCBX application configuration TLVs, and fixes a case where, in MFP mode,
the driver returned the incorrect number of traffic classes for partitions
that are not enabled for iSCSI.  Even though the driver was not configuring
these traffic classes in the transmit scheduler for the NIC partitions, it
does use this map to set up the queue mappings.
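
(A hedged sketch of the priority-tagging change, assuming the usual
I40E_FLAG_DCB_ENABLED PF flag; the helper name and its placement in the Tx path
are illustrative, not taken from the diff.)

/* only fold the 802.1p user priority into the Tx VLAN tag when DCB is
 * actually enabled on the PF; otherwise leave the priority bits at zero
 */
static void i40e_tx_set_prio(struct i40e_pf *pf, u32 *td_tag, u8 up)
{
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return;

	*td_tag |= (u32)up << VLAN_PRIO_SHIFT;
}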

Shannon updates i40e/i40evf to include the firmware build number in the
formatted firmware version string.
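
(A sketch of what the extended version string could look like, assuming the
fw_build field in the admin queue info; the helper name and exact format string
are illustrative.)

static void i40e_fw_version_sketch(struct i40e_hw *hw, char *buf, size_t len)
{
	/* firmware major.minor.build plus admin queue API major.minor */
	snprintf(buf, len, "%d.%d.%05d api %d.%d",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver);
}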

Akeem adds a safety net, in the form of a 'default' case, to handle otherwise
unmatched switch values.
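
(A generic sketch of the pattern; the specific switch statements hardened in this
series are not shown here, and the function and enum values below are only an
example.)

static int i40e_check_link_speed(enum i40e_aq_link_speed speed)
{
	switch (speed) {
	case I40E_LINK_SPEED_40GB:
	case I40E_LINK_SPEED_10GB:
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		return 0;
	default:
		/* safety net: reject any value not matched above */
		return -EINVAL;
	}
}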

Mitch updates i40e to not automatically disable PF loopback at runtime,
now that we have the functionality to enable and disable PF loopback.  This
fix cleans up a bogus error message when removing the PF module with VFs
enabled.  He also adds an extra check to make sure that the indirection table
pointer is valid before dereferencing it.
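
(A sketch of the indirection-table check, assuming the ethtool rxfh path where a
NULL table pointer means the caller only cares about the hash key; the function
shape is illustrative.)

static int i40e_set_rxfh_sketch(struct net_device *netdev, const u32 *indir,
				const u8 *key)
{
	/* ethtool may pass a NULL indirection table when only the hash key
	 * is being updated, so check before dereferencing it
	 */
	if (!indir)
		return 0;

	/* ... program the RSS lookup table from indir[] ... */
	return 0;
}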

Anjali updates i40e to allow more than the maximum number of RSS queue pairs
when running in single TC mode for the main VSI; as many as num_online_cpus()
can be enabled.  She adds a firmware check to ensure that DCB is disabled for
firmware versions older than 4.33, updates i40e/i40evf to add missing
packet types for VXLAN offload, and updates i40e to handle a varying
RSS table size for each VSI, since not all VSIs have the same RSS table
size.
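
(Two hedged sketches of the checks described above; the helper names and the exact
version comparison are assumptions based on the description, not code from this
diff.)

/* DCB requires firmware 4.33 or newer */
static bool i40e_fw_supports_dcb(struct i40e_hw *hw)
{
	if (hw->aq.fw_maj_ver > 4)
		return true;

	return hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver >= 33;
}

/* with a single traffic class on the main VSI, allow up to one RSS queue
 * pair per online CPU, bounded by what the hardware supports
 */
static int i40e_pick_rss_qps(struct i40e_pf *pf)
{
	return min_t(int, num_online_cpus(), pf->rss_size_max);
}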

v2: Dropped previous patch #9 "i40e/i40evf: Add capability to gather VEB
    per TC stats" since the stats should be in ethtool and not debugfs.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1cea7e2c ce458fcf
+1 −0
@@ -439,6 +439,7 @@ extern char fm10k_driver_name[];
extern const char fm10k_driver_version[];
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
__be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
+1 −2
@@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
 *  Function invalidates the index values for the queues so any updates that
 *  may have happened are ignored and the base for the queue stats is reset.
 **/

void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
{
	u32 i;
+1 −1
@@ -275,7 +275,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occured and if so notify the VF */
	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
+27 −12
@@ -711,10 +711,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* verify protocol is transparent Ethernet bridging */
	if (nvgre_hdr->proto != htons(ETH_P_TEB))
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);
@@ -722,15 +718,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
	return (struct ethhdr *)(&nvgre_hdr->tni);
}

static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;
	u8 l4_hdr = 0;

/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET	164
	if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
	    FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
@@ -760,12 +754,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}

@@ -934,10 +949,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	/* Check again in a case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

+10 −10
@@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
 *  @fifo: pointer to FIFO
 *  @offset: offset to add to head
 *
 *  This function returns the indicies into the fifo based on head + offset
 *  This function returns the indices into the fifo based on head + offset
 **/
static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
{
@@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
 *  @fifo: pointer to FIFO
 *  @offset: offset to add to tail
 *
 *  This function returns the indicies into the fifo based on tail + offset
 *  This function returns the indices into the fifo based on tail + offset
 **/
static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
{
@@ -326,7 +326,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
 *  fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
 *  @mbx: pointer to mailbox
 *
 *  This function will take a seciton of the Rx FIFO and copy it into the
 *  This function will take a section of the Rx FIFO and copy it into the
 *  mailbox memory.  The offset in mbmem is based on the lower bits of the
 *  tail and len determines the length to copy.
@@ -418,7 +418,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw,
 *  @hw: pointer to hardware structure
 *  @mbx: pointer to mailbox
 *
 *  This function will take a seciton of the mailbox memory and copy it
 *  This function will take a section of the mailbox memory and copy it
 *  into the Rx FIFO.  The offset is based on the lower bits of the
 *  head and len determines the length to copy.
 **/
@@ -464,7 +464,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw,
 *  @tail: tail index of message
 *
 *  This function will first validate the tail index and size for the
 *  incoming message.  It then updates the acknowlegment number and
 *  incoming message.  It then updates the acknowledgment number and
 *  copies the data into the FIFO.  It will return the number of messages
 *  dequeued on success and a negative value on error.
 **/
@@ -761,7 +761,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw,
		err = fm10k_fifo_enqueue(&mbx->tx, msg);
	}

	/* if we failed trhead the error */
	/* if we failed treat the error */
	if (err) {
		mbx->timeout = 0;
		mbx->tx_busy++;
@@ -815,7 +815,7 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
	u32 mbmem = mbx->mbmem_reg;

	/* write new msg header to notify recepient of change */
	/* write new msg header to notify recipient of change */
	fm10k_write_reg(hw, mbmem, mbx->mbx_hdr);

	/* write mailbox to sent interrupt */
@@ -1251,7 +1251,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
	/* we will need to pull all of the fields for verification */
	head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);

	/* we only have lower 10 bits of error number os add upper bits */
	/* we only have lower 10 bits of error number so add upper bits */
	err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
	err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);

@@ -1548,7 +1548,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
	mbx->timeout = 0;
	mbx->udelay = FM10K_MBX_INIT_DELAY;

	/* initalize tail and head */
	/* initialize tail and head */
	mbx->tail = 1;
	mbx->head = 1;

@@ -1627,7 +1627,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx)
	mbx->local = FM10K_SM_MBX_VERSION;
	mbx->remote = 0;

	/* initalize tail and head */
	/* initialize tail and head */
	mbx->tail = 1;
	mbx->head = 1;
