Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a9be2242 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull networking fixes from David Miller:

 1) Fix checksumming regressions, from Tom Herbert.

 2) Undo unintentional permissions changes for SCTP rto_alpha and
    rto_beta sysctl knobs, from Daniel Borkmann.

 3) VXLAN, like other IP tunnels, should advertise its encapsulation
    size using dev->needed_headroom instead of dev->hard_header_len.
    From Cong Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: sctp: fix permissions for rto_alpha and rto_beta knobs
  vxlan: Checksum fixes
  net: add skb_pop_rcv_encapsulation
  udp: call __skb_checksum_complete when doing full checksum
  net: Fix save software checksum complete
  net: Fix GSO constants to match NETIF flags
  udp: ipv4: do not waste time in __udp4_lib_mcast_demux_lookup
  vxlan: use dev->needed_headroom instead of dev->hard_header_len
  MAINTAINERS: update cxgb4 maintainer
parents dd1845af b58537a1
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2594,7 +2594,7 @@ S: Supported
F:	drivers/infiniband/hw/cxgb3/

CXGB4 ETHERNET DRIVER (CXGB4)
M:	Dimitris Michailidis <dm@chelsio.com>
M:	Hariprasad S <hariprasad@chelsio.com>
L:	netdev@vger.kernel.org
W:	http://www.chelsio.com
S:	Supported
+5 −13
Original line number Diff line number Diff line
@@ -1156,15 +1156,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
	if (!vs)
		goto drop;

	/* If the NIC driver gave us an encapsulated packet
	 * with the encapsulation mark, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
	    !skb->encapsulation)
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;
	skb_pop_rcv_encapsulation(skb);

	vs->rcv(vs, skb, vxh->vx_vni);
	return 0;
@@ -1201,6 +1193,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
@@ -2247,9 +2240,9 @@ static void vxlan_setup(struct net_device *dev)
	eth_hw_addr_random(dev);
	ether_setup(dev);
	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
		dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
	else
		dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
@@ -2646,8 +2639,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
		dev->needed_headroom = lowerdev->hard_header_len +
				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
	} else if (use_ipv6)
		vxlan->flags |= VXLAN_F_IPV6;
+1 −0
Original line number Diff line number Diff line
@@ -117,6 +117,7 @@ enum {
#define NETIF_F_GSO_IPIP	__NETIF_F(GSO_IPIP)
#define NETIF_F_GSO_SIT		__NETIF_F(GSO_SIT)
#define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
#define NETIF_F_GSO_MPLS	__NETIF_F(GSO_MPLS)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
+7 −0
Original line number Diff line number Diff line
@@ -3305,6 +3305,13 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPIP    != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SIT     != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_MPLS    != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}
+18 −5
Original line number Diff line number Diff line
@@ -338,17 +338,18 @@ enum {

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_IPIP = 1 << 7,
	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_SIT = 1 << 8,
	SKB_GSO_IPIP = 1 << 8,

	SKB_GSO_UDP_TUNNEL = 1 << 9,
	SKB_GSO_SIT = 1 << 9,

	SKB_GSO_MPLS = 1 << 10,
	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_GRE_CSUM = 1 << 12,
	SKB_GSO_MPLS = 1 << 12,

};

#if BITS_PER_LONG > 32
@@ -1853,6 +1854,18 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/* Strip one level of receive encapsulation state from @skb.
 *
 * A CHECKSUM_UNNECESSARY verdict is only trusted past this point when
 * the driver marked it as covering the encapsulated (inner) packet by
 * setting skb->encapsulation; otherwise the upper layers must verify
 * the checksum themselves.
 */
static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
{
	if (!skb->encapsulation && skb->ip_summed == CHECKSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_NONE;

	/* Encapsulation mark and any cached checksum validity are
	 * consumed here and must not leak to the inner protocol.
	 */
	skb->encapsulation = 0;
	skb->csum_valid = 0;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
Loading