Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ac979929 authored by David S. Miller
Browse files

Merge branch 'ipv6-gre-offloads'



Alexander Duyck says:

====================
Add support for offloads with IPv6 GRE tunnels

This patch series enables the use of segmentation and checksum offloads
with IPv6 based GRE tunnels.

In order to enable this series I had to make a change to
iptunnel_handle_offloads so that it would no longer free the skb.  This was
necessary as there were multiple paths in the IPv6 GRE code that required
the skb to still be present so it could be freed.  As it turned out I
believe this actually fixes a bug that was present in FOU/GUE based tunnels
anyway.

Below is a quick breakdown of the performance gains seen with a simple
netperf test passing traffic through an ip6gretap tunnel and then an i40e
interface:

Throughput Throughput  Local Local   Result
           Units       CPU   Service Tag
                       Util  Demand
                       %
3544.93    10^6bits/s  6.30  4.656   "before"
13081.75   10^6bits/s  3.75  0.752   "after"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents ec9dcd35 3a80e1fa
Loading
Loading
Loading
Loading
+12 −20
Original line number Diff line number Diff line
@@ -696,16 +696,12 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
	if (unlikely(err))
		goto free_rt;
	}

	skb = udp_tunnel_handle_offloads(skb, udp_sum);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_rt;
	}

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -733,16 +729,12 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
	if (unlikely(err))
		goto free_dst;
	}

	skb = udp_tunnel_handle_offloads(skb, udp_sum);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (IS_ERR(skb))
		goto free_dst;
	}

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -937,7 +929,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
				       info->options_len, opts, flags, xnet);
		if (unlikely(err))
			goto err;
			goto tx_error;

		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
@@ -946,7 +938,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
		err = geneve_build_skb(rt, skb, 0, geneve->vni,
				       0, NULL, flags, xnet);
		if (unlikely(err))
			goto err;
			goto tx_error;

		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
		ttl = geneve->ttl;
@@ -964,7 +956,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,

tx_error:
	dev_kfree_skb(skb);
err:

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
@@ -1026,7 +1018,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
					info->options_len, opts,
					flags, xnet);
		if (unlikely(err))
			goto err;
			goto tx_error;

		prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
@@ -1035,7 +1027,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
		err = geneve6_build_skb(dst, skb, 0, geneve->vni,
					0, NULL, flags, xnet);
		if (unlikely(err))
			goto err;
			goto tx_error;

		prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
					   iip, skb);
@@ -1054,7 +1046,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,

tx_error:
	dev_kfree_skb(skb);
err:

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
+3 −3
Original line number Diff line number Diff line
@@ -1797,9 +1797,9 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
	if (WARN_ON(!skb))
		return -ENOMEM;

	skb = iptunnel_handle_offloads(skb, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	err = iptunnel_handle_offloads(skb, type);
	if (err)
		goto out_free;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
+1 −1
Original line number Diff line number Diff line
@@ -309,7 +309,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags);

struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
+1 −2
Original line number Diff line number Diff line
@@ -105,8 +105,7 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    __be16 flags, __be64 tunnel_id,
				    int md_size);

static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
							 bool udp_csum)
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
	int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

+8 −8
Original line number Diff line number Diff line
@@ -802,11 +802,11 @@ int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	skb = iptunnel_handle_offloads(skb, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);
	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
@@ -826,6 +826,7 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
	__be16 sport;
	void *data;
	bool need_priv = false;
	int err;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -836,10 +837,9 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);
	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
Loading