Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 02803b1c authored by Steffen Klassert, committed by Greg Kroah-Hartman
Browse files

UPSTREAM: net: Support GRO/GSO fraglist chaining.



This patch adds the core functions to chain/unchain
GSO skbs at the frag_list pointer. This also adds
a new GSO type SKB_GSO_FRAGLIST and an is_flist
flag to napi_gro_cb which indicates that this
flow will be GROed by fraglist chaining.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Bug: 167707933
Change-Id: I4bb29cfcd13ccb48871070bf94ea5b87160d65d4
(cherry picked from commit 3a1296a38d0cf62bffb9a03c585cbd5dbf15d596)
Signed-off-by: Hyunsoon Kim <h10.kim@samsung.com>
parent d106e05d
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -2350,7 +2350,8 @@ struct napi_gro_cb {
	/* Number of gro_receive callbacks this packet already went through */
	u8 recursion_counter:4;

	/* 1 bit hole */
	/* GRO is done by frag_list pointer chaining. */
	u8	is_flist:1;

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;
@@ -2705,6 +2706,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
+2 −0
Original line number Diff line number Diff line
@@ -3531,6 +3531,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
				 unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
+1 −1
Original line number Diff line number Diff line
@@ -3032,7 +3032,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
+91 −0
Original line number Diff line number Diff line
@@ -3640,6 +3640,97 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
	return head_frag;
}

/* skb_segment_list - split a fraglist-GRO skb back into a chain of segments.
 * @skb:      GRO'ed skb whose frag_list holds the chained segments
 * @features: device features (consulted to decide if segments need linearizing)
 * @offset:   length of headers to replicate from @skb onto each segment
 *
 * Unchains the skbs hanging off @skb's frag_list and links them through
 * skb->next instead, copying @skb's headers onto each one, so the chain can
 * be transmitted as individual packets. On success returns @skb (with an
 * extra reference taken via skb_get()) whose ->next points to the chain and
 * ->prev to the tail. On linearization failure returns ERR_PTR(-ENOMEM)
 * after freeing the partially built chain.
 */
struct sk_buff *skb_segment_list(struct sk_buff *skb,
				 netdev_features_t features,
				 unsigned int offset)
{
	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
	unsigned int tnl_hlen = skb_tnl_header_len(skb);
	unsigned int delta_truesize = 0;	/* truesize moved off @skb */
	unsigned int delta_len = 0;		/* payload length moved off @skb */
	struct sk_buff *tail = NULL;		/* last segment linked so far */
	struct sk_buff *nskb;

	/* Expose @offset bytes of headers in front of the network header. */
	skb_push(skb, -skb_network_offset(skb) + offset);

	/* Detach the fraglist; its members become standalone segments below. */
	skb_shinfo(skb)->frag_list = NULL;

	do {
		nskb = list_skb;
		list_skb = list_skb->next;

		/* Link segments off @skb via ->next, keeping @tail updated. */
		if (!tail)
			skb->next = nskb;
		else
			tail->next = nskb;

		tail = nskb;

		/* Account the bytes that will no longer be part of @skb. */
		delta_len += nskb->len;
		delta_truesize += nskb->truesize;

		/* Make room for the headers to be copied from @skb. */
		skb_push(nskb, -skb_network_offset(nskb) + offset);

		/* NOTE(review): fraglist skbs may be shared; copying headers
		 * in place here assumes exclusive ownership of nskb — the
		 * later upstream fix clones shared skbs first. Confirm
		 * against upstream before relying on this in new code. */
		 __copy_skb_header(nskb, skb);

		/* Fix header offsets for the headroom difference, then copy
		 * @skb's headers (including any tunnel header) into nskb. */
		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
						 nskb->data - tnl_hlen,
						 offset + tnl_hlen);

		if (skb_needs_linearize(nskb, features) &&
		    __skb_linearize(nskb))
			goto err_linearize;

	} while (list_skb);

	/* @skb now carries only its own head data; shed the chained bytes. */
	skb->truesize = skb->truesize - delta_truesize;
	skb->data_len = skb->data_len - delta_len;
	skb->len = skb->len - delta_len;

	skb_gso_reset(skb);

	skb->prev = tail;

	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto err_linearize;

	/* Caller-visible contract: an extra reference is taken on @skb. */
	skb_get(skb);

	return skb;

err_linearize:
	/* Free the segments already linked via ->next, then fail. */
	kfree_skb_list(skb->next);
	skb->next = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(skb_segment_list);

/* skb_gro_receive_list - merge @skb into @p by frag_list chaining.
 * @p:   head skb of the GRO flow
 * @skb: newly received skb belonging to the same flow
 *
 * Appends @skb to @p's frag_list (via NAPI_GRO_CB(p)->last) and updates
 * @p's length/truesize accounting. Returns 0 on success, or -E2BIG when
 * the merged packet would reach 64KB.
 */
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	struct sk_buff *last = NAPI_GRO_CB(p)->last;

	/* Never let the aggregate reach the 16-bit IP length limit. */
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	/* First merged skb starts the frag_list; later ones extend it. */
	if (last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		last->next = skb;

	/* Drop the headers GRO has already matched against @p. */
	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;

	/* Fold @skb's payload into @p's accounting. */
	p->data_len += skb->len;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive_list);

/**
 *	skb_segment - Perform protocol segmentation on skb.
 *	@head_skb: buffer to segment