Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 86911732 authored by Herbert Xu's avatar Herbert Xu Committed by David S. Miller
Browse files

gro: Avoid copying headers of unmerged packets



Unfortunately simplicity isn't always the best.  The fraginfo
interface turned out to be suboptimal.  The problem was quite
obvious.  For every packet, we have to copy the headers from
the frags structure into skb->head, even though for 99% of the
packets this part is immediately thrown away after the merge.

LRO didn't have this problem because it directly read the headers
from the frags structure.

This patch attempts to address this by creating an interface
that allows GRO to access the headers in the first frag without
having to copy them.  Because all drivers that use frags place the
headers in the first frag this optimisation should be enough.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5d0d9be8
Loading
Loading
Loading
Loading
+26 −0
Original line number Diff line number Diff line
@@ -984,6 +984,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

@@ -1087,6 +1090,29 @@ extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
extern void	      *skb_gro_header(struct sk_buff *skb, unsigned int hlen);
extern int	       skb_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb);

/* Return the current GRO parse offset relative to skb->data. */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

/* Bytes of the packet not yet consumed by GRO header processing. */
static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

/*
 * Advance the GRO parse offset by @len bytes.  Unlike skb_pull() this
 * only moves the bookkeeping offset; skb->data itself is untouched.
 */
static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

/* Restart GRO header parsing at skb->data (offset zero). */
static inline void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
+0 −2
Original line number Diff line number Diff line
@@ -1687,8 +1687,6 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
				 int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
extern int	       skb_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
+2 −0
Original line number Diff line number Diff line
@@ -98,6 +98,8 @@ drop:
/*
 * GRO entry point for VLAN-tagged packets: reset the GRO parse offset
 * before handing the skb to the common VLAN GRO path.
 */
int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		     unsigned int vlan_tci, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);
+59 −11
Original line number Diff line number Diff line
@@ -215,6 +215,13 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return skb_headlen(skb) ? skb_mac_header(skb) :
	       page_address(skb_shinfo(skb)->frags[0].page) +
	       skb_shinfo(skb)->frags[0].page_offset;
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
@@ -2350,7 +2357,6 @@ static int napi_gro_complete(struct sk_buff *skb)

out:
	skb_shinfo(skb)->gso_size = 0;
	__skb_push(skb, -skb_network_offset(skb));
	return netif_receive_skb(skb);
}

@@ -2368,6 +2374,25 @@ void napi_gro_flush(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_gro_flush);

void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
{
	unsigned int offset = skb_gro_offset(skb);

	hlen += offset;
	if (hlen <= skb_headlen(skb))
		return skb->data + offset;

	if (unlikely(!skb_shinfo(skb)->nr_frags ||
		     skb_shinfo(skb)->frags[0].size <=
		     hlen - skb_headlen(skb) ||
		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;

	return page_address(skb_shinfo(skb)->frags[0].page) +
	       skb_shinfo(skb)->frags[0].page_offset + offset;
}
EXPORT_SYMBOL(skb_gro_header);

int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
@@ -2388,11 +2413,13 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		struct sk_buff *p;
		void *mac;

		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_reset_network_header(skb);
		skb_set_network_header(skb, skb_gro_offset(skb));
		mac = skb_gro_mac_header(skb);
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
@@ -2406,8 +2433,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
				continue;

			if (p->mac_len != mac_len ||
			    memcmp(skb_mac_header(p), skb_mac_header(skb),
				   mac_len))
			    memcmp(skb_mac_header(p), mac, mac_len))
				NAPI_GRO_CB(p)->same_flow = 0;
		}

@@ -2434,13 +2460,11 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) {
		__skb_push(skb, -skb_network_offset(skb));
	if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS)
		goto normal;
	}

	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb->len;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;
@@ -2488,6 +2512,8 @@ EXPORT_SYMBOL(napi_skb_finish);

/*
 * GRO entry point for drivers handing in a normal linear skb: reset the
 * GRO parse offset before running the generic receive path.
 */
int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
@@ -2506,6 +2532,7 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
{
	struct net_device *dev = napi->dev;
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;

	napi->skb = NULL;

@@ -2525,13 +2552,23 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
	skb->len += info->len;
	skb->truesize += info->len;

	if (!pskb_may_pull(skb, ETH_HLEN)) {
	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header(skb, sizeof(*eth));
	if (!eth) {
		napi_reuse_skb(napi, skb);
		skb = NULL;
		goto out;
	}

	skb->protocol = eth_type_trans(skb, dev);
	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

	skb->ip_summed = info->ip_summed;
	skb->csum = info->csum;
@@ -2544,11 +2581,22 @@ EXPORT_SYMBOL(napi_fraginfo_skb);
int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
{
	int err = NET_RX_SUCCESS;
	int may;

	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		may = pskb_may_pull(skb, skb_gro_offset(skb));
		BUG_ON(!may);

		skb->protocol = eth_type_trans(skb, napi->dev);

		if (ret == GRO_NORMAL)
			return netif_receive_skb(skb);

		skb_gro_pull(skb, -ETH_HLEN);
		break;

	case GRO_DROP:
		err = NET_RX_DROP;
		/* fall through */
+15 −8
Original line number Diff line number Diff line
@@ -2584,17 +2584,21 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
	struct sk_buff *p = *head;
	struct sk_buff *nskb;
	unsigned int headroom;
	unsigned int hlen = p->data - skb_mac_header(p);
	unsigned int len = skb->len;
	unsigned int len = skb_gro_len(skb);

	if (hlen + p->len + len >= 65536)
	if (p->len + len >= 65536)
		return -E2BIG;

	if (skb_shinfo(p)->frag_list)
		goto merge;
	else if (!skb_headlen(p) && !skb_headlen(skb) &&
		 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
	else if (skb_headlen(skb) <= skb_gro_offset(skb) &&
		 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <=
		 MAX_SKB_FRAGS) {
		skb_shinfo(skb)->frags[0].page_offset +=
			skb_gro_offset(skb) - skb_headlen(skb);
		skb_shinfo(skb)->frags[0].size -=
			skb_gro_offset(skb) - skb_headlen(skb);

		memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
		       skb_shinfo(skb)->frags,
		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
@@ -2611,7 +2615,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
	}

	headroom = skb_headroom(p);
	nskb = netdev_alloc_skb(p->dev, headroom);
	nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
	if (unlikely(!nskb))
		return -ENOMEM;

@@ -2619,12 +2623,15 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, -hlen);
	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen);
	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
	skb_shinfo(nskb)->frag_list = p;
Loading