
Commit d4546c25 authored by David Miller, committed by David S. Miller

net: Convert GRO SKB handling to list_head.



Manage pending per-NAPI GRO packets via list_head.

Return an SKB pointer from the GRO receive handlers.  When GRO receive
handlers return non-NULL, it means that this SKB needs to be completed
at this time and removed from the NAPI queue.

Several operations are greatly simplified by this transformation,
especially timing out the oldest SKB in the list when gro_count
exceeds MAX_GRO_SKBS, and napi_gro_flush() which walks the queue
in reverse order.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9ff3b40e
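
For illustration only (this sketch is not part of the commit): after the conversion, a gro_receive handler takes the per-NAPI queue as a struct list_head, walks the pending packets with list_for_each_entry(), and returns a non-NULL SKB when that SKB should be completed and unlinked from the queue. The handler name and the header-matching placeholder below are hypothetical; the shape mirrors the geneve and vxlan hunks that follow.

	static struct sk_buff *example_gro_receive(struct list_head *head,
						   struct sk_buff *skb)
	{
		struct sk_buff *pp = NULL;	/* non-NULL => complete this SKB now */
		struct sk_buff *p;

		list_for_each_entry(p, head, list) {
			if (!NAPI_GRO_CB(p)->same_flow)
				continue;
			/* compare this protocol's headers here; on a mismatch,
			 * clear NAPI_GRO_CB(p)->same_flow and keep scanning */
		}

		/* a lower-layer handler (e.g. via call_gro_receive()) would run
		 * here and may hand back an SKB that needs completing */
		return pp;
	}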
drivers/net/geneve.c  +6 −5
@@ -418,11 +418,12 @@ static int geneve_hlen(struct genevehdr *gh)
 	return sizeof(*gh) + gh->opt_len * 4;
 }
 
-static struct sk_buff **geneve_gro_receive(struct sock *sk,
-					   struct sk_buff **head,
-					   struct sk_buff *skb)
+static struct sk_buff *geneve_gro_receive(struct sock *sk,
+					  struct list_head *head,
+					  struct sk_buff *skb)
 {
-	struct sk_buff *p, **pp = NULL;
+	struct sk_buff *pp = NULL;
+	struct sk_buff *p;
 	struct genevehdr *gh, *gh2;
 	unsigned int hlen, gh_len, off_gnv;
 	const struct packet_offload *ptype;
@@ -449,7 +450,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
 			goto out;
 	}
 
-	for (p = *head; p; p = p->next) {
+	list_for_each_entry(p, head, list) {
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
 
drivers/net/vxlan.c  +6 −5
@@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
 	return vh;
 }
 
-static struct sk_buff **vxlan_gro_receive(struct sock *sk,
-					  struct sk_buff **head,
-					  struct sk_buff *skb)
+static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+					 struct list_head *head,
+					 struct sk_buff *skb)
 {
-	struct sk_buff *p, **pp = NULL;
+	struct sk_buff *pp = NULL;
+	struct sk_buff *p;
 	struct vxlanhdr *vh, *vh2;
 	unsigned int hlen, off_vx;
 	int flush = 1;
@@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
 
 	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 
-	for (p = *head; p; p = p->next) {
+	list_for_each_entry(p, head, list) {
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
 
include/linux/etherdevice.h  +1 −2
@@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
 					   unsigned int rxqs);
 #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
 
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
-				 struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
 int eth_gro_complete(struct sk_buff *skb, int nhoff);
 
 /* Reserved Ethernet Addresses per IEEE 802.1Q */
include/linux/netdevice.h  +16 −16
@@ -322,7 +322,7 @@ struct napi_struct {
 	int			poll_owner;
 #endif
 	struct net_device	*dev;
-	struct sk_buff		*gro_list;
+	struct list_head	gro_list;
 	struct sk_buff		*skb;
 	struct hrtimer		timer;
 	struct list_head	dev_list;
@@ -2255,9 +2255,9 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
 	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
 }
 
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
-						struct sk_buff **head,
-						struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+					       struct list_head *head,
+					       struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
@@ -2268,11 +2268,11 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
 	return cb(head, skb);
 }
 
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
-					     struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
-						   struct sock *sk,
-						   struct sk_buff **head,
-						   struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+					    struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+						  struct sock *sk,
+						  struct list_head *head,
+						  struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
@@ -2299,7 +2299,7 @@ struct packet_type {
 struct offload_callbacks {
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 						netdev_features_t features);
-	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
-						 struct sk_buff *skb);
+	struct sk_buff		*(*gro_receive)(struct list_head *head,
+						struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 };
@@ -2568,7 +2568,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -2784,13 +2784,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 }
 
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	if (PTR_ERR(pp) != -EINPROGRESS)
 		NAPI_GRO_CB(skb)->flush |= flush;
 }
 #else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	NAPI_GRO_CB(skb)->flush |= flush;
 }
include/linux/skbuff.h  +2 −1
@@ -678,6 +678,7 @@ struct sk_buff {
 			};
 		};
 		struct rb_node		rbnode; /* used in netem & tcp stack */
+		struct list_head	list;
 	};
 	struct sock		*sk;
 
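The sk_buff hunk above is what lets a packet sit directly on the per-NAPI queue: the embedded list_head shares a union with next/prev and rbnode. The core-side users in net/core/dev.c are not included in this excerpt, but as a rough, hypothetical sketch of how the commit message's "time out the oldest SKB when gro_count exceeds MAX_GRO_SKBS" case reduces to plain list primitives (assuming new SKBs are queued at the head of napi->gro_list):

	static void example_flush_oldest(struct napi_struct *napi)
	{
		/* with new arrivals at the head, the oldest SKB sits at the tail */
		struct sk_buff *oldest =
			list_last_entry(&napi->gro_list, struct sk_buff, list);

		list_del(&oldest->list);	/* unlink from the NAPI GRO queue */
		/* the real code would then flush it up the stack,
		 * e.g. via napi_gro_complete() */
	}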