
Commit 70ae7222 authored by David S. Miller

Merge branch 'inet-frags-bring-rhashtables-to-IP-defrag'



Eric Dumazet says:

====================
inet: frags: bring rhashtables to IP defrag

IP defrag processing is one of the remaining problematic layers in Linux.

It uses static hash tables of 1024 buckets, and up to 128 items per bucket.

A work queue is supposed to garbage-collect items when the host is under memory
pressure, and to perform a hash rebuild, changing the seed used in hash computations.

This work queue blocks softirqs for up to 25 ms when doing a hash rebuild,
which occurs every 5 seconds if the host is under fire.

Then there is the problem of sharing this hash table across all netns.

It is time to switch to rhashtables, and to allocate one of them per netns
to speed up netns dismantle, since this is a critical metric these days.

Lookups now use RCU, and 64-bit hosts can provision whatever amount of memory
is needed to handle the expected workloads.

v2: Addressed feedback from Herbert and Kirill
  (Use rhashtable_free_and_destroy(), and split the big patch into small units)

v3: Removed the extra add_frag_mem_limit(...) from inet_frag_create()
    Removed the refcount_inc_not_zero() call from inet_frags_free_cb(),
    as we can exploit the del_timer() return value.

v4: kbuild robot feedback about one missing static (squashed)
    Additional patches:
      inet: frags: do not clone skb in ip_expire()
      ipv6: frags: rewrite ip6_expire_frag_queue()
      rhashtable: reorganize struct rhashtable layout
      inet: frags: reorganize struct netns_frags
      inet: frags: get rid of ipfrag_skb_cb/FRAG_CB
      ipv6: frags: get rid of ip6frag_skb_cb/FRAG6_CB
      inet: frags: get rid of nf_ct_frag6_skb_cb/NFCT_FRAG6_CB
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5749d6af f2d1c724
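
For readers who have not used the rhashtable API this series builds on, the pattern the cover letter describes (one resizable hash table per namespace, initialized from shared parameters, lookups under RCU with no bucket lock) looks roughly like the sketch below. The my_* names are invented for illustration; only the <linux/rhashtable.h> calls are the real API.

#include <linux/rhashtable.h>

/* Illustrative only: my_* names are made up; the rhashtable_* calls are
 * the real API this series switches IP defrag to.
 */
struct my_queue {
	struct rhash_head	node;	/* links this object into the table */
	u32			key;	/* lookup key; defrag uses v4/v6 tuples */
};

static const struct rhashtable_params my_params = {
	.head_offset		= offsetof(struct my_queue, node),
	.key_offset		= offsetof(struct my_queue, key),
	.key_len		= sizeof(u32),
	.automatic_shrinking	= true,	/* table grows and shrinks on demand */
};

struct my_netns {
	struct rhashtable	table;	/* one table per netns */
};

static int my_netns_init(struct my_netns *ns)
{
	return rhashtable_init(&ns->table, &my_params);
}

static bool my_key_is_present(struct my_netns *ns, u32 key)
{
	bool present;

	rcu_read_lock();	/* lookup needs no bucket lock, only RCU */
	present = rhashtable_lookup(&ns->table, &key, my_params) != NULL;
	rcu_read_unlock();
	return present;
}

automatic_shrinking is also what serves the netns-dismantle argument above: an idle or dying namespace does not keep a large table pinned.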
Documentation/networking/ip-sysctl.txt  +5 −8
@@ -133,14 +133,11 @@ min_adv_mss - INTEGER

 IP Fragmentation:

-ipfrag_high_thresh - INTEGER
-	Maximum memory used to reassemble IP fragments. When
-	ipfrag_high_thresh bytes of memory is allocated for this purpose,
-	the fragment handler will toss packets until ipfrag_low_thresh
-	is reached. This also serves as a maximum limit to namespaces
-	different from the initial one.
-
-ipfrag_low_thresh - INTEGER
+ipfrag_high_thresh - LONG INTEGER
+	Maximum memory used to reassemble IP fragments.
+
+ipfrag_low_thresh - LONG INTEGER
+	(Obsolete since linux-4.17)
 	Maximum memory used to reassemble IP fragments before the kernel
 	begins to remove incomplete fragment queues to free up resources.
 	The kernel still accepts new fragments for defragmentation.
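
The change from INTEGER to LONG INTEGER above is what allows 64-bit hosts to provision more than INT_MAX (2 GB) bytes for reassembly, as the cover letter promises. A small userspace sketch (not part of the patch) that reads the sysctl with a wide enough type:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/ipfrag_high_thresh", "r");
	long long thresh;	/* may exceed INT_MAX on 64-bit hosts */

	if (!f)
		return 1;
	if (fscanf(f, "%lld", &thresh) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("ipfrag_high_thresh = %lld bytes\n", thresh);
	return 0;
}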
include/linux/rhashtable.h  +4 −4
@@ -152,25 +152,25 @@ struct rhashtable_params {
 /**
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
- * @nelems: Number of elements in table
  * @key_len: Key length for hashfn
- * @p: Configuration parameters
  * @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
  * @rhlist: True if this is an rhltable
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
  * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
  */
 struct rhashtable {
 	struct bucket_table __rcu	*tbl;
-	atomic_t			nelems;
 	unsigned int			key_len;
-	struct rhashtable_params	p;
 	unsigned int			max_elems;
+	struct rhashtable_params	p;
 	bool				rhlist;
 	struct work_struct		run_work;
 	struct mutex                    mutex;
 	spinlock_t			lock;
+	atomic_t			nelems;
 };

 /**
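
The reordering above is a cacheline optimization rather than a functional change: tbl, key_len, max_elems and p are read on every lookup, while nelems is dirtied on every insert and removal, so moving nelems last keeps the write-hot counter off the read-mostly cachelines. A userspace sketch (types simplified, field names kept) that makes the split visible:

#include <stdio.h>
#include <stddef.h>

struct rhashtable_sketch {
	void		*tbl;		/* read-mostly: read on every lookup */
	unsigned int	key_len;	/* read-mostly */
	unsigned int	max_elems;	/* read-mostly */
	/* ... configuration, also read-mostly ... */
	int		nelems;		/* write-hot: bumped on insert/delete */
};

int main(void)
{
	/* nelems now lives at the highest offset, away from the fields
	 * every lookup touches, so concurrent writers stop bouncing the
	 * lookup cacheline between CPUs.
	 */
	printf("nelems at offset %zu of %zu\n",
	       offsetof(struct rhashtable_sketch, nelems),
	       sizeof(struct rhashtable_sketch));
	return 0;
}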
include/linux/skbuff.h  +1 −0
@@ -672,6 +672,7 @@ struct sk_buff {
 				 * UDP receive path is one user.
 				 */
 				unsigned long		dev_scratch;
+				int			ip_defrag_offset;
 			};
 		};
 		struct rb_node	rbnode; /* used in netem & tcp stack */
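
ip_defrag_offset is added inside an existing union (it shares storage with dev_scratch), so sk_buff grows by zero bytes; the field is only meaningful while the defrag queue owns the skb, which is what lets the later patches drop the ipfrag_skb_cb/FRAG_CB scratch areas. A minimal illustration of that aliasing rule, with made-up names:

#include <stdio.h>

struct buf {
	union {			/* one slot, two owners, never both */
		unsigned long	dev_scratch;	/* valid while the driver owns buf */
		int		defrag_offset;	/* valid while defrag owns buf */
	};
};

int main(void)
{
	struct buf b = { .dev_scratch = 0xabcdefUL };

	/* Taking ownership means re-initializing your member: the write
	 * below clobbers dev_scratch, exactly as in the sk_buff union.
	 */
	b.defrag_offset = 1480;
	printf("defrag_offset = %d\n", b.defrag_offset);
	return 0;
}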
include/net/inet_frag.h  +55 −71
@@ -2,14 +2,20 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__

+#include <linux/rhashtable.h>
+
 struct netns_frags {
-	/* Keep atomic mem on separate cachelines in structs that include it */
-	atomic_t		mem ____cacheline_aligned_in_smp;
 	/* sysctls */
+	long			high_thresh;
+	long			low_thresh;
 	int			timeout;
-	int			high_thresh;
-	int			low_thresh;
 	int			max_dist;
+	struct inet_frags	*f;
+
+	struct rhashtable       rhashtable ____cacheline_aligned_in_smp;
+
+	/* Keep atomic mem on separate cachelines in structs that include it */
+	atomic_long_t		mem ____cacheline_aligned_in_smp;
 };

 /**
@@ -25,12 +31,30 @@ enum {
 	INET_FRAG_COMPLETE	= BIT(2),
 };

+struct frag_v4_compare_key {
+	__be32		saddr;
+	__be32		daddr;
+	u32		user;
+	u32		vif;
+	__be16		id;
+	u16		protocol;
+};
+
+struct frag_v6_compare_key {
+	struct in6_addr	saddr;
+	struct in6_addr	daddr;
+	u32		user;
+	__be32		id;
+	u32		iif;
+};
+
 /**
  * struct inet_frag_queue - fragment queue
  *
- * @lock: spinlock protecting the queue
+ * @node: rhash node
+ * @key: keys identifying this frag.
  * @timer: queue expiration timer
- * @list: hash bucket list
+ * @lock: spinlock protecting this frag
  * @refcnt: reference count of the queue
  * @fragments: received fragments head
  * @fragments_tail: received fragments tail
@@ -40,12 +64,16 @@ enum {
  * @flags: fragment queue flags
  * @max_size: maximum received fragment size
  * @net: namespace that this frag belongs to
- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ * @rcu: rcu head for freeing deferall
  */
 struct inet_frag_queue {
-	spinlock_t		lock;
+	struct rhash_head	node;
+	union {
+		struct frag_v4_compare_key v4;
+		struct frag_v6_compare_key v6;
+	} key;
 	struct timer_list	timer;
-	struct hlist_node	list;
+	spinlock_t		lock;
 	refcount_t		refcnt;
 	struct sk_buff		*fragments;
 	struct sk_buff		*fragments_tail;
@@ -55,100 +83,56 @@ struct inet_frag_queue {
 	__u8			flags;
 	u16			max_size;
 	struct netns_frags      *net;
-	struct hlist_node	list_evictor;
-};
-
-#define INETFRAGS_HASHSZ	1024
-
-/* averaged:
- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
- *	       rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
- *	       struct frag_queue))
- */
-#define INETFRAGS_MAXDEPTH	128
-
-struct inet_frag_bucket {
-	struct hlist_head	chain;
-	spinlock_t		chain_lock;
+	struct rcu_head		rcu;
 };

 struct inet_frags {
-	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
-
-	struct work_struct	frags_work;
-	unsigned int next_bucket;
-	unsigned long last_rebuild_jiffies;
-	bool rebuild;
-
-	/* The first call to hashfn is responsible to initialize
-	 * rnd. This is best done with net_get_random_once.
-	 *
-	 * rnd_seqlock is used to let hash insertion detect
-	 * when it needs to re-lookup the hash chain to use.
-	 */
-	u32			rnd;
-	seqlock_t		rnd_seqlock;
 	unsigned int		qsize;

-	unsigned int		(*hashfn)(const struct inet_frag_queue *);
-	bool			(*match)(const struct inet_frag_queue *q,
-					 const void *arg);
 	void			(*constructor)(struct inet_frag_queue *q,
 					       const void *arg);
 	void			(*destructor)(struct inet_frag_queue *);
 	void			(*frag_expire)(struct timer_list *t);
 	struct kmem_cache	*frags_cachep;
 	const char		*frags_cache_name;
+	struct rhashtable_params rhash_params;
 };

 int inet_frags_init(struct inet_frags *);
 void inet_frags_fini(struct inet_frags *);

-static inline void inet_frags_init_net(struct netns_frags *nf)
+static inline int inet_frags_init_net(struct netns_frags *nf)
 {
-	atomic_set(&nf->mem, 0);
+	atomic_long_set(&nf->mem, 0);
+	return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
 }
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+void inet_frags_exit_net(struct netns_frags *nf);

-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
-		struct inet_frags *f, void *key, unsigned int hash);
+void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);

-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
-				   const char *prefix);
-
-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+static inline void inet_frag_put(struct inet_frag_queue *q)
 {
 	if (refcount_dec_and_test(&q->refcnt))
-		inet_frag_destroy(q, f);
-}
-
-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
-{
-	return !hlist_unhashed(&q->list_evictor);
+		inet_frag_destroy(q);
 }

 /* Memory Tracking Functions. */

-static inline int frag_mem_limit(struct netns_frags *nf)
-{
-	return atomic_read(&nf->mem);
-}
-
-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+static inline long frag_mem_limit(const struct netns_frags *nf)
 {
-	atomic_sub(i, &nf->mem);
+	return atomic_long_read(&nf->mem);
 }

-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
 {
-	atomic_add(i, &nf->mem);
+	atomic_long_sub(val, &nf->mem);
 }

-static inline int sum_frag_mem_limit(struct netns_frags *nf)
+static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
 {
-	return atomic_read(&nf->mem);
+	atomic_long_add(val, &nf->mem);
 }

 /* RFC 3168 support :
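
Taken as an API, the new header reduces to: fill a frag_v4_compare_key or frag_v6_compare_key, let inet_frag_find() look the queue up (or create it) in the per-netns rhashtable, and drop the reference with inet_frag_put(). The sketch below shows the IPv4 caller pattern this implies; it is close to, but not copied from, net/ipv4/ip_fragment.c, and the my_* names are invented.

#include <linux/ip.h>
#include <net/inet_frag.h>

/* Build the lookup key from the IPv4 header plus the (user, vif) pair
 * that distinguishes reassembly contexts, then find-or-create the queue.
 */
static struct inet_frag_queue *my_ipv4_find(struct netns_frags *nf,
					    const struct iphdr *iph,
					    u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr		= iph->saddr,
		.daddr		= iph->daddr,
		.user		= user,
		.vif		= vif,
		.id		= iph->id,
		.protocol	= iph->protocol,
	};

	/* The lookup runs under RCU inside; on success a reference is held. */
	return inet_frag_find(nf, &key);
}

static void my_ipv4_done(struct inet_frag_queue *q)
{
	inet_frag_put(q);	/* last reference frees the queue */
}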
include/net/ip.h  +0 −1
@@ -588,7 +588,6 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s
 	return skb;
 }
 #endif
-int ip_frag_mem(struct net *net);

 /*
  *	Functions provided by ip_forward.c