
Commit 5e9965c1 authored by David S. Miller

Merge branch 'kill_rtcache'

The ipv4 routing cache is non-deterministic performance-wise, and is
subject to denial of service attacks that are reasonably easy to launch.

The routing cache works great for well-behaved traffic, and the world
was a much friendlier place when the tradeoffs that led to the routing
cache's design were considered.

What it boils down to is that the performance of the routing cache is
a product of the traffic patterns seen by a system rather than being a
product of the contents of the routing tables.  The former is
controllable by external entities.

Even for "well behaved" legitimate traffic, high volume sites can see
hit rates in the routing cache of only ~%10.

The general flow of this patch series is that first the routing cache
is removed.  We build a completely new rtable entry for every lookup
request.

Next we make some simplifications, since removing the routing cache
means several members of struct rtable are no longer necessary.

Then we need to make some amendments so that we can legally cache
pre-constructed routes in the FIB nexthops.  Firstly, we need to
invalidate routes which are hit by nexthop exceptions.  Secondly, we
have to change the semantics of rt->rt_gateway such that zero means
the destination is on-link, and non-zero means it is reached via a
gateway.
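
To make the new convention concrete, here is a minimal user-space
sketch of how a caller picks the layer-2 target under the new
semantics.  It mirrors the rt_nexthop() helper visible in the route.h
hunk further down; the names rt_model and pick_nexthop are illustrative
stand-ins, not kernel identifiers:

	#include <stdint.h>

	typedef uint32_t be32;		/* stand-in for the kernel's __be32 */

	struct rt_model {		/* simplified stand-in for struct rtable */
		be32 rt_gateway;	/* 0 == destination is on-link */
	};

	/* Resolve against the gateway when one is set, otherwise against
	 * the packet's own destination (the on-link case). */
	static be32 pick_nexthop(const struct rt_model *rt, be32 daddr)
	{
		return rt->rt_gateway ? rt->rt_gateway : daddr;
	}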

Now that the preparations are ready, we start caching precomputed
routes in the FIB nexthops.  Output and input routes need different
kinds of care when determining if we can legally do such caching or
not.  The details are in the commit log messages for those changes.
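
As a rough illustration of the caching idea (not the actual kernel
code), the output side boils down to a check-then-build pattern against
the new nh_rth_output pointer.  Only the field name nh_rth_output comes
from the ip_fib.h hunk below; the structs, the genid-based validity
test, and the allocation are simplified stand-ins:

	#include <stdlib.h>

	struct cached_rt {			/* stand-in for struct rtable */
		int genid;			/* snapshot of the FIB generation */
	};

	struct nexthop {			/* stand-in for struct fib_nh */
		struct cached_rt *nh_rth_output;	/* precomputed output route */
	};

	static int fib_genid;	/* bumped whenever the routing tables change */

	static struct cached_rt *output_route_lookup(struct nexthop *nh)
	{
		struct cached_rt *rt = nh->nh_rth_output;

		if (rt && rt->genid == fib_genid)
			return rt;	/* still valid: reuse it, no allocation */

		/* Stale or missing: build a fresh entry and cache it on
		 * the nexthop.  A real implementation would also release
		 * the old entry; the sketch keeps it minimal. */
		rt = calloc(1, sizeof(*rt));
		if (rt)
			rt->genid = fib_genid;
		nh->nh_rth_output = rt;
		return rt;
	}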

The patch series then winds down with some more struct rtable
simplifications and other tidy-ups that remove unnecessary overhead.

On a SPARC-T3, output route lookups take ~876 cycles.  Input route
lookups take ~1169 cycles with rpfilter disabled, and ~1468 cycles
with rpfilter enabled.

These measurements were taken with the kbench_mod test module in the
net_test_tools GIT tree:

git://git.kernel.org/pub/scm/linux/kernel/git/davem/net_test_tools.git



That GIT tree also includes a udpflood tester tool, which stresses
route lookups on packet output.

For example, on the same SPARC-T3 system we can run:

	time ./udpflood -l 10000000 10.2.2.11

with routing cache:
real    1m21.955s       user    0m6.530s        sys     1m15.390s

without routing cache:
real    1m31.678s       user    0m6.520s        sys     1m25.140s
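
In other words, dropping the cache costs roughly 12% in wall-clock
time (and about 13% in system time) on this particular flood test.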

Performance can undoubtedly be improved further.

For example, fib_table_lookup() performs a lot of excess computation
in all its masking and shifting, some of it conditionalized to deal
with edge cases.
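
For readers unfamiliar with the trie lookup, the per-node work being
referred to is roughly the following kind of bit extraction.  This is
a user-space sketch of the general technique, not the kernel's actual
fib_table_lookup() code:

	#include <stdint.h>

	/* Pull the `bits`-wide chunk of the key starting at bit position
	 * `pos`.  Each trie node visited during a lookup does a
	 * computation of this flavour, plus conditional handling of
	 * prefix mismatches. */
	static inline uint32_t extract_bits(uint32_t key, unsigned int pos,
					    unsigned int bits)
	{
		return (key >> pos) & ((1u << bits) - 1u);	/* assumes bits < 32 */
	}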

Also, Eric's no-ref optimization for input route lookups can be
reinstated for the FIB nexthop caching code path.  I would be really
pleased if someone would work on that.

In fact anyone suitably motivated can just fire up perf on the loading
of the kbench_mod benchmark kernel module from net_test_tools.  I
spent much of my time doing:

bash# perf record insmod ./kbench_mod.ko dst=172.30.42.22 src=74.128.0.1 iif=2
bash# perf report

Thanks to helpful feedback from Joe Perches, Eric Dumazet, Ben
Hutchings, and others.

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3ba97381 2860583f
+14 −1
@@ -65,7 +65,20 @@ struct dst_entry {
	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
@@ -359,7 +372,7 @@ extern struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 1)
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
+0 −1
@@ -21,7 +21,6 @@ struct flowi_common {
	__u8	flowic_flags;
#define FLOWI_FLAG_ANYSRC		0x01
#define FLOWI_FLAG_CAN_SLEEP		0x02
#define FLOWI_FLAG_RT_NOCACHE		0x04
	__u32	flowic_secid;
};

+1 −2
@@ -250,8 +250,7 @@ extern int inet_csk_get_port(struct sock *sk, unsigned short snum);

extern struct dst_entry* inet_csk_route_req(struct sock *sk,
					    struct flowi4 *fl4,
					    const struct request_sock *req,
					    bool nocache);
					    const struct request_sock *req);
extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk,
						   struct sock *newsk,
						   const struct request_sock *req);
+3 −0
@@ -46,6 +46,7 @@ struct fib_config {
 };

struct fib_info;
struct rtable;

struct fib_nh_exception {
	struct fib_nh_exception __rcu	*fnhe_next;
@@ -80,6 +81,8 @@ struct fib_nh {
	__be32			nh_gw;
	__be32			nh_saddr;
	int			nh_saddr_genid;
	struct rtable		*nh_rth_output;
	struct rtable		*nh_rth_input;
	struct fnhe_hash_bucket	*nh_exceptions;
};

+12 −28
@@ -44,38 +44,35 @@ struct fib_info;
struct rtable {
	struct dst_entry	dst;

	/* Lookup key. */
	__be32			rt_key_dst;
	__be32			rt_key_src;

	int			rt_genid;
	unsigned int		rt_flags;
	__u16			rt_type;
	__u8			rt_key_tos;
	__u16			rt_is_input;

	__be32			rt_dst;	/* Path destination	*/
	__be32			rt_src;	/* Path source		*/
	int			rt_route_iif;
	int			rt_iif;
	int			rt_oif;
	__u32			rt_mark;

	/* Info on neighbour */
	__be32			rt_gateway;

	/* Miscellaneous cached information */
	u32			rt_pmtu;
	struct fib_info		*fi; /* for client ref to shared metrics */
};

static inline bool rt_is_input_route(const struct rtable *rt)
{
	return rt->rt_route_iif != 0;
	return rt->rt_is_input != 0;
}

static inline bool rt_is_output_route(const struct rtable *rt)
{
	return rt->rt_route_iif == 0;
	return rt->rt_is_input == 0;
}

static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
{
	if (rt->rt_gateway)
		return rt->rt_gateway;
	return daddr;
}

struct ip_rt_acct {
@@ -109,7 +106,6 @@ extern struct ip_rt_acct __percpu *ip_rt_acct;
struct in_device;
extern int		ip_rt_init(void);
extern void		rt_cache_flush(struct net *net, int how);
extern void		rt_cache_flush_batch(struct net *net);
extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
					   struct sock *sk);
@@ -161,20 +157,8 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
	return ip_route_output_key(net, fl4);
}

extern int ip_route_input_common(struct sk_buff *skb, __be32 dst, __be32 src,
				 u8 tos, struct net_device *devin, bool noref);

static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
				 u8 tos, struct net_device *devin)
{
	return ip_route_input_common(skb, dst, src, tos, devin, false);
}

static inline int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
				       u8 tos, struct net_device *devin)
{
	return ip_route_input_common(skb, dst, src, tos, devin, true);
}
extern int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
			  u8 tos, struct net_device *devin);

extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
			     int oif, u32 mark, u8 protocol, int flow_flags);