Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2472186f authored by David S. Miller
Browse files

Merge branch 'tcp-listener-fixes-and-improvement'



Eric Dumazet says:

====================
tcp: lockless listener fixes and improvement

This fixes issues with TCP FastOpen vs lockless listeners,
and SYNACK being attached to request sockets.

Then, last patch brings performance improvement for
syncookies generation and validation.

Tested under a 4.3 Mpps SYNFLOOD attack, the new perf profile looks like:
    12.11%  [kernel]  [k] sha_transform
     5.83%  [kernel]  [k] tcp_conn_request
     4.59%  [kernel]  [k] __inet_lookup_listener
     4.11%  [kernel]  [k] ipt_do_table
     3.91%  [kernel]  [k] tcp_make_synack
     3.05%  [kernel]  [k] fib_table_lookup
     2.74%  [kernel]  [k] sock_wfree
     2.66%  [kernel]  [k] memcpy_erms
     2.12%  [kernel]  [k] tcp_v4_rcv
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3e087caa a1a5344d
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -266,7 +266,7 @@ struct tcp6_timewait_sock {
#if IS_ENABLED(CONFIG_IPV6)
static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
{
	return inet_sk(__sk)->pinet6;
	return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
}

static inline struct raw6_sock *raw6_sk(const struct sock *sk)
+2 −1
Original line number Diff line number Diff line
@@ -245,7 +245,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr,
}

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener);
				      struct sock *sk_listener,
				      bool attach_listener);

static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
+6 −3
Original line number Diff line number Diff line
@@ -323,12 +323,15 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,

static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
{
	if (!skb->sk || ip_sk_use_pmtu(skb->sk)) {
	struct sock *sk = skb->sk;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	} else {
		return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
	}

	return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
}

u32 ip_idents_reserve(u32 hash, int segs);
+9 −3
Original line number Diff line number Diff line
@@ -80,7 +80,8 @@ static inline struct sock *req_to_sk(struct request_sock *req)
}

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

@@ -88,10 +89,15 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)

	if (req) {
		req->rsk_ops = ops;
		if (attach_listener) {
			sock_hold(sk_listener);
			req->rsk_listener = sk_listener;
		} else {
			req->rsk_listener = NULL;
		}
		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
		sk_node_init(&req_to_sk(req)->sk_node);
		sk_tx_queue_clear(req_to_sk(req));
		req->saved_syn = NULL;
		/* Following is temporary. It is coupled with debugging
		 * helpers in reqsk_put() & reqsk_free()
+1 −0
Original line number Diff line number Diff line
@@ -2974,6 +2974,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

Loading