Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9e1a21b6 authored by David S. Miller
Browse files

Merge branch 'tcp_conn_request_unification'



Octavian Purdila says:

====================
tcp: remove code duplication in tcp_v[46]_conn_request

This patch series unifies the TCPv4 and TCPv6 connection request flow
in a single new function (tcp_conn_request).

The first 3 patches are small cleanups and fixes found during the code
merge process.

The next patches add new methods in tcp_request_sock_ops to abstract
the IPv4/IPv6 operations and keep the TCP connection request flow
common.

To identify potential performance issues this patch has been tested
by measuring the connections-per-second rate with nginx and an
httperf-like client (to allow for concurrent connection requests - 256 CC were
used during testing) using the loopback interface. A dual-core i5 Ivy
Bridge processor was used and each process was bound to a different
core to make results consistent.

Results for IPv4, unit is connections per second, higher is better, 20
measurements have been collected:

		before		after
min		27917		27962
max		28262		28366
avg		28094.1		28212.75
stdev		87.35		97.26

Results for IPv6, unit is connections per second, higher is better, 20
measurements have been collected:

		before		after
min		24813		24877
max		25029		25119
avg		24935.5		25017
stdev		64.13		62.93

Changes since v1:

 * add benchmarking datapoints

 * fix a few issues in the last patch (IPv6 related)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents c1c27fb9 1fb6f159
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
@@ -256,16 +256,6 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
	return inet_sk(__sk)->pinet6;
}

/*
 * IPv6-specific request_sock allocator: allocates via reqsk_alloc() and
 * clears pktopts so a stale pointer is never mistaken for a queued skb.
 * NOTE(review): this diff removes the helper (hunk is +0 -10); callers
 * switch to the generic inet_reqsk_alloc() — see the dccp_v6 hunk below.
 */
static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
{
	struct request_sock *req = reqsk_alloc(ops);

	if (req)
		inet_rsk(req)->pktopts = NULL;

	return req;
}

static inline struct raw6_sock *raw6_sk(const struct sock *sk)
{
	return (struct raw6_sock *)sk;
+0 −3
Original line number Diff line number Diff line
@@ -111,10 +111,7 @@ struct tcp_request_sock_ops;

struct tcp_request_sock {
	struct inet_request_sock 	req;
#ifdef CONFIG_TCP_MD5SIG
	/* Only used by TCP MD5 Signature so far. */
	const struct tcp_request_sock_ops *af_specific;
#endif
	struct sock			*listener; /* needed for TFO */
	u32				rcv_isn;
	u32				snt_isn;
+4 −2
Original line number Diff line number Diff line
@@ -88,8 +88,10 @@ struct inet_request_sock {
				acked	   : 1,
				no_srccheck: 1;
	kmemcheck_bitfield_end(flags);
	union {
		struct ip_options_rcu	*opt;
		struct sk_buff		*pktopts;
	};
	u32                     ir_mark;
};

+39 −15
Original line number Diff line number Diff line
@@ -493,14 +493,8 @@ static inline u32 tcp_cookie_time(void)

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
#else
/* Stub for the !CONFIG_SYN_COOKIES branch: no IPv4 syncookie sequence
 * number is ever generated, so always return 0.  (Shown here as part of
 * the removed lines of this hunk.) */
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
			      __u16 *mss);
#endif

__u32 cookie_init_timestamp(struct request_sock *req);
@@ -516,13 +510,6 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
			      __u16 *mss);
#else
/* Stub for the !CONFIG_SYN_COOKIES branch: no IPv6 syncookie sequence
 * number is ever generated, so always return 0.  (Shown here as part of
 * the removed lines of this hunk.) */
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif
/* tcp_output.c */

@@ -1586,6 +1573,11 @@ int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
@@ -1603,6 +1595,7 @@ struct tcp_sock_af_ops {
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
@@ -1612,8 +1605,39 @@ struct tcp_request_sock_ops {
						  const struct request_sock *req,
						  const struct sk_buff *skb);
#endif
	void (*init_req)(struct request_sock *req, struct sock *sk,
			 struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
				       const struct request_sock *req,
				       bool *strict);
	__u32 (*init_seq)(const struct sk_buff *skb);
	int (*send_synack)(struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   u16 queue_mapping, struct tcp_fastopen_cookie *foc);
	void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
			       const unsigned long timeout);
};

#ifdef CONFIG_SYN_COOKIES
/*
 * Generate a syncookie initial sequence number through the address-family
 * specific cookie_init_seq hook of tcp_request_sock_ops, so the common
 * tcp_conn_request() path (added by this series) need not know whether it
 * is serving IPv4 or IPv6.
 */
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return ops->cookie_init_seq(sk, skb, mss);
}
#else
/* Syncookies compiled out: callers always get 0 and the ops hook is unused. */
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

int tcpv4_offload_init(void);

void tcp_v4_init(void);
+1 −1
Original line number Diff line number Diff line
@@ -386,7 +386,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
	req = inet_reqsk_alloc(&dccp6_request_sock_ops);
	if (req == NULL)
		goto drop;

Loading