Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 16c0cd07 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'net-preserve-sock-reference-when-scrubbing-the-skb'



Flavio Leitner says:

====================
net: preserve sock reference when scrubbing the skb.

The sock reference is lost when scrubbing the packet and that breaks
TSQ (TCP Small Queues) and XPS (Transmit Packet Steering) causing
performance impacts of about 50% in a single TCP stream when crossing
network namespaces.

XPS breaks because the queue mapping stored in the socket is not
available, so another random queue might be selected when the stack
needs to transmit something like a TCP ACK, or TCP Retransmissions.
That causes packet re-ordering and/or performance issues.

TSQ breaks because it orphans the packet while it is still in the
host, so packets are queued contributing to the buffer bloat problem.

Preserving the sock reference fixes both issues. The socket is
orphaned anyway in the receiving path before any relevant action,
but the transmit side needs some extra checking included in the
first patch.

The first patch will update netfilter to check if the socket
netns is local before using it.

The second patch removes the skb_orphan() from the skb_scrub_packet()
and improves the documentation.

ChangeLog:
- split into two (Eric)
- addressed Paolo's offline feedback to swap the checks in xt_socket.c
  to preserve original behavior.
- improved ip-sysctl.txt (reported by Cong)
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 003504a2 9c4c3252
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -733,11 +733,11 @@ tcp_limit_output_bytes - INTEGER
	Controls TCP Small Queue limit per tcp socket.
	TCP bulk sender tends to increase packets in flight until it
	gets losses notifications. With SNDBUF autotuning, this can
	result in a large amount of packets queued in qdisc/device
	on the local machine, hurting latency of other flows, for
	typical pfifo_fast qdiscs.
	tcp_limit_output_bytes limits the number of bytes on qdisc
	or device to reduce artificial RTT/cwnd and reduce bufferbloat.
	result in a large amount of packets queued on the local machine
	(e.g.: qdiscs, CPU backlog, or device) hurting latency of other
	flows, for typical pfifo_fast qdiscs.  tcp_limit_output_bytes
	limits the number of bytes on qdisc or device to reduce artificial
	RTT/cwnd and reduce bufferbloat.
	Default: 262144

tcp_challenge_ack_limit - INTEGER
+2 −1
Original line number Diff line number Diff line
@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
			   u8 proto, int fragment, unsigned int offset,
			   unsigned int logflags);
void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
			    struct sock *sk);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
			       unsigned int hooknum, const struct sk_buff *skb,
			       const struct net_device *in,
+0 −1
Original line number Diff line number Diff line
@@ -4911,7 +4911,6 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
		return;

	ipvs_reset(skb);
	skb_orphan(skb);
	skb->mark = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
+4 −4
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@ static const struct nf_loginfo default_loginfo = {
};

/* One level of recursion won't kill us */
static void dump_ipv4_packet(struct nf_log_buf *m,
static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
			     const struct nf_loginfo *info,
			     const struct sk_buff *skb, unsigned int iphoff)
{
@@ -183,7 +183,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
			/* Max length: 3+maxlen */
			if (!iphoff) { /* Only recurse once. */
				nf_log_buf_add(m, "[");
				dump_ipv4_packet(m, info, skb,
				dump_ipv4_packet(net, m, info, skb,
					    iphoff + ih->ihl*4+sizeof(_icmph));
				nf_log_buf_add(m, "] ");
			}
@@ -251,7 +251,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,

	/* Max length: 15 "UID=4294967295 " */
	if ((logflags & NF_LOG_UID) && !iphoff)
		nf_log_dump_sk_uid_gid(m, skb->sk);
		nf_log_dump_sk_uid_gid(net, m, skb->sk);

	/* Max length: 16 "MARK=0xFFFFFFFF " */
	if (!iphoff && skb->mark)
@@ -333,7 +333,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
	if (in != NULL)
		dump_ipv4_mac_header(m, loginfo, skb);

	dump_ipv4_packet(m, loginfo, skb, 0);
	dump_ipv4_packet(net, m, loginfo, skb, 0);

	nf_log_buf_close(m);
}
+4 −4
Original line number Diff line number Diff line
@@ -36,7 +36,7 @@ static const struct nf_loginfo default_loginfo = {
};

/* One level of recursion won't kill us */
static void dump_ipv6_packet(struct nf_log_buf *m,
static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
			     const struct nf_loginfo *info,
			     const struct sk_buff *skb, unsigned int ip6hoff,
			     int recurse)
@@ -258,7 +258,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
			/* Max length: 3+maxlen */
			if (recurse) {
				nf_log_buf_add(m, "[");
				dump_ipv6_packet(m, info, skb,
				dump_ipv6_packet(net, m, info, skb,
						 ptr + sizeof(_icmp6h), 0);
				nf_log_buf_add(m, "] ");
			}
@@ -278,7 +278,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,

	/* Max length: 15 "UID=4294967295 " */
	if ((logflags & NF_LOG_UID) && recurse)
		nf_log_dump_sk_uid_gid(m, skb->sk);
		nf_log_dump_sk_uid_gid(net, m, skb->sk);

	/* Max length: 16 "MARK=0xFFFFFFFF " */
	if (recurse && skb->mark)
@@ -365,7 +365,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
	if (in != NULL)
		dump_ipv6_mac_header(m, loginfo, skb);

	dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
	dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1);

	nf_log_buf_close(m);
}
Loading