
Commit 7aa5470c authored by Eric Dumazet, committed by David S. Miller

tcp: tsq: move tsq_flags close to sk_wmem_alloc



Keeping tsq_flags in the same cache line as sk_wmem_alloc
makes a lot of sense: both fields are changed from tcp_wfree()
and, more generally, by various TSQ-related functions.

A prior patch made room in struct sock and added sk_tsq_flags;
this patch deletes tsq_flags from struct tcp_sock.
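
The cache-line claim can be sanity-checked by comparing field offsets. Below is a minimal userspace sketch, not kernel code: struct sock_layout, its padding, and the 64-byte line size are assumptions standing in for the real struct sock layout (which is normally inspected with a tool such as pahole).

#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE_SIZE	64UL	/* assumed line size; typical on x86-64 */

struct sock_layout {			/* hypothetical stand-in for struct sock */
	char		pad[128];	/* earlier fields; size is made up */
	unsigned long	sk_tsq_flags;	/* the field the prior patch added */
	int		sk_wmem_alloc;	/* write-queue memory counter */
};

int main(void)
{
	size_t a = offsetof(struct sock_layout, sk_tsq_flags);
	size_t b = offsetof(struct sock_layout, sk_wmem_alloc);

	/* Two fields share a cache line iff their offsets fall into the
	 * same CACHE_LINE_SIZE-sized slot.
	 */
	printf("same cache line: %s\n",
	       a / CACHE_LINE_SIZE == b / CACHE_LINE_SIZE ? "yes" : "no");
	return 0;
}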

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9115e8cd
include/linux/tcp.h  +0 −1
@@ -186,7 +186,6 @@ struct tcp_sock {
 	u32	tsoffset;	/* timestamp offset */
 
 	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
-	unsigned long	tsq_flags;
 
 	/* Data for direct copy to user */
 	struct {
net/ipv4/tcp.c  +2 −2
@@ -663,9 +663,9 @@ static void tcp_push(struct sock *sk, int flags, int mss_now,
 	if (tcp_should_autocork(sk, skb, size_goal)) {
 
 		/* avoid atomic op if TSQ_THROTTLED bit is already set */
-		if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
+		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
-			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		}
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED.
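
The hunk above preserves a common "test before atomic set" pattern: a plain test_bit() read skips the locked read-modify-write implied by set_bit() when the bit is already set. A standalone sketch of the same idea using C11 atomics follows; the flag name and word are made up for illustration, not kernel API.

#include <stdatomic.h>

#define THROTTLED	(1UL << 0)	/* hypothetical flag bit */

static _Atomic unsigned long flags;

static void mark_throttled(void)
{
	/* Cheap relaxed read first: if the bit is already set, avoid
	 * the atomic read-modify-write entirely.
	 */
	if (!(atomic_load_explicit(&flags, memory_order_relaxed) & THROTTLED))
		atomic_fetch_or(&flags, THROTTLED);
}

int main(void)
{
	mark_throttled();	/* first call performs the atomic OR */
	mark_throttled();	/* second call sees the bit and skips it */
	return 0;
}
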
net/ipv4/tcp_ipv4.c  +1 −1
@@ -443,7 +443,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 			if (!sock_owned_by_user(sk)) {
 				tcp_v4_mtu_reduced(sk);
 			} else {
-				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
+				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 					sock_hold(sk);
 			}
 			goto out;
net/ipv4/tcp_output.c  +11 −13
@@ -767,14 +767,15 @@ static void tcp_tasklet_func(unsigned long data)
 	list_for_each_safe(q, n, &list) {
 		tp = list_entry(q, struct tcp_sock, tsq_node);
 		list_del(&tp->tsq_node);
-		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
 
 		sk = (struct sock *)tp;
+		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
+
 		if (!sk->sk_lock.owned &&
-		    test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) {
+		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk)) {
-				clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 				tcp_tsq_handler(sk);
 			}
 			bh_unlock_sock(sk);
@@ -797,16 +798,15 @@ static void tcp_tasklet_func(unsigned long data)
  */
 void tcp_release_cb(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned long flags, nflags;
 
 	/* perform an atomic operation only if at least one flag is set */
 	do {
-		flags = tp->tsq_flags;
+		flags = sk->sk_tsq_flags;
 		if (!(flags & TCP_DEFERRED_ALL))
 			return;
 		nflags = flags & ~TCP_DEFERRED_ALL;
-	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
 
 	if (flags & TCPF_TSQ_DEFERRED)
 		tcp_tsq_handler(sk);
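
tcp_release_cb() above relies on a classic compare-and-swap loop: snapshot the flag word, return cheaply if no deferred bit is set, otherwise clear all deferred bits in a single atomic step so each deferred event is handled exactly once. Below is a self-contained userspace sketch of the same pattern with C11 atomics; FLAG_A, FLAG_B and DEFERRED_ALL are made-up stand-ins for the kernel's TCPF_* bits.

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_A		(1UL << 0)
#define FLAG_B		(1UL << 1)
#define DEFERRED_ALL	(FLAG_A | FLAG_B)

static _Atomic unsigned long tsq_flags = FLAG_A;

int main(void)
{
	unsigned long flags, nflags;

	do {
		flags = atomic_load(&tsq_flags);
		if (!(flags & DEFERRED_ALL))
			return 0;	/* fast path: nothing deferred */
		nflags = flags & ~DEFERRED_ALL;
		/* Retry if another thread changed the word meanwhile. */
	} while (!atomic_compare_exchange_weak(&tsq_flags, &flags, nflags));

	if (flags & FLAG_A)
		printf("handling deferred work A\n");
	if (flags & FLAG_B)
		printf("handling deferred work B\n");
	return 0;
}
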
@@ -878,7 +878,7 @@ void tcp_wfree(struct sk_buff *skb)
 	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 		goto out;
 
-	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
+	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
 		bool empty;
 
@@ -886,7 +886,7 @@ void tcp_wfree(struct sk_buff *skb)
 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
 			goto out;
 
 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
-		nval = cmpxchg(&tp->tsq_flags, oval, nval);
+		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
 		if (nval != oval)
 			continue;
@@ -2100,7 +2100,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 		    skb->prev == sk->sk_write_queue.next)
 			return false;
 
-		set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED, so we must
 		 * test again the condition.
@@ -2241,8 +2241,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
-		if (test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags))
-			clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+		if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
+			clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 		if (tcp_small_queue_check(sk, skb, 0))
 			break;
 
@@ -3545,8 +3545,6 @@ void tcp_send_ack(struct sock *sk)
 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
 	 * too much.
 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
-	 * We also avoid tcp_wfree() overhead (cache line miss accessing
-	 * tp->tsq_flags) by using regular sock_wfree()
 	 */
 	skb_set_tcp_pure_ack(buff);
 
net/ipv4/tcp_timer.c  +2 −2
@@ -310,7 +310,7 @@ static void tcp_delack_timer(unsigned long data)
 		inet_csk(sk)->icsk_ack.blocked = 1;
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* deleguate our work to tcp_release_cb() */
-		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
@@ -592,7 +592,7 @@ static void tcp_write_timer(unsigned long data)
 		tcp_write_timer_handler(sk);
 	} else {
 		/* delegate our work to tcp_release_cb() */
-		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);