Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 119768c9 authored by David S. Miller
Browse files

Merge branch 'tcp-ECN-quickack'



Eric Dumazet says:

====================
tcp: reduce quickack pressure for ECN

Small patch series changing TCP behavior vs quickack and ECN

First patch is a refactoring, adding parameter to tcp_incr_quickack()
and tcp_enter_quickack_mode() helpers.

Second patch implements the change, lowering number of ACK packets
sent after an ECN event.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 290aa0ad 522040ea
Loading
Loading
Loading
Loading
+13 −11
Original line number Original line Diff line number Diff line
@@ -203,21 +203,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
	}
	}
}
}


static void tcp_incr_quickack(struct sock *sk)
static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
{
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);


	if (quickacks == 0)
	if (quickacks == 0)
		quickacks = 2;
		quickacks = 2;
	quickacks = min(quickacks, max_quickacks);
	if (quickacks > icsk->icsk_ack.quick)
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
		icsk->icsk_ack.quick = quickacks;
}
}


static void tcp_enter_quickack_mode(struct sock *sk)
static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
{
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);

	tcp_incr_quickack(sk, max_quickacks);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}
}
@@ -261,7 +263,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
		 * it is probably a retransmit.
		 * it is probably a retransmit.
		 */
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
			tcp_enter_quickack_mode((struct sock *)tp, 1);
		break;
		break;
	case INET_ECN_CE:
	case INET_ECN_CE:
		if (tcp_ca_needs_ecn((struct sock *)tp))
		if (tcp_ca_needs_ecn((struct sock *)tp))
@@ -269,7 +271,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)


		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
			/* Better not delay acks, sender can have a very low cwnd */
			/* Better not delay acks, sender can have a very low cwnd */
			tcp_enter_quickack_mode((struct sock *)tp);
			tcp_enter_quickack_mode((struct sock *)tp, 1);
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		}
		}
		tp->ecn_flags |= TCP_ECN_SEEN;
		tp->ecn_flags |= TCP_ECN_SEEN;
@@ -686,7 +688,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
		/* The _first_ data packet received, initialize
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 * delayed ACK engine.
		 */
		 */
		tcp_incr_quickack(sk);
		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;
		int m = now - icsk->icsk_ack.lrcvtime;
@@ -702,7 +704,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
			/* Too long gap. Apparently sender failed to
			/* Too long gap. Apparently sender failed to
			 * restart window, so that we send ACKs quickly.
			 * restart window, so that we send ACKs quickly.
			 */
			 */
			tcp_incr_quickack(sk);
			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
			sk_mem_reclaim(sk);
			sk_mem_reclaim(sk);
		}
		}
	}
	}
@@ -4179,7 +4181,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
		tcp_enter_quickack_mode(sk);
		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);


		if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
		if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -4706,7 +4708,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);


out_of_window:
out_of_window:
		tcp_enter_quickack_mode(sk);
		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
		inet_csk_schedule_ack(sk);
		inet_csk_schedule_ack(sk);
drop:
drop:
		tcp_drop(sk, skb);
		tcp_drop(sk, skb);
@@ -5790,7 +5792,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
			 * to stand against the temptation 8)     --ANK
			 * to stand against the temptation 8)     --ANK
			 */
			 */
			inet_csk_schedule_ack(sk);
			inet_csk_schedule_ack(sk);
			tcp_enter_quickack_mode(sk);
			tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX, TCP_RTO_MAX);
						  TCP_DELACK_MAX, TCP_RTO_MAX);