Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e636f8b0 authored by Yuchung Cheng, committed by David S. Miller
Browse files

tcp: new helper for RACK to detect loss



Create a new helper tcp_rack_detect_loss to prepare the upcoming
RACK reordering timer patch.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent db8da6bb
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -1863,8 +1863,7 @@ extern int sysctl_tcp_recovery;
/* Use TCP RACK to detect (some) tail and retransmit losses */
#define TCP_RACK_LOST_RETRANS  0x1

extern int tcp_rack_mark_lost(struct sock *sk);

extern void tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp,
			     const struct skb_mstamp *xmit_time, u8 sacked);

+8 −4
Original line number Diff line number Diff line
@@ -2865,11 +2865,15 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
	}

	/* Use RACK to detect loss */
	if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
	    tcp_rack_mark_lost(sk)) {
	if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) {
		u32 prior_retrans = tp->retrans_out;

		tcp_rack_mark_lost(sk);
		if (prior_retrans > tp->retrans_out) {
			flag |= FLAG_LOST_RETRANS;
			*ack_flag |= FLAG_LOST_RETRANS;
		}
	}

	/* E. Process state. */
	switch (icsk->icsk_ca_state) {
+13 −9
Original line number Diff line number Diff line
@@ -32,17 +32,11 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
 * The current version is only used after recovery starts but can be
 * easily extended to detect the first loss.
 */
int tcp_rack_mark_lost(struct sock *sk)
static void tcp_rack_detect_loss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd, prior_retrans = tp->retrans_out;

	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
		return 0;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	u32 reo_wnd;

	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
@@ -82,7 +76,17 @@ int tcp_rack_mark_lost(struct sock *sk)
			break;
		}
	}
	return prior_retrans - tp->retrans_out;
}

/* Entry point for RACK loss marking: run detection only while the
 * connection is in (or past) recovery and the RACK state has actually
 * advanced since the last scan.
 */
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery && tp->rack.advanced) {
		/* Clear the advanced flag so we don't rescan the queue needlessly */
		tp->rack.advanced = 0;
		tcp_rack_detect_loss(sk);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets */