Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 59ce170d authored by Kuniyuki Iwashima's avatar Kuniyuki Iwashima Committed by Greg Kroah-Hartman
Browse files

tcp: Fix data-races around sysctl_tcp_reordering.



[ Upstream commit 46778cd16e6a5ad1b2e3a91f6c057c907379418e ]

While reading sysctl_tcp_reordering, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its readers.

Fixes: 1da177e4 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 97e228a4
net/ipv4/tcp.c (+1 −1)

@@ -436,7 +436,7 @@ void tcp_init_sock(struct sock *sk)
 	tp->snd_cwnd_clamp = ~0;
 	tp->mss_cache = TCP_MSS_DEFAULT;
 
-	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
+	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
 	tcp_assign_congestion_control(sk);
 
 	tp->tsoffset = 0;
net/ipv4/tcp_input.c (+7 −3)

@@ -1982,6 +1982,7 @@ void tcp_enter_loss(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
+	u8 reordering;
 
 	tcp_timeout_mark_lost(sk);
 
@@ -2002,10 +2003,12 @@ void tcp_enter_loss(struct sock *sk)
 	/* Timeout in disordered state after receiving substantial DUPACKs
 	 * suggests that the degree of reordering is over-estimated.
 	 */
+	reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-	    tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
+	    tp->sacked_out >= reordering)
 		tp->reordering = min_t(unsigned int, tp->reordering,
-				       net->ipv4.sysctl_tcp_reordering);
+				       reordering);
 
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	tcp_ecn_queue_cwr(tp);
@@ -3303,7 +3306,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
-	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
+	if (tcp_sk(sk)->reordering >
+	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
 		return flag & FLAG_FORWARD_PROGRESS;
 
 	return flag & FLAG_DATA_ACKED;
net/ipv4/tcp_metrics.c (+2 −1)

@@ -425,7 +425,8 @@ void tcp_update_metrics(struct sock *sk)
 		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 			if (val < tp->reordering &&
-			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
+			    tp->reordering !=
+			    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
 				tcp_metric_set(tm, TCP_METRIC_REORDERING,
 					       tp->reordering);
 		}