Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fc566cf3 authored by Eric Dumazet, committed by Greg Kroah-Hartman
Browse files

tcp_metrics: annotate data-races around tm->tcpm_vals[]



[ Upstream commit 8c4d04f6b443869d25e59822f7cec88d647028a9 ]

tm->tcpm_vals[] values can be read or written locklessly.

Add needed READ_ONCE()/WRITE_ONCE() to document this,
and force use of tcp_metric_get() and tcp_metric_set()

Fixes: 51c5d0c4 ("tcp: Maintain dynamic metrics in local cache.")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 76b47dab
Loading
Loading
Loading
Loading
+14 −9
Original line number Diff line number Diff line
@@ -63,17 +63,19 @@ static bool tcp_metric_locked(struct tcp_metrics_block *tm,
	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
	/* Paired with WRITE_ONCE() in tcp_metric_set() */
	return READ_ONCE(tm->tcpm_vals[idx]);
}

/* Store one TCP metric value in the cache.
 *
 * tm->tcpm_vals[] may be read locklessly by other CPUs; WRITE_ONCE()
 * pairs with the READ_ONCE() in tcp_metric_get() to document the data
 * race and prevent store tearing.  The diff residue (old and new lines
 * interleaved) is resolved to the post-patch form.
 */
static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	/* Paired with READ_ONCE() in tcp_metric_get() */
	WRITE_ONCE(tm->tcpm_vals[idx], val);
}

static bool addr_same(const struct inetpeer_addr *a,
@@ -115,13 +117,16 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
	WRITE_ONCE(tm->tcpm_lock, val);

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
		       dst_metric_raw(dst, RTAX_SSTHRESH));
	tcp_metric_set(tm, TCP_METRIC_CWND,
		       dst_metric_raw(dst, RTAX_CWND));
	tcp_metric_set(tm, TCP_METRIC_REORDERING,
		       dst_metric_raw(dst, RTAX_REORDERING));
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
@@ -662,7 +667,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];
			u32 val = tcp_metric_get(tm, i);

			if (!val)
				continue;