
Commit cadefe5f authored by Eric Dumazet, committed by David S. Miller

tcp_bbr: fix bbr pacing rate for internal pacing



This commit makes BBR use only the MSS (without any headers) to
calculate pacing rates when internal TCP-layer pacing is used.

This is necessary to achieve the correct pacing behavior in this case,
since tcp_internal_pacing() uses only the payload length to calculate
pacing delays.

Signed-off-by: Kevin Yang <yyd@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3f484a6e
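To make the header-accounting mismatch concrete before the diffs: a minimal userspace C sketch (illustrative values, not taken from the commit) comparing the inter-packet gap that tcp_internal_pacing() derives from an MTU-based pacing rate versus an MSS-based one.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Illustrative values (hypothetical, not from the commit). */
	uint64_t bw_pkts = 10000;	/* target bandwidth, segments/sec */
	uint64_t mss	 = 1448;	/* payload bytes per segment */
	uint64_t mtu	 = 1500;	/* payload + TCP/IP headers on the wire */

	uint64_t rate_mtu = bw_pkts * mtu; /* pre-patch rate (header-inclusive) */
	uint64_t rate_mss = bw_pkts * mss; /* post-patch rate, internal pacing */

	/* tcp_internal_pacing() divides only the payload length by the rate: */
	printf("intended gap:        %llu ns\n",
	       (unsigned long long)(NSEC_PER_SEC / bw_pkts));
	printf("gap, MTU-based rate: %llu ns (too short -> paces too fast)\n",
	       (unsigned long long)(mss * NSEC_PER_SEC / rate_mtu));
	printf("gap, MSS-based rate: %llu ns\n",
	       (unsigned long long)(mss * NSEC_PER_SEC / rate_mss));
	return 0;
}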
include/net/tcp.h  +11 −0

@@ -1184,6 +1184,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
 	return tp->is_cwnd_limited;
 }
 
+/* BBR congestion control needs pacing.
+ * Same remark for SO_MAX_PACING_RATE.
+ * sch_fq packet scheduler is efficiently handling pacing,
+ * but is not always installed/used.
+ * Return true if TCP stack should pace packets itself.
+ */
+static inline bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
 /* Something is really bad, we could not queue an additional packet,
  * because qdisc is full or receiver sent a 0 window.
  * We do not want to add fuel to the fire, or abort too early,
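For orientation, a hedged sketch of how this predicate fits the sk_pacing_status protocol; the writer side shown in the comment is simplified from how BBR and sch_fq flip the status, so treat the exact call sites as assumptions rather than verbatim kernel code.

/* Sketch, not verbatim kernel code. A pacing-dependent congestion
 * control (e.g. BBR at init) requests pacing once; if the fq packet
 * scheduler is present, it instead claims pacing for itself:
 *
 *	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
 *	smp_store_release(&sk->sk_pacing_status, SK_PACING_FQ);
 *
 * The TCP transmit path then falls back to software pacing only while
 * the acquire-ordered read above still observes SK_PACING_NEEDED:
 */
if (tcp_needs_internal_pacing(sk))
	tcp_internal_pacing(sk, skb);	/* arm the pacing hrtimer */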
net/ipv4/tcp_bbr.c  +5 −1

@@ -205,7 +205,11 @@ static u32 bbr_bw(const struct sock *sk)
  */
 static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
 {
-	rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);
+	unsigned int mss = tcp_sk(sk)->mss_cache;
+
+	if (!tcp_needs_internal_pacing(sk))
+		mss = tcp_mss_to_mtu(sk, mss);
+	rate *= mss;
 	rate *= gain;
 	rate >>= BBR_SCALE;
 	rate *= USEC_PER_SEC;
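The surrounding lines are fixed-point arithmetic: gain is expressed in units of BBR_UNIT (1 << BBR_SCALE, i.e. 256 in tcp_bbr.c), so multiplying by gain and shifting right by BBR_SCALE multiplies by gain/256. Below is a self-contained C sketch of the post-patch math, with the bandwidth sample simplified to plain packets/sec (the real function also multiplies by USEC_PER_SEC because its sample is per-microsecond); values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define BBR_SCALE 8			/* scale factor, as in tcp_bbr.c */
#define BBR_UNIT  (1 << BBR_SCALE)	/* 256: fixed-point "one" */

/* Simplified mirror of bbr_rate_bytes_per_sec() after this patch;
 * rate_pkts stands in for the per-usec bandwidth sample.
 */
static uint64_t rate_bytes_per_sec(uint64_t rate_pkts, unsigned int mss,
				   int gain)
{
	uint64_t rate = rate_pkts * mss; /* payload bytes/sec (internal pacing) */

	rate *= gain;			 /* gain is BBR_UNIT-scaled */
	rate >>= BBR_SCALE;		 /* divide the scale back out */
	return rate;
}

int main(void)
{
	/* bbr_high_gain in tcp_bbr.c is BBR_UNIT * 2885 / 1000 + 1 = 739,
	 * i.e. roughly a 2.89x gain during startup.
	 */
	printf("%llu bytes/sec\n",
	       (unsigned long long)rate_bytes_per_sec(10000, 1448, 739));
	return 0;
}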
net/ipv4/tcp_output.c  +0 −14

@@ -973,17 +973,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-/* BBR congestion control needs pacing.
- * Same remark for SO_MAX_PACING_RATE.
- * sch_fq packet scheduler is efficiently handling pacing,
- * but is not always installed/used.
- * Return true if TCP stack should pace packets itself.
- */
-static bool tcp_needs_internal_pacing(const struct sock *sk)
-{
-	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
-}
-
 static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
 {
 	u64 len_ns;
@@ -995,9 +984,6 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
 	if (!rate || rate == ~0U)
 		return;
 
-	/* Should account for header sizes as sch_fq does,
-	 * but lets make things simple.
-	 */
 	len_ns = (u64)skb->len * NSEC_PER_SEC;
 	do_div(len_ns, rate);
 	hrtimer_start(&tcp_sk(sk)->pacing_timer,