
Commit eaf6dc03 authored by David S. Miller

Merge branch 'tcp-cwnd-undo-refactor'



Yuchung Cheng says:

====================
tcp cwnd undo refactor

This patch series consolidates the similar cwnd undo functions
implemented by various congestion control modules by using an
existing TCP socket state variable. The first patch fixes a corner
case of cwnd undo in Reno and HTCP. Since the bug has
existed for many years and is very minor, we consider this
patch set more suitable for net-next, as the major change
is the refactor itself.

- v1->v2
  Fix trivial compile errors
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 10377ba7 f1722a1b
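
The pattern applied throughout the series is visible in the hunks below: the shared helper tcp_reno_undo_cwnd reads the pre-recovery cwnd that core TCP already records in tp->prior_cwnd, so each congestion control module can drop its private loss_cwnd/undo_cwnd copy and point its undo_cwnd hook at the shared helper. A minimal sketch of that shape, assembled from the diffs below (not a literal copy of any single file):

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Restore the larger of the current cwnd and the cwnd that was
	 * in use right before loss recovery started (tp->prior_cwnd).
	 */
	return max(tp->snd_cwnd, tp->prior_cwnd);
}

/* A module such as BIC then registers the shared helper instead of a
 * private undo function that cached ca->loss_cwnd:
 */
static struct tcp_congestion_ops bictcp __read_mostly = {
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* was bictcp_undo_cwnd */
	.owner		= THIS_MODULE,
	.name		= "bic",
	/* other hooks unchanged */
};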
include/linux/tcp.h  +1 −1
@@ -258,7 +258,7 @@ struct tcp_sock {
	u32	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
	u32	snd_cwnd_used;
	u32	snd_cwnd_stamp;
-	u32	prior_cwnd;	/* Congestion window at start of Recovery. */
+	u32	prior_cwnd;	/* cwnd right before starting loss recovery */
	u32	prr_delivered;	/* Number of newly delivered packets to
				 * receiver in Recovery. */
	u32	prr_out;	/* Total number of pkts sent during Recovery. */
net/ipv4/tcp_bic.c  +1 −13
@@ -49,7 +49,6 @@ MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wma
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd */
-	u32	loss_cwnd;	/* congestion window at last loss */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when updated last_cwnd */
	u32	epoch_start;	/* beginning of an epoch */
@@ -72,7 +71,6 @@ static void bictcp_init(struct sock *sk)
	struct bictcp *ca = inet_csk_ca(sk);

	bictcp_reset(ca);
-	ca->loss_cwnd = 0;

	if (initial_ssthresh)
		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
@@ -172,22 +170,12 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
	else
		ca->last_max_cwnd = tp->snd_cwnd;

-	ca->loss_cwnd = tp->snd_cwnd;
-
	if (tp->snd_cwnd <= low_window)
		return max(tp->snd_cwnd >> 1U, 2U);
	else
		return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}

-static u32 bictcp_undo_cwnd(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct bictcp *ca = inet_csk_ca(sk);
-
-	return max(tp->snd_cwnd, ca->loss_cwnd);
-}
-
static void bictcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Loss)
@@ -214,7 +202,7 @@ static struct tcp_congestion_ops bictcp __read_mostly = {
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.set_state	= bictcp_state,
-	.undo_cwnd	= bictcp_undo_cwnd,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
	.pkts_acked     = bictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "bic",
net/ipv4/tcp_cdg.c  +1 −11
@@ -85,7 +85,6 @@ struct cdg {
	u8  state;
	u8  delack;
	u32 rtt_seq;
-	u32 undo_cwnd;
	u32 shadow_wnd;
	u16 backoff_cnt;
	u16 sample_cnt;
@@ -330,8 +329,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
	struct cdg *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

-	ca->undo_cwnd = tp->snd_cwnd;
-
	if (ca->state == CDG_BACKOFF)
		return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);

@@ -344,13 +341,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
	return max(2U, tp->snd_cwnd >> 1);
}

-static u32 tcp_cdg_undo_cwnd(struct sock *sk)
-{
-	struct cdg *ca = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, ca->undo_cwnd);
-}
-
static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
{
	struct cdg *ca = inet_csk_ca(sk);
@@ -403,7 +393,7 @@ struct tcp_congestion_ops tcp_cdg __read_mostly = {
	.cong_avoid = tcp_cdg_cong_avoid,
	.cwnd_event = tcp_cdg_cwnd_event,
	.pkts_acked = tcp_cdg_acked,
-	.undo_cwnd = tcp_cdg_undo_cwnd,
+	.undo_cwnd = tcp_reno_undo_cwnd,
	.ssthresh = tcp_cdg_ssthresh,
	.release = tcp_cdg_release,
	.init = tcp_cdg_init,
net/ipv4/tcp_cong.c  +1 −1
@@ -456,7 +456,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

-	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+	return max(tp->snd_cwnd, tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
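
The hunk above is what makes the consolidation possible: tcp_reno_undo_cwnd no longer reconstructs the pre-loss window as snd_ssthresh << 1 but returns the recorded tp->prior_cwnd. The two formulas only agree while ssthresh still happens to be exactly half of the cwnd in use before recovery. A tiny standalone comparison with made-up numbers (illustrative only, not kernel code, and not necessarily the exact corner case the series' first patch targets) shows how they can diverge once ssthresh has been adjusted again before the undo:

#include <stdio.h>

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int prior_cwnd = 10;   /* cwnd right before recovery started */
	unsigned int snd_ssthresh = 4;  /* ssthresh lowered again before the undo */
	unsigned int snd_cwnd = 5;      /* cwnd while still in recovery */

	/* Old formula: infer the pre-loss window from ssthresh. */
	printf("ssthresh-based undo: %u\n", max_u32(snd_cwnd, snd_ssthresh << 1));
	/* New formula: use the window actually recorded before recovery. */
	printf("prior_cwnd undo:     %u\n", max_u32(snd_cwnd, prior_cwnd));
	return 0;
}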

net/ipv4/tcp_cubic.c  +1 −12
@@ -83,7 +83,6 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd */
-	u32	loss_cwnd;	/* congestion window at last loss */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when updated last_cwnd */
	u32	bic_origin_point;/* origin point of bic function */
@@ -142,7 +141,6 @@ static void bictcp_init(struct sock *sk)
	struct bictcp *ca = inet_csk_ca(sk);

	bictcp_reset(ca);
-	ca->loss_cwnd = 0;

	if (hystart)
		bictcp_hystart_reset(sk);
@@ -366,18 +364,9 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
	else
		ca->last_max_cwnd = tp->snd_cwnd;

-	ca->loss_cwnd = tp->snd_cwnd;
-
	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}

-static u32 bictcp_undo_cwnd(struct sock *sk)
-{
-	struct bictcp *ca = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
-}
-
static void bictcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Loss) {
@@ -470,7 +459,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.set_state	= bictcp_state,
-	.undo_cwnd	= bictcp_undo_cwnd,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cwnd_event	= bictcp_cwnd_event,
	.pkts_acked     = bictcp_acked,
	.owner		= THIS_MODULE,