Commit 40b215e5 authored by Pavel Emelyanov, committed by David S. Miller

tcp: de-bloat a bit with factoring NET_INC_STATS_BH out



There are some places in TCP that select one MIB index to
bump SNMP statistics like this:

	if (<something>)
		NET_INC_STATS_BH(<some_id>);
	else if (<something_else>)
		NET_INC_STATS_BH(<some_other_id>);
	...
	else
		NET_INC_STATS_BH(<default_id>);

or in a trickier but still similar way.

On the other hand, each NET_INC_STATS_BH is a camouflaged
increment of a per-cpu variable, whose expansion is not that small.
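
Roughly, assuming the 2.6.x SNMP macro definitions (a simplified
sketch, not the verbatim kernel code), each call expands to a
per-cpu pointer lookup plus an increment:

	/* every use of NET_INC_STATS_BH() inlines this chain */
	#define NET_INC_STATS_BH(field) \
		SNMP_INC_STATS_BH(net_statistics, field)
	#define SNMP_INC_STATS_BH(mib, field) \
		(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)

so every branch that calls it duplicates that whole sequence.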

Factoring those cases out saves 235 bytes on a non-preemptible
i386 config and brings parts of the code back within 80 columns.
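
(A delta report in this format typically comes from the kernel's
scripts/bloat-o-meter, run against the old and new builds, e.g.
"./scripts/bloat-o-meter vmlinux.old vmlinux.new"; the vmlinux
file names here are illustrative.)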

add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-235 (-235)
function                                     old     new   delta
tcp_fastretrans_alert                       1437    1424     -13
tcp_dsack_set                                137     124     -13
tcp_xmit_retransmit_queue                    690     676     -14
tcp_try_undo_recovery                        283     265     -18
tcp_sacktag_write_queue                     1550    1515     -35
tcp_update_reordering                        162     106     -56
tcp_retransmit_timer                         990     904     -86

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b4653e99
net/ipv4/tcp_input.c  +32 −14
@@ -947,17 +947,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	if (metric > tp->reordering) {
+		int mib_idx;
+
 		tp->reordering = min(TCP_MAX_REORDERING, metric);
 
 		/* This exciting event is worth to be remembered. 8) */
 		if (ts)
-			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+			mib_idx = LINUX_MIB_TCPTSREORDER;
 		else if (tcp_is_reno(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+			mib_idx = LINUX_MIB_TCPRENOREORDER;
 		else if (tcp_is_fack(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+			mib_idx = LINUX_MIB_TCPFACKREORDER;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+			mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+		NET_INC_STATS_BH(mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
 		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1456,18 +1460,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 		if (!tcp_is_sackblock_valid(tp, dup_sack,
 					    sp[used_sacks].start_seq,
 					    sp[used_sacks].end_seq)) {
+			int mib_idx;
+
 			if (dup_sack) {
 				if (!tp->undo_marker)
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
 				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
 			} else {
 				/* Don't count olds caused by ACK reordering */
 				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
 				    !after(sp[used_sacks].end_seq, tp->snd_una))
 					continue;
-				NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
+
+			NET_INC_STATS_BH(mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -2380,15 +2388,19 @@ static int tcp_try_undo_recovery(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tcp_may_undo(tp)) {
+		int mib_idx;
+
 		/* Happy end! We did not retransmit anything
 		 * or our original transmission succeeded.
 		 */
 		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
 		tcp_undo_cwr(sk, 1);
 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+			mib_idx = LINUX_MIB_TCPLOSSUNDO;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+			mib_idx = LINUX_MIB_TCPFULLUNDO;
+
+		NET_INC_STATS_BH(mib_idx);
 		tp->undo_marker = 0;
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2560,7 +2572,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
-	int fast_rexmit = 0;
+	int fast_rexmit = 0, mib_idx;
 
 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
 		tp->sacked_out = 0;
@@ -2683,9 +2695,11 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 		/* Otherwise enter Recovery state */
 
 		if (tcp_is_reno(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+			mib_idx = LINUX_MIB_TCPRENORECOVERY;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+			mib_idx = LINUX_MIB_TCPSACKRECOVERY;
+
+		NET_INC_STATS_BH(mib_idx);
 
 		tp->high_seq = tp->snd_nxt;
 		tp->prior_ssthresh = 0;
@@ -3700,10 +3714,14 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
 	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+		int mib_idx;
+
 		if (before(seq, tp->rcv_nxt))
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
+			mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
+			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
+
+		NET_INC_STATS_BH(mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
net/ipv4/tcp_output.c  +5 −2
@@ -1985,14 +1985,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 
 			if (sacked & TCPCB_LOST) {
 				if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
+					int mib_idx;
+
 					if (tcp_retransmit_skb(sk, skb)) {
 						tp->retransmit_skb_hint = NULL;
 						return;
 					}
 					if (icsk->icsk_ca_state != TCP_CA_Loss)
-						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
+						mib_idx = LINUX_MIB_TCPFASTRETRANS;
 					else
-						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
+						mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
+					NET_INC_STATS_BH(mib_idx);
 
 					if (skb == tcp_write_queue_head(sk))
 						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
net/ipv4/tcp_timer.c  +9 −6
@@ -326,24 +326,27 @@ static void tcp_retransmit_timer(struct sock *sk)
 		goto out;
 
 	if (icsk->icsk_retransmits == 0) {
+		int mib_idx;
+
 		if (icsk->icsk_ca_state == TCP_CA_Disorder ||
 		    icsk->icsk_ca_state == TCP_CA_Recovery) {
 			if (tcp_is_sack(tp)) {
 				if (icsk->icsk_ca_state == TCP_CA_Recovery)
-					NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
+					mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
 				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
+					mib_idx = LINUX_MIB_TCPSACKFAILURES;
 			} else {
 				if (icsk->icsk_ca_state == TCP_CA_Recovery)
-					NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
+					mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
 				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
+					mib_idx = LINUX_MIB_TCPRENOFAILURES;
 			}
 		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
+			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
 		} else {
-			NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
+			mib_idx = LINUX_MIB_TCPTIMEOUTS;
 		}
+		NET_INC_STATS_BH(mib_idx);
 	}
 
 	if (tcp_use_frto(sk)) {