
Commit 626e264d authored by Ilpo Järvinen, committed by David S. Miller

tcpv6: combine tcp_v6_send_(reset|ack)



$ codiff tcp_ipv6.o.old tcp_ipv6.o.new
net/ipv6/tcp_ipv6.c:
  tcp_v6_md5_hash_hdr | -144
  tcp_v6_send_ack     | -585
  tcp_v6_send_reset   | -540
 3 functions changed, 1269 bytes removed, diff: -1269

net/ipv6/tcp_ipv6.c:
  tcp_v6_send_response | +791
 1 function changed, 791 bytes added, diff: +791

tcp_ipv6.o.new:
 4 functions changed, 791 bytes added, 1269 bytes removed, diff: -478

I chose to leave the reset-related netns comment in place (not the one
that is removed), as I cannot quite parse its English, which makes its
usefulness hard for me to evaluate :-).

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 81ada62d
net/ipv6/tcp_ipv6.c  +40 −99
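For orientation before the full diff: the two former entry points become thin wrappers around a single builder, tcp_v6_send_response(), which takes the variable pieces (seq, ack, window, timestamp, MD5 key) plus an rst flag and bumps TCP_MIB_OUTRSTS only for resets. Below is a condensed sketch of the resulting shape, taken from the diff that follows; the builder's body is omitted, and the MD5 key declaration is simplified here to an unconditional NULL initialisation, whereas the patch keeps it under CONFIG_TCP_MD5SIG.

/* Shared builder: assembles and transmits the reply segment (body omitted). */
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst);

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;	/* simplified; see note above */

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	/* A RST echoes the peer's ACK number as seq, or else ACKs the
	 * offending segment; no window or timestamp is sent. */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
			    u32 ts, struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}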
@@ -942,7 +942,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
	return 0;
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
@@ -951,31 +952,14 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 *topt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
	else
		key = NULL;

	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
@@ -990,18 +974,21 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->rst = 1;

	if(th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
@@ -1036,6 +1023,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
@@ -1044,87 +1032,40 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
	kfree_skb(buff);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb->dst->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
	struct tcp_md5sig_key *key;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
	if (th->rst)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}
	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
	else
		key = NULL;
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			return;
		}
	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

	kfree_skb(buff);
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)