Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1e8d0b7e authored by Eric Dumazet's avatar Eric Dumazet Committed by Lorenzo Colitti
Browse files

tcp: fix more NULL deref after prequeue changes



[ Upstream commit 0f85feae6b710ced3abad5b2b47d31dfcb956b62 ]

When I cooked commit c3658e8d ("tcp: fix possible NULL dereference in
tcp_vX_send_reset()") I missed other spots we could deref a NULL
skb_dst(skb)

Again, if a socket is provided, we do not need skb_dst() to get a
pointer to network namespace : sock_net(sk) is good enough.

[Cherry-pick of stable/linux-3.18.y 7efe8f1b]

Bug: 16355602
Change-Id: Ibe1def7979625ee7902bff2f33ec8945b9945948
Reported-by: Dann Frazier <dann.frazier@canonical.com>
Bisected-by: Dann Frazier <dann.frazier@canonical.com>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Fixes: ca777eff ("tcp: remove dst refcount false sharing for prequeue mode")
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 733a1d2d
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -623,6 +623,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.iov[0].iov_base = (unsigned char *)&rep;
 	arg.iov[0].iov_len  = sizeof(rep.th);
 
+	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 #ifdef CONFIG_TCP_MD5SIG
 	hash_location = tcp_parse_md5sig_option(th);
 	if (!sk && hash_location) {
@@ -633,7 +634,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 		 * Incoming packet is checked with md5 hash with finding key,
 		 * no RST generated if md5 hash doesn't match.
 		 */
-		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
+		sk1 = __inet_lookup_listener(net,
 					     &tcp_hashinfo, ip_hdr(skb)->saddr,
 					     th->source, ip_hdr(skb)->daddr,
 					     ntohs(th->source), inet_iif(skb));
@@ -681,7 +682,6 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	if (sk)
 		arg.bound_dev_if = sk->sk_bound_dev_if;
 
-	net = dev_net(skb_dst(skb)->dev);
 	arg.tos = ip_hdr(skb)->tos;
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
+14 −14
Original line number Diff line number Diff line
@@ -793,16 +793,16 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
 };
 
-static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
-				 u32 tsval, u32 tsecr, int oif,
-				 struct tcp_md5sig_key *key, int rst, u8 tclass,
-				 u32 label)
+static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
+				 u32 ack, u32 win, u32 tsval, u32 tsecr,
+				 int oif, struct tcp_md5sig_key *key, int rst,
+				 u8 tclass, u32 label)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcphdr *t1;
 	struct sk_buff *buff;
 	struct flowi6 fl6;
-	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 	struct sock *ctl_sk = net->ipv6.tcp_sk;
 	unsigned int tot_len = sizeof(struct tcphdr);
 	struct dst_entry *dst;
@@ -952,7 +952,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 			  (th->doff << 2);
 
 	oif = sk ? sk->sk_bound_dev_if : 0;
-	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
+	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 
 #ifdef CONFIG_TCP_MD5SIG
 release_sk1:
@@ -963,13 +963,13 @@ release_sk1:
 #endif
 }
 
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
-			    u32 win, u32 tsval, u32 tsecr, int oif,
+static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
+			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key, u8 tclass,
 			    u32 label)
 {
-	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
-			     label);
+	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
+			     tclass, label);
 }
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -977,7 +977,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcp_time_stamp + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
@@ -992,10 +992,10 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 	 */
-	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-			tcp_rsk(req)->rcv_nxt,
-			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
+			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
 }