Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 792365bf authored by Eric Dumazet's avatar Eric Dumazet Committed by Greg Kroah-Hartman
Browse files

tcp/dccp: fix possible race __inet_lookup_established()



commit 8dbd76e79a16b45b2ccb01d2f2e08dbf64e71e40 upstream.

Michal Kubecek and Firo Yang did a very nice analysis of crashes
happening in __inet_lookup_established().

Since a TCP socket can go from TCP_ESTABLISHED to TCP_LISTEN
(via a close()/socket()/listen() cycle) without a RCU grace period,
I should not have changed listeners linkage in their hash table.

They must use the nulls protocol (Documentation/RCU/rculist_nulls.txt),
so that a lookup can detect a socket in a hash list was moved in
another one.

Since we added code in commit d296ba60 ("soreuseport: Resolve
merge conflict for v4/v6 ordering fix"), we have to add
hlist_nulls_add_tail_rcu() helper.

Fixes: 3b24d854 ("tcp/dccp: do not touch listener sk_refcnt under synflood")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Michal Kubecek <mkubecek@suse.cz>
Reported-by: Firo Yang <firo.yang@suse.com>
Reviewed-by: Michal Kubecek <mkubecek@suse.cz>
Link: https://lore.kernel.org/netdev/20191120083919.GH27852@unicorn.suse.cz/


Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
[stable-4.9: we also need to update code in __inet_lookup_listener() and
 inet6_lookup_listener() which has been removed in 5.0-rc1.]
Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0a8f421b
Loading
Loading
Loading
Loading
+37 −0
Original line number Diff line number Diff line
@@ -99,6 +99,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
		first->pprev = &n->next;
}

/**
 * hlist_nulls_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_nulls,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
					    struct hlist_nulls_head *h)
{
	struct hlist_nulls_node *i, *last = NULL;

	/* Note: write side code, so rcu accessors are not needed. */
	/* O(len) walk to the last real node; loop ends on the 'nulls' marker. */
	for (i = h->first; !is_a_nulls(i); i = i->next)
		last = i;

	if (last) {
		/*
		 * Fully initialize @n before publishing it: @n inherits the
		 * bucket's 'nulls' end-of-chain marker from last->next.
		 */
		n->next = last->next;
		n->pprev = &last->next;
		/*
		 * rcu_assign_pointer() orders the stores above before making
		 * @n reachable to concurrent RCU readers.
		 */
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		/* Empty list: tail insertion degenerates to head insertion. */
		hlist_nulls_add_head_rcu(n, h);
	}
}

/**
 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:	the type * to use as a loop cursor.
+9 −3
Original line number Diff line number Diff line
@@ -98,12 +98,18 @@ struct inet_bind_hashbucket {
	struct hlist_head	chain;
};

/*
 * Sockets can be hashed in established or listening table
/* Sockets can be hashed in established or listening table.
 * We must use different 'nulls' end-of-chain value for all hash buckets :
 * A socket might transition from ESTABLISH to LISTEN state without
 * RCU grace period. A lookup in ehash table needs to handle this case.
 */
/*
 * Base for the per-bucket 'nulls' end-of-chain values of the listening
 * hash: bucket i is initialized with nulls value (i + LISTENING_NULLS_BASE)
 * in inet_hashinfo_init(), so a lookup that raced with a socket moving
 * between the ehash and listening tables can detect the chain switch.
 */
#define LISTENING_NULLS_BASE (1U << 29)
/* One bucket of the listening-socket hash table. */
struct inet_listen_hashbucket {
	spinlock_t		lock;	/* serializes writers; taken with spin_lock_bh() */
	/*
	 * Same storage viewed through two linkage types: nulls_head is the
	 * RCU-safe 'nulls' linkage used for lookups; head is the plain hlist
	 * view (kept for any remaining non-nulls users — TODO confirm none
	 * remain after this conversion).
	 */
	union {
		struct hlist_head	head;
		struct hlist_nulls_head	nulls_head;
	};
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
+5 −0
Original line number Diff line number Diff line
@@ -661,6 +661,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

/*
 * Append @sk to the tail of an RCU 'nulls' hash list (no refcount taken —
 * the double-underscore variant leaves sock_hold() to the caller).
 * Tail insertion is needed by __inet_hash() for IPv6 reuseport listeners
 * to preserve the v4/v6 ordering established in commit d296ba60.
 */
static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
+2 −1
Original line number Diff line number Diff line
@@ -868,12 +868,13 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct inet_listen_hashbucket *ilb;
			struct hlist_nulls_node *node;
			struct sock *sk;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_for_each(sk, &ilb->head) {
			sk_nulls_for_each(sk, node, &ilb->nulls_head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
+9 −9
Original line number Diff line number Diff line
@@ -218,9 +218,10 @@ struct sock *__inet_lookup_listener(struct net *net,
	int score, hiscore = 0, matches = 0, reuseport = 0;
	bool exact_dif = inet_exact_dif_match(net, skb);
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	u32 phash = 0;

	sk_for_each_rcu(sk, &ilb->head) {
	sk_nulls_for_each_rcu(sk, node, &ilb->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
		if (score > hiscore) {
			reuseport = sk->sk_reuseport;
@@ -441,10 +442,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
						     bool match_wildcard))
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each_rcu(sk2, &ilb->head) {
	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -482,9 +484,9 @@ int __inet_hash(struct sock *sk, struct sock *osk,
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
	else
		hlist_add_head_rcu(&sk->sk_node, &ilb->head);
		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
@@ -527,9 +529,6 @@ void inet_unhash(struct sock *sk)
	spin_lock_bh(lock);
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);
	if (listener)
		done = __sk_del_node_init(sk);
	else
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
@@ -666,7 +665,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_HEAD(&h->listening_hash[i].head);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
Loading