
Commit 1b5f962e authored by Craig Gallek, committed by David S. Miller

soreuseport: fix initialization race



Syzkaller stumbled upon a way to trigger
WARNING: CPU: 1 PID: 13881 at net/core/sock_reuseport.c:41
reuseport_alloc+0x306/0x3b0 net/core/sock_reuseport.c:39

There are two initialization paths for the sock_reuseport structure in a
socket: Through the udp/tcp bind paths of SO_REUSEPORT sockets or through
SO_ATTACH_REUSEPORT_[CE]BPF before bind.  The existing implementation
assumed that the socket lock protected both of these paths when it actually
only protects the SO_ATTACH_REUSEPORT path.  Syzkaller triggered this
double allocation by running these paths concurrently.
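
The race is between two userspace-reachable entry points. As a rough
illustration (not syzkaller's actual reproducer; the port number, the trivial
cBPF program, and the thread structure are assumptions made for the sketch),
driving both paths at once on a single SO_REUSEPORT UDP socket looks like this:

/* Hypothetical reproducer sketch: race SO_ATTACH_REUSEPORT_CBPF against
 * bind() on the same SO_REUSEPORT socket.  Timing-dependent; illustrative
 * only.  Build with: gcc repro.c -lpthread
 */
#include <arpa/inet.h>
#include <linux/filter.h>
#include <netinet/in.h>
#include <pthread.h>
#include <sys/socket.h>

static int fd;

static void *attach_path(void *unused)
{
	/* Path 1: setsockopt() allocates sk_reuseport_cb under the
	 * socket lock.  Minimal cBPF program: "return current CPU".
	 */
	struct sock_filter code[] = {
		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
		{ BPF_RET | BPF_A, 0, 0, 0 },
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
		   &prog, sizeof(prog));
	return NULL;
}

static void *bind_path(void *unused)
{
	/* Path 2: bind() reaches udp_reuseport_add_sock(), which also
	 * calls reuseport_alloc() -- per the commit message, the socket
	 * lock does not serialize this path against path 1.
	 */
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		.sin_port = htons(7777),	/* arbitrary test port */
	};

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	int one = 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

	/* Run both initialization paths concurrently; on unpatched
	 * kernels this can double-allocate sk_reuseport_cb.
	 */
	pthread_create(&t1, NULL, attach_path, NULL);
	pthread_create(&t2, NULL, bind_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}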

This patch moves the check for double allocation into the reuseport_alloc
function, which is protected by a global spin lock.
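
Reassembled from the two hunks below, the patched reuseport_alloc() takes the
following shape; the -ENOMEM return and the remaining group-field
initialization are not visible in the hunks and are summarized here from the
surrounding kernel code:

int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* bh-safe global lock: serializes the setsockopt path against
	 * the bind/hash path, which the per-socket lock never did.
	 */
	spin_lock_bh(&reuseport_lock);

	/* Losing the race is now harmless: the group already exists. */
	if (rcu_dereference_protected(sk->sk_reuseport_cb,
				      lockdep_is_held(&reuseport_lock)))
		goto out;

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;		/* not shown in the hunk */
	}

	/* ... remaining group initialization elided ... */
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}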

Fixes: e32ea7e7 ("soreuseport: fast reuseport UDP socket selection")
Fixes: c125e80b ("soreuseport: fast reuseport TCP socket selection")
Signed-off-by: Craig Gallek <kraig@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 66c54517
net/core/sock_reuseport.c  +9 −3
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
 	 * soft irq of receive path or setsockopt from process context
 	 */
 	spin_lock_bh(&reuseport_lock);
-	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
-					    lockdep_is_held(&reuseport_lock)),
-		  "multiple allocations for the same socket");
+
+	/* Allocation attempts can occur concurrently via the setsockopt path
+	 * and the bind/hash path.  Nothing to do when we lose the race.
+	 */
+	if (rcu_dereference_protected(sk->sk_reuseport_cb,
+				      lockdep_is_held(&reuseport_lock)))
+		goto out;
+
 	reuse = __reuseport_alloc(INIT_SOCKS);
 	if (!reuse) {
 		spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
 	reuse->num_socks = 1;
 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
 
+out:
 	spin_unlock_bh(&reuseport_lock);
 
 	return 0;
net/ipv4/inet_hashtables.c  +1 −4
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
 			return reuseport_add_sock(sk, sk2);
 	}
 
-	/* Initial allocation may have already happened via setsockopt */
-	if (!rcu_access_pointer(sk->sk_reuseport_cb))
-		return reuseport_alloc(sk);
-	return 0;
+	return reuseport_alloc(sk);
 }
 
 int __inet_hash(struct sock *sk, struct sock *osk)
net/ipv4/udp.c  +1 −4
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
 		}
 	}
 
-	/* Initial allocation may have already happened via setsockopt */
-	if (!rcu_access_pointer(sk->sk_reuseport_cb))
-		return reuseport_alloc(sk);
-	return 0;
+	return reuseport_alloc(sk);
 }
 
 /**