
Commit a57de0b4 authored by Jiri Olsa, committed by David S. Miller

net: adding memory barrier to the poll and receive callbacks

Adding a memory barrier after the poll_wait function, paired with the
receive callbacks. Adding the functions sock_poll_wait and sk_has_sleeper
to wrap the memory barrier.

Without the memory barrier, the following race can happen.
The race fires when the code paths below meet and the tp->rcv_nxt
and __add_wait_queue updates stay in the CPU caches.

CPU1                         CPU2

sys_select                   receive packet
  ...                        ...
  __add_wait_queue           update tp->rcv_nxt
  ...                        ...
  tp->rcv_nxt check          sock_def_readable
  ...                        {
  schedule                      ...
                                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                                        wake_up_interruptible(sk->sk_sleep)
                                ...
                             }

If there were no caches, the code would work correctly, since the
wait_queue and rcv_nxt accesses are made in opposite order on the two CPUs.

Meaning that once tp->rcv_nxt is updated by CPU2, CPU1 has either already
passed the tp->rcv_nxt check and gone to sleep, or will read the new value
of tp->rcv_nxt and return with an updated event mask.
In both cases the process (CPU1) has already been added to the wait queue,
so the waitqueue_active call on CPU2 cannot miss it and will wake CPU1 up.

The bad case is when the __add_wait_queue changes done by CPU1 stay in its
cache, and so does the tp->rcv_nxt update on the CPU2 side.  CPU1 will then
end up calling schedule and sleeping forever if no more data arrive on the
socket.
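
For illustration, a minimal sketch of how a protocol's poll and data-ready
callbacks pair up through the new helpers (proto_poll and proto_data_ready
are made-up names; the body mirrors datagram_poll and sock_def_readable
from the diff below):

	/* Sleep side: sock_poll_wait does __add_wait_queue via poll_wait,
	 * then issues smp_mb() before the socket state is read. */
	static unsigned int proto_poll(struct file *file, struct socket *sock,
				       poll_table *wait)
	{
		struct sock *sk = sock->sk;
		unsigned int mask = 0;

		sock_poll_wait(file, sk->sk_sleep, wait);
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= POLLIN | POLLRDNORM;
		return mask;
	}

	/* Wakeup side: called after the socket state was updated;
	 * sk_has_sleeper issues smp_mb() before waitqueue_active. */
	static void proto_data_ready(struct sock *sk, int len)
	{
		read_lock(&sk->sk_callback_lock);
		if (sk_has_sleeper(sk))
			wake_up_interruptible(sk->sk_sleep);
		read_unlock(&sk->sk_callback_lock);
	}

Whichever side runs first, one of the two paired barriers guarantees
visibility: either the sleeper's __add_wait_queue is visible to the waker's
waitqueue_active check, or the waker's state update is visible to the
sleeper's re-check, so the wakeup cannot be lost.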

Calls to poll_wait in the following modules were omitted:
	net/bluetooth/af_bluetooth.c
	net/irda/af_irda.c
	net/irda/irnet/irnet_ppp.c
	net/mac80211/rc80211_pid_debugfs.c
	net/phonet/socket.c
	net/rds/af_rds.c
	net/rfkill/core.c
	net/sunrpc/cache.c
	net/sunrpc/rpc_pipe.c
	net/tipc/socket.c

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1b614fb9
include/net/sock.h (+66 −0)
@@ -54,6 +54,7 @@
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/poll.h>
 
 #include <asm/atomic.h>
 #include <net/dst.h>
@@ -1241,6 +1242,71 @@ static inline int sk_has_allocations(const struct sock *sk)
 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
 }
 
+/**
+ * sk_has_sleeper - check if there are any waiting processes
+ * @sk: socket
+ *
+ * Returns true if socket has waiting processes
+ *
+ * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
+ * barrier call. They were added due to the race found within the tcp code.
+ *
+ * Consider the following tcp code paths:
+ *
+ * CPU1                  CPU2
+ *
+ * sys_select            receive packet
+ *   ...                 ...
+ *   __add_wait_queue    update tp->rcv_nxt
+ *   ...                 ...
+ *   tp->rcv_nxt check   sock_def_readable
+ *   ...                 {
+ *   schedule               ...
+ *                          if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ *                              wake_up_interruptible(sk->sk_sleep)
+ *                          ...
+ *                       }
+ *
+ * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
+ * in its cache, and so does the tp->rcv_nxt update on the CPU2 side.  CPU1
+ * could then end up calling schedule and sleeping forever if no more data
+ * arrive on the socket.
+ */
+static inline int sk_has_sleeper(struct sock *sk)
+{
+	/*
+	 * We need to be sure we are in sync with the
+	 * add_wait_queue modifications to the wait queue.
+	 *
+	 * This memory barrier is paired in the sock_poll_wait.
+	 */
+	smp_mb();
+	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+}
+
+/**
+ * sock_poll_wait - place memory barrier behind the poll_wait call.
+ * @filp:           file
+ * @wait_address:   socket wait queue
+ * @p:              poll_table
+ *
+ * See the comments in the sk_has_sleeper function.
+ */
+static inline void sock_poll_wait(struct file *filp,
+		wait_queue_head_t *wait_address, poll_table *p)
+{
+	if (p && wait_address) {
+		poll_wait(filp, wait_address, p);
+		/*
+		 * We need to be sure we are in sync with the
+		 * socket flags modification.
+		 *
+		 * This memory barrier is paired in the sk_has_sleeper.
+		 */
+		smp_mb();
+	}
+}
+
 /*
  * 	Queue a received datagram if it will fit. Stream and sequenced
  *	protocols can't normally use this as they need to fit buffers in
net/atm/common.c (+3 −3)
@@ -92,7 +92,7 @@ static void vcc_sock_destruct(struct sock *sk)
 static void vcc_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up(sk->sk_sleep);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -110,7 +110,7 @@ static void vcc_write_space(struct sock *sk)
 	read_lock(&sk->sk_callback_lock);
 
 	if (vcc_writable(sk)) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible(sk->sk_sleep);
 
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
@@ -594,7 +594,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct atm_vcc *vcc;
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	vcc = ATM_SD(sock);
net/core/datagram.c (+1 −1)
@@ -712,7 +712,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	/* exceptional events? */
net/core/sock.c (+4 −4)
@@ -1715,7 +1715,7 @@ EXPORT_SYMBOL(sock_no_sendpage);
 static void sock_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_all(sk->sk_sleep);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1723,7 +1723,7 @@ static void sock_def_wakeup(struct sock *sk)
 static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
@@ -1732,7 +1732,7 @@ static void sock_def_error_report(struct sock *sk)
 static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -1747,7 +1747,7 @@ static void sock_def_write_space(struct sock *sk)
 	 * progress.  --DaveM
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
net/dccp/output.c (+1 −1)
@@ -196,7 +196,7 @@ void dccp_write_space(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible(sk->sk_sleep);
 	/* Should agree with poll, otherwise some programs break */
 	if (sock_writeable(sk))