
Commit eaefd110 authored by Eric Dumazet, committed by David S. Miller

net: add __rcu annotations to sk_wq and wq



Add proper RCU annotations/verbs to sk_wq and wq members

Fix __sctp_write_space() sk_sleep() abuse (and sock->wq access)

Fix sunrpc sk_sleep() abuse too

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 04cfa852
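
The __rcu annotation changes no generated code; it lets sparse flag any access to sk->sk_wq or sock->wq that bypasses the RCU accessors. As a rough illustration of the access pattern these annotations enforce (not part of the commit; the struct and function names below are invented), readers fetch the pointer with rcu_dereference() inside an RCU read-side critical section, while updaters publish with rcu_assign_pointer() and use rcu_dereference_protected() under their update-side lock:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical structures, named here only for illustration. */
struct my_wq {
	wait_queue_head_t	wait;
	struct rcu_head		rcu;
};

struct my_holder {
	struct my_wq __rcu	*wq;	/* __rcu: sparse checks every access */
	spinlock_t		lock;	/* update-side lock */
};

/* Reader: fetch the pointer with rcu_dereference() inside an RCU
 * read-side critical section before touching the wait queue. */
static void my_reader(struct my_holder *h)
{
	struct my_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(h->wq);
	if (wq && waitqueue_active(&wq->wait))
		wake_up_interruptible(&wq->wait);
	rcu_read_unlock();
}

/* RCU callback freeing the old object once all readers are done. */
static void my_wq_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_wq, rcu));
}

/* Updater: rcu_dereference_protected() documents that the update-side
 * lock is held; rcu_assign_pointer() publishes the new object. */
static void my_writer(struct my_holder *h, struct my_wq *newwq)
{
	struct my_wq *old;

	spin_lock(&h->lock);
	old = rcu_dereference_protected(h->wq, lockdep_is_held(&h->lock));
	rcu_assign_pointer(h->wq, newwq);
	spin_unlock(&h->lock);
	if (old)
		call_rcu(&old->rcu, my_wq_free_rcu);
}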
include/linux/net.h  +2 −1
@@ -118,6 +118,7 @@ enum sock_shutdown_cmd {
 };
 
 struct socket_wq {
+	/* Note: wait MUST be first field of socket_wq */
 	wait_queue_head_t	wait;
 	struct fasync_struct	*fasync_list;
 	struct rcu_head		rcu;
@@ -142,7 +143,7 @@ struct socket {
 
 	unsigned long		flags;
 
-	struct socket_wq	*wq;
+	struct socket_wq __rcu	*wq;
 
 	struct file		*file;
 	struct sock		*sk;
include/net/sock.h  +4 −3
@@ -281,7 +281,7 @@ struct sock {
 	int			sk_rcvbuf;
 
 	struct sk_filter __rcu	*sk_filter;
-	struct socket_wq	*sk_wq;
+	struct socket_wq __rcu	*sk_wq;
 
 #ifdef CONFIG_NET_DMA
 	struct sk_buff_head	sk_async_wait_queue;
@@ -1266,7 +1266,8 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 
 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
 {
-	return &sk->sk_wq->wait;
+	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+	return &rcu_dereference_raw(sk->sk_wq)->wait;
 }
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
@@ -1287,7 +1288,7 @@ static inline void sock_orphan(struct sock *sk)
 static inline void sock_graft(struct sock *sk, struct socket *parent)
 {
 	write_lock_bh(&sk->sk_callback_lock);
-	sk->sk_wq = parent->wq;
+	rcu_assign_pointer(sk->sk_wq, parent->wq);
 	parent->sk = sk;
 	sk_set_socket(sk, parent);
 	security_sock_graft(sk, parent);
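
A note on the sk_sleep() hunk above: because the BUILD_BUG_ON() pins wait to offset 0 of struct socket_wq, taking &...->wait of a NULL sk_wq still yields NULL, so callers can cache sk_sleep()'s result once and NULL-check it instead of calling it repeatedly. A minimal sketch of that caller pattern (my_wake_readers is an invented name; the same shape appears in the sctp and sunrpc hunks below):

#include <net/sock.h>

/* Hypothetical helper, for illustration only: cache sk_sleep() once,
 * then NULL-check the cached pointer before waking sleepers. */
static void my_wake_readers(struct sock *sk)
{
	wait_queue_head_t *wq = sk_sleep(sk);

	if (wq && waitqueue_active(wq))
		wake_up_interruptible(wq);
}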
net/sctp/socket.c  +5 −4
@@ -6102,15 +6102,16 @@ static void __sctp_write_space(struct sctp_association *asoc)
 			wake_up_interruptible(&asoc->wait);
 
 		if (sctp_writeable(sk)) {
-			if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-				wake_up_interruptible(sk_sleep(sk));
+			wait_queue_head_t *wq = sk_sleep(sk);
+
+			if (wq && waitqueue_active(wq))
+				wake_up_interruptible(wq);
 
 			/* Note that we try to include the Async I/O support
 			 * here by modeling from the current TCP/UDP code.
 			 * We have not tested with it yet.
 			 */
-			if (sock->wq->fasync_list &&
-			    !(sk->sk_shutdown & SEND_SHUTDOWN))
+			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
 				sock_wake_async(sock,
 						SOCK_WAKE_SPACE, POLL_OUT);
 		}
net/socket.c  +14 −9
@@ -240,17 +240,19 @@ static struct kmem_cache *sock_inode_cachep __read_mostly;
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
 	struct socket_alloc *ei;
+	struct socket_wq *wq;
 
 	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
-	ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
-	if (!ei->socket.wq) {
+	wq = kmalloc(sizeof(*wq), GFP_KERNEL);
+	if (!wq) {
 		kmem_cache_free(sock_inode_cachep, ei);
 		return NULL;
 	}
-	init_waitqueue_head(&ei->socket.wq->wait);
-	ei->socket.wq->fasync_list = NULL;
+	init_waitqueue_head(&wq->wait);
+	wq->fasync_list = NULL;
+	RCU_INIT_POINTER(ei->socket.wq, wq);
 
 	ei->socket.state = SS_UNCONNECTED;
 	ei->socket.flags = 0;
@@ -273,9 +275,11 @@ static void wq_free_rcu(struct rcu_head *head)
 static void sock_destroy_inode(struct inode *inode)
 {
 	struct socket_alloc *ei;
+	struct socket_wq *wq;
 
 	ei = container_of(inode, struct socket_alloc, vfs_inode);
-	call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+	wq = rcu_dereference_protected(ei->socket.wq, 1);
+	call_rcu(&wq->rcu, wq_free_rcu);
 	kmem_cache_free(sock_inode_cachep, ei);
 }
 
@@ -524,7 +528,7 @@ void sock_release(struct socket *sock)
 		module_put(owner);
 	}
 
-	if (sock->wq->fasync_list)
+	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
 		printk(KERN_ERR "sock_release: fasync list not empty!\n");
 
 	percpu_sub(sockets_in_use, 1);
@@ -1108,15 +1112,16 @@ static int sock_fasync(int fd, struct file *filp, int on)
 {
 	struct socket *sock = filp->private_data;
 	struct sock *sk = sock->sk;
+	struct socket_wq *wq;
 
 	if (sk == NULL)
 		return -EINVAL;
 
 	lock_sock(sk);
+	wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
+	fasync_helper(fd, filp, on, &wq->fasync_list);
 
-	fasync_helper(fd, filp, on, &sock->wq->fasync_list);
-
-	if (!sock->wq->fasync_list)
+	if (!wq->fasync_list)
 		sock_reset_flag(sk, SOCK_FASYNC);
 	else
 		sock_set_flag(sk, SOCK_FASYNC);
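
One detail worth spelling out for the net/socket.c hunks above: the second argument of rcu_dereference_protected() is the condition under which a plain, lockless dereference is claimed to be safe. Passing a literal 1, as sock_destroy_inode() and sock_release() do, asserts that no concurrent updater can exist on that teardown path, so neither rcu_read_lock() nor a lockdep expression is needed. A hypothetical helper showing the same idiom (my_sock_has_fasync is not a real kernel function):

#include <linux/net.h>
#include <linux/rcupdate.h>

/* Illustration only: read sock->wq on a path where the socket is known
 * to have no concurrent updaters, e.g. during teardown.  The literal 1
 * tells RCU (and sparse/lockdep) that the bare dereference is intended. */
static bool my_sock_has_fasync(struct socket *sock)
{
	struct socket_wq *wq = rcu_dereference_protected(sock->wq, 1);

	return wq && wq->fasync_list;
}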
net/sunrpc/svcsock.c  +20 −12
@@ -420,6 +420,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 static void svc_udp_data_ready(struct sock *sk, int count)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
+	wait_queue_head_t *wq = sk_sleep(sk);
 
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
@@ -428,8 +429,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible(wq);
 }
 
 /*
@@ -438,6 +439,7 @@ static void svc_udp_data_ready(struct sock *sk, int count)
 static void svc_write_space(struct sock *sk)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);
+	wait_queue_head_t *wq = sk_sleep(sk);
 
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
@@ -445,10 +447,10 @@ static void svc_write_space(struct sock *sk)
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
 
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
+	if (wq && waitqueue_active(wq)) {
 		dprintk("RPC svc_write_space: someone sleeping on %p\n",
 		       svsk);
-		wake_up_interruptible(sk_sleep(sk));
+		wake_up_interruptible(wq);
 	}
 }
 
@@ -739,6 +741,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
+	wait_queue_head_t *wq;
 
 	dprintk("svc: socket %p TCP (listen) state change %d\n",
 		sk, sk->sk_state);
@@ -761,8 +764,9 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 			printk("svc: socket %p: no user data\n", sk);
 	}
 
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible_all(sk_sleep(sk));
+	wq = sk_sleep(sk);
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible_all(wq);
 }
 
 /*
@@ -771,6 +775,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 static void svc_tcp_state_change(struct sock *sk)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
+	wait_queue_head_t *wq = sk_sleep(sk);
 
 	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
 		sk, sk->sk_state, sk->sk_user_data);
@@ -781,13 +786,14 @@ static void svc_tcp_state_change(struct sock *sk)
 		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible_all(sk_sleep(sk));
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible_all(wq);
 }
 
 static void svc_tcp_data_ready(struct sock *sk, int count)
 {
 	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+	wait_queue_head_t *wq = sk_sleep(sk);
 
 	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
 		sk, sk->sk_user_data);
@@ -795,8 +801,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible(wq);
 }
 
 /*
@@ -1531,6 +1537,7 @@ static void svc_sock_detach(struct svc_xprt *xprt)
 {
 	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
 	struct sock *sk = svsk->sk_sk;
+	wait_queue_head_t *wq;
 
 	dprintk("svc: svc_sock_detach(%p)\n", svsk);
 
@@ -1539,8 +1546,9 @@ static void svc_sock_detach(struct svc_xprt *xprt)
 	sk->sk_data_ready = svsk->sk_odata;
 	sk->sk_write_space = svsk->sk_owspace;
 
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	wq = sk_sleep(sk);
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible(wq);
 }
 
 /*