
Commit a36e1978 authored by Eric Biggers, committed by Greg Kroah-Hartman

wait: add wake_up_pollfree()



commit 42288cb44c4b5fff7653bc392b583a2b8bd6a8c0 upstream.

Several ->poll() implementations are special in that they use a
waitqueue whose lifetime is the current task, rather than the struct
file as is normally the case.  This is okay for blocking polls, since a
blocking poll occurs within one task; however, non-blocking polls
require another solution.  That solution is to clear the queue before
it is freed, using 'wake_up_poll(wq, EPOLLHUP | POLLFREE);'.
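
For concreteness, a minimal hypothetical sketch of that pre-existing
idiom ('struct my_task_ctx' and 'my_task_ctx_destroy()' are invented
names for illustration, not part of this patch):

    #include <linux/wait.h>
    #include <linux/poll.h>
    #include <linux/slab.h>

    /* Hypothetical context whose waitqueue lives only as long as the
     * owning task, not as long as the polled struct file. */
    struct my_task_ctx {
            wait_queue_head_t wqh;
            /* ... */
    };

    static void my_task_ctx_destroy(struct my_task_ctx *ctx)
    {
            /* Old idiom: notify all pollers before freeing the queue. */
            wake_up_poll(&ctx->wqh, EPOLLHUP | POLLFREE);
            kfree(ctx);
    }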

However, that has a bug: wake_up_poll() calls __wake_up() with
nr_exclusive=1.  Therefore, if there are multiple "exclusive" waiters,
and the wakeup function for the first one returns a positive value, only
that one will be called.  That's *not* what's needed for POLLFREE;
POLLFREE is special in that it really needs to wake up everyone.
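
The stopping condition lives in the waker's scan over the queue.  The
following is a simplified paraphrase of that loop (not verbatim kernel
source) showing why nr_exclusive=1 stops after the first exclusive
waiter whose wakeup function reports success:

    list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
            int ret = curr->func(curr, mode, wake_flags, key);

            if (ret < 0)
                    break;
            /*
             * A successful wakeup of an exclusive waiter consumes one
             * of nr_exclusive; with nr_exclusive=1 the scan stops here
             * and any remaining waiters never see POLLFREE.
             */
            if (ret && (curr->flags & WQ_FLAG_EXCLUSIVE) &&
                !--nr_exclusive)
                    break;
    }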

Considering the three non-blocking poll systems:

- io_uring poll doesn't handle POLLFREE at all, so it is broken anyway.

- aio poll is unaffected, since it doesn't support exclusive waits.
  However, that's fragile, as someone could add this feature later.

- epoll doesn't appear to be broken by this, since its wakeup function
  returns 0 when it sees POLLFREE (see the sketch after this list).
  But this is fragile.
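
A simplified sketch of that defensive callback style (invented names,
loosely modeled on epoll's behavior rather than copied from it):

    static int my_poll_callback(struct wait_queue_entry *wait,
                                unsigned int mode, int sync, void *key)
    {
            __poll_t events = key_to_poll(key);

            if (events & POLLFREE) {
                    /*
                     * The waitqueue is being torn down: detach this
                     * entry and return 0 so that no exclusive wakeup
                     * is consumed and the waker keeps scanning.
                     */
                    list_del_init(&wait->entry);
                    return 0;
            }

            /* ... normal event handling ... */
            return 1;
    }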

Although there is a workaround (see epoll), it's better to define a
function which always sends POLLFREE to all waiters.  Add such a
function.  Also make it verify that the queue really becomes empty after
all waiters have been woken up.
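
With the new helper, the same hypothetical teardown (invented names,
continuing the sketch above) becomes:

    static void my_task_ctx_destroy(struct my_task_ctx *ctx)
    {
            /* Wakes *all* waiters, exclusive or not, and warns if the
             * queue is not empty afterwards. */
            wake_up_pollfree(&ctx->wqh);

            /* Per the kernel-doc below, the free must be RCU-delayed. */
            synchronize_rcu();
            kfree(ctx);
    }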

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20211209010455.42744-2-ebiggers@kernel.org


Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 09297c39
include/linux/wait.h  +26 −0
@@ -191,6 +191,7 @@ void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
@@ -215,6 +216,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 #define wake_up_interruptible_sync_poll(x, m)					\
 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
 
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	/*
+	 * For performance reasons, we don't always take the queue lock here.
+	 * Therefore, we might race with someone removing the last entry from
+	 * the queue, and proceed while they still hold the queue lock.
+	 * However, rcu_read_lock() is required to be held in such cases, so we
+	 * can safely proceed with an RCU-delayed free.
+	 */
+	if (waitqueue_active(wq_head))
+		__wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)						\
 ({										\
 	bool __cond = (condition);						\
kernel/sched/wait.c  +8 −0
@@ -11,6 +11,7 @@
 #include <linux/wait.h>
 #include <linux/hash.h>
 #include <linux/kthread.h>
+#include <linux/poll.h>
 
 void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
 {
@@ -213,6 +214,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_e
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	__wake_up(wq_head, TASK_NORMAL, 0, (void *)(POLLHUP | POLLFREE));
+	/* POLLFREE must have cleared the queue. */
+	WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any