drivers/android/binder.c +9 −12

@@ -4748,23 +4748,20 @@ static int binder_thread_release(struct binder_proc *proc,
 	}
 
 	/*
-	 * If this thread used poll, make sure we remove the waitqueue
-	 * from any epoll data structures holding it with POLLFREE.
-	 * waitqueue_active() is safe to use here because we're holding
-	 * the inner lock.
+	 * If this thread used poll, make sure we remove the waitqueue from any
+	 * poll data structures holding it.
 	 */
-	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
-	    waitqueue_active(&thread->wait)) {
-		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
-	}
+	if (thread->looper & BINDER_LOOPER_STATE_POLL)
+		wake_up_pollfree(&thread->wait);
 
 	binder_inner_proc_unlock(thread->proc);
 
 	/*
-	 * This is needed to avoid races between wake_up_poll() above and
-	 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-	 * lock, so we can be sure it's done after calling synchronize_rcu().
+	 * This is needed to avoid races between wake_up_pollfree() above and
+	 * someone else removing the last entry from the queue for other reasons
+	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
+	 * descriptor being closed).  Such other users hold an RCU read lock, so
+	 * we can be sure they're done after we call synchronize_rcu().
 	 */
 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
 		synchronize_rcu();
include/linux/wait.h +26 −0

@@ -192,6 +192,7 @@ void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)

@@ -219,6 +220,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 #define wake_up_interruptible_sync_poll(x, m)					\
 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
 
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	/*
+	 * For performance reasons, we don't always take the queue lock here.
+	 * Therefore, we might race with someone removing the last entry from
+	 * the queue, and proceed while they still hold the queue lock.
+	 * However, rcu_read_lock() is required to be held in such cases, so we
+	 * can safely proceed with an RCU-delayed free.
+	 */
+	if (waitqueue_active(wq_head))
+		__wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)						\
 ({										\
 	bool __cond = (condition);						\
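For context, here is a minimal sketch of how a caller is expected to use the new helper on its teardown path, mirroring the binder change above. The names struct my_object and my_object_release() are hypothetical and not part of this patch; they only stand in for an object whose waitqueue lives outside the polled struct file.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

/* Hypothetical object whose lifetime follows a task, not the polled file. */
struct my_object {
	wait_queue_head_t wait;		/* handed to poll_wait() in ->poll() */
	/* ... */
};

static void my_object_release(struct my_object *obj)
{
	/*
	 * Tell non-blocking pollers (e.g. epoll) to drop their wait queue
	 * entries before the waitqueue memory disappears.
	 */
	wake_up_pollfree(&obj->wait);

	/*
	 * Concurrent removers such as ep_remove_wait_queue() run under
	 * rcu_read_lock(), so waiting out a grace period guarantees they
	 * are done before the memory is reused.
	 */
	synchronize_rcu();

	kfree(obj);
}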
kernel/sched/wait.c +7 −0

@@ -209,6 +209,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_e
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+	/* POLLFREE must have cleared the queue. */
+	WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any
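To illustrate the "very rare case" that the wake_up_pollfree() kerneldoc describes, a hypothetical ->poll() implementation registering a per-task waitqueue might look like the sketch below. struct my_thread_state, my_thread_state_find() and my_driver_poll() are assumptions for illustration only, not code from this patch.

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

/*
 * Hypothetical per-task state: its waitqueue is freed when the task's
 * state is torn down, which can happen while the file is still polled.
 */
struct my_thread_state {
	wait_queue_head_t wait;
	/* ... */
};

/* Hypothetical lookup of the calling task's state, assumed to exist. */
struct my_thread_state *my_thread_state_find(struct file *file);

static __poll_t my_driver_poll(struct file *file, poll_table *pt)
{
	struct my_thread_state *ts = my_thread_state_find(file);
	__poll_t mask = 0;

	/* epoll can keep an entry on ts->wait long after this call returns. */
	poll_wait(file, &ts->wait, pt);

	/* readiness checks elided */
	return mask;
}

Because epoll's entry can outlive the per-task state, the teardown path for my_thread_state must call wake_up_pollfree() and then RCU-delay the free, exactly as the binder hunk above does for thread->wait.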