Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 296e236e authored by Davide Libenzi, committed by Linus Torvalds
Browse files

epoll: fix epoll's own poll (update)



Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Cc: Pavel Pisa <pisa@cmp.felk.cvut.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5071f97e
Loading
Loading
Loading
Loading
+57 −53
Original line number Diff line number Diff line
@@ -454,9 +454,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	struct list_head txlist;

	INIT_LIST_HEAD(&txlist);
	LIST_HEAD(txlist);

	/*
	 * We need to lock this because we could be hit by
@@ -473,8 +471,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
	 * in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	list_splice(&ep->rdllist, &txlist);
	INIT_LIST_HEAD(&ep->rdllist);
	list_splice_init(&ep->rdllist, &txlist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

@@ -514,8 +511,8 @@ static int ep_scan_ready_list(struct eventpoll *ep,

	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and the ->poll()
		 * wait list (delayed after we release the lock).
		 * Wake up (if active) both the eventpoll wait list and
		 * the ->poll() wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
@@ -632,7 +629,8 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
	return 0;
}

static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, void *priv)
static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct epitem *epi, *tmp;

@@ -640,7 +638,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, voi
		if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
		    epi->event.events)
			return POLLIN | POLLRDNORM;
		else
		else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as
@@ -648,6 +646,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, voi
			 */
			list_del_init(&epi->rdllink);
		}
	}

	return 0;
}
@@ -872,10 +871,11 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
@@ -1055,7 +1055,8 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
	return 0;
}

static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, void *priv)
static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct ep_send_events_data *esed = priv;
	int eventcnt;
@@ -1091,26 +1092,28 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, voi
			uevent++;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
  			else if (!(epi->event.events & EPOLLET))
  				/*
				 * If this file has been added with Level Trigger
				 * mode, we need to insert back inside the ready
				 * list, so that the next call to epoll_wait()
				 * will check again the events availability.
  				 * At this point, noone can insert into ep->rdllist
  				 * besides us. The epoll_ctl() callers are locked
				 * out by ep_scan_ready_list() holding "mtx" and
				 * the poll callback will queue them in ep->ovflist.
			else if (!(epi->event.events & EPOLLET)) {
				/*
				 * If this file has been added with Level
				 * Trigger mode, we need to insert back inside
				 * the ready list, so that the next call to
				 * epoll_wait() will check again the events
				 * availability. At this point, noone can insert
				 * into ep->rdllist besides us. The epoll_ctl()
				 * callers are locked out by
				 * ep_scan_ready_list() holding "mtx" and the
				 * poll callback will queue them in ep->ovflist.
				 */
				list_add_tail(&epi->rdllink, &ep->rdllist);
			}
		}
	}

	return eventcnt;
}

static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events,
			  int maxevents)
static int ep_send_events(struct eventpoll *ep,
			  struct epoll_event __user *events, int maxevents)
{
	struct ep_send_events_data esed;

@@ -1194,40 +1197,41 @@ retry:
 */
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
	int error;
	struct eventpoll *ep = NULL;
	int error, fd = -1;
	struct eventpoll *ep;

	/* Check the EPOLL_* constant for consistency.  */
	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);

	if (flags & ~EPOLL_CLOEXEC)
		return -EINVAL;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
		     current, flags));

	error = -EINVAL;
	if (flags & ~EPOLL_CLOEXEC)
		goto error_return;

	/*
	 * Create the internal data structure ( "struct eventpoll" ).
	 */
	error = ep_alloc(&ep);
	if (error < 0)
	if (error < 0) {
		fd = error;
		goto error_return;
	}

	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure and a free file descriptor.
	 */
	error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
	fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
			      flags & O_CLOEXEC);
	if (error < 0)
	if (fd < 0)
		ep_free(ep);

error_return:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, flags, error));
		     current, flags, fd));

	return error;
	return fd;
}

SYSCALL_DEFINE1(epoll_create, int, size)