Commit 4e0982a0 authored by Davidlohr Bueso, committed by Linus Torvalds

fs/epoll: simplify ep_send_events_proc() ready-list loop

The current logic is a bit convoluted.  Let's simplify this with a
standard list_for_each_entry_safe() loop instead and just break out
after maxevents is reached.

While at it, remove an unnecessary indentation level in the loop when
there are in fact ready events.

Link: http://lkml.kernel.org/r/20181108051006.18751-3-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Jason Baron <jbaron@akamai.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 74bdc129
...@@ -1624,21 +1624,22 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head ...@@ -1624,21 +1624,22 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head
{ {
struct ep_send_events_data *esed = priv; struct ep_send_events_data *esed = priv;
__poll_t revents; __poll_t revents;
struct epitem *epi; struct epitem *epi, *tmp;
struct epoll_event __user *uevent; struct epoll_event __user *uevent = esed->events;
struct wakeup_source *ws; struct wakeup_source *ws;
poll_table pt; poll_table pt;
init_poll_funcptr(&pt, NULL); init_poll_funcptr(&pt, NULL);
esed->res = 0;
/* /*
* We can loop without lock because we are passed a task private list. * We can loop without lock because we are passed a task private list.
* Items cannot vanish during the loop because ep_scan_ready_list() is * Items cannot vanish during the loop because ep_scan_ready_list() is
* holding "mtx" during this call. * holding "mtx" during this call.
*/ */
for (esed->res = 0, uevent = esed->events; list_for_each_entry_safe(epi, tmp, head, rdllink) {
!list_empty(head) && esed->res < esed->maxevents;) { if (esed->res >= esed->maxevents)
epi = list_first_entry(head, struct epitem, rdllink); break;
/* /*
* Activate ep->ws before deactivating epi->ws to prevent * Activate ep->ws before deactivating epi->ws to prevent
...@@ -1658,42 +1659,42 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head ...@@ -1658,42 +1659,42 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head
list_del_init(&epi->rdllink); list_del_init(&epi->rdllink);
revents = ep_item_poll(epi, &pt, 1);
/* /*
* If the event mask intersect the caller-requested one, * If the event mask intersect the caller-requested one,
* deliver the event to userspace. Again, ep_scan_ready_list() * deliver the event to userspace. Again, ep_scan_ready_list()
* is holding "mtx", so no operations coming from userspace * is holding ep->mtx, so no operations coming from userspace
* can change the item. * can change the item.
*/ */
if (revents) { revents = ep_item_poll(epi, &pt, 1);
if (__put_user(revents, &uevent->events) || if (!revents)
__put_user(epi->event.data, &uevent->data)) { continue;
list_add(&epi->rdllink, head);
ep_pm_stay_awake(epi); if (__put_user(revents, &uevent->events) ||
if (!esed->res) __put_user(epi->event.data, &uevent->data)) {
esed->res = -EFAULT; list_add(&epi->rdllink, head);
return 0; ep_pm_stay_awake(epi);
} if (!esed->res)
esed->res++; esed->res = -EFAULT;
uevent++; return 0;
if (epi->event.events & EPOLLONESHOT) }
epi->event.events &= EP_PRIVATE_BITS; esed->res++;
else if (!(epi->event.events & EPOLLET)) { uevent++;
/* if (epi->event.events & EPOLLONESHOT)
* If this file has been added with Level epi->event.events &= EP_PRIVATE_BITS;
* Trigger mode, we need to insert back inside else if (!(epi->event.events & EPOLLET)) {
* the ready list, so that the next call to /*
* epoll_wait() will check again the events * If this file has been added with Level
* availability. At this point, no one can insert * Trigger mode, we need to insert back inside
* into ep->rdllist besides us. The epoll_ctl() * the ready list, so that the next call to
* callers are locked out by * epoll_wait() will check again the events
* ep_scan_ready_list() holding "mtx" and the * availability. At this point, no one can insert
* poll callback will queue them in ep->ovflist. * into ep->rdllist besides us. The epoll_ctl()
*/ * callers are locked out by
list_add_tail(&epi->rdllink, &ep->rdllist); * ep_scan_ready_list() holding "mtx" and the
ep_pm_stay_awake(epi); * poll callback will queue them in ep->ovflist.
} */
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment