Commit 86c05179 authored by Davidlohr Bueso, committed by Linus Torvalds

fs/epoll: deal with wait_queue only once

There is no reason why we rearm the waitqueue upon every fetch_events
retry (for when events are found yet send_events() fails).  If nothing
else, this saves four lock operations per retry (a spin_lock/spin_unlock
pair for both adding and removing the wait entry), and it further
reduces the scope over which the lock is held.

[akpm@linux-foundation.org: restore code to original position, fix and reflow comment]
Link: http://lkml.kernel.org/r/20181114182532.27981-2-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Jason Baron <jbaron@akamai.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 35cff1a6
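Before this change, ep_poll() added itself to and removed itself from ep->wq on every pass through the fetch_events retry path; with it, the wait queue entry is armed at most once (guarded by the new waiter flag) and disarmed once on the way out. The userspace sketch below illustrates just that arm-once pattern; struct waitqueue, wq_add(), wq_remove() and friends are hypothetical stand-ins, not the kernel's APIs, and the event/retry logic is faked:

/* Minimal userspace sketch of the arm-once pattern. All names here
 * (struct waitqueue, wq_add(), wq_remove(), poll_once()) are
 * hypothetical stand-ins, not the kernel's APIs. */
#include <stdbool.h>
#include <stdio.h>

struct waitqueue { int nr_waiters; };

static void wq_lock(struct waitqueue *wq)   { (void)wq; /* spin_lock_irq() stand-in */ }
static void wq_unlock(struct waitqueue *wq) { (void)wq; /* spin_unlock_irq() stand-in */ }
static void wq_add(struct waitqueue *wq)    { wq->nr_waiters++; }
static void wq_remove(struct waitqueue *wq) { wq->nr_waiters--; }

/* Returns the number of events handed to the caller. */
static int poll_once(struct waitqueue *wq, int budget)
{
	bool waiter = false;
	int res, tries = 3;	/* fake "delivery fails twice, then succeeds" */

fetch_events:
	if (!waiter) {
		/* Arm the wait queue entry only on the first pass. */
		waiter = true;
		wq_lock(wq);
		wq_add(wq);
		wq_unlock(wq);
	}

	/* ... sleep until woken, then try to push events to the caller ... */
	res = --tries ? 0 : budget;
	if (!res && tries)
		goto fetch_events;	/* retry WITHOUT rearming the entry */

	if (waiter) {
		/* Disarm exactly once, on the way out. */
		wq_lock(wq);
		wq_remove(wq);
		wq_unlock(wq);
	}
	return res;
}

int main(void)
{
	struct waitqueue wq = { 0 };
	int res = poll_once(&wq, 1);

	printf("delivered %d event(s), %d waiter(s) left\n", res, wq.nr_waiters);
	return 0;
}

Built with any C99 compiler, this reports zero waiters left after a single delivery; the point is that the add/remove lock pairs run once regardless of how many times the goto retries, which is exactly the four-lock-operations-per-retry saving described above. The actual change to fs/eventpoll.c follows.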
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1749,6 +1749,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 {
 	int res = 0, eavail, timed_out = 0;
 	u64 slack = 0;
+	bool waiter = false;
 	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
 
@@ -1794,14 +1795,18 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	ep_reset_busy_poll_napi_id(ep);
 
 	/*
-	 * We don't have any available event to return to the caller.
-	 * We need to sleep here, and we will be wake up by
-	 * ep_poll_callback() when events will become available.
+	 * We don't have any available event to return to the caller. We need
+	 * to sleep here, and we will be woken by ep_poll_callback() when events
+	 * become available.
 	 */
-	init_waitqueue_entry(&wait, current);
-	spin_lock_irq(&ep->wq.lock);
-	__add_wait_queue_exclusive(&ep->wq, &wait);
-	spin_unlock_irq(&ep->wq.lock);
+	if (!waiter) {
+		waiter = true;
+		init_waitqueue_entry(&wait, current);
+
+		spin_lock_irq(&ep->wq.lock);
+		__add_wait_queue_exclusive(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
 
 	for (;;) {
 		/*
@@ -1837,10 +1842,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 
 	__set_current_state(TASK_RUNNING);
 
-	spin_lock_irq(&ep->wq.lock);
-	__remove_wait_queue(&ep->wq, &wait);
-	spin_unlock_irq(&ep->wq.lock);
-
 send_events:
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and
@@ -1851,6 +1852,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
 		goto fetch_events;
 
+	if (waiter) {
+		spin_lock_irq(&ep->wq.lock);
+		__remove_wait_queue(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
+
 	return res;
 }
 