Commit c16bda37 authored by Jens Axboe

io_uring/poll: allow some retries for poll triggering spuriously

If we get woken spuriously when polling and fail the operation with
-EAGAIN again, then we generally only allow polling again if data
had been transferred at some point. This is indicated with
REQ_F_PARTIAL_IO. However, if the spurious poll triggers when the socket
was originally empty, then we haven't transferred data yet and we will
fail the poll re-arm. This either punts the socket to io-wq if it's
blocking, or it fails the request with -EAGAIN if not. Neither outcome
is desirable: the former slows things down, while the latter confuses
the application.

We want to ensure that a repeated poll trigger doesn't lead to infinite
work making no progress; that's what the REQ_F_PARTIAL_IO check was
for. But it doesn't protect against a loop after the first receive, and
it's unnecessarily strict if we started out with an empty socket.

Add a somewhat arbitrary retry count, just to put an upper limit on the
number of retries that will be attempted. This should be high enough
that we won't realistically hit it in practice, unless something needs
to be aborted anyway.

Cc: stable@vger.kernel.org # v5.10+
Link: https://github.com/axboe/liburing/issues/364
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7605c43d
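As an aside before the diff: the condition being removed from io_arm_poll_handler() relies on a mask-and-compare bit idiom that is true only when REQ_F_POLLED is set and REQ_F_PARTIAL_IO is clear, i.e. "we already polled once, but never transferred any data". A minimal standalone sketch of that idiom, using made-up bit positions (the kernel defines the real REQ_F_* flags elsewhere, with different values):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit positions only; not the kernel's actual values. */
enum {
	REQ_F_POLLED     = 1 << 0,
	REQ_F_PARTIAL_IO = 1 << 1,
};

static bool polled_without_progress(unsigned int flags)
{
	/* Masking both bits but comparing against REQ_F_POLLED alone
	 * tests "POLLED set AND PARTIAL_IO clear" in one expression. */
	return (flags & (REQ_F_POLLED | REQ_F_PARTIAL_IO)) == REQ_F_POLLED;
}

int main(void)
{
	/* 1: spurious re-poll with no progress -> the old code aborted */
	printf("%d\n", polled_without_progress(REQ_F_POLLED));
	/* 0: data was transferred at some point -> re-arm was allowed */
	printf("%d\n", polled_without_progress(REQ_F_POLLED | REQ_F_PARTIAL_IO));
	return 0;
}

This is exactly the check the patch drops in favor of the bounded retry count.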
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -650,6 +650,14 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
+/*
+ * We can't reliably detect loops in repeated poll triggers and issue
+ * subsequently failing. But rather than fail these immediately, allow a
+ * certain amount of retries before we give up. Given that this condition
+ * should _rarely_ trigger even once, we should be fine with a larger value.
+ */
+#define APOLL_MAX_RETRY	128
+
 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 					     unsigned issue_flags)
 {
@@ -665,14 +673,18 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		if (entry == NULL)
 			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
+		apoll->poll.retries = APOLL_MAX_RETRY;
 	} else {
 alloc_apoll:
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
 			return NULL;
+		apoll->poll.retries = APOLL_MAX_RETRY;
 	}
 	apoll->double_poll = NULL;
 	req->apoll = apoll;
+	if (unlikely(!--apoll->poll.retries))
+		return NULL;
 	return apoll;
 }
@@ -694,8 +706,6 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 		return IO_APOLL_ABORTED;
 	if (!file_can_poll(req->file))
 		return IO_APOLL_ABORTED;
-	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
-		return IO_APOLL_ABORTED;
 	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
 		mask |= EPOLLONESHOT;
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -12,6 +12,7 @@ struct io_poll {
 	struct file			*file;
 	struct wait_queue_head		*head;
 	__poll_t			events;
+	int				retries;
 	struct wait_queue_entry		wait;
 };
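For context, here is a minimal, self-contained C sketch of the bounded-retry pattern the diff implements: the budget is set when the async poll entry is first handed out, each re-arm of the same request consumes one retry, and exhausting the budget makes the helper return NULL, which aborts the arm (the IO_APOLL_ABORTED path). The poll_state and get_poll_state names are illustrative stand-ins, not the kernel's types or API:

#include <stdbool.h>
#include <stdio.h>

#define APOLL_MAX_RETRY	128	/* same cap the patch introduces */

/* Stand-in for the retries field added to struct io_poll. */
struct poll_state {
	int retries;
};

/*
 * Stand-in for io_req_alloc_apoll(): the budget is reset only when the
 * request is armed for the first time; a re-arm after a spurious wakeup
 * reuses the state and consumes one retry. Returning NULL models the
 * aborted-arm path.
 */
static struct poll_state *get_poll_state(struct poll_state *ps, bool first_arm)
{
	if (first_arm)
		ps->retries = APOLL_MAX_RETRY;
	if (!--ps->retries)
		return NULL;
	return ps;
}

int main(void)
{
	struct poll_state ps;
	int arms = 0;

	if (get_poll_state(&ps, true))
		arms++;
	while (get_poll_state(&ps, false))
		arms++;

	/* Prints 127: the budget bounds how many times a spurious
	 * trigger can re-arm the poll before the request is failed. */
	printf("poll armed %d times before giving up\n", arms);
	return 0;
}

Note that the decrement happens on every trip through the allocation helper, including the first, so the total number of successful arms is APOLL_MAX_RETRY - 1; the cap is deliberately generous, since a well-behaved socket should rarely trigger spuriously even once.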