Commit a2416e1e authored by Pavel Begunkov, committed by Jens Axboe

io_uring: don't halt iopoll too early

IOPOLL users care about getting completions for the requests they
submitted, not about whether the device did/completed something.
Currently, io_do_iopoll() may return a positive number, which instructs
io_iopoll_check() to break its loop and end the syscall, even if there
are not enough CQEs, or none at all.

Don't return positive numbers, so that io_iopoll_check() exits only
when it gets an actual error, needs to reschedule, or has gathered
enough CQEs.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/641a88f751623b6758303b3171f0a4141f06726e.1628471125.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 864ea921
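
For context on the behaviour described in the message above, the sketch below models the caller-side pattern: a polling loop that stops as soon as the poller returns non-zero. It is a minimal, self-contained illustration; do_iopoll(), iopoll_check() and the exact loop condition are stand-ins assumed for this example, not the verbatim kernel source.

/* Minimal sketch of the pre-patch behaviour; do_iopoll()/iopoll_check()
 * are hypothetical stand-ins, not the real kernel functions. */
#include <stdio.h>

/* Pretend poller: <0 means error, >0 means "the device did something",
 * and *nr_events is bumped only when one of our requests completed. */
static int do_iopoll(int *nr_events)
{
	(void)nr_events;	/* device activity, but no CQE for our requests */
	return 1;
}

static int iopoll_check(int min)
{
	int nr_events = 0, ret;

	do {
		ret = do_iopoll(&nr_events);
		/* Pre-patch model: any non-zero ret ends the loop, even with 0 CQEs */
	} while (!ret && nr_events < min);

	return ret < 0 ? ret : 0;
}

int main(void)
{
	/* Returns with zero completions despite asking to wait for 8 CQEs. */
	printf("iopoll_check(8) = %d\n", iopoll_check(8));
	return 0;
}

With the patch below, io_do_iopoll() only returns 0 or a negative error, so the equivalent loop keeps polling until it hits an error, needs to reschedule, or has gathered enough CQEs.
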
@@ -2288,7 +2288,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	struct io_kiocb *req, *tmp;
 	LIST_HEAD(done);
 	bool spin;
-	int ret;
 
 	/*
 	 * Only spin for completions if we don't have multiple devices hanging
@@ -2296,9 +2295,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	 */
 	spin = !ctx->poll_multi_queue && *nr_events < min;
 
-	ret = 0;
 	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
 		struct kiocb *kiocb = &req->rw.kiocb;
+		int ret;
 
 		/*
 		 * Move completed and retryable entries to our local lists.
@@ -2313,22 +2312,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 			break;
 
 		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
-		if (ret < 0)
-			break;
-		else if (ret)
-			spin = false;
+		if (unlikely(ret < 0))
+			return ret;
 
 		/* iopoll may have completed current req */
 		if (READ_ONCE(req->iopoll_completed))
 			list_move_tail(&req->inflight_entry, &done);
-
-		ret = 0;
+		if (ret && spin)
+			spin = false;
 	}
 
 	if (!list_empty(&done))
 		io_iopoll_complete(ctx, nr_events, &done, resubmit);
 
-	return ret;
+	return 0;
 }