Commit b66ceaf3 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: move iopoll reissue into regular IO path

230d50d4 ("io_uring: move reissue into regular IO path")
made non-IOPOLL I/O not retry from the ki_complete handler. Follow in
its footsteps and do the same for IOPOLL: same problems, same
implementation, same -EAGAIN assumptions.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/f80dfee2d5fa7678f0052a8ab3cfca9496a112ca.1631699928.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7dedd3e1
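
For readers following along, the new flow is: io_complete_rw_iopoll() no longer fails a request or clears reissue on an -EAGAIN completion, it just sets REQ_F_REISSUE and returns, and kiocb_done() in the regular IO path then either re-queues the request (io_req_task_queue_reissue()) or fails it. The snippet below is only a minimal userspace sketch of that flag-based deferral pattern, not kernel code; the struct, flag and function names (request, REQ_REISSUE, should_reissue, complete_iopoll, finish_request) are hypothetical stand-ins for the io_uring ones.

/*
 * Minimal userspace sketch (not kernel code) of the reissue-deferral
 * pattern this patch applies to IOPOLL: the completion callback never
 * retries by itself, it only marks the request, and the regular IO
 * path decides whether to re-queue or fail it.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { REQ_REISSUE = 1u << 0 };		/* stand-in for REQ_F_REISSUE */

struct request {
	unsigned int flags;
	int result;
};

static bool should_reissue(const struct request *req)
{
	/* stand-in for io_rw_should_reissue(): same task, pollable file, ... */
	(void)req;
	return true;
}

/* completion handler: only flag the request, never retry inline */
static void complete_iopoll(struct request *req, int res)
{
	if (res == -EAGAIN && should_reissue(req)) {
		req->flags |= REQ_REISSUE;
		return;
	}
	req->result = res;
}

/* regular IO path: the one place that re-queues or fails the request */
static void finish_request(struct request *req)
{
	if (req->flags & REQ_REISSUE) {
		req->flags &= ~REQ_REISSUE;
		/* kernel equivalent: io_req_task_queue_reissue(req) */
		printf("re-queueing request for another attempt\n");
		return;
	}
	printf("completing request, result=%d\n", req->result);
}

int main(void)
{
	struct request req = { .flags = 0, .result = 0 };

	complete_iopoll(&req, -EAGAIN);	/* device said "try again" */
	finish_request(&req);		/* the reissue happens here, not above */

	complete_iopoll(&req, 4096);	/* a successful 4 KiB completion */
	finish_request(&req);
	return 0;
}

As with the earlier non-IOPOLL change, the benefit is that resubmission preparation (io_resubmit_prep()) runs from the regular IO path rather than inside the completion handler.
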
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -736,7 +736,6 @@ enum {
 	REQ_F_BUFFER_SELECTED_BIT,
 	REQ_F_COMPLETE_INLINE_BIT,
 	REQ_F_REISSUE_BIT,
-	REQ_F_DONT_REISSUE_BIT,
 	REQ_F_CREDS_BIT,
 	REQ_F_REFCOUNT_BIT,
 	REQ_F_ARM_LTIMEOUT_BIT,
@@ -783,8 +782,6 @@ enum {
 	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
 	/* caller should reissue async */
 	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
-	/* don't attempt request reissue, see io_rw_reissue() */
-	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
 	/* supports async reads */
 	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
 	/* supports async writes */
@@ -2440,13 +2437,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		list_del(&req->inflight_entry);
 
-		if (READ_ONCE(req->result) == -EAGAIN &&
-		    !(req->flags & REQ_F_DONT_REISSUE)) {
-			req->iopoll_completed = 0;
-			io_req_task_queue_reissue(req);
-			continue;
-		}
-
 		__io_cqring_fill_event(ctx, req->user_data, req->result,
 					io_put_rw_kbuf(req));
 		(*nr_events)++;
@@ -2709,10 +2699,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 	if (kiocb->ki_flags & IOCB_WRITE)
 		kiocb_end_write(req);
 	if (unlikely(res != req->result)) {
-		if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
-		    io_resubmit_prep(req))) {
-			req_set_fail(req);
-			req->flags |= REQ_F_DONT_REISSUE;
+		if (res == -EAGAIN && io_rw_should_reissue(req)) {
+			req->flags |= REQ_F_REISSUE;
+			return;
 		}
 	}
 
@@ -2926,7 +2915,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 	struct io_async_rw *io = req->async_data;
-	bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
 	/* add previously done IO, if any */
 	if (io && io->bytes_done > 0) {
@@ -2938,19 +2926,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 
 	if (req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = kiocb->ki_pos;
-	if (ret >= 0 && check_reissue)
+	if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
 		__io_complete_rw(req, ret, 0, issue_flags);
 	else
 		io_rw_done(kiocb, ret);
 
-	if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+	if (req->flags & REQ_F_REISSUE) {
 		req->flags &= ~REQ_F_REISSUE;
 		if (io_resubmit_prep(req)) {
 			io_req_task_queue_reissue(req);
 		} else {
+			unsigned int cflags = io_put_rw_kbuf(req);
+			struct io_ring_ctx *ctx = req->ctx;
+
 			req_set_fail(req);
-			__io_req_complete(req, issue_flags, ret,
-					  io_put_rw_kbuf(req));
+			if (issue_flags & IO_URING_F_NONBLOCK) {
+				mutex_lock(&ctx->uring_lock);
+				__io_req_complete(req, issue_flags, ret, cflags);
+				mutex_unlock(&ctx->uring_lock);
+			} else {
+				__io_req_complete(req, issue_flags, ret, cflags);
+			}
 		}
 	}
 }