Commit d01905db authored by Pavel Begunkov, committed by Jens Axboe

io_uring: clean iowq submit work cancellation

If we've got IO_WQ_WORK_CANCEL in io_wq_submit_work(), handle the error
right at the check instead of having a weird code flow. The main loop
doesn't change, but shifts one indentation level to the left.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ff4a09cf41f7a22bbb294b6f1faea721e21fe615.1634987320.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <haoxu@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 255657d2
@@ -6721,6 +6721,8 @@ static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
 static void io_wq_submit_work(struct io_wq_work *work)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+	unsigned int issue_flags = IO_URING_F_UNLOCKED;
+	bool needs_poll = false;
 	struct io_kiocb *timeout;
 	int ret = 0;
 
@@ -6735,40 +6737,37 @@ static void io_wq_submit_work(struct io_wq_work *work)
 		io_queue_linked_timeout(timeout);
 
 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
-	if (work->flags & IO_WQ_WORK_CANCEL)
-		ret = -ECANCELED;
+	if (work->flags & IO_WQ_WORK_CANCEL) {
+		io_req_task_queue_fail(req, -ECANCELED);
+		return;
+	}
 
-	if (!ret) {
-		bool needs_poll = false;
-		unsigned int issue_flags = IO_URING_F_UNLOCKED;
-
-		if (req->flags & REQ_F_FORCE_ASYNC) {
-			needs_poll = req->file && file_can_poll(req->file);
-			if (needs_poll)
-				issue_flags |= IO_URING_F_NONBLOCK;
-		}
-
-		do {
-			ret = io_issue_sqe(req, issue_flags);
-			if (ret != -EAGAIN)
-				break;
-			/*
-			 * We can get EAGAIN for iopolled IO even though we're
-			 * forcing a sync submission from here, since we can't
-			 * wait for request slots on the block side.
-			 */
-			if (!needs_poll) {
-				cond_resched();
-				continue;
-			}
-
-			if (io_arm_poll_handler(req) == IO_APOLL_OK)
-				return;
-			/* aborted or ready, in either case retry blocking */
-			needs_poll = false;
-			issue_flags &= ~IO_URING_F_NONBLOCK;
-		} while (1);
-	}
+	if (req->flags & REQ_F_FORCE_ASYNC) {
+		needs_poll = req->file && file_can_poll(req->file);
+		if (needs_poll)
+			issue_flags |= IO_URING_F_NONBLOCK;
+	}
+
+	do {
+		ret = io_issue_sqe(req, issue_flags);
+		if (ret != -EAGAIN)
+			break;
+		/*
+		 * We can get EAGAIN for iopolled IO even though we're
+		 * forcing a sync submission from here, since we can't
+		 * wait for request slots on the block side.
+		 */
+		if (!needs_poll) {
+			cond_resched();
+			continue;
+		}
+
+		if (io_arm_poll_handler(req) == IO_APOLL_OK)
+			return;
+		/* aborted or ready, in either case retry blocking */
+		needs_poll = false;
+		issue_flags &= ~IO_URING_F_NONBLOCK;
+	} while (1);
 
 	/* avoid locking problems by failing it from a clean context */
 	if (ret)
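
As a side note, here is a minimal, standalone sketch (plain C, not kernel code) of the control-flow pattern the patch adopts: the cancellation check fails the request and returns immediately, so the main submission path no longer has to sit nested under an "if (!ret)" block. All names below (work_item, WORK_CANCEL, fail_request, issue_request, submit_work) are hypothetical stand-ins rather than the io_uring internals.

#include <errno.h>
#include <stdio.h>

#define WORK_CANCEL	(1u << 0)	/* stand-in for IO_WQ_WORK_CANCEL */

struct work_item {
	unsigned int flags;
	const char *name;
};

/* Stand-in for io_req_task_queue_fail(): complete the request with an error. */
static void fail_request(struct work_item *w, int err)
{
	printf("%s: failed with %d\n", w->name, err);
}

/* Stand-in for the actual submission path (io_issue_sqe() and friends). */
static void issue_request(struct work_item *w)
{
	printf("%s: issued\n", w->name);
}

/* After the patch: cancellation is handled right at the check, via early return. */
static void submit_work(struct work_item *w)
{
	if (w->flags & WORK_CANCEL) {
		fail_request(w, -ECANCELED);
		return;
	}

	/* The main submission path now sits at the top indentation level. */
	issue_request(w);
}

int main(void)
{
	struct work_item ok = { .flags = 0, .name = "req-a" };
	struct work_item cancelled = { .flags = WORK_CANCEL, .name = "req-b" };

	submit_work(&ok);
	submit_work(&cancelled);
	return 0;
}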