Commit cb3d8972 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: refactor io_iopoll_req_issued

A simple refactoring of io_iopoll_req_issued(): move the in_async check inside
the function so we don't pass it around and don't check it twice.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1513bfde4f0c835be25ac69a82737ab0668d7665.1623634181.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 382cb030
@@ -2525,9 +2525,14 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
  * find it from a io_do_iopoll() thread before the issuer is done
  * accessing the kiocb cookie.
  */
-static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
+static void io_iopoll_req_issued(struct io_kiocb *req)
 {
         struct io_ring_ctx *ctx = req->ctx;
+        const bool in_async = io_wq_current_is_worker();
+
+        /* workqueue context doesn't hold uring_lock, grab it now */
+        if (unlikely(in_async))
+                mutex_lock(&ctx->uring_lock);
 
         /*
          * Track whether we have multiple files in our lists. This will impact
@@ -2554,14 +2559,19 @@ static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
         else
                 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
 
-        /*
-         * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
-         * task context or in io worker task context. If current task context is
-         * sq thread, we don't need to check whether should wake up sq thread.
-         */
-        if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
-            wq_has_sleeper(&ctx->sq_data->wait))
-                wake_up(&ctx->sq_data->wait);
+        if (unlikely(in_async)) {
+                /*
+                 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
+                 * in sq thread task context or in io worker task context. If
+                 * current task context is sq thread, we don't need to check
+                 * whether should wake up sq thread.
+                 */
+                if ((ctx->flags & IORING_SETUP_SQPOLL) &&
+                    wq_has_sleeper(&ctx->sq_data->wait))
+                        wake_up(&ctx->sq_data->wait);
+
+                mutex_unlock(&ctx->uring_lock);
+        }
 }
 
 static inline void io_state_file_put(struct io_submit_state *state)
@@ -6215,23 +6225,11 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 
         if (creds)
                 revert_creds(creds);
-
         if (ret)
                 return ret;
-
         /* If the op doesn't have a file, we're not polling for it */
-        if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
-                const bool in_async = io_wq_current_is_worker();
-
-                /* workqueue context doesn't hold uring_lock, grab it now */
-                if (in_async)
-                        mutex_lock(&ctx->uring_lock);
-
-                io_iopoll_req_issued(req, in_async);
-
-                if (in_async)
-                        mutex_unlock(&ctx->uring_lock);
-        }
+        if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
+                io_iopoll_req_issued(req);
 
         return 0;
 }
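
As an aside for readers outside the kernel tree: the shape of this refactoring is easy to reproduce in user space. The sketch below is only an illustration of the idea, letting the callee detect whether it runs in a worker context and take the lock only then, rather than having every call site compute in_async and wrap the call in lock/unlock. None of the names below are io_uring APIs; req_issued(), current_is_worker and the pthread mutex standing in for uring_lock are made up for the example.

/*
 * Minimal user-space sketch (not io_uring code): the callee decides
 * whether it must take the lock, based on its own context, instead of
 * every call site computing in_async and locking around the call.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for uring_lock */
static int iopoll_list_len;                                    /* protected by list_lock */

/* Stand-in for io_wq_current_is_worker(): a per-thread flag. */
static __thread bool current_is_worker;

static void req_issued(void)
{
        const bool in_async = current_is_worker;

        /* Worker threads don't already hold list_lock, so grab it here. */
        if (in_async)
                pthread_mutex_lock(&list_lock);

        iopoll_list_len++;              /* the actual list manipulation */

        if (in_async)
                pthread_mutex_unlock(&list_lock);
}

static void *worker_fn(void *arg)
{
        (void)arg;
        current_is_worker = true;
        req_issued();                   /* no locking boilerplate at the call site */
        return NULL;
}

int main(void)
{
        pthread_t worker;

        /* Submission path: the caller already holds the lock around issue. */
        pthread_mutex_lock(&list_lock);
        req_issued();
        pthread_mutex_unlock(&list_lock);

        /* Async (worker) path: req_issued() takes and drops the lock itself. */
        pthread_create(&worker, NULL, worker_fn, NULL);
        pthread_join(worker, NULL);

        printf("requests issued: %d\n", iopoll_list_len);
        return 0;
}

Build with something like gcc -pthread sketch.c. The point is only the control flow; the kernel's real locking rules around uring_lock are more involved than this toy.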