Commit e5c12945 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: refactor io_fill_cqe_req_aux

The restriction disallowing io-wq as a multishot execution context is
driven by the rules of io_fill_cqe_req_aux(): it should only be called
from the master task context, either from the syscall path or from
task_work. Since task_work now always takes the ctx lock, implying
IO_URING_F_COMPLETE_DEFER, we can assume that the function is always
called with its defer argument set to true.

Kill the argument. Also rename the function for consistency: "fill" in
CQE-related functions has usually meant raw interfaces that only copy
data into the CQ, without the locking, user wakeups and other
accounting that "post" functions take care of.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/93423d106c33116c7d06bf277f651aa68b427328.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8e5b3b89
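For illustration, a minimal userspace-style sketch of the calling convention this refactoring settles on: a multishot handler posts each intermediate result with io_req_post_cqe() and IORING_CQE_F_MORE, and falls back to a terminating completion via io_req_set_res() once posting fails. The struct io_kiocb stub and the bodies of io_req_post_cqe()/io_req_set_res() below are stand-ins invented for this sketch, not the kernel implementations; only the function names and the IORING_CQE_F_MORE flag value come from the kernel sources.

/*
 * Illustrative model only: stub types and stub bodies standing in for
 * the kernel's io_kiocb, io_req_post_cqe() and io_req_set_res(), to
 * show the multishot caller pattern after the rename.  Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define IORING_CQE_F_MORE	(1U << 1)	/* matches the io_uring UAPI flag */

struct io_kiocb { unsigned long long user_data; };	/* stand-in for the kernel struct */

/* Stand-in: pretend the CQ ring accepts the first three entries, then fills up. */
static bool io_req_post_cqe(struct io_kiocb *req, int res, unsigned cflags)
{
	static int posted;

	if (posted >= 3)
		return false;
	posted++;
	printf("CQE: user_data=%llu res=%d cflags=%#x\n", req->user_data, res, cflags);
	return true;
}

/* Stand-in for io_req_set_res(): record the final, terminating completion. */
static void io_req_set_res(struct io_kiocb *req, int res, unsigned cflags)
{
	printf("final CQE: user_data=%llu res=%d cflags=%#x\n", req->user_data, res, cflags);
}

int main(void)
{
	struct io_kiocb req = { .user_data = 42 };

	/*
	 * Multishot loop: each result is posted with IORING_CQE_F_MORE set,
	 * so the consumer knows more completions will follow.  When posting
	 * fails (ring full in this model), terminate with a plain completion.
	 */
	for (int res = 0; res < 5; res++) {
		if (!io_req_post_cqe(&req, res, IORING_CQE_F_MORE)) {
			io_req_set_res(&req, res, 0);
			break;
		}
	}
	return 0;
}
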
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -907,40 +907,30 @@ static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
 	state->cqes_count = 0;
 }
 
-static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
-			      bool allow_overflow)
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 {
 	bool filled;
 
 	io_cq_lock(ctx);
 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
-	if (!filled && allow_overflow)
+	if (!filled)
 		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
 
 	io_cq_unlock_post(ctx);
 	return filled;
 }
 
-bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
-{
-	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
-}
-
 /*
  * A helper for multishot requests posting additional CQEs.
  * Should only be used from a task_work including IO_URING_F_MULTISHOT.
  */
-bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
+bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	u64 user_data = req->cqe.user_data;
 	struct io_uring_cqe *cqe;
 
 	lockdep_assert(!io_wq_current_is_worker());
-
-	if (!defer)
-		return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
-
 	lockdep_assert_held(&ctx->uring_lock);
 
 	if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->completion_cqes)) {

--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -67,7 +67,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
+bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -706,8 +706,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	 * receive from this socket.
 	 */
 	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
-	    io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
-				*ret, cflags | IORING_CQE_F_MORE)) {
+	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
 		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
 
@@ -1429,8 +1428,7 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret < 0)
 		return ret;
-	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
-				ret, IORING_CQE_F_MORE))
+	if (io_req_post_cqe(req, ret, IORING_CQE_F_MORE))
 		goto retry;
 
 	io_req_set_res(req, ret, 0);

--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -322,8 +322,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 			__poll_t mask = mangle_poll(req->cqe.res &
 						    req->apoll_events);
 
-			if (!io_fill_cqe_req_aux(req, true, mask,
-						 IORING_CQE_F_MORE)) {
+			if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
 				io_req_set_res(req, mask, 0);
 				return IOU_POLL_REMOVE_POLL_USE_RES;
 			}

--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -962,9 +962,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		cflags = io_put_kbuf(req, issue_flags);
 		rw->len = 0; /* similarly to above, reset len to 0 */
 
-		if (io_fill_cqe_req_aux(req,
-					issue_flags & IO_URING_F_COMPLETE_DEFER,
-					ret, cflags | IORING_CQE_F_MORE)) {
+		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
 			if (issue_flags & IO_URING_F_MULTISHOT) {
 				/*
 				 * Force retry, as we might have more data to

--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -72,7 +72,7 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!io_timeout_finish(timeout, data)) {
-		if (io_fill_cqe_req_aux(req, true, -ETIME, IORING_CQE_F_MORE)) {
+		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
 			/* re-arm timer */
 			spin_lock_irq(&ctx->timeout_lock);
 			list_add(&timeout->list, ctx->timeout_list.prev);