Commit 6971253f authored by Pavel Begunkov, committed by Jens Axboe

io_uring: revise completion_lock locking

io_kill_timeouts() doesn't post any events but queues everything to
task_work. Locking there is only needed to protect the traversal of
linked requests, so grab completion_lock directly instead of using the
io_cq_[un]lock helpers. The same goes for __io_req_find_next_prep().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/88e75d481a65dc295cb59722bb1cf76402d1c06b.1670002973.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ea011ee1
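
For context on why the helpers are dropped in the hunks below: io_cq_unlock_post() does more than release the spinlock. A minimal sketch of its shape around this kernel version (an assumption, not the exact tree at this commit; io_commit_cqring() and io_cqring_wake() are the helpers assumed here):

/*
 * Sketch only: the "post" flavour of unlocking also publishes any new
 * CQEs and wakes CQ waiters. Paths such as io_kill_timeouts() post no
 * CQEs inline, so a bare spin_unlock() of completion_lock suffices.
 */
void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);		/* make new CQEs visible */
	spin_unlock(&ctx->completion_lock);
	io_cqring_wake(ctx);		/* wake CQ waiters, if any */
}

With that in mind, replacing the helper with a plain spin_lock/spin_unlock pair in the no-CQE paths is a strict simplification, not a behaviour change.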
@@ -597,6 +597,18 @@ static inline void __io_cq_unlock(struct io_ring_ctx *ctx)
 	spin_unlock(&ctx->completion_lock);
 }
 
+static inline void io_cq_lock(struct io_ring_ctx *ctx)
+	__acquires(ctx->completion_lock)
+{
+	spin_lock(&ctx->completion_lock);
+}
+
+static inline void io_cq_unlock(struct io_ring_ctx *ctx)
+	__releases(ctx->completion_lock)
+{
+	spin_unlock(&ctx->completion_lock);
+}
+
 /* keep it inlined for io_submit_flush_completions() */
 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
 	__releases(ctx->completion_lock)
@@ -1074,9 +1086,9 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	io_cq_lock(ctx);
+	spin_lock(&ctx->completion_lock);
 	io_disarm_next(req);
-	io_cq_unlock_post(ctx);
+	spin_unlock(&ctx->completion_lock);
 }
 
 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
@@ -87,17 +87,6 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
 #define io_for_each_link(pos, head) \
 	for (pos = (head); pos; pos = pos->link)
 
-static inline void io_cq_lock(struct io_ring_ctx *ctx)
-	__acquires(ctx->completion_lock)
-{
-	spin_lock(&ctx->completion_lock);
-}
-
-static inline void io_cq_unlock(struct io_ring_ctx *ctx)
-{
-	spin_unlock(&ctx->completion_lock);
-}
-
 void io_cq_unlock_post(struct io_ring_ctx *ctx);
 
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
@@ -624,7 +624,11 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	struct io_timeout *timeout, *tmp;
 	int canceled = 0;
 
-	io_cq_lock(ctx);
+	/*
+	 * completion_lock is needed for io_match_task(). Take it before
+	 * timeout_lock to keep the locking order.
+	 */
+	spin_lock(&ctx->completion_lock);
 	spin_lock_irq(&ctx->timeout_lock);
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
@@ -634,6 +638,6 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			canceled++;
 	}
 	spin_unlock_irq(&ctx->timeout_lock);
-	io_cq_unlock_post(ctx);
+	spin_unlock(&ctx->completion_lock);
 	return canceled != 0;
 }
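
The reason the timeout path never needs the CQ commit/wakeup half of the helpers is the first line of the commit message: killed timeouts are completed from task_work, not inline. A rough sketch of that pattern (io_req_queue_tw_complete() is the deferred-completion helper assumed from this kernel era; treat the body as illustrative, not the exact tree):

/*
 * Illustrative only: under timeout_lock, cancellation just unlinks the
 * timeout and defers its completion to task_work. The CQE is posted
 * later from the task_work callback, under its own io_cq_lock()/
 * io_cq_unlock_post() section, so io_kill_timeouts() itself never
 * publishes events while holding completion_lock.
 */
static bool io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

	list_del_init(&timeout->list);
	io_req_queue_tw_complete(req, status);	/* queue to task_work */
	return true;
}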