Commit 135fcde8 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: add req->timeout.list

Instead of using the shared req->list, hang timeouts on their own list
entry. struct io_timeout has enough extra space for it, but if that ever
becomes a problem, ->inflight_entry can be reused for it.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 40d8ddd4
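
The change hinges on a property of the kernel's intrusive lists: the member
argument of helpers like list_for_each_entry() is resolved through offsetof(),
so it can name a nested path such as timeout.list just as easily as a
top-level field. Below is a minimal userspace sketch of that pattern, with
simplified stand-ins for the <linux/list.h> helpers; struct request and
struct timeout here are hypothetical analogues of io_kiocb and io_timeout,
not the real definitions.

/*
 * Userspace sketch of the intrusive-list pattern this commit relies on.
 * Simplified stand-ins for <linux/list.h>; "struct request" and
 * "struct timeout" are hypothetical analogues of io_kiocb/io_timeout.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

/* Recover the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * As in the kernel macro, "member" is resolved via offsetof(), so a nested
 * path such as timeout.list works as well as a top-level field; __typeof__
 * is the GCC/Clang extension the kernel itself relies on.
 */
#define list_for_each_entry(pos, head, member)				  \
	for (pos = container_of((head)->next, __typeof__(*pos), member); \
	     &pos->member != (head);					  \
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct timeout {
	unsigned int target_seq;
	struct list_head list;		/* dedicated entry, as in io_timeout */
};

struct request {
	int id;
	struct timeout timeout;
};

int main(void)
{
	struct list_head timeout_list = LIST_HEAD_INIT(timeout_list);
	struct request a = { .id = 1 }, b = { .id = 2 };
	struct request *req;

	list_add_tail(&a.timeout.list, &timeout_list);
	list_add_tail(&b.timeout.list, &timeout_list);

	/* Walk through the nested member, as the patched loops do. */
	list_for_each_entry(req, &timeout_list, timeout.list)
		printf("req %d\n", req->id);
	return 0;
}

Because the entry is embedded in the timeout-specific member, timeout
bookkeeping no longer competes with the other users of the shared req->list,
which is the point of the patch.
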
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -396,6 +396,7 @@ struct io_timeout {
 	int				flags;
 	u32				off;
 	u32				target_seq;
+	struct list_head		list;
 };
 
 struct io_rw {
@@ -1213,7 +1214,7 @@ static void io_kill_timeout(struct io_kiocb *req)
 	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
 	if (ret != -1) {
 		atomic_inc(&req->ctx->cq_timeouts);
-		list_del_init(&req->list);
+		list_del_init(&req->timeout.list);
 		req->flags |= REQ_F_COMP_LOCKED;
 		io_cqring_fill_event(req, 0);
 		io_put_req(req);
@@ -1225,7 +1226,7 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx)
 	struct io_kiocb *req, *tmp;
 
 	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list)
 		io_kill_timeout(req);
 	spin_unlock_irq(&ctx->completion_lock);
 }
@@ -1248,7 +1249,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
 	while (!list_empty(&ctx->timeout_list)) {
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-							struct io_kiocb, list);
+						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
@@ -1256,7 +1257,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 		    - atomic_read(&ctx->cq_timeouts))
 			break;
 
-		list_del_init(&req->list);
+		list_del_init(&req->timeout.list);
 		io_kill_timeout(req);
 	}
 }
@@ -4997,8 +4998,8 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	 * We could be racing with timeout deletion. If the list is empty,
 	 * then timeout lookup already found it and will be handling it.
 	 */
-	if (!list_empty(&req->list))
-		list_del_init(&req->list);
+	if (!list_empty(&req->timeout.list))
+		list_del_init(&req->timeout.list);
 
 	io_cqring_fill_event(req, -ETIME);
 	io_commit_cqring(ctx);
@@ -5015,9 +5016,9 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 	struct io_kiocb *req;
 	int ret = -ENOENT;
 
-	list_for_each_entry(req, &ctx->timeout_list, list) {
+	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
 		if (user_data == req->user_data) {
-			list_del_init(&req->list);
+			list_del_init(&req->timeout.list);
 			ret = 0;
 			break;
 		}
@@ -5139,7 +5140,8 @@ static int io_timeout(struct io_kiocb *req)
 	 * the one we need first.
 	 */
 	list_for_each_prev(entry, &ctx->timeout_list) {
-		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
+		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
+						  timeout.list);
 
 		if (io_is_timeout_noseq(nxt))
 			continue;
@@ -5148,7 +5150,7 @@ static int io_timeout(struct io_kiocb *req)
 			break;
 	}
 add:
-	list_add(&req->list, entry);
+	list_add(&req->timeout.list, entry);
 	data->timer.function = io_timeout_fn;
 	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
 	spin_unlock_irq(&ctx->completion_lock);
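
On the insertion side, io_timeout() keeps ctx->timeout_list ordered by
walking it backwards and linking the new request after the first entry that
should fire no later than it. Continuing the sketch above (and deliberately
ignoring the sequence-offset and wraparound handling the real code performs),
a hypothetical queue_timeout() could look like:

/* Continues the sketch above; list_entry() is the kernel's alias for
 * container_of(). The target_seq comparison is deliberately naive. */
#define list_entry(ptr, type, member)	container_of(ptr, type, member)

#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; pos != (head); pos = pos->prev)

/* Insert "new" immediately after "head". */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void queue_timeout(struct list_head *timeout_list, struct request *req)
{
	struct list_head *entry;

	/* New timeouts usually sort late, so search from the tail. */
	list_for_each_prev(entry, timeout_list) {
		struct request *nxt = list_entry(entry, struct request,
						 timeout.list);

		if (nxt->timeout.target_seq <= req->timeout.target_seq)
			break;
	}
	/* If the walk fell off the front, entry == timeout_list and the
	 * request becomes the new first element. */
	list_add(&req->timeout.list, entry);
}
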