Commit b3fa03fd authored by Pavel Begunkov, committed by Jens Axboe

io_uring: convert iopoll_completed to store_release

Convert the explicit barriers around iopoll_completed to
smp_load_acquire() and smp_store_release(). On the reader side this
replaces a single smp_rmb() with a per-request smp_load_acquire();
neither variant implies any extra CPU ordering on x86. Use READ_ONCE()
as usual where ordering doesn't matter.

Use the new ordering to move the CQE filling done by iopoll earlier;
that will be necessary to avoid traversing the list one extra time in
the future.
Suggested-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8bd663cb15efdc72d6247c38ee810964e744a450.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3aa83bfb
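For reference, the release/acquire pairing this patch switches to can be sketched with C11 atomics, which provide the same one-way ordering guarantees as the kernel's smp_store_release()/smp_load_acquire(). This is only an illustrative userspace analogue of the pattern, not io_uring code; the names demo_req, complete_iopoll() and reap_iopoll() are invented for the example.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the two fields the patch is about. */
struct demo_req {
	long result;			/* plain field, published by the release */
	atomic_int iopoll_completed;	/* completion flag */
};

/* Writer side, analogous to io_complete_rw_iopoll() after the patch. */
static void complete_iopoll(struct demo_req *req, long res)
{
	req->result = res;	/* plain store... */
	/* ...ordered before the flag by the release below */
	atomic_store_explicit(&req->iopoll_completed, 1, memory_order_release);
}

/* Reader side, analogous to the smp_load_acquire() in io_do_iopoll(). */
static int reap_iopoll(struct demo_req *req)
{
	/* acquire pairs with the release above: once the flag reads 1,
	 * ->result is guaranteed to be visible as well */
	if (!atomic_load_explicit(&req->iopoll_completed, memory_order_acquire))
		return 0;
	printf("request completed, result=%ld\n", req->result);
	return 1;
}

static void *completer_thread(void *arg)
{
	complete_iopoll(arg, 42);
	return NULL;
}

int main(void)
{
	struct demo_req req = { .result = 0, .iopoll_completed = 0 };
	pthread_t t;

	pthread_create(&t, NULL, completer_thread, &req);
	while (!reap_iopoll(&req))
		;	/* busy-poll, loosely mimicking io_do_iopoll() */
	pthread_join(t, NULL);
	return 0;
}

As in the patch, the plain store to ->result needs no WRITE_ONCE()/smp_wmb() pair: the release store to the flag publishes it, and the acquire load on the reader side guarantees the result is visible once the flag is observed set.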
@@ -2429,17 +2429,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done)
 	struct req_batch rb;
 	struct io_kiocb *req;
 
-	/* order with ->result store in io_complete_rw_iopoll() */
-	smp_rmb();
-
 	io_init_req_batch(&rb);
 	while (!list_empty(done)) {
 		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		list_del(&req->inflight_entry);
 
-		__io_cqring_fill_event(ctx, req->user_data, req->result,
-					io_put_rw_kbuf(req));
-
 		if (req_ref_put_and_test(req))
 			io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
@@ -2498,8 +2492,12 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	wq_list_for_each_resume(pos, prev) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
 
-		if (!READ_ONCE(req->iopoll_completed))
+		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
+		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
+
+		__io_cqring_fill_event(ctx, req->user_data, req->result,
+					io_put_rw_kbuf(req));
 		list_add_tail(&req->inflight_entry, &done);
 		nr_events++;
 	}
@@ -2712,10 +2710,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 		}
 	}
 
-	WRITE_ONCE(req->result, res);
-	/* order with io_iopoll_complete() checking ->result */
-	smp_wmb();
-	WRITE_ONCE(req->iopoll_completed, 1);
+	req->result = res;
+	/* order with io_iopoll_complete() checking ->iopoll_completed */
+	smp_store_release(&req->iopoll_completed, 1);
 }
 
 /*