Commit e1e16097 authored by Jens Axboe

io_uring: provide generic io_req_complete() helper

We have lots of callers of:

io_cqring_add_event(req, result);
io_put_req(req);

Provide a helper that does this for us. It helps clean up the code, and
also provides a more convenient location for us to change the completion
handling.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d3cac64c
...@@ -1335,7 +1335,7 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res) ...@@ -1335,7 +1335,7 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
__io_cqring_fill_event(req, res, 0); __io_cqring_fill_event(req, res, 0);
} }
static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags) static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
unsigned long flags; unsigned long flags;
...@@ -1348,9 +1348,15 @@ static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags) ...@@ -1348,9 +1348,15 @@ static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
} }
static void io_cqring_add_event(struct io_kiocb *req, long res) static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags)
{ {
__io_cqring_add_event(req, res, 0); io_cqring_add_event(req, res, cflags);
io_put_req(req);
}
static void io_req_complete(struct io_kiocb *req, long res)
{
__io_req_complete(req, res, 0);
} }
static inline bool io_is_fallback_req(struct io_kiocb *req) static inline bool io_is_fallback_req(struct io_kiocb *req)
...@@ -1978,7 +1984,7 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res) ...@@ -1978,7 +1984,7 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
req_set_fail_links(req); req_set_fail_links(req);
if (req->flags & REQ_F_BUFFER_SELECTED) if (req->flags & REQ_F_BUFFER_SELECTED)
cflags = io_put_kbuf(req); cflags = io_put_kbuf(req);
__io_cqring_add_event(req, res, cflags); io_cqring_add_event(req, res, cflags);
} }
static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx) static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
...@@ -2048,9 +2054,8 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error) ...@@ -2048,9 +2054,8 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
return true; return true;
kfree(iovec); kfree(iovec);
end_req: end_req:
io_cqring_add_event(req, ret);
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); io_req_complete(req, ret);
return false; return false;
} }
...@@ -3117,10 +3122,9 @@ static int io_tee(struct io_kiocb *req, bool force_nonblock) ...@@ -3117,10 +3122,9 @@ static int io_tee(struct io_kiocb *req, bool force_nonblock)
io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
req->flags &= ~REQ_F_NEED_CLEANUP; req->flags &= ~REQ_F_NEED_CLEANUP;
io_cqring_add_event(req, ret);
if (ret != sp->len) if (ret != sp->len)
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); io_req_complete(req, ret);
return 0; return 0;
} }
...@@ -3154,10 +3158,9 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock) ...@@ -3154,10 +3158,9 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
req->flags &= ~REQ_F_NEED_CLEANUP; req->flags &= ~REQ_F_NEED_CLEANUP;
io_cqring_add_event(req, ret);
if (ret != sp->len) if (ret != sp->len)
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); io_req_complete(req, ret);
return 0; return 0;
} }
...@@ -3171,8 +3174,7 @@ static int io_nop(struct io_kiocb *req) ...@@ -3171,8 +3174,7 @@ static int io_nop(struct io_kiocb *req)
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL; return -EINVAL;
io_cqring_add_event(req, 0); io_req_complete(req, 0);
io_put_req(req);
return 0; return 0;
} }
...@@ -3211,8 +3213,7 @@ static int io_fsync(struct io_kiocb *req, bool force_nonblock) ...@@ -3211,8 +3213,7 @@ static int io_fsync(struct io_kiocb *req, bool force_nonblock)
req->sync.flags & IORING_FSYNC_DATASYNC); req->sync.flags & IORING_FSYNC_DATASYNC);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -3245,8 +3246,7 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock) ...@@ -3245,8 +3246,7 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -3342,8 +3342,7 @@ static int io_openat2(struct io_kiocb *req, bool force_nonblock) ...@@ -3342,8 +3342,7 @@ static int io_openat2(struct io_kiocb *req, bool force_nonblock)
req->flags &= ~REQ_F_NEED_CLEANUP; req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -3416,8 +3415,7 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock) ...@@ -3416,8 +3415,7 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock)
io_ring_submit_lock(ctx, !force_nonblock); io_ring_submit_lock(ctx, !force_nonblock);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -3504,8 +3502,7 @@ static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock) ...@@ -3504,8 +3502,7 @@ static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
io_ring_submit_unlock(ctx, !force_nonblock); io_ring_submit_unlock(ctx, !force_nonblock);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -3548,8 +3545,7 @@ static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock) ...@@ -3548,8 +3545,7 @@ static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
#else #else
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -3585,8 +3581,7 @@ static int io_madvise(struct io_kiocb *req, bool force_nonblock) ...@@ -3585,8 +3581,7 @@ static int io_madvise(struct io_kiocb *req, bool force_nonblock)
ret = do_madvise(ma->addr, ma->len, ma->advice); ret = do_madvise(ma->addr, ma->len, ma->advice);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
#else #else
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -3625,8 +3620,7 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock) ...@@ -3625,8 +3620,7 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -3665,8 +3659,7 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock) ...@@ -3665,8 +3659,7 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -3722,10 +3715,9 @@ static int io_close(struct io_kiocb *req, bool force_nonblock) ...@@ -3722,10 +3715,9 @@ static int io_close(struct io_kiocb *req, bool force_nonblock)
ret = filp_close(close->put_file, req->work.files); ret = filp_close(close->put_file, req->work.files);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret);
fput(close->put_file); fput(close->put_file);
close->put_file = NULL; close->put_file = NULL;
io_put_req(req); io_req_complete(req, ret);
return 0; return 0;
} }
...@@ -3759,8 +3751,7 @@ static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) ...@@ -3759,8 +3751,7 @@ static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
req->sync.flags); req->sync.flags);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -3859,10 +3850,9 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock) ...@@ -3859,10 +3850,9 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
if (kmsg && kmsg->iov != kmsg->fast_iov) if (kmsg && kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov); kfree(kmsg->iov);
req->flags &= ~REQ_F_NEED_CLEANUP; req->flags &= ~REQ_F_NEED_CLEANUP;
io_cqring_add_event(req, ret);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); io_req_complete(req, ret);
return 0; return 0;
} }
...@@ -3902,10 +3892,9 @@ static int io_send(struct io_kiocb *req, bool force_nonblock) ...@@ -3902,10 +3892,9 @@ static int io_send(struct io_kiocb *req, bool force_nonblock)
ret = -EINTR; ret = -EINTR;
} }
io_cqring_add_event(req, ret);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); io_req_complete(req, ret);
return 0; return 0;
} }
...@@ -4102,10 +4091,9 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock) ...@@ -4102,10 +4091,9 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
if (kmsg && kmsg->iov != kmsg->fast_iov) if (kmsg && kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov); kfree(kmsg->iov);
req->flags &= ~REQ_F_NEED_CLEANUP; req->flags &= ~REQ_F_NEED_CLEANUP;
__io_cqring_add_event(req, ret, cflags);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); __io_req_complete(req, ret, cflags);
return 0; return 0;
} }
...@@ -4159,10 +4147,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock) ...@@ -4159,10 +4147,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock)
kfree(kbuf); kfree(kbuf);
req->flags &= ~REQ_F_NEED_CLEANUP; req->flags &= ~REQ_F_NEED_CLEANUP;
__io_cqring_add_event(req, ret, cflags);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); __io_req_complete(req, ret, cflags);
return 0; return 0;
} }
...@@ -4201,8 +4188,7 @@ static int io_accept(struct io_kiocb *req, bool force_nonblock) ...@@ -4201,8 +4188,7 @@ static int io_accept(struct io_kiocb *req, bool force_nonblock)
ret = -EINTR; ret = -EINTR;
req_set_fail_links(req); req_set_fail_links(req);
} }
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -4262,8 +4248,7 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock) ...@@ -4262,8 +4248,7 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock)
out: out:
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
#else /* !CONFIG_NET */ #else /* !CONFIG_NET */
...@@ -4555,7 +4540,7 @@ static void io_async_task_func(struct callback_head *cb) ...@@ -4555,7 +4540,7 @@ static void io_async_task_func(struct callback_head *cb)
if (!canceled) { if (!canceled) {
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
if (io_sq_thread_acquire_mm(ctx, req)) { if (io_sq_thread_acquire_mm(ctx, req)) {
io_cqring_add_event(req, -EFAULT); io_cqring_add_event(req, -EFAULT, 0);
goto end_req; goto end_req;
} }
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
...@@ -4804,10 +4789,9 @@ static int io_poll_remove(struct io_kiocb *req) ...@@ -4804,10 +4789,9 @@ static int io_poll_remove(struct io_kiocb *req)
ret = io_poll_cancel(ctx, addr); ret = io_poll_cancel(ctx, addr);
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
io_cqring_add_event(req, ret);
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); io_req_complete(req, ret);
return 0; return 0;
} }
...@@ -5163,8 +5147,7 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock) ...@@ -5163,8 +5147,7 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock)
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
return 0; return 0;
} }
...@@ -5657,8 +5640,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr) ...@@ -5657,8 +5640,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
if (ret) { if (ret) {
req_set_fail_links(req); req_set_fail_links(req);
io_cqring_add_event(req, ret); io_req_complete(req, ret);
io_put_req(req);
} }
io_steal_work(req, workptr); io_steal_work(req, workptr);
...@@ -5775,8 +5757,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) ...@@ -5775,8 +5757,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
io_put_req(prev); io_put_req(prev);
} else { } else {
io_cqring_add_event(req, -ETIME); io_req_complete(req, -ETIME);
io_put_req(req);
} }
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
...@@ -5885,9 +5866,8 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -5885,9 +5866,8 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
/* and drop final reference, if we failed */ /* and drop final reference, if we failed */
if (ret) { if (ret) {
io_cqring_add_event(req, ret);
req_set_fail_links(req); req_set_fail_links(req);
io_put_req(req); io_req_complete(req, ret);
} }
if (nxt) { if (nxt) {
req = nxt; req = nxt;
...@@ -5909,9 +5889,9 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -5909,9 +5889,9 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (ret) { if (ret) {
if (ret != -EIOCBQUEUED) { if (ret != -EIOCBQUEUED) {
fail_req: fail_req:
io_cqring_add_event(req, ret);
req_set_fail_links(req); req_set_fail_links(req);
io_double_put_req(req); io_put_req(req);
io_req_complete(req, ret);
} }
} else if (req->flags & REQ_F_FORCE_ASYNC) { } else if (req->flags & REQ_F_FORCE_ASYNC) {
if (!req->io) { if (!req->io) {
...@@ -5937,8 +5917,8 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -5937,8 +5917,8 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static inline void io_queue_link_head(struct io_kiocb *req) static inline void io_queue_link_head(struct io_kiocb *req)
{ {
if (unlikely(req->flags & REQ_F_FAIL_LINK)) { if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
io_cqring_add_event(req, -ECANCELED); io_put_req(req);
io_double_put_req(req); io_req_complete(req, -ECANCELED);
} else } else
io_queue_sqe(req, NULL); io_queue_sqe(req, NULL);
} }
...@@ -6195,8 +6175,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, ...@@ -6195,8 +6175,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
if (unlikely(err)) { if (unlikely(err)) {
fail_req: fail_req:
io_cqring_add_event(req, err); io_put_req(req);
io_double_put_req(req); io_req_complete(req, err);
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment