Commit f56040b8 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: deduplicate io_grab_files() calls

Move io_req_init_async() into io_grab_files(), it's safer this way. Note
that io_queue_async_work() does *init_async(), so it's valid to move out
of __io_queue_sqe() punt path. Also, add a helper around io_grab_files().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ae34817b
...@@ -912,7 +912,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req); ...@@ -912,7 +912,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx, static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_files_update *ip, struct io_uring_files_update *ip,
unsigned nr_args); unsigned nr_args);
static int io_grab_files(struct io_kiocb *req); static int io_prep_work_files(struct io_kiocb *req);
static void io_complete_rw_common(struct kiocb *kiocb, long res, static void io_complete_rw_common(struct kiocb *kiocb, long res,
struct io_comp_state *cs); struct io_comp_state *cs);
static void __io_clean_op(struct io_kiocb *req); static void __io_clean_op(struct io_kiocb *req);
...@@ -5294,13 +5294,9 @@ static int io_req_defer_prep(struct io_kiocb *req, ...@@ -5294,13 +5294,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
if (io_alloc_async_ctx(req)) if (io_alloc_async_ctx(req))
return -EAGAIN; return -EAGAIN;
ret = io_prep_work_files(req);
if (io_op_defs[req->opcode].file_table) { if (unlikely(ret))
io_req_init_async(req); return ret;
ret = io_grab_files(req);
if (unlikely(ret))
return ret;
}
switch (req->opcode) { switch (req->opcode) {
case IORING_OP_NOP: case IORING_OP_NOP:
...@@ -5851,6 +5847,8 @@ static int io_grab_files(struct io_kiocb *req) ...@@ -5851,6 +5847,8 @@ static int io_grab_files(struct io_kiocb *req)
int ret = -EBADF; int ret = -EBADF;
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
io_req_init_async(req);
if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE)) if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
return 0; return 0;
if (!ctx->ring_file) if (!ctx->ring_file)
...@@ -5876,6 +5874,13 @@ static int io_grab_files(struct io_kiocb *req) ...@@ -5876,6 +5874,13 @@ static int io_grab_files(struct io_kiocb *req)
return ret; return ret;
} }
/*
 * Grab the submitting task's file table for requests whose opcode needs
 * it; opcodes without file_table set need no preparation and succeed
 * immediately. Returns 0 on success or a negative error from
 * io_grab_files().
 */
static inline int io_prep_work_files(struct io_kiocb *req)
{
	return io_op_defs[req->opcode].file_table ? io_grab_files(req) : 0;
}
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{ {
struct io_timeout_data *data = container_of(timer, struct io_timeout_data *data = container_of(timer,
...@@ -5987,14 +5992,9 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -5987,14 +5992,9 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
goto exit; goto exit;
} }
punt: punt:
io_req_init_async(req); ret = io_prep_work_files(req);
if (unlikely(ret))
if (io_op_defs[req->opcode].file_table) { goto err;
ret = io_grab_files(req);
if (ret)
goto err;
}
/* /*
* Queued up for async execution, worker will release * Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted. * submit reference when the iocb is actually submitted.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment