Commit c1379e24 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: move req preps out of io_issue_sqe()

All request preparations are done only during submission, reflect it in
the code by moving io_req_prep() much earlier into io_queue_sqe().

That's much cleaner, because it doesn't expose bits to async code that
will never use them. It also makes the interface harder to misuse, as
there were potential places for bugs.

For instance, __io_queue() doesn't clear @sqe before proceeding to the
next linked request, which could have been disastrous; fortunately,
linked requests occur IFF sqe == NULL, so it's not actually a bug.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bfe76559
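
For illustration, a minimal standalone sketch of the flow after this patch follows: preparation runs exactly once on the submission side, and neither __io_queue_sqe() nor io_issue_sqe() ever sees the sqe afterwards. The function names mirror the ones touched by the patch, but the stub types, the simplified return values, and the main() harness are assumptions made purely for this sketch; it is not the kernel code.

/*
 * Userspace sketch of the new call flow (simplified stubs, not the actual
 * kernel code).  io_req_prep() runs once at submission time in io_queue_sqe();
 * __io_queue_sqe() and io_issue_sqe() never see the sqe, so the async path
 * cannot accidentally re-prepare a request from a stale sqe pointer.
 */
#include <stdio.h>

struct io_uring_sqe  { int opcode; };                /* stub */
struct io_kiocb      { int opcode; int prepared; };  /* stub request */
struct io_comp_state { int nr; };                    /* stub completion batch */

/* Runs only at submission time now. */
static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        req->opcode = sqe->opcode;
        req->prepared = 1;
        return 0;
}

/* No sqe parameter any more: operates only on an already-prepared req. */
static int io_issue_sqe(struct io_kiocb *req, int force_nonblock,
                        struct io_comp_state *cs)
{
        (void)force_nonblock;
        (void)cs;
        return req->prepared ? 0 : -1;
}

/* Issue/async path: also sqe-free, so it cannot re-prepare the request. */
static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
{
        int ret = io_issue_sqe(req, 1, cs);

        printf("issue returned %d\n", ret);
}

/* Submission entry point: the only place that touches the sqe. */
static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                         struct io_comp_state *cs)
{
        if (sqe && io_req_prep(req, sqe) < 0)
                return;         /* the kernel does "goto fail_req" here */
        __io_queue_sqe(req, cs);
}

int main(void)
{
        struct io_uring_sqe sqe = { .opcode = 0 };   /* e.g. a no-op request */
        struct io_kiocb req = { 0 };
        struct io_comp_state cs = { 0 };

        io_queue_sqe(&req, &sqe, &cs);
        return 0;
}

The point of this shape is that once __io_queue_sqe() has no sqe parameter, a stale sqe pointer can no longer leak into linked-request or async handling.
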
@@ -971,9 +971,7 @@ static int io_prep_work_files(struct io_kiocb *req);
 static void __io_clean_op(struct io_kiocb *req);
 static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
                        int fd, struct file **out_file, bool fixed);
-static void __io_queue_sqe(struct io_kiocb *req,
-                           const struct io_uring_sqe *sqe,
-                           struct io_comp_state *cs);
+static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
 static void io_file_put_work(struct work_struct *work);
 
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
@@ -1944,7 +1942,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 
         if (!__io_sq_thread_acquire_mm(ctx)) {
                 mutex_lock(&ctx->uring_lock);
-                __io_queue_sqe(req, NULL, NULL);
+                __io_queue_sqe(req, NULL);
                 mutex_unlock(&ctx->uring_lock);
         } else {
                 __io_req_task_cancel(req, -EFAULT);
@@ -5809,18 +5807,12 @@ static void __io_clean_op(struct io_kiocb *req)
                 io_req_drop_files(req);
 }
 
-static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                        bool force_nonblock, struct io_comp_state *cs)
+static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
+                        struct io_comp_state *cs)
 {
         struct io_ring_ctx *ctx = req->ctx;
         int ret;
 
-        if (sqe) {
-                ret = io_req_prep(req, sqe);
-                if (unlikely(ret < 0))
-                        return ret;
-        }
-
         switch (req->opcode) {
         case IORING_OP_NOP:
                 ret = io_nop(req, cs);
@@ -5958,7 +5950,7 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
 
         if (!ret) {
                 do {
-                        ret = io_issue_sqe(req, NULL, false, NULL);
+                        ret = io_issue_sqe(req, false, NULL);
                         /*
                          * We can get EAGAIN for polled IO even though we're
                          * forcing a sync submission from here, since we can't
@@ -6136,8 +6128,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
         return nxt;
 }
 
-static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                           struct io_comp_state *cs)
+static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
 {
         struct io_kiocb *linked_timeout;
         struct io_kiocb *nxt;
@@ -6157,7 +6148,7 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                 old_creds = override_creds(req->work.creds);
         }
 
-        ret = io_issue_sqe(req, sqe, true, cs);
+        ret = io_issue_sqe(req, true, cs);
 
         /*
          * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -6236,7 +6227,12 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                 req->work.flags |= IO_WQ_WORK_CONCURRENT;
                 io_queue_async_work(req);
         } else {
-                __io_queue_sqe(req, sqe, cs);
+                if (sqe) {
+                        ret = io_req_prep(req, sqe);
+                        if (unlikely(ret))
+                                goto fail_req;
+                }
+                __io_queue_sqe(req, cs);
         }
 }