Commit d530a402 authored by Jens Axboe

io_uring: add prepped flag

We currently use the fact that if ->ki_filp is already set, then we've
done the prep. In preparation for moving the file assignment earlier,
use a separate flag to tell whether the request has been prepped for
IO or not.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e0c5c576
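
The change follows a common kernel pattern: instead of inferring state from a pointer being non-NULL, record it in a dedicated flag bit. Below is a minimal, self-contained C sketch of that pattern; struct request and prep_request are hypothetical stand-ins for the kernel's io_kiocb and io_prep_rw, not the actual implementation, and only the flag value is taken from the diff.

    #include <stdio.h>

    #define REQ_F_PREPPED	16	/* prep already done (value from the diff) */

    /* Hypothetical stand-in for struct io_kiocb */
    struct request {
    	void		*filp;	/* may now be assigned before prep */
    	unsigned int	flags;
    };

    static int prep_request(struct request *req)
    {
    	/* For -EAGAIN retry, everything is already prepped */
    	if (req->flags & REQ_F_PREPPED)
    		return 0;

    	/* one-time setup would happen here */

    	req->flags |= REQ_F_PREPPED;
    	return 0;
    }

    int main(void)
    {
    	struct request req = { NULL, 0 };

    	prep_request(&req);	/* first call: does the prep, sets the flag */
    	prep_request(&req);	/* retry: short-circuits on REQ_F_PREPPED */
    	printf("flags = %u\n", req.flags);
    	return 0;
    }

Decoupling "prepped" from "file assigned" is also what lets the final hunk drop the req->rw.ki_filp = NULL initialization: the file pointer no longer doubles as the prep marker, so it can be assigned earlier without the prep step being skipped incorrectly.
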
fs/io_uring.c
@@ -214,6 +214,7 @@ struct io_kiocb {
 #define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
 #define REQ_F_FIXED_FILE	4	/* ctx owns file */
 #define REQ_F_SEQ_PREV		8	/* sequential with previous */
+#define REQ_F_PREPPED		16	/* prep already done */
 	u64			user_data;
 	u64			error;
@@ -741,7 +742,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 	int fd, ret;
 
 	/* For -EAGAIN retry, everything is already prepped */
-	if (kiocb->ki_filp)
+	if (req->flags & REQ_F_PREPPED)
 		return 0;
 
 	flags = READ_ONCE(sqe->flags);
@@ -799,6 +800,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 		}
 		kiocb->ki_complete = io_complete_rw;
 	}
+	req->flags |= REQ_F_PREPPED;
 	return 0;
 out_fput:
 	if (!(flags & IOSQE_FIXED_FILE)) {
@@ -1099,8 +1101,8 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	unsigned flags;
 	int fd;
 
-	/* Prep already done */
-	if (req->rw.ki_filp)
+	/* Prep already done (EAGAIN retry) */
+	if (req->flags & REQ_F_PREPPED)
 		return 0;
 
 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
@@ -1122,6 +1124,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -EBADF;
 	}
 
+	req->flags |= REQ_F_PREPPED;
 	return 0;
 }
@@ -1632,8 +1635,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 	if (unlikely(!req))
 		return -EAGAIN;
 
-	req->rw.ki_filp = NULL;
-
 	ret = __io_submit_sqe(ctx, req, s, true, state);
 	if (ret == -EAGAIN) {
 		struct io_uring_sqe *sqe_copy;