Commit 5e49c973 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: clean up io_import_iovec

Make io_import_iovec take a struct io_rw_state instead of an iter
pointer. First, it takes care of initialising the iovec pointer, which
otherwise can be forgotten. Better yet, we can avoid initialising it
altogether when it is not needed, e.g. for IORING_OP_READ_FIXED or
IORING_OP_READ. Also hide saving of iter_state inside the helper by
splitting out an inline wrapper, avoiding extra ifs.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b1bbc213a95e5272d4da5867bb977d9acb6f2109.1634144845.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 51aac424
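For context, the struct io_rw_state that the helper now takes was introduced earlier in this series and bundles the iterator, its saved state, and the inline iovec array. Below is a minimal sketch of that layout and of how a caller uses the new helper after this patch; the struct shown here is simplified (the real definition sizes fast_iov with UIO_FASTIOV), and example_rw_prep() is a made-up illustration modelled on io_read()/io_write(), not kernel code:

/* Simplified sketch of the state bundle, not the kernel definition. */
struct io_rw_state {
        struct iov_iter         iter;        /* iterator used for the transfer */
        struct iov_iter_state   iter_state;  /* snapshot taken right after import */
        struct iovec            fast_iov[8]; /* inline iovecs, avoids an allocation */
};

/* Hypothetical caller: a single io_import_iovec() call imports the
 * buffer(s), points *iovec at s->fast_iov (or leaves it NULL for
 * READ_FIXED and non-vectored READ) and saves the iterator state
 * internally, so no caller can forget either step. */
static int example_rw_prep(struct io_kiocb *req, struct io_rw_state *s,
                           unsigned int issue_flags)
{
        struct iovec *iovec;
        int ret;

        ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
        if (unlikely(ret < 0))
                return ret;
        req->result = iov_iter_count(&s->iter);
        return 0;
}

The point of the wrapper split in this patch is that the unlikely(ret < 0) check and the iov_iter_save_state() call live in exactly one place instead of being repeated in every caller.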
@@ -3155,9 +3155,10 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
         return __io_iov_buffer_select(req, iov, issue_flags);
 }
 
-static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
-                           struct iov_iter *iter, unsigned int issue_flags)
+static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
+                             struct io_rw_state *s, unsigned int issue_flags)
 {
+        struct iov_iter *iter = &s->iter;
         void __user *buf = u64_to_user_ptr(req->rw.addr);
         size_t sqe_len = req->rw.len;
         u8 opcode = req->opcode;
@@ -3180,11 +3181,13 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
                         req->rw.len = sqe_len;
                 }
 
-                ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
+                ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
                 *iovec = NULL;
                 return ret;
         }
 
+        *iovec = s->fast_iov;
+
         if (req->flags & REQ_F_BUFFER_SELECT) {
                 ret = io_iov_buffer_select(req, *iovec, issue_flags);
                 if (!ret)
@@ -3197,6 +3200,19 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
                               req->ctx->compat);
 }
 
+static inline int io_import_iovec(int rw, struct io_kiocb *req,
+                                  struct iovec **iovec, struct io_rw_state *s,
+                                  unsigned int issue_flags)
+{
+        int ret;
+
+        ret = __io_import_iovec(rw, req, iovec, s, issue_flags);
+        if (unlikely(ret < 0))
+                return ret;
+        iov_iter_save_state(&s->iter, &s->iter_state);
+        return ret;
+}
+
 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 {
         return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
@@ -3322,11 +3338,11 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 {
         struct io_async_rw *iorw = req->async_data;
-        struct iovec *iov = iorw->s.fast_iov;
+        struct iovec *iov;
         int ret;
 
         /* submission path, ->uring_lock should already be taken */
-        ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, IO_URING_F_NONBLOCK);
+        ret = io_import_iovec(rw, req, &iov, &iorw->s, IO_URING_F_NONBLOCK);
         if (unlikely(ret < 0))
                 return ret;
 
@@ -3334,7 +3350,6 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
         iorw->free_iovec = iov;
         if (iov)
                 req->flags |= REQ_F_NEED_CLEANUP;
-        iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
         return 0;
 }
 
@@ -3453,12 +3468,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                 iovec = NULL;
         } else {
                 s = &__s;
-                iovec = s->fast_iov;
-                ret = io_import_iovec(READ, req, &iovec, &s->iter, issue_flags);
-                if (ret < 0)
+                ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
+                if (unlikely(ret < 0))
                         return ret;
-                iov_iter_save_state(&s->iter, &s->iter_state);
         }
         req->result = iov_iter_count(&s->iter);
 
@@ -3581,11 +3593,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
                 iovec = NULL;
         } else {
                 s = &__s;
-                iovec = s->fast_iov;
-                ret = io_import_iovec(WRITE, req, &iovec, &s->iter, issue_flags);
-                if (ret < 0)
+                ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
+                if (unlikely(ret < 0))
                         return ret;
-                iov_iter_save_state(&s->iter, &s->iter_state);
         }
         req->result = iov_iter_count(&s->iter);
 