Commit 607b6fb8 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: rearrange io_read()/write()

Combine force_nonblock branches (which is already optimised by
compiler), flip branches so the most hot/common path is the first, e.g.
as with non on-stack iov setup, and add extra likely/unlikely
attributions for error paths.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2c2536c5896d70994de76e387ea09a0402173a3f.1634144845.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5e49c973
...@@ -3433,7 +3433,7 @@ static bool io_rw_should_retry(struct io_kiocb *req) ...@@ -3433,7 +3433,7 @@ static bool io_rw_should_retry(struct io_kiocb *req)
static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
{ {
if (req->file->f_op->read_iter) if (likely(req->file->f_op->read_iter))
return call_read_iter(req->file, &req->rw.kiocb, iter); return call_read_iter(req->file, &req->rw.kiocb, iter);
else if (req->file->f_op->read) else if (req->file->f_op->read)
return loop_rw_iter(READ, req, iter); return loop_rw_iter(READ, req, iter);
...@@ -3449,14 +3449,18 @@ static bool need_read_all(struct io_kiocb *req) ...@@ -3449,14 +3449,18 @@ static bool need_read_all(struct io_kiocb *req)
static int io_read(struct io_kiocb *req, unsigned int issue_flags) static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_rw_state __s, *s; struct io_rw_state __s, *s = &__s;
struct iovec *iovec; struct iovec *iovec;
struct kiocb *kiocb = &req->rw.kiocb; struct kiocb *kiocb = &req->rw.kiocb;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
struct io_async_rw *rw; struct io_async_rw *rw;
ssize_t ret, ret2; ssize_t ret, ret2;
if (req_has_async_data(req)) { if (!req_has_async_data(req)) {
ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
if (unlikely(ret < 0))
return ret;
} else {
rw = req->async_data; rw = req->async_data;
s = &rw->s; s = &rw->s;
/* /*
...@@ -3466,25 +3470,20 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) ...@@ -3466,25 +3470,20 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
*/ */
iov_iter_restore(&s->iter, &s->iter_state); iov_iter_restore(&s->iter, &s->iter_state);
iovec = NULL; iovec = NULL;
} else {
s = &__s;
ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
if (unlikely(ret < 0))
return ret;
} }
req->result = iov_iter_count(&s->iter); req->result = iov_iter_count(&s->iter);
/* Ensure we clear previously set non-block flag */ if (force_nonblock) {
if (!force_nonblock)
kiocb->ki_flags &= ~IOCB_NOWAIT;
else
kiocb->ki_flags |= IOCB_NOWAIT;
/* If the file doesn't support async, just async punt */ /* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_nowait(req, READ)) { if (unlikely(!io_file_supports_nowait(req, READ))) {
ret = io_setup_async_rw(req, iovec, s, true); ret = io_setup_async_rw(req, iovec, s, true);
return ret ?: -EAGAIN; return ret ?: -EAGAIN;
} }
kiocb->ki_flags |= IOCB_NOWAIT;
} else {
/* Ensure we clear previously set non-block flag */
kiocb->ki_flags &= ~IOCB_NOWAIT;
}
ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result); ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
if (unlikely(ret)) { if (unlikely(ret)) {
...@@ -3579,34 +3578,28 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -3579,34 +3578,28 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_write(struct io_kiocb *req, unsigned int issue_flags) static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_rw_state __s, *s; struct io_rw_state __s, *s = &__s;
struct io_async_rw *rw;
struct iovec *iovec; struct iovec *iovec;
struct kiocb *kiocb = &req->rw.kiocb; struct kiocb *kiocb = &req->rw.kiocb;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ssize_t ret, ret2; ssize_t ret, ret2;
if (req_has_async_data(req)) { if (!req_has_async_data(req)) {
rw = req->async_data;
s = &rw->s;
iov_iter_restore(&s->iter, &s->iter_state);
iovec = NULL;
} else {
s = &__s;
ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
return ret; return ret;
} else {
struct io_async_rw *rw = req->async_data;
s = &rw->s;
iov_iter_restore(&s->iter, &s->iter_state);
iovec = NULL;
} }
req->result = iov_iter_count(&s->iter); req->result = iov_iter_count(&s->iter);
/* Ensure we clear previously set non-block flag */ if (force_nonblock) {
if (!force_nonblock)
kiocb->ki_flags &= ~IOCB_NOWAIT;
else
kiocb->ki_flags |= IOCB_NOWAIT;
/* If the file doesn't support async, just async punt */ /* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_nowait(req, WRITE)) if (unlikely(!io_file_supports_nowait(req, WRITE)))
goto copy_iov; goto copy_iov;
/* file path doesn't support NOWAIT for non-direct_IO */ /* file path doesn't support NOWAIT for non-direct_IO */
...@@ -3614,6 +3607,12 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) ...@@ -3614,6 +3607,12 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
(req->flags & REQ_F_ISREG)) (req->flags & REQ_F_ISREG))
goto copy_iov; goto copy_iov;
kiocb->ki_flags |= IOCB_NOWAIT;
} else {
/* Ensure we clear previously set non-block flag */
kiocb->ki_flags &= ~IOCB_NOWAIT;
}
ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result); ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
if (unlikely(ret)) if (unlikely(ret))
goto out_free; goto out_free;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment