Commit fa15bafb authored by Pavel Begunkov, committed by Jens Axboe

io_uring: flip if handling after io_setup_async_rw

As recently done with send/recv, flip the if after rw_verify_area()
in io_{read,write}() and un-indent the code that is left. This removes
a compiler-generated jump that can be mispredicted on the success/fast path.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1752f0ad
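For illustration only (not part of the commit): a minimal user-space C sketch of the same control-flow flip. verify() and do_work() are made-up stand-ins for rw_verify_area() and the actual I/O; the point is that the error check branches out early under unlikely(), so the common success path falls straight through.

/* Illustrative sketch only; verify() and do_work() are hypothetical helpers. */
#include <stdio.h>

/* The kernel's unlikely() boils down to a branch-prediction hint. */
#define unlikely(x) __builtin_expect(!!(x), 0)

static int verify(int x)  { return x < 0 ? -1 : 0; } /* error check */
static int do_work(int x) { return x * 2; }          /* fast path */

/* Before: the fast path is nested inside "if (!ret)", so the common,
 * successful case sits behind a conditional block. */
static int handle_nested(int x)
{
	int ret = verify(x);

	if (!ret)
		ret = do_work(x);
	return ret;
}

/* After: the error case exits early and is hinted as unlikely; the
 * success path falls through at one less level of indentation. */
static int handle_flipped(int x)
{
	int ret = verify(x);

	if (unlikely(ret))
		return ret;

	return do_work(x);
}

int main(void)
{
	printf("%d %d\n", handle_nested(21), handle_flipped(21));
	return 0;
}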
@@ -3034,57 +3034,56 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	struct kiocb *kiocb = &req->rw.kiocb;
 	struct iov_iter iter;
 	size_t iov_count;
-	ssize_t io_size, ret;
+	ssize_t io_size, ret, ret2;
+	unsigned long nr_segs;
 
 	ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	io_size = ret;
+	req->result = io_size;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
 		kiocb->ki_flags &= ~IOCB_NOWAIT;
 
-	io_size = ret;
-	req->result = io_size;
-
 	/* If the file doesn't support async, just async punt */
 	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;
 
 	iov_count = iov_iter_count(&iter);
+	nr_segs = iter.nr_segs;
 	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
-	if (!ret) {
-		unsigned long nr_segs = iter.nr_segs;
-		ssize_t ret2 = 0;
+	if (unlikely(ret))
+		goto out_free;
 
-		ret2 = io_iter_do_read(req, &iter);
+	ret2 = io_iter_do_read(req, &iter);
 
-		/* Catch -EAGAIN return for forced non-blocking submission */
-		if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
-			kiocb_done(kiocb, ret2, cs);
-		} else {
-			iter.count = iov_count;
-			iter.nr_segs = nr_segs;
+	/* Catch -EAGAIN return for forced non-blocking submission */
+	if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
+		kiocb_done(kiocb, ret2, cs);
+	} else {
+		iter.count = iov_count;
+		iter.nr_segs = nr_segs;
 copy_iov:
-			ret = io_setup_async_rw(req, io_size, iovec,
-						inline_vecs, &iter);
-			if (ret)
-				goto out_free;
-			/* it's copied and will be cleaned with ->io */
-			iovec = NULL;
-			/* if we can retry, do so with the callbacks armed */
-			if (io_rw_should_retry(req)) {
-				ret2 = io_iter_do_read(req, &iter);
-				if (ret2 == -EIOCBQUEUED) {
-					goto out_free;
-				} else if (ret2 != -EAGAIN) {
-					kiocb_done(kiocb, ret2, cs);
-					goto out_free;
-				}
-			}
-			kiocb->ki_flags &= ~IOCB_WAITQ;
-			return -EAGAIN;
-		}
+		ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
+					&iter);
+		if (ret)
+			goto out_free;
+		/* it's copied and will be cleaned with ->io */
+		iovec = NULL;
+		/* if we can retry, do so with the callbacks armed */
+		if (io_rw_should_retry(req)) {
+			ret2 = io_iter_do_read(req, &iter);
+			if (ret2 == -EIOCBQUEUED) {
+				goto out_free;
+			} else if (ret2 != -EAGAIN) {
+				kiocb_done(kiocb, ret2, cs);
+				goto out_free;
+			}
+		}
+		kiocb->ki_flags &= ~IOCB_WAITQ;
+		return -EAGAIN;
 	}
 out_free:
 	if (iovec)
@@ -3117,19 +3116,19 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	struct kiocb *kiocb = &req->rw.kiocb;
 	struct iov_iter iter;
 	size_t iov_count;
-	ssize_t ret, io_size;
+	ssize_t ret, ret2, io_size;
+	unsigned long nr_segs;
 
 	ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	io_size = ret;
+	req->result = io_size;
 
 	/* Ensure we clear previously set non-block flag */
 	if (!force_nonblock)
 		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
-	io_size = ret;
-	req->result = io_size;
-
 	/* If the file doesn't support async, just async punt */
 	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
 		goto copy_iov;
@@ -3140,51 +3139,50 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 		goto copy_iov;
 
 	iov_count = iov_iter_count(&iter);
+	nr_segs = iter.nr_segs;
 	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
-	if (!ret) {
-		unsigned long nr_segs = iter.nr_segs;
-		ssize_t ret2;
+	if (unlikely(ret))
+		goto out_free;
 
-		/*
-		 * Open-code file_start_write here to grab freeze protection,
-		 * which will be released by another thread in
-		 * io_complete_rw(). Fool lockdep by telling it the lock got
-		 * released so that it doesn't complain about the held lock when
-		 * we return to userspace.
-		 */
-		if (req->flags & REQ_F_ISREG) {
-			__sb_start_write(file_inode(req->file)->i_sb,
-						SB_FREEZE_WRITE, true);
-			__sb_writers_release(file_inode(req->file)->i_sb,
-						SB_FREEZE_WRITE);
-		}
-		kiocb->ki_flags |= IOCB_WRITE;
+	/*
+	 * Open-code file_start_write here to grab freeze protection,
+	 * which will be released by another thread in
+	 * io_complete_rw(). Fool lockdep by telling it the lock got
+	 * released so that it doesn't complain about the held lock when
+	 * we return to userspace.
+	 */
+	if (req->flags & REQ_F_ISREG) {
+		__sb_start_write(file_inode(req->file)->i_sb,
+					SB_FREEZE_WRITE, true);
+		__sb_writers_release(file_inode(req->file)->i_sb,
+					SB_FREEZE_WRITE);
+	}
+	kiocb->ki_flags |= IOCB_WRITE;
 
-		if (req->file->f_op->write_iter)
-			ret2 = call_write_iter(req->file, kiocb, &iter);
-		else
-			ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+	if (req->file->f_op->write_iter)
+		ret2 = call_write_iter(req->file, kiocb, &iter);
+	else
+		ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
 
-		/*
-		 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
-		 * retry them without IOCB_NOWAIT.
-		 */
-		if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
-			ret2 = -EAGAIN;
-		if (!force_nonblock || ret2 != -EAGAIN) {
-			kiocb_done(kiocb, ret2, cs);
-		} else {
-			iter.count = iov_count;
-			iter.nr_segs = nr_segs;
+	/*
+	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
+	 * retry them without IOCB_NOWAIT.
+	 */
+	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
+		ret2 = -EAGAIN;
+	if (!force_nonblock || ret2 != -EAGAIN) {
+		kiocb_done(kiocb, ret2, cs);
+	} else {
+		iter.count = iov_count;
+		iter.nr_segs = nr_segs;
 copy_iov:
-			ret = io_setup_async_rw(req, io_size, iovec,
-						inline_vecs, &iter);
-			if (ret)
-				goto out_free;
-			/* it's copied and will be cleaned with ->io */
-			iovec = NULL;
-			return -EAGAIN;
-		}
+		ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
+					&iter);
+		if (ret)
+			goto out_free;
+		/* it's copied and will be cleaned with ->io */
+		iovec = NULL;
+		return -EAGAIN;
 	}
 out_free:
 	if (iovec)