Commit d2d778fb authored by Jens Axboe

io_uring/rw: mark readv/writev as vectored in the opcode definition

This is cleaner than gating on the opcode type, particularly as more
read/write type opcodes may be added.

Then we can use that for the data import, and in __io_read() to decide
whether or not we need to copy state.
Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a08d195b
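
The diff below applies a table-driven pattern: each opcode's traits live in one entry of a definition array, so code paths test a flag instead of enumerating opcodes. As a minimal userspace sketch of the idea (all names are invented for illustration; this is not the kernel's code):

#include <stdio.h>

/* Hypothetical opcodes, standing in for IORING_OP_* values. */
enum { OP_READ, OP_WRITE, OP_READV, OP_WRITEV, OP_LAST };

/* Per-opcode definition table, loosely modeled on io_issue_defs[]. */
struct op_def {
	unsigned vectored : 1;	/* opcode takes an iovec array */
};

static const struct op_def op_defs[OP_LAST] = {
	[OP_READV]  = { .vectored = 1 },
	[OP_WRITEV] = { .vectored = 1 },
};

int main(void)
{
	/*
	 * One table lookup replaces chains like
	 * "op == OP_READ || op == OP_WRITE"; a new vectored opcode
	 * needs only a table entry, not edits to every conditional.
	 */
	for (int op = 0; op < OP_LAST; op++)
		printf("opcode %d vectored: %d\n", op, (int)op_defs[op].vectored);
	return 0;
}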
@@ -63,6 +63,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.ioprio			= 1,
 		.iopoll			= 1,
 		.iopoll_queue		= 1,
+		.vectored		= 1,
 		.prep			= io_prep_rw,
 		.issue			= io_read,
 	},
@@ -76,6 +77,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.ioprio			= 1,
 		.iopoll			= 1,
 		.iopoll_queue		= 1,
+		.vectored		= 1,
 		.prep			= io_prep_rw,
 		.issue			= io_write,
 	},
@@ -29,6 +29,8 @@ struct io_issue_def {
 	unsigned		iopoll_queue : 1;
 	/* opcode specific path will handle ->async_data allocation if needed */
 	unsigned		manual_alloc : 1;
+	/* vectored opcode, set if 1) vectored, and 2) handler needs to know */
+	unsigned		vectored : 1;

 	int (*issue)(struct io_kiocb *, unsigned int);
 	int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
@@ -388,8 +388,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 	buf = u64_to_user_ptr(rw->addr);
 	sqe_len = rw->len;

-	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
-	    (req->flags & REQ_F_BUFFER_SELECT)) {
+	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
 		if (io_do_buffer_select(req)) {
 			buf = io_buffer_select(req, &sqe_len, issue_flags);
 			if (!buf)
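
With the flag available, the import path above takes the single-segment branch whenever the opcode is not vectored (or a provided buffer is in use), rather than naming the non-vectored opcodes. A hedged userspace sketch of that dispatch (illustrative stand-ins only, not io_uring's actual API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/uio.h>

enum { OP_READ, OP_READV, OP_LAST };

/* Tiny stand-in for the io_issue_defs[] vectored bit. */
static const bool op_vectored[OP_LAST] = {
	[OP_READV] = true,
};

struct request {
	int opcode;
	bool buffer_select;	/* models REQ_F_BUFFER_SELECT */
	void *addr;		/* user buffer, or iovec array if vectored */
	size_t len;
};

/*
 * Mirrors the reshaped __io_import_iovec() condition: the single-segment
 * path is "not vectored, or buffer-select", so adding a non-vectored
 * opcode requires no new comparisons here.
 */
static int import_buffer(const struct request *req, struct iovec *iov)
{
	if (!op_vectored[req->opcode] || req->buffer_select) {
		iov->iov_base = req->addr;
		iov->iov_len = req->len;
		return 0;
	}
	/* vectored path: would copy in the user iovec array; elided here */
	return -1;
}

int main(void)
{
	char buf[16];
	struct iovec iov;
	struct request req = { .opcode = OP_READ, .addr = buf, .len = sizeof(buf) };

	printf("import: %d (len %zu)\n", import_buffer(&req, &iov), iov.iov_len);
	return 0;
}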
@@ -776,8 +775,11 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 		req->flags &= ~REQ_F_REISSUE;
-		/* if we can poll, just do that */
-		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
+		/*
+		 * If we can poll, just do that. For a vectored read, we'll
+		 * need to copy state first.
+		 */
+		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
 			return -EAGAIN;
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
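
The __io_read() hunk makes the retry decision with the same table flag, and the new comment records why a vectored read cannot simply bail out to poll: its iterator state must be copied first, since the segment array may not survive until the deferred retry. A rough userspace analogy of that rule (names invented; the kernel's real state copy goes through its iov_iter machinery):

#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/uio.h>

/* Invented stand-ins for illustration only. */
struct iter_state {
	struct iovec iov[8];	/* snapshot storage for the segments */
	unsigned nr_segs;
	size_t offset;
};

struct read_req {
	bool vectored;		/* would come from the opcode table */
	bool pollable;		/* stands in for file_can_poll(file) */
	struct iter_state live;	/* state the handler is iterating */
	struct iter_state saved;	/* copy kept for a deferred retry */
};

/*
 * Models the retry rule: a pollable, non-vectored read can return
 * -EAGAIN at once, because its single buffer is fully described by the
 * request itself. A vectored read snapshots its iterator first so the
 * deferred retry starts from intact segments.
 */
static int on_eagain(struct read_req *req)
{
	if (req->pollable && !req->vectored)
		return -EAGAIN;	/* arm poll, retry later; nothing to copy */

	memcpy(&req->saved, &req->live, sizeof(req->saved));
	return -EAGAIN;
}

int main(void)
{
	struct read_req req = { .vectored = true, .pollable = true };

	return on_eagain(&req) == -EAGAIN ? 0 : 1;
}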