Commit 73911426 authored by Jens Axboe

io_uring: check IOPOLL/ioprio support upfront

Don't punt this check to the op prep handlers; add the support to
io_op_defs so we can check it while setting up the request.

This reduces the text size by 500 bytes on aarch64, and makes this less
fragile by having the check in one spot and needing opcodes to opt in
to IOPOLL or ioprio support.
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f2e030dd
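For illustration only (not part of this commit): a minimal liburing sketch of the
user-visible effect. With the upfront check in io_init_req(), an opcode that does
not set .ioprio in io_op_defs, such as IORING_OP_NOP, is expected to have the
request rejected with -EINVAL when sqe->ioprio is non-zero, typically reported via
the completion rather than in each prep handler. The program assumes liburing is
installed and a kernel carrying this change.

/*
 * Illustrative sketch, build with -luring. On kernels with this change the
 * NOP below is expected to complete with res == -EINVAL, because
 * IORING_OP_NOP does not opt in to ioprio in io_op_defs.
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(4, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->ioprio = 1;	/* NOP does not declare .ioprio support */

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret == 0) {
		printf("nop completion res = %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
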
@@ -1028,12 +1028,19 @@ struct io_op_def {
 	unsigned not_supported : 1;
 	/* skip auditing */
 	unsigned audit_skip : 1;
+	/* supports ioprio */
+	unsigned ioprio : 1;
+	/* supports iopoll */
+	unsigned iopoll : 1;
 	/* size of async data needed, if any */
 	unsigned short async_size;
 };

 static const struct io_op_def io_op_defs[] = {
-	[IORING_OP_NOP] = {},
+	[IORING_OP_NOP] = {
+		.audit_skip = 1,
+		.iopoll = 1,
+	},
 	[IORING_OP_READV] = {
 		.needs_file = 1,
 		.unbound_nonreg_file = 1,
@@ -1042,6 +1049,8 @@ static const struct io_op_def io_op_defs[] = {
 		.needs_async_setup = 1,
 		.plug = 1,
 		.audit_skip = 1,
+		.ioprio = 1,
+		.iopoll = 1,
 		.async_size = sizeof(struct io_async_rw),
 	},
 	[IORING_OP_WRITEV] = {
@@ -1052,6 +1061,8 @@ static const struct io_op_def io_op_defs[] = {
 		.needs_async_setup = 1,
 		.plug = 1,
 		.audit_skip = 1,
+		.ioprio = 1,
+		.iopoll = 1,
 		.async_size = sizeof(struct io_async_rw),
 	},
 	[IORING_OP_FSYNC] = {
@@ -1064,6 +1075,8 @@ static const struct io_op_def io_op_defs[] = {
 		.pollin = 1,
 		.plug = 1,
 		.audit_skip = 1,
+		.ioprio = 1,
+		.iopoll = 1,
 		.async_size = sizeof(struct io_async_rw),
 	},
 	[IORING_OP_WRITE_FIXED] = {
@@ -1073,6 +1086,8 @@ static const struct io_op_def io_op_defs[] = {
 		.pollout = 1,
 		.plug = 1,
 		.audit_skip = 1,
+		.ioprio = 1,
+		.iopoll = 1,
 		.async_size = sizeof(struct io_async_rw),
 	},
 	[IORING_OP_POLL_ADD] = {
@@ -1137,6 +1152,7 @@ static const struct io_op_def io_op_defs[] = {
 	[IORING_OP_CLOSE] = {},
 	[IORING_OP_FILES_UPDATE] = {
 		.audit_skip = 1,
+		.iopoll = 1,
 	},
 	[IORING_OP_STATX] = {
 		.audit_skip = 1,
@@ -1148,6 +1164,8 @@ static const struct io_op_def io_op_defs[] = {
 		.buffer_select = 1,
 		.plug = 1,
 		.audit_skip = 1,
+		.ioprio = 1,
+		.iopoll = 1,
 		.async_size = sizeof(struct io_async_rw),
 	},
 	[IORING_OP_WRITE] = {
@@ -1157,6 +1175,8 @@ static const struct io_op_def io_op_defs[] = {
 		.pollout = 1,
 		.plug = 1,
 		.audit_skip = 1,
+		.ioprio = 1,
+		.iopoll = 1,
 		.async_size = sizeof(struct io_async_rw),
 	},
 	[IORING_OP_FADVISE] = {
@@ -1191,9 +1211,11 @@ static const struct io_op_def io_op_defs[] = {
 	},
 	[IORING_OP_PROVIDE_BUFFERS] = {
 		.audit_skip = 1,
+		.iopoll = 1,
 	},
 	[IORING_OP_REMOVE_BUFFERS] = {
 		.audit_skip = 1,
+		.iopoll = 1,
 	},
 	[IORING_OP_TEE] = {
 		.needs_file = 1,
@@ -1211,6 +1233,7 @@ static const struct io_op_def io_op_defs[] = {
 	[IORING_OP_LINKAT] = {},
 	[IORING_OP_MSG_RING] = {
 		.needs_file = 1,
+		.iopoll = 1,
 	},
 };
@@ -4139,9 +4162,7 @@ static int io_renameat_prep(struct io_kiocb *req,
 	struct io_rename *ren = &req->rename;
 	const char __user *oldf, *newf;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+	if (sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
@@ -4190,10 +4211,7 @@ static int io_unlinkat_prep(struct io_kiocb *req,
 	struct io_unlink *un = &req->unlink;
 	const char __user *fname;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
-	    sqe->splice_fd_in)
+	if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
@@ -4239,10 +4257,7 @@ static int io_mkdirat_prep(struct io_kiocb *req,
 	struct io_mkdir *mkd = &req->mkdir;
 	const char __user *fname;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
-	    sqe->splice_fd_in)
+	if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
@@ -4282,10 +4297,7 @@ static int io_symlinkat_prep(struct io_kiocb *req,
 	struct io_symlink *sl = &req->symlink;
 	const char __user *oldpath, *newpath;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
-	    sqe->splice_fd_in)
+	if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
@@ -4331,9 +4343,7 @@ static int io_linkat_prep(struct io_kiocb *req,
 	struct io_hardlink *lnk = &req->hardlink;
 	const char __user *oldf, *newf;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+	if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
@@ -4380,9 +4390,7 @@ static int io_shutdown_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
+	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
 		     sqe->buf_index || sqe->splice_fd_in))
 		return -EINVAL;

@@ -4422,9 +4430,6 @@ static int __io_splice_prep(struct io_kiocb *req,
 	struct io_splice *sp = &req->splice;
 	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-
 	sp->len = READ_ONCE(sqe->len);
 	sp->flags = READ_ONCE(sqe->splice_flags);
 	if (unlikely(sp->flags & ~valid_flags))
@@ -4523,11 +4528,6 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
  */
 static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
-	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-
 	__io_req_complete(req, issue_flags, 0, 0);
 	return 0;
 }
@@ -4535,8 +4535,8 @@ static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
 static int io_msg_ring_prep(struct io_kiocb *req,
 			    const struct io_uring_sqe *sqe)
 {
-	if (unlikely(sqe->addr || sqe->ioprio || sqe->rw_flags ||
-		     sqe->splice_fd_in || sqe->buf_index || sqe->personality))
+	if (unlikely(sqe->addr || sqe->rw_flags || sqe->splice_fd_in ||
+		     sqe->buf_index || sqe->personality))
 		return -EINVAL;

 	req->msg.user_data = READ_ONCE(sqe->off);
@@ -4577,12 +4577,7 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)

 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
-	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
-		     sqe->splice_fd_in))
+	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
 		return -EINVAL;

 	req->sync.flags = READ_ONCE(sqe->fsync_flags);
@@ -4615,10 +4610,7 @@ static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
 static int io_fallocate_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
-	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
-	    sqe->splice_fd_in)
-		return -EINVAL;
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
 		return -EINVAL;

 	req->sync.off = READ_ONCE(sqe->off);
@@ -4649,9 +4641,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	const char __user *fname;
 	int ret;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (unlikely(sqe->ioprio || sqe->buf_index))
+	if (unlikely(sqe->buf_index))
 		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
@@ -4783,7 +4773,7 @@ static int io_remove_buffers_prep(struct io_kiocb *req,
 	struct io_provide_buf *p = &req->pbuf;
 	u64 tmp;

-	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
+	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
 	    sqe->splice_fd_in)
 		return -EINVAL;

@@ -4850,7 +4840,7 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
 	struct io_provide_buf *p = &req->pbuf;
 	u64 tmp;

-	if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
+	if (sqe->rw_flags || sqe->splice_fd_in)
 		return -EINVAL;

 	tmp = READ_ONCE(sqe->fd);
@@ -4980,9 +4970,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_EPOLL)
-	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
-		return -EINVAL;
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;

 	req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -5026,9 +5014,7 @@ static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
-	if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
-		return -EINVAL;
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
 		return -EINVAL;

 	req->madvise.addr = READ_ONCE(sqe->addr);
@@ -5061,9 +5047,7 @@ static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)

 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
-		return -EINVAL;
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
 		return -EINVAL;

 	req->fadvise.offset = READ_ONCE(sqe->off);
@@ -5099,9 +5083,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	const char __user *path;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+	if (sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
 		return -EBADF;
@@ -5146,10 +5128,7 @@ static int io_statx(struct io_kiocb *req, unsigned int issue_flags)

 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
-	    sqe->rw_flags || sqe->buf_index)
+	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
 		return -EBADF;
@@ -5215,12 +5194,7 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)

 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
-	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
-		     sqe->splice_fd_in))
+	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
 		return -EINVAL;

 	req->sync.off = READ_ONCE(sqe->off);
@@ -5298,7 +5272,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *sr = &req->sr_msg;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (unlikely(sqe->addr2 || sqe->file_index))
 		return -EINVAL;

 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -5531,7 +5505,7 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *sr = &req->sr_msg;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (unlikely(sqe->addr2 || sqe->file_index))
 		return -EINVAL;

 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -5688,9 +5662,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_accept *accept = &req->accept;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->len || sqe->buf_index)
+	if (sqe->len || sqe->buf_index)
 		return -EINVAL;

 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -5756,10 +5728,7 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_connect *conn = &req->connect;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
-	    sqe->splice_fd_in)
+	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
 		return -EINVAL;

 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -6442,9 +6411,7 @@ static int io_poll_update_prep(struct io_kiocb *req,
 	struct io_poll_update *upd = &req->poll_update;
 	u32 flags;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
+	if (sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;
 	flags = READ_ONCE(sqe->len);
 	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
@@ -6474,9 +6441,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	struct io_poll_iocb *poll = &req->poll;
 	u32 flags;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
+	if (sqe->buf_index || sqe->off || sqe->addr)
 		return -EINVAL;
 	flags = READ_ONCE(sqe->len);
 	if (flags & ~IORING_POLL_ADD_MULTI)
@@ -6683,11 +6648,9 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
 {
 	struct io_timeout_rem *tr = &req->timeout_rem;

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
+	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
 		return -EINVAL;
 	tr->ltimeout = false;
@@ -6757,10 +6720,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	unsigned flags;
 	u32 off = READ_ONCE(sqe->off);

-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
-	    sqe->splice_fd_in)
+	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
 		return -EINVAL;
 	if (off && is_timeout_link)
 		return -EINVAL;
@@ -6942,11 +6902,9 @@ static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 static int io_async_cancel_prep(struct io_kiocb *req,
 				const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
+	if (sqe->off || sqe->len || sqe->splice_fd_in)
 		return -EINVAL;

 	req->cancel.addr = READ_ONCE(sqe->addr);
@@ -7032,7 +6990,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
 {
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
+	if (sqe->rw_flags || sqe->splice_fd_in)
 		return -EINVAL;

 	req->rsrc_update.offset = READ_ONCE(sqe->off);
@@ -7845,6 +7803,11 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		}
 	}

+	if (!io_op_defs[opcode].ioprio && sqe->ioprio)
+		return -EINVAL;
+	if (!io_op_defs[opcode].iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
+
 	if (io_op_defs[opcode].needs_file) {
 		struct io_submit_state *state = &ctx->submit_state;