Commit 0b416c3e authored by Jens Axboe

io_uring: fix sporadic -EFAULT from IORING_OP_RECVMSG

If we have to punt the recvmsg to async context, we copy all of the
context. But the iovec used can be either on-stack (if small) or
dynamically allocated; if it's on-stack, we need to ensure we reset the
iov pointer after the copy. Otherwise we're reusing old stack data, and
that can lead to -EFAULTs if that memory gets overwritten.

Ensure we retain the right pointers for the iov, and free it as well if
we end up needing more than UIO_FASTIOV vectors.

Fixes: 03b1230c ("io_uring: ensure async punted sendmsg/recvmsg requests copy data")
Reported-by: 李通洲 <carter.li@eoitek.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d195a66e
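
The failure mode is easiest to see outside the kernel. Below is a minimal
userspace sketch of the same hazard; "struct async_msg" and FAST_VECS are
made-up stand-ins for io_async_msghdr and UIO_FASTIOV, not kernel code:

/*
 * Illustrative sketch: a plain struct copy leaves the iov pointer
 * aimed at the *original* inline array, not the copy's.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define FAST_VECS 8			/* stand-in for UIO_FASTIOV */

struct async_msg {
	struct iovec fast_iov[FAST_VECS];	/* inline storage for small requests */
	struct iovec *iov;			/* -> fast_iov, or a heap array if larger */
};

int main(void)
{
	struct async_msg on_stack = { 0 };

	on_stack.iov = on_stack.fast_iov;	/* small request: use inline array */

	/* Punt to async context: duplicate the whole context by value. */
	struct async_msg *copy = malloc(sizeof(*copy));
	if (!copy)
		return 1;
	memcpy(copy, &on_stack, sizeof(*copy));

	/*
	 * BUG: copy->iov still points into on_stack, whose frame is long
	 * gone by the time an async worker runs in the kernel case; the
	 * reused stack data is what produced the sporadic -EFAULTs.
	 */
	printf("stale: iov=%p, own fast_iov=%p\n",
	       (void *)copy->iov, (void *)copy->fast_iov);

	/* FIX (what the patch does): re-point iov at the copy's own array. */
	copy->iov = copy->fast_iov;
	printf("fixed: iov=%p, own fast_iov=%p\n",
	       (void *)copy->iov, (void *)copy->fast_iov);

	/* Cleanup idiom from the patch: only free if it wasn't inline. */
	if (copy->iov != copy->fast_iov)
		free(copy->iov);
	free(copy);
	return 0;
}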
fs/io_uring.c:

@@ -2041,6 +2041,7 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		   struct io_kiocb **nxt, bool force_nonblock)
 {
 #if defined(CONFIG_NET)
+	struct io_async_msghdr *kmsg = NULL;
 	struct socket *sock;
 	int ret;
 
@@ -2051,7 +2052,6 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (sock) {
 		struct io_async_ctx io, *copy;
 		struct sockaddr_storage addr;
-		struct msghdr *kmsg;
 		unsigned flags;
 
 		flags = READ_ONCE(sqe->msg_flags);
@@ -2061,17 +2061,21 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			flags |= MSG_DONTWAIT;
 
 		if (req->io) {
-			kmsg = &req->io->msg.msg;
-			kmsg->msg_name = &addr;
+			kmsg = &req->io->msg;
+			kmsg->msg.msg_name = &addr;
+			/* if iov is set, it's allocated already */
+			if (!kmsg->iov)
+				kmsg->iov = kmsg->fast_iov;
+			kmsg->msg.msg_iter.iov = kmsg->iov;
 		} else {
-			kmsg = &io.msg.msg;
-			kmsg->msg_name = &addr;
+			kmsg = &io.msg;
+			kmsg->msg.msg_name = &addr;
 			ret = io_sendmsg_prep(req, &io);
 			if (ret)
 				goto out;
 		}
 
-		ret = __sys_sendmsg_sock(sock, kmsg, flags);
+		ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 		if (force_nonblock && ret == -EAGAIN) {
 			copy = kmalloc(sizeof(*copy), GFP_KERNEL);
 			if (!copy) {
@@ -2082,13 +2086,15 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			req->io = copy;
 			memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
 			req->sqe = &req->io->sqe;
-			return ret;
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 	}
 
 out:
+	if (kmsg && kmsg->iov != kmsg->fast_iov)
+		kfree(kmsg->iov);
 	io_cqring_add_event(req, ret);
 	if (ret < 0)
 		req_set_fail_links(req);
@@ -2120,6 +2126,7 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		      struct io_kiocb **nxt, bool force_nonblock)
 {
 #if defined(CONFIG_NET)
+	struct io_async_msghdr *kmsg = NULL;
 	struct socket *sock;
 	int ret;
 
@@ -2131,7 +2138,6 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		struct user_msghdr __user *msg;
 		struct io_async_ctx io, *copy;
 		struct sockaddr_storage addr;
-		struct msghdr *kmsg;
 		unsigned flags;
 
 		flags = READ_ONCE(sqe->msg_flags);
@@ -2143,17 +2149,21 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		msg = (struct user_msghdr __user *) (unsigned long)
 			READ_ONCE(sqe->addr);
 		if (req->io) {
-			kmsg = &req->io->msg.msg;
-			kmsg->msg_name = &addr;
+			kmsg = &req->io->msg;
+			kmsg->msg.msg_name = &addr;
+			/* if iov is set, it's allocated already */
+			if (!kmsg->iov)
+				kmsg->iov = kmsg->fast_iov;
+			kmsg->msg.msg_iter.iov = kmsg->iov;
 		} else {
-			kmsg = &io.msg.msg;
-			kmsg->msg_name = &addr;
+			kmsg = &io.msg;
+			kmsg->msg.msg_name = &addr;
 			ret = io_recvmsg_prep(req, &io);
 			if (ret)
 				goto out;
 		}
 
-		ret = __sys_recvmsg_sock(sock, kmsg, msg, io.msg.uaddr, flags);
+		ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags);
 		if (force_nonblock && ret == -EAGAIN) {
 			copy = kmalloc(sizeof(*copy), GFP_KERNEL);
 			if (!copy) {
@@ -2164,13 +2174,15 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			req->io = copy;
 			memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
 			req->sqe = &req->io->sqe;
-			return ret;
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 	}
 
 out:
+	if (kmsg && kmsg->iov != kmsg->fast_iov)
+		kfree(kmsg->iov);
 	io_cqring_add_event(req, ret);
 	if (ret < 0)
 		req_set_fail_links(req);
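
Worth noting: both functions now share the same out-path cleanup, which uses
the iov pointer itself as the allocation flag. A small sketch of that idiom as
a standalone helper (io_free_msg_iov is a hypothetical name, assuming the
io_async_msghdr layout introduced by this patch):

static void io_free_msg_iov(struct io_async_msghdr *kmsg)
{
	/* iov == fast_iov means inline storage was used; nothing to free */
	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
}

Comparing iov against the embedded fast_iov array doubles as the "did we
allocate?" check, so no extra bookkeeping field is needed and kfree() only
runs for iovecs that went beyond UIO_FASTIOV.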