Commit f4eaf8ed authored by Gabriel Krisman Bertazi, committed by Jens Axboe

io_uring/rsrc: Drop io_copy_iov in favor of iovec API

Instead of open-coding an io_uring helper to copy iovecs from userspace,
rely on the existing iovec_from_user() function. While there, avoid
repeatedly zeroing the iov in the !arg case of io_sqe_buffers_register().

Tested with the liburing test suite.
Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/20240523214535.31890-1-krisman@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 81cc927d
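For reference, the single-entry iovec_from_user() call pattern the patch adopts looks roughly like the sketch below. Kernel context is assumed, and the wrapper io_fetch_one_iov() is purely illustrative (it is not part of the patch); only the iovec_from_user() call itself mirrors the new code.

#include <linux/uio.h>
#include <linux/err.h>

/*
 * Illustrative only: fetch a single iovec entry from a userspace array,
 * honouring compat (32-bit) iovec layouts, the way the patched code does.
 */
static struct iovec *io_fetch_one_iov(const struct iovec __user *uvec,
				      unsigned int index,
				      struct iovec *fast_iov, bool compat)
{
	/*
	 * nr_segs (1) does not exceed fast_segs (1), so iovec_from_user()
	 * fills the caller-provided fast_iov rather than allocating; no
	 * kfree() is needed.  On a failed copy it returns an ERR_PTR()
	 * (e.g. -EFAULT), which callers check with IS_ERR()/PTR_ERR().
	 */
	return iovec_from_user(&uvec[index], 1, 1, fast_iov, compat);
}

Because the compat conversion happens inside iovec_from_user(), the #ifdef CONFIG_COMPAT branch of the old io_copy_iov() can go away entirely.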
io_uring/rsrc.c
@@ -85,31 +85,6 @@ static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
 	return 0;
 }
 
-static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
-		       void __user *arg, unsigned index)
-{
-	struct iovec __user *src;
-
-#ifdef CONFIG_COMPAT
-	if (ctx->compat) {
-		struct compat_iovec __user *ciovs;
-		struct compat_iovec ciov;
-
-		ciovs = (struct compat_iovec __user *) arg;
-		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
-			return -EFAULT;
-
-		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
-		dst->iov_len = ciov.iov_len;
-		return 0;
-	}
-#endif
-	src = (struct iovec __user *) arg;
-	if (copy_from_user(dst, &src[index], sizeof(*dst)))
-		return -EFAULT;
-	return 0;
-}
-
 static int io_buffer_validate(struct iovec *iov)
 {
 	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
@@ -420,8 +395,9 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 				   struct io_uring_rsrc_update2 *up,
 				   unsigned int nr_args)
 {
+	struct iovec __user *uvec = u64_to_user_ptr(up->data);
 	u64 __user *tags = u64_to_user_ptr(up->tags);
-	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
+	struct iovec fast_iov, *iov;
 	struct page *last_hpage = NULL;
 	__u32 done;
 	int i, err;
@@ -435,21 +411,23 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 		struct io_mapped_ubuf *imu;
 		u64 tag = 0;
 
-		err = io_copy_iov(ctx, &iov, iovs, done);
-		if (err)
+		iov = iovec_from_user(&uvec[done], 1, 1, &fast_iov, ctx->compat);
+		if (IS_ERR(iov)) {
+			err = PTR_ERR(iov);
 			break;
+		}
 		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
 			err = -EFAULT;
 			break;
 		}
-		err = io_buffer_validate(&iov);
+		err = io_buffer_validate(iov);
 		if (err)
 			break;
-		if (!iov.iov_base && tag) {
+		if (!iov->iov_base && tag) {
 			err = -EINVAL;
 			break;
 		}
-		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
+		err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);
 		if (err)
 			break;
@@ -971,8 +949,9 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 {
 	struct page *last_hpage = NULL;
 	struct io_rsrc_data *data;
+	struct iovec fast_iov, *iov = &fast_iov;
+	const struct iovec __user *uvec = (struct iovec * __user) arg;
 	int i, ret;
-	struct iovec iov;
 
 	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
@@ -989,24 +968,27 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 		return ret;
 	}
 
+	if (!arg)
+		memset(iov, 0, sizeof(*iov));
+
 	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
 		if (arg) {
-			ret = io_copy_iov(ctx, &iov, arg, i);
-			if (ret)
+			iov = iovec_from_user(&uvec[i], 1, 1, &fast_iov, ctx->compat);
+			if (IS_ERR(iov)) {
+				ret = PTR_ERR(iov);
 				break;
-			ret = io_buffer_validate(&iov);
+			}
+			ret = io_buffer_validate(iov);
 			if (ret)
 				break;
-		} else {
-			memset(&iov, 0, sizeof(iov));
 		}
 
-		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
+		if (!iov->iov_base && *io_get_tag_slot(data, i)) {
 			ret = -EINVAL;
 			break;
 		}
 
-		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
+		ret = io_sqe_buffer_register(ctx, iov, &ctx->user_bufs[i],
 					     &last_hpage);
 		if (ret)
 			break;