Commit 88a6f18b authored by Jens Axboe

aio: split out iocb copy from io_submit_one()

In preparation for handing in iocbs in a different fashion as well. Also
make it clear that the iocb being passed in isn't modified, by marking
it const throughout.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 71ebc6fe
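
The shape of the resulting split, reproduced from the hunks below for quick
reference: io_submit_one() keeps the single copy_from_user(), while
__io_submit_one() operates purely on a kernel-space, read-only iocb, so a
future caller that already holds an iocb can invoke it directly.

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 bool compat)
{
	struct iocb iocb;

	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
		return -EFAULT;

	return __io_submit_one(ctx, &iocb, user_iocb, compat);
}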
@@ -1422,7 +1422,7 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
 	aio_complete(iocb, res, res2);
 }
 
-static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
+static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
 {
 	int ret;
@@ -1463,7 +1463,7 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
 	return ret;
 }
 
-static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
+static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
 		bool vectored, bool compat, struct iov_iter *iter)
 {
 	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
@@ -1502,8 +1502,8 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
 	}
 }
 
-static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
-		bool compat)
+static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+			bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
@@ -1535,8 +1535,8 @@ static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
 	return ret;
 }
 
-static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
-		bool compat)
+static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+			 bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
@@ -1591,7 +1591,8 @@ static void aio_fsync_work(struct work_struct *work)
 	aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
 }
 
-static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
+static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
+		     bool datasync)
 {
 	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
 			iocb->aio_rw_flags))
@@ -1719,7 +1720,7 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 	add_wait_queue(head, &pt->iocb->poll.wait);
 }
 
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
+static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
 	struct kioctx *ctx = aiocb->ki_ctx;
 	struct poll_iocb *req = &aiocb->poll;
@@ -1791,27 +1792,23 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
 	return 0;
 }
 
-static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 bool compat)
+static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
+			   struct iocb __user *user_iocb, bool compat)
 {
 	struct aio_kiocb *req;
-	struct iocb iocb;
 	ssize_t ret;
 
-	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
-		return -EFAULT;
-
 	/* enforce forwards compatibility on users */
-	if (unlikely(iocb.aio_reserved2)) {
+	if (unlikely(iocb->aio_reserved2)) {
 		pr_debug("EINVAL: reserve field set\n");
 		return -EINVAL;
 	}
 
 	/* prevent overflows */
 	if (unlikely(
-	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
-	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
-	    ((ssize_t)iocb.aio_nbytes < 0)
+	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
+	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
+	    ((ssize_t)iocb->aio_nbytes < 0)
 	   )) {
 		pr_debug("EINVAL: overflow check\n");
 		return -EINVAL;
@@ -1825,14 +1822,14 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	if (unlikely(!req))
 		goto out_put_reqs_available;
 
-	if (iocb.aio_flags & IOCB_FLAG_RESFD) {
+	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		/*
 		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
 		 * instance of the file* now. The file descriptor must be
 		 * an eventfd() fd, and will be signaled for each completed
 		 * event using the eventfd_signal() function.
 		 */
-		req->ki_eventfd = eventfd_ctx_fdget((int) iocb.aio_resfd);
+		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
 		if (IS_ERR(req->ki_eventfd)) {
 			ret = PTR_ERR(req->ki_eventfd);
 			req->ki_eventfd = NULL;
@@ -1847,32 +1844,32 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	}
 
 	req->ki_user_iocb = user_iocb;
-	req->ki_user_data = iocb.aio_data;
+	req->ki_user_data = iocb->aio_data;
 
-	switch (iocb.aio_lio_opcode) {
+	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
-		ret = aio_read(&req->rw, &iocb, false, compat);
+		ret = aio_read(&req->rw, iocb, false, compat);
 		break;
 	case IOCB_CMD_PWRITE:
-		ret = aio_write(&req->rw, &iocb, false, compat);
+		ret = aio_write(&req->rw, iocb, false, compat);
 		break;
 	case IOCB_CMD_PREADV:
-		ret = aio_read(&req->rw, &iocb, true, compat);
+		ret = aio_read(&req->rw, iocb, true, compat);
 		break;
 	case IOCB_CMD_PWRITEV:
-		ret = aio_write(&req->rw, &iocb, true, compat);
+		ret = aio_write(&req->rw, iocb, true, compat);
 		break;
 	case IOCB_CMD_FSYNC:
-		ret = aio_fsync(&req->fsync, &iocb, false);
+		ret = aio_fsync(&req->fsync, iocb, false);
 		break;
 	case IOCB_CMD_FDSYNC:
-		ret = aio_fsync(&req->fsync, &iocb, true);
+		ret = aio_fsync(&req->fsync, iocb, true);
 		break;
 	case IOCB_CMD_POLL:
-		ret = aio_poll(req, &iocb);
+		ret = aio_poll(req, iocb);
 		break;
 	default:
-		pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
+		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
 		ret = -EINVAL;
 		break;
 	}
@@ -1894,6 +1891,17 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	return ret;
 }
 
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+			 bool compat)
+{
+	struct iocb iocb;
+
+	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
+		return -EFAULT;
+
+	return __io_submit_one(ctx, &iocb, user_iocb, compat);
+}
+
 /* sys_io_submit:
  *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
  *	the number of iocbs queued.  May return -EINVAL if the aio_context
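
For illustration only, a hypothetical sketch of the "different fashion" the
commit message prepares for: a submission path that already has the iocb in
kernel memory could call __io_submit_one() directly and skip the user copy.
Nothing named here (submit_kernel_iocb) exists in this patch.

/*
 * Hypothetical, not part of this patch: a caller that already holds a
 * kernel-resident iocb bypasses copy_from_user() entirely.
 */
static int submit_kernel_iocb(struct kioctx *ctx, const struct iocb *iocb,
			      struct iocb __user *user_iocb, bool compat)
{
	return __io_submit_one(ctx, iocb, user_iocb, compat);
}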