Commit a9339b78 authored by Al Viro

aio: keep io_event in aio_kiocb

We want to separate forming the resulting io_event from putting it
into the ring buffer.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 833f4154
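
For readers skimming the diff below, here is a minimal, self-contained userspace sketch of the pattern this commit moves to. It is not the kernel code itself; apart from mirroring the field layout of struct io_event, every name here (request, ring, ring_publish, the obj/data values) is illustrative only. The point it shows: the completion record lives inside the request object, its identity fields are filled at submission, its result fields at completion, and publishing into the ring buffer becomes a plain struct copy.

/* Minimal userspace sketch, assuming nothing beyond the io_event field shape. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct io_event_sketch {		/* mirrors the shape of struct io_event */
	uint64_t data;			/* user-supplied cookie */
	uint64_t obj;			/* identifies the submitted iocb */
	int64_t  res;			/* primary result */
	int64_t  res2;			/* secondary result */
};

struct request {			/* stands in for struct aio_kiocb */
	struct io_event_sketch res;	/* event kept in the request itself */
};

#define RING_SLOTS 8

struct ring {
	struct io_event_sketch slot[RING_SLOTS];
	unsigned tail;
};

/* Step 1: at submission time, record identity fields and clear the results. */
static void request_init(struct request *req, uint64_t obj, uint64_t data)
{
	req->res.obj  = obj;
	req->res.data = data;
	req->res.res  = 0;
	req->res.res2 = 0;
}

/* Step 2: at completion time, only the result fields need to be filled in. */
static void request_complete(struct request *req, int64_t res, int64_t res2)
{
	req->res.res  = res;
	req->res.res2 = res2;
}

/* Step 3: publishing is now a plain struct copy into the ring. */
static void ring_publish(struct ring *r, const struct request *req)
{
	r->slot[r->tail % RING_SLOTS] = req->res;
	r->tail++;
}

int main(void)
{
	struct ring r;
	struct request req;

	memset(&r, 0, sizeof(r));
	request_init(&req, 0x1000, 42);		/* hypothetical obj/data values */
	request_complete(&req, 512, 0);
	ring_publish(&r, &req);

	printf("obj=%#llx data=%llu res=%lld\n",
	       (unsigned long long)r.slot[0].obj,
	       (unsigned long long)r.slot[0].data,
	       (long long)r.slot[0].res);
	return 0;
}
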
@@ -204,8 +204,7 @@ struct aio_kiocb {
 	struct kioctx		*ki_ctx;
 	kiocb_cancel_fn		*ki_cancel;
 
-	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
-	__u64			ki_user_data;	/* user's data for completion */
+	struct io_event		ki_res;
 
 	struct list_head	ki_list;	/* the aio core uses this
 						 * for cancellation */
@@ -1084,15 +1083,6 @@ static inline void iocb_put(struct aio_kiocb *iocb)
 		iocb_destroy(iocb);
 }
 
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-			   long res, long res2)
-{
-	ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-	ev->data = iocb->ki_user_data;
-	ev->res = res;
-	ev->res2 = res2;
-}
-
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
  */
@@ -1104,6 +1094,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	unsigned tail, pos, head;
 	unsigned long	flags;
 
+	iocb->ki_res.res = res;
+	iocb->ki_res.res2 = res2;
 	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
@@ -1120,14 +1112,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-	aio_fill_event(event, iocb, res, res2);
+	*event = iocb->ki_res;
 
 	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-		 res, res2);
+	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+		 (void __user *)(unsigned long)iocb->ki_res.obj,
+		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
@@ -1844,8 +1836,10 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 		goto out_put_req;
 	}
 
-	req->ki_user_iocb = user_iocb;
-	req->ki_user_data = iocb->aio_data;
+	req->ki_res.obj = (u64)(unsigned long)user_iocb;
+	req->ki_res.data = iocb->aio_data;
+	req->ki_res.res = 0;
+	req->ki_res.res2 = 0;
 
 	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
@@ -2019,6 +2013,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	struct aio_kiocb *kiocb;
 	int ret = -EINVAL;
 	u32 key;
+	u64 obj = (u64)(unsigned long)iocb;
 
 	if (unlikely(get_user(key, &iocb->aio_key)))
 		return -EFAULT;
@@ -2032,7 +2027,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	spin_lock_irq(&ctx->ctx_lock);
 	/* TODO: use a hash or array, this sucks. */
 	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-		if (kiocb->ki_user_iocb == iocb) {
+		if (kiocb->ki_res.obj == obj) {
 			ret = kiocb->ki_cancel(&kiocb->rw);
 			list_del_init(&kiocb->ki_list);
 			break;