Commit bec68faa authored by Kent Overstreet, committed by Benjamin LaHaise

aio: io_cancel() no longer returns the io_event

Originally, io_cancel() was documented to return the io_event if
cancellation succeeded - the io_event wouldn't be delivered via the ring
buffer as it normally would be.

But this isn't what the implementation was actually doing; the only
driver implementing cancellation, the usb gadget code, never returned an
io_event in its cancel function. And aio_complete() was recently changed
to no longer suppress event delivery if the kiocb had been cancelled.

This gets rid of the unused io_event argument to kiocb_cancel() and
kiocb->ki_cancel(), and changes io_cancel() to return -EINPROGRESS if
kiocb->ki_cancel() returned success.

Also tweak the refcounting in kiocb_cancel() to make more sense.
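
For illustration only (not part of this commit), a minimal userspace sketch of
the new return convention using the raw syscalls; demo_cancel() and its error
handling are hypothetical, and it assumes only one request is in flight:

#define _GNU_SOURCE
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

/*
 * With this change a successful cancellation returns -EINPROGRESS and the
 * io_event for the cancelled request is reaped via io_getevents() like any
 * other completion; the 'result' pointer passed to io_cancel() is no longer
 * filled in.
 */
static int demo_cancel(aio_context_t ctx, struct iocb *iocb)
{
	struct io_event ev;
	long ret;

	ret = syscall(__NR_io_cancel, ctx, iocb, &ev);
	if (ret == 0)
		return 0;		/* pre-change kernels: event copied into ev */
	if (ret < 0 && errno == EINPROGRESS) {
		/* post-change kernels: wait for the event on the ring */
		ret = syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
		return ret == 1 ? 0 : -1;
	}
	return -1;			/* -EINVAL, -EAGAIN, ... */
}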
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
parent 723be6e3
@@ -524,7 +524,7 @@ struct kiocb_priv {
unsigned actual;
};
static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
static int ep_aio_cancel(struct kiocb *iocb)
{
struct kiocb_priv *priv = iocb->private;
struct ep_data *epdata;
@@ -540,7 +540,6 @@ static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
// spin_unlock(&epdata->dev->lock);
local_irq_enable();
aio_put_req(iocb);
return value;
}
@@ -358,8 +358,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
struct io_event *res)
static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
{
kiocb_cancel_fn *old, *cancel;
int ret = -EINVAL;
@@ -381,12 +380,10 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
atomic_inc(&kiocb->ki_users);
spin_unlock_irq(&ctx->ctx_lock);
memset(res, 0, sizeof(*res));
res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
res->data = kiocb->ki_user_data;
ret = cancel(kiocb, res);
ret = cancel(kiocb);
spin_lock_irq(&ctx->ctx_lock);
aio_put_req(kiocb);
return ret;
}
@@ -408,7 +405,6 @@ static void free_ioctx(struct work_struct *work)
{
struct kioctx *ctx = container_of(work, struct kioctx, free_work);
struct aio_ring *ring;
struct io_event res;
struct kiocb *req;
unsigned cpu, head, avail;
@@ -419,7 +415,7 @@ static void free_ioctx(struct work_struct *work)
struct kiocb, ki_list);
list_del_init(&req->ki_list);
kiocb_cancel(ctx, req, &res);
kiocb_cancel(ctx, req);
}
spin_unlock_irq(&ctx->ctx_lock);
@@ -795,21 +791,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
/*
* cancelled requests don't get events, userland was given one
* when the event got cancelled.
*/
if (unlikely(xchg(&iocb->ki_cancel,
KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
/*
* Can't use the percpu reqs_available here - could race with
* free_ioctx()
*/
atomic_inc(&ctx->reqs_available);
/* Still need the wake_up in case free_ioctx is waiting */
goto put_rq;
}
/*
* Add a completion event to the ring buffer. Must be done holding
* ctx->completion_lock to prevent other code from messing with the tail
@@ -862,7 +843,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
if (iocb->ki_eventfd != NULL)
eventfd_signal(iocb->ki_eventfd, 1);
put_rq:
/* everything turned out well, dispose of the aiocb. */
aio_put_req(iocb);
@@ -1439,7 +1419,6 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
struct io_event __user *, result)
{
struct io_event res;
struct kioctx *ctx;
struct kiocb *kiocb;
u32 key;
@@ -1457,18 +1436,19 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
kiocb = lookup_kiocb(ctx, iocb, key);
if (kiocb)
ret = kiocb_cancel(ctx, kiocb, &res);
ret = kiocb_cancel(ctx, kiocb);
else
ret = -EINVAL;
spin_unlock_irq(&ctx->ctx_lock);
if (!ret) {
/* Cancellation succeeded -- copy the result
* into the user's buffer.
/*
* The result argument is no longer used - the io_event is
* always delivered via the ring buffer. -EINPROGRESS indicates
* cancellation is in progress:
*/
if (copy_to_user(result, &res, sizeof(res)))
ret = -EFAULT;
ret = -EINPROGRESS;
}
percpu_ref_put(&ctx->users);
@@ -27,7 +27,7 @@ struct kiocb;
*/
#define KIOCB_CANCELLED ((void *) (~0ULL))
typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
typedef int (kiocb_cancel_fn)(struct kiocb *);
struct kiocb {
atomic_t ki_users;
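
For illustration only (not part of this commit), a hedged sketch of what a
driver's cancel hook looks like against the new single-argument
kiocb_cancel_fn; my_request, my_hw_abort() and my_driver_submit() are made-up
names:

/*
 * Hypothetical driver-side sketch: the cancel callback no longer fills in an
 * io_event.  It only tries to abort the in-flight request and reports
 * success or failure; the event is delivered later, when the request's
 * normal completion path calls aio_complete().
 */
#include <linux/aio.h>
#include <linux/errno.h>

struct my_request;				/* made-up per-request state */
int my_hw_abort(struct my_request *req);	/* made-up hardware helper */

static int my_driver_cancel(struct kiocb *iocb)
{
	struct my_request *req = iocb->private;

	/* ask the hardware to abort; its completion handler still runs */
	return my_hw_abort(req) ? -EAGAIN : 0;
}

static void my_driver_submit(struct kiocb *iocb, struct my_request *req)
{
	iocb->private = req;
	kiocb_set_cancel_fn(iocb, my_driver_cancel);
	/* ... queue req to the hardware ... */
}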