Commit 78a861b9 authored by Jens Axboe

io_uring: add sync cancelation API through io_uring_register()

The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQEs for both the
cancelation request and the targeted request have been posted to the
CQ ring.

However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.

Provide an IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API are:

0		Request found and canceled fine.
> 0		Requests found and canceled. Only happens if asked to
		cancel multiple requests, and if the work wasn't in
		progress.
-ENOENT		Request not found.
-ETIME		A timeout on the operation was requested, but the timeout
		expired before we could cancel.

and we won't get -EALREADY via this API.

If the timeout value passed in is -1 for both tv_sec and tv_nsec, then
no timeout is requested. Otherwise, the timespec passed in is the
amount of time that the sync cancel will wait for a successful
cancelation.
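
As a rough userspace sketch (not part of this patch), the new opcode can be
driven directly through the io_uring_register() syscall. The ring fd, the
user_data value being matched, and the one-second timeout below are
illustrative assumptions:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/*
 * Synchronously cancel the request submitted with sqe->user_data == udata,
 * waiting up to one second for io-wq to give it up. Returns 0 (or the
 * number of requests canceled) on success, or -1 with errno set to e.g.
 * ENOENT or ETIME on failure. Assumes 'ring_fd' is an io_uring instance fd
 * and that the headers/kernel carry IORING_REGISTER_SYNC_CANCEL.
 */
static int sync_cancel_by_user_data(int ring_fd, __u64 udata)
{
	struct io_uring_sync_cancel_reg reg;

	memset(&reg, 0, sizeof(reg));	/* pad[] must be zero */
	reg.addr = udata;		/* match on user_data */
	reg.fd = -1;			/* unused without IORING_ASYNC_CANCEL_FD */
	reg.flags = 0;
	reg.timeout.tv_sec = 1;		/* -1/-1 here would mean "no timeout" */
	reg.timeout.tv_nsec = 0;

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_SYNC_CANCEL, &reg, 1);
}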

Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7d8ca725
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/time_types.h>

/*
 * IO submission data structure (Submission Queue Entry)
@@ -428,6 +429,9 @@ enum {
	IORING_REGISTER_PBUF_RING = 22,
	IORING_UNREGISTER_PBUF_RING = 23,

	/* sync cancelation API */
	IORING_REGISTER_SYNC_CANCEL = 24,

	/* this goes last */
	IORING_REGISTER_LAST
};
@@ -563,4 +567,15 @@ struct io_uring_getevents_arg {
	__u64 ts;
};

/*
 * Argument for IORING_REGISTER_SYNC_CANCEL
 */
struct io_uring_sync_cancel_reg {
	__u64 addr;
	__s32 fd;
	__u32 flags;
	struct __kernel_timespec timeout;
	__u64 pad[4];
};

#endif
@@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

@@ -206,3 +207,109 @@ void init_hash_table(struct io_hash_table *table, unsigned size)
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}

static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		unsigned long file_ptr;

		if (unlikely(fd > ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
		cd->file = (struct file *) (file_ptr & FFS_MASK);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx = ctx,
		.seq = atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct fd f = { };
	DEFINE_WAIT(wait);
	int ret;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
		return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		f = fdget(sc.fd);
		if (!f.file)
			return -EBADF;
		cd.file = f.file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec = sc.timeout.tv_sec,
			.tv_nsec = sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time a
	 * request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
		if (ret != -EALREADY)
			break;

		mutex_unlock(&ctx->uring_lock);
		ret = io_run_task_work_sig();
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			break;
		}
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		mutex_lock(&ctx->uring_lock);
		if (!ret) {
			ret = -ETIME;
			break;
		}
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	fdput(f);
	return ret;
}
@@ -19,3 +19,5 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned int issue_flags);
void init_hash_table(struct io_hash_table *table, unsigned size);

int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
@@ -3871,6 +3871,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			break;
		ret = io_unregister_pbuf_ring(ctx, arg);
		break;
	case IORING_REGISTER_SYNC_CANCEL:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_sync_cancel(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;