Commit f8929630 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: cleanup conditional submit locking

Refactor io_ring_submit_[un]lock(), make it accept issue_flags and
remove manual IO_URING_F_UNLOCKED checks. It also allows us to place
lockdep annotations inside instead of sprinkling them in a bunch of
places. There is only one user that doesn't fit now, so hand code
locking in __io_rsrc_put_work().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e55c2c06767676a801252e8094c9ab09912487a4.1648209006.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d487b43c
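To make the new convention concrete before the diff: the sketch below shows a hypothetical opcode handler (io_some_op() is not part of the patch) calling the helpers that the first hunk adds. Callers now pass issue_flags straight through; the conditional locking and the lockdep annotation live inside io_ring_submit_lock()/io_ring_submit_unlock().

/*
 * Hypothetical caller, shown for illustration only. Before this patch each
 * such handler open-coded:
 *	if (issue_flags & IO_URING_F_UNLOCKED)
 *		mutex_lock(&ctx->uring_lock);
 *	lockdep_assert_held(&ctx->uring_lock);
 * plus the matching conditional unlock at the end.
 */
static int io_some_op(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	/* Takes uring_lock only on the async io-wq path, then asserts it is held. */
	io_ring_submit_lock(ctx, issue_flags);
	/* ... touch ctx state that requires uring_lock ... */
	io_ring_submit_unlock(ctx, issue_flags);
	return 0;
}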
@@ -1223,6 +1223,26 @@ struct sock *io_uring_get_socket(struct file *file)
 }
 EXPORT_SYMBOL(io_uring_get_socket);
 
+static void io_ring_submit_unlock(struct io_ring_ctx *ctx, unsigned issue_flags)
+{
+	lockdep_assert_held(&ctx->uring_lock);
+
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_ring_submit_lock(struct io_ring_ctx *ctx, unsigned issue_flags)
+{
+	/*
+	 * "Normal" inline submissions always hold the uring_lock, since we
+	 * grab it from the system call. Same is true for the SQPOLL offload.
+	 * The only exception is when we've detached the request and issue it
+	 * from an async worker thread, grab the lock for that case.
+	 */
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_lock(&ctx->uring_lock);
+	lockdep_assert_held(&ctx->uring_lock);
+}
+
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
 {
 	if (!*locked) {
@@ -1420,10 +1440,7 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	if (req->flags & REQ_F_PARTIAL_IO)
 		return;
 
-	if (issue_flags & IO_URING_F_UNLOCKED)
-		mutex_lock(&ctx->uring_lock);
-
-	lockdep_assert_held(&ctx->uring_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 
 	buf = req->kbuf;
 	bl = io_buffer_get_list(ctx, buf->bgid);
@@ -1431,8 +1448,7 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	req->kbuf = NULL;
 
-	if (issue_flags & IO_URING_F_UNLOCKED)
-		mutex_unlock(&ctx->uring_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 }
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
@@ -3309,24 +3325,6 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
 	return __io_import_fixed(req, rw, iter, imu);
 }
 
-static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
-{
-	if (needs_lock)
-		mutex_unlock(&ctx->uring_lock);
-}
-
-static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
-{
-	/*
-	 * "Normal" inline submissions always hold the uring_lock, since we
-	 * grab it from the system call. Same is true for the SQPOLL offload.
-	 * The only exception is when we've detached the request and issue it
-	 * from an async worker thread, grab the lock for that case.
-	 */
-	if (needs_lock)
-		mutex_lock(&ctx->uring_lock);
-}
-
 static void io_buffer_add_list(struct io_ring_ctx *ctx,
 			       struct io_buffer_list *bl, unsigned int bgid)
 {
@@ -3342,16 +3340,13 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
 					  int bgid, unsigned int issue_flags)
 {
 	struct io_buffer *kbuf = req->kbuf;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		return kbuf;
 
-	io_ring_submit_lock(ctx, needs_lock);
-
-	lockdep_assert_held(&ctx->uring_lock);
+	io_ring_submit_lock(req->ctx, issue_flags);
 
 	bl = io_buffer_get_list(ctx, bgid);
 	if (bl && !list_empty(&bl->buf_list)) {
@@ -3365,7 +3360,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
 		kbuf = ERR_PTR(-ENOBUFS);
 	}
 
-	io_ring_submit_unlock(req->ctx, needs_lock);
+	io_ring_submit_unlock(req->ctx, issue_flags);
 	return kbuf;
 }
@@ -4734,11 +4729,8 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	int ret = 0;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
-
-	io_ring_submit_lock(ctx, needs_lock);
 
-	lockdep_assert_held(&ctx->uring_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 
 	ret = -ENOENT;
 	bl = io_buffer_get_list(ctx, p->bgid);
@@ -4749,7 +4741,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	/* complete before unlock, IOPOLL may need the lock */
 	__io_req_complete(req, issue_flags, ret, 0);
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 	return 0;
 }
 
@@ -4863,11 +4855,8 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	int ret = 0;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
-
-	io_ring_submit_lock(ctx, needs_lock);
 
-	lockdep_assert_held(&ctx->uring_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 
 	bl = io_buffer_get_list(ctx, p->bgid);
 	if (unlikely(!bl)) {
@@ -4885,7 +4874,7 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		req_set_fail(req);
 	/* complete before unlock, IOPOLL may need the lock */
 	__io_req_complete(req, issue_flags, ret, 0);
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 	return 0;
 }
@@ -6787,7 +6776,6 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	u64 sqe_addr = req->cancel.addr;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	struct io_tctx_node *node;
 	int ret;
 
@@ -6796,7 +6784,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 		goto done;
 
 	/* slow path, try all io-wq's */
-	io_ring_submit_lock(ctx, needs_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 	ret = -ENOENT;
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
@@ -6805,7 +6793,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret != -ENOENT)
 			break;
 	}
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 done:
 	if (ret < 0)
 		req_set_fail(req);
@@ -6832,7 +6820,6 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	struct io_uring_rsrc_update2 up;
 	int ret;
 
@@ -6843,10 +6830,10 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 	up.resv = 0;
 	up.resv2 = 0;
 
-	io_ring_submit_lock(ctx, needs_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 	ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
 					&up, req->rsrc_update.nr_args);
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 
 	if (ret < 0)
 		req_set_fail(req);
@@ -8772,15 +8759,17 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 		list_del(&prsrc->list);
 
 		if (prsrc->tag) {
-			bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
+			if (ctx->flags & IORING_SETUP_IOPOLL)
+				mutex_lock(&ctx->uring_lock);
 
-			io_ring_submit_lock(ctx, lock_ring);
 			spin_lock(&ctx->completion_lock);
 			io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
 			io_commit_cqring(ctx);
 			spin_unlock(&ctx->completion_lock);
 			io_cqring_ev_posted(ctx);
-			io_ring_submit_unlock(ctx, lock_ring);
+
+			if (ctx->flags & IORING_SETUP_IOPOLL)
+				mutex_unlock(&ctx->uring_lock);
 		}
 
 		rsrc_data->do_put(ctx, prsrc);
@@ -8956,12 +8945,11 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 				 unsigned int issue_flags, u32 slot_index)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	bool needs_switch = false;
 	struct io_fixed_file *file_slot;
 	int ret = -EBADF;
 
-	io_ring_submit_lock(ctx, needs_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 	if (file->f_op == &io_uring_fops)
 		goto err;
 	ret = -ENXIO;
@@ -9002,7 +8990,7 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 err:
 	if (needs_switch)
 		io_rsrc_node_switch(ctx, ctx->file_data);
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 	if (ret)
 		fput(file);
 	return ret;
@@ -9012,12 +9000,11 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 {
 	unsigned int offset = req->close.file_slot - 1;
 	struct io_ring_ctx *ctx = req->ctx;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	struct io_fixed_file *file_slot;
 	struct file *file;
 	int ret;
 
-	io_ring_submit_lock(ctx, needs_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 	ret = -ENXIO;
 	if (unlikely(!ctx->file_data))
 		goto out;
@@ -9043,7 +9030,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 		io_rsrc_node_switch(ctx, ctx->file_data);
 	ret = 0;
 out:
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 	return ret;
 }