Commit 010e8e6b authored by Pavel Begunkov, committed by Jens Axboe

io_uring: de-unionise io_kiocb

As io_kiocb has enough space, move ->work out of the union. It's safer
this way and removes the ->work memcpy bouncing.
While at it, make the tabulation in struct io_kiocb consistent.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b089ed39
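
For readers of the diff below: the union forced the memcpy bouncing because the poll-path fields (hash_node, apoll) and ->work occupied the same bytes, so arming or tearing down a poll had to save ->work aside and restore it around any use of the aliased fields. A minimal standalone sketch of the idea, using simplified stand-in types (not the kernel's io_kiocb):

/* Illustrative sketch only: demo_work and the req_* types are
 * simplified stand-ins, not the kernel's structures. */
#include <stdio.h>
#include <string.h>

struct demo_work { int payload; };

/* Old layout: poll bookkeeping aliases ->work inside a union. */
struct req_unionised {
	union {
		struct {
			void *hash_node;
			void *apoll;
		};
		struct demo_work work;
	};
};

/* New layout: ->work has its own storage; nothing aliases it. */
struct req_de_unionised {
	void *hash_node;
	void *apoll;
	struct demo_work work;
};

int main(void)
{
	struct req_unionised old_req = { .work = { .payload = 42 } };
	struct demo_work saved;

	/* Old scheme: save ->work, touch the aliased fields, restore. */
	memcpy(&saved, &old_req.work, sizeof(saved));
	old_req.hash_node = &old_req;	/* clobbers ->work's bytes */
	memcpy(&old_req.work, &saved, sizeof(saved));

	/* New scheme: no aliasing, hence no save/restore. */
	struct req_de_unionised new_req = { .work = { .payload = 42 } };
	new_req.hash_node = &new_req;	/* ->work untouched */

	printf("%d %d\n", old_req.work.payload, new_req.work.payload);
	return 0;
}

The save/restore pairs this sketch mimics are exactly the memcpy calls deleted below in io_async_task_func(), io_arm_poll_handler() and io_poll_remove_one().
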
fs/io_uring.c

@@ -600,7 +600,6 @@ enum {
 struct async_poll {
 	struct io_poll_iocb	poll;
 	struct io_poll_iocb	*double_poll;
-	struct io_wq_work	work;
 };
 
 /*
@@ -641,36 +640,26 @@ struct io_kiocb {
 	u16				buf_index;
 	u32				result;
 
-	struct io_ring_ctx	*ctx;
-	unsigned int		flags;
-	refcount_t		refs;
-	struct task_struct	*task;
-	u64			user_data;
+	struct io_ring_ctx		*ctx;
+	unsigned int			flags;
+	refcount_t			refs;
+	struct task_struct		*task;
+	u64				user_data;
 
-	struct list_head	link_list;
+	struct list_head		link_list;
 
 	/*
 	 * 1. used with ctx->iopoll_list with reads/writes
 	 * 2. to track reqs with ->files (see io_op_def::file_table)
 	 */
-	struct list_head	inflight_entry;
-
-	struct percpu_ref	*fixed_file_refs;
+	struct list_head		inflight_entry;
 
-	union {
-		/*
-		 * Only commands that never go async can use the below fields,
-		 * obviously. Right now only IORING_OP_POLL_ADD uses them, and
-		 * async armed poll handlers for regular commands. The latter
-		 * restore the work, if needed.
-		 */
-		struct {
-			struct hlist_node	hash_node;
-			struct async_poll	*apoll;
-		};
-		struct io_wq_work	work;
-	};
-	struct callback_head	task_work;
+	struct percpu_ref		*fixed_file_refs;
+	struct callback_head		task_work;
+	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+	struct hlist_node		hash_node;
+	struct async_poll		*apoll;
+	struct io_wq_work		work;
 };
 
 struct io_defer_entry {
@@ -4668,10 +4657,6 @@ static void io_async_task_func(struct callback_head *cb)
 	io_poll_remove_double(req, apoll->double_poll);
 	spin_unlock_irq(&ctx->completion_lock);
 
-	/* restore ->work in case we need to retry again */
-	if (req->flags & REQ_F_WORK_INITIALIZED)
-		memcpy(&req->work, &apoll->work, sizeof(req->work));
-
 	if (!READ_ONCE(apoll->poll.canceled))
 		__io_req_task_submit(req);
 	else
@@ -4763,9 +4748,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	apoll->double_poll = NULL;
 
 	req->flags |= REQ_F_POLLED;
-	if (req->flags & REQ_F_WORK_INITIALIZED)
-		memcpy(&apoll->work, &req->work, sizeof(req->work));
-
 	io_get_req_task(req);
 	req->apoll = apoll;
 	INIT_HLIST_NODE(&req->hash_node);
@@ -4784,8 +4766,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	if (ret) {
 		io_poll_remove_double(req, apoll->double_poll);
 		spin_unlock_irq(&ctx->completion_lock);
-		if (req->flags & REQ_F_WORK_INITIALIZED)
-			memcpy(&req->work, &apoll->work, sizeof(req->work));
 		kfree(apoll->double_poll);
 		kfree(apoll);
 		return false;
@@ -4828,14 +4808,6 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 		do_complete = __io_poll_remove_one(req, &apoll->poll);
 		if (do_complete) {
 			io_put_req(req);
-			/*
-			 * restore ->work because we will call
-			 * io_req_clean_work below when dropping the
-			 * final reference.
-			 */
-			if (req->flags & REQ_F_WORK_INITIALIZED)
-				memcpy(&req->work, &apoll->work,
-						sizeof(req->work));
 			kfree(apoll->double_poll);
 			kfree(apoll);
 		}
@@ -4969,9 +4941,6 @@ static int io_poll_add(struct io_kiocb *req)
 	struct io_poll_table ipt;
 	__poll_t mask;
 
-	/* ->work is in union with hash_node and others */
-	io_req_clean_work(req);
-
 	INIT_HLIST_NODE(&req->hash_node);
 
 	ipt.pt._qproc = io_poll_queue_proc;
...