Commit 7f1129d2 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: shuffle more fields into SQ ctx section

Since moving locked_free_* out of struct io_submit_state,
ctx->submit_state is accessed on the submission side only, so move it
into the submission section. The same goes for the rsrc table
pointers/nodes/etc.: they must be taken and checked during submission
because they are synchronised by uring_lock, so move them there as well.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8a5899a50afc6ccca63249e716f580b246f3dec6.1623709150.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b52ecf8c
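
The diff below relies on ____cacheline_aligned_in_smp, which pads and aligns
each anonymous struct to its own cacheline so that the submission-side fields
(serialised by uring_lock) do not false-share with completion-side state that
other CPUs write. Here is a minimal, self-contained userspace sketch of that
layout idea; the struct and field names are hypothetical stand-ins for the
kernel's, and the 64-byte cacheline size is an assumption.

/*
 * Sketch of the cacheline-grouping idea (assumed names, not kernel code):
 * fields that are hot on one path are grouped into an anonymous struct
 * that is padded and aligned to a cacheline, so the two groups never
 * share a cacheline.
 */
#include <stdio.h>
#include <stddef.h>

#define CACHELINE 64	/* assumed cacheline size */
#define ____cacheline_aligned_in_smp __attribute__((aligned(CACHELINE)))

struct example_ctx {
	struct {			/* submission side, under uring_lock */
		unsigned	cached_sq_head;
		unsigned	sq_entries;
		void		*rsrc_node;	/* fixed-resource fast path */
	} ____cacheline_aligned_in_smp;

	struct {			/* completion side */
		unsigned	cached_cq_tail;
		unsigned	cq_entries;
	} ____cacheline_aligned_in_smp;
};

int main(void)
{
	/* the completion group starts on its own cacheline */
	printf("sq group at %zu, cq group at %zu\n",
	       offsetof(struct example_ctx, cached_sq_head),
	       offsetof(struct example_ctx, cached_cq_tail));
	return 0;
}

Built with gcc -std=c11, the first group sits at offset 0 and the second at
offset 64: the 16 bytes of submission-side data are padded out to a full
cacheline, which is exactly the effect the regrouping in this patch exploits.
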
@@ -353,6 +353,7 @@ struct io_ring_ctx {
 		unsigned int		restricted: 1;
 	} ____cacheline_aligned_in_smp;
 
+	/* submission data */
 	struct {
 		/*
 		 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -369,13 +370,27 @@ struct io_ring_ctx {
 		struct io_uring_sqe	*sq_sqes;
 		unsigned		cached_sq_head;
 		unsigned		sq_entries;
-		unsigned		sq_thread_idle;
 		unsigned		cached_sq_dropped;
 		unsigned long		sq_check_overflow;
-
 		struct list_head	defer_list;
+
+		/*
+		 * Fixed resources fast path, should be accessed only under
+		 * uring_lock, and updated through io_uring_register(2)
+		 */
+		struct io_rsrc_node	*rsrc_node;
+		struct io_file_table	file_table;
+		unsigned		nr_user_files;
+		unsigned		nr_user_bufs;
+		struct io_mapped_ubuf	**user_bufs;
+
+		struct io_submit_state	submit_state;
 		struct list_head	timeout_list;
 		struct list_head	cq_overflow_list;
+		struct xarray		io_buffers;
+		struct xarray		personalities;
+		u32			pers_next;
+		unsigned		sq_thread_idle;
 	} ____cacheline_aligned_in_smp;
 
 	struct {
@@ -383,7 +398,6 @@ struct io_ring_ctx {
 		wait_queue_head_t	wait;
 	} ____cacheline_aligned_in_smp;
 
-	struct io_submit_state	submit_state;
 	/* IRQ completion list, under ->completion_lock */
 	struct list_head	locked_free_list;
 	unsigned int		locked_free_nr;
@@ -394,21 +408,6 @@ struct io_ring_ctx {
 	struct wait_queue_head	sqo_sq_wait;
 	struct list_head	sqd_list;
 
-	/*
-	 * Fixed resources fast path, should be accessed only under uring_lock,
-	 * and updated through io_uring_register(2)
-	 */
-	struct io_rsrc_node	*rsrc_node;
-	struct io_file_table	file_table;
-	unsigned		nr_user_files;
-	unsigned		nr_user_bufs;
-	struct io_mapped_ubuf	**user_bufs;
-
-	struct xarray		io_buffers;
-
-	struct xarray		personalities;
-	u32			pers_next;
-
 	struct {
 		unsigned		cached_cq_tail;
 		unsigned		cq_entries;
...