Commit 2804ecd8 authored by Jens Axboe

io_uring: move apoll->events cache

In preparation for fixing a regression with pulling in an extra cacheline
for IO that doesn't usually touch the last cacheline of the io_kiocb,
move the cached location of apoll->events to space shared with some other
completion data. Like cflags, this isn't used until after the request
has been completed, so we can piggy back on top of comp_list.

Fixes: 81459350 ("io_uring: cache req->apoll->events in req->cflags")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6f83ab22
@@ -916,8 +916,12 @@ struct io_kiocb {
 	/* store used ubuf, so we can prevent reloading */
 	struct io_mapped_ubuf *imu;
 
-	/* used by request caches, completion batching and iopoll */
-	struct io_wq_work_node comp_list;
+	union {
+		/* used by request caches, completion batching and iopoll */
+		struct io_wq_work_node comp_list;
+		/* cache ->apoll->events */
+		int apoll_events;
+	};
 	atomic_t refs;
 	atomic_t poll_refs;
 	struct io_task_work io_task_work;
@@ -5833,7 +5837,6 @@ static void io_poll_remove_entries(struct io_kiocb *req)
 static int io_poll_check_events(struct io_kiocb *req, bool locked)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_poll_iocb *poll = io_poll_get_single(req);
 	int v;
 
 	/* req->task == current here, checking PF_EXITING is safe */
@@ -5850,17 +5853,17 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
 		return -ECANCELED;
 
 	if (!req->result) {
-		struct poll_table_struct pt = { ._key = req->cflags };
+		struct poll_table_struct pt = { ._key = req->apoll_events };
 
 		if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
 			req->result = -EBADF;
 		else
-			req->result = vfs_poll(req->file, &pt) & req->cflags;
+			req->result = vfs_poll(req->file, &pt) & req->apoll_events;
 	}
 
 	/* multishot, just fill an CQE and proceed */
-	if (req->result && !(req->cflags & EPOLLONESHOT)) {
-		__poll_t mask = mangle_poll(req->result & poll->events);
+	if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
+		__poll_t mask = mangle_poll(req->result & req->apoll_events);
 		bool filled;
 
 		spin_lock(&ctx->completion_lock);
@@ -5938,7 +5941,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
 	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
 	 */
-	req->cflags = events;
+	req->apoll_events = events;
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else
@@ -6330,7 +6333,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 		return -EINVAL;
 
 	io_req_set_refcount(req);
-	req->cflags = poll->events = io_poll_parse_events(sqe, flags);
+	req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
 	return 0;
 }
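
Editor's note: the change works because comp_list and the cached poll events are never live at the same time; comp_list is only used once the request is being batched for completion, while apoll_events is only consulted while poll is armed. Below is a minimal, stand-alone sketch of that union-sharing idea. The struct and field names (fake_request, fake_work_node, hot_state, cold_state) are illustrative placeholders, not the real io_uring definitions.

/*
 * Minimal sketch of the layout trick: two fields with non-overlapping
 * lifetimes share storage in a union, so caching the poll events does
 * not add a new field (and potentially an extra cacheline) to the
 * struct. All names here are made up for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_work_node {
	struct fake_work_node *next;
};

struct fake_request {
	char hot_state[64];		/* fields touched on every request */
	union {
		/* used only for completion batching, after the request is done */
		struct fake_work_node comp_list;
		/* used only while poll is armed, before completion */
		int apoll_events;
	};
	char cold_state[64];		/* rarely-touched tail of the struct */
};

int main(void)
{
	/* the cached events alias comp_list rather than widening the struct */
	printf("offsetof(comp_list)    = %zu\n", offsetof(struct fake_request, comp_list));
	printf("offsetof(apoll_events) = %zu\n", offsetof(struct fake_request, apoll_events));
	printf("sizeof(fake_request)   = %zu\n", sizeof(struct fake_request));
	return 0;
}

Compiled with a C11 compiler, the two offsetof values print the same number: apoll_events reuses storage that already sits alongside the other fields the poll path touches, which is the property the commit exploits.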