Commit dbc2564c authored by Hao Xu, committed by Jens Axboe

io_uring: let fast poll support multishot

For operations like accept, multishot is a useful feature, since it lets us
avoid queueing one accept SQE per connection. Let's integrate it into fast
poll; it may be useful for other operations in the future.
Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220514142046.58072-4-haoxu.linux@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 227685eb
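
For context (not part of this commit): a rough userspace sketch of what the feature enables, assuming liburing 2.2 or later and a kernel that includes this change. Here accept_loop and listen_fd are hypothetical names; listen_fd stands in for a socket that has already been bound and put into listening state.

/*
 * Hypothetical example, not from this patch: a single multishot accept
 * SQE yields one CQE per incoming connection instead of one SQE per
 * accept() call.
 */
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

static int accept_loop(int listen_fd)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	/* Arm one multishot accept request for the lifetime of the loop. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(&ring);

	for (;;) {
		int res;
		unsigned more;

		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret < 0)
			break;
		res = cqe->res;
		more = cqe->flags & IORING_CQE_F_MORE;
		io_uring_cqe_seen(&ring, cqe);

		if (res >= 0) {
			printf("accepted fd %d\n", res);
			close(res);
		}
		/* No IORING_CQE_F_MORE: the multishot request has terminated. */
		if (!more) {
			ret = res < 0 ? res : 0;
			break;
		}
	}

	io_uring_queue_exit(&ring);
	return ret;
}

As long as IORING_CQE_F_MORE is set in cqe->flags, the request stays armed and keeps producing completions; once the flag is absent, the request is finished and must be re-armed.
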
@@ -6011,6 +6011,7 @@ static void io_poll_remove_entries(struct io_kiocb *req)
 	rcu_read_unlock();
 }
 
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags);
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
@@ -6019,10 +6020,10 @@ static void io_poll_remove_entries(struct io_kiocb *req)
  * either spurious wakeup or multishot CQE is served. 0 when it's done with
  * the request, then the mask is stored in req->cqe.res.
  */
-static int io_poll_check_events(struct io_kiocb *req, bool locked)
+static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	int v;
+	int v, ret;
 
 	/* req->task == current here, checking PF_EXITING is safe */
 	if (unlikely(req->task->flags & PF_EXITING))
@@ -6046,23 +6047,37 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
 			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
 		}
 
-		/* multishot, just fill an CQE and proceed */
-		if (req->cqe.res && !(req->apoll_events & EPOLLONESHOT)) {
-			__poll_t mask = mangle_poll(req->cqe.res & req->apoll_events);
+		if ((unlikely(!req->cqe.res)))
+			continue;
+		if (req->apoll_events & EPOLLONESHOT)
+			return 0;
+
+		/* multishot, just fill a CQE and proceed */
+		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
+			__poll_t mask = mangle_poll(req->cqe.res &
+						    req->apoll_events);
 			bool filled;
 
 			spin_lock(&ctx->completion_lock);
-			filled = io_fill_cqe_aux(ctx, req->cqe.user_data, mask,
-						 IORING_CQE_F_MORE);
+			filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
+						 mask, IORING_CQE_F_MORE);
 			io_commit_cqring(ctx);
 			spin_unlock(&ctx->completion_lock);
-			if (unlikely(!filled))
-				return -ECANCELED;
-			io_cqring_ev_posted(ctx);
-		} else if (req->cqe.res) {
-			return 0;
+			if (filled) {
+				io_cqring_ev_posted(ctx);
+				continue;
+			}
+			return -ECANCELED;
 		}
 
+		io_tw_lock(req->ctx, locked);
+		if (unlikely(req->task->flags & PF_EXITING))
+			return -EFAULT;
+		ret = io_issue_sqe(req,
+				   IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
+		if (ret)
+			return ret;
+
 		/*
 		 * Release all references, retry if someone tried to restart
 		 * task_work while we were executing it.
@@ -6077,7 +6092,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = io_poll_check_events(req, *locked);
+	ret = io_poll_check_events(req, locked);
 	if (ret > 0)
 		return;
 
@@ -6102,7 +6117,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = io_poll_check_events(req, *locked);
+	ret = io_poll_check_events(req, locked);
 	if (ret > 0)
 		return;
 
@@ -6343,7 +6358,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct async_poll *apoll;
 	struct io_poll_table ipt;
-	__poll_t mask = IO_ASYNC_POLL_COMMON | POLLERR;
+	__poll_t mask = POLLPRI | POLLERR;
 	int ret;
 
 	if (!def->pollin && !def->pollout)
@@ -6352,6 +6367,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 		return IO_APOLL_ABORTED;
 	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
 		return IO_APOLL_ABORTED;
+	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+		mask |= EPOLLONESHOT;
 
 	if (def->pollin) {
 		mask |= POLLIN | POLLRDNORM;