Commit 007301c4 authored by Linus Torvalds

Merge tag 'io_uring-5.16-2021-11-09' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "Minor fixes that should go into the 5.16 release:

   - Fix max worker setting not working correctly on NUMA (Beld)

   - Correctly return current setting for max workers if zeroes are
     passed in (Pavel)

   - io_queue_sqe_arm_apoll() cleanup, as identified during the initial
     merge (Pavel)

   - Misc fixes (Nghia, me)"

* tag 'io_uring-5.16-2021-11-09' of git://git.kernel.dk/linux-block:
  io_uring: honour zeroes as io-wq worker limits
  io_uring: remove dead 'sqe' store
  io_uring: remove redundant assignment to ret in io_register_iowq_max_workers()
  io-wq: fix max-workers not correctly set on multi-node system
  io_uring: clean up io_queue_sqe_arm_apoll
parents c183e170 bad119b9
fs/io-wq.c:

@@ -1308,7 +1308,9 @@ int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
  */
 int io_wq_max_workers(struct io_wq *wq, int *new_count)
 {
-	int i, node, prev = 0;
+	int prev[IO_WQ_ACCT_NR];
+	bool first_node = true;
+	int i, node;
 
 	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
 	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
@@ -1319,6 +1321,9 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
 	}
 
+	for (i = 0; i < IO_WQ_ACCT_NR; i++)
+		prev[i] = 0;
+
 	rcu_read_lock();
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
@@ -1327,14 +1332,19 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 		raw_spin_lock(&wqe->lock);
 		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
 			acct = &wqe->acct[i];
-			prev = max_t(int, acct->max_workers, prev);
+			if (first_node)
+				prev[i] = max_t(int, acct->max_workers, prev[i]);
 			if (new_count[i])
 				acct->max_workers = new_count[i];
-			new_count[i] = prev;
 		}
 		raw_spin_unlock(&wqe->lock);
+		first_node = false;
 	}
 	rcu_read_unlock();
+
+	for (i = 0; i < IO_WQ_ACCT_NR; i++)
+		new_count[i] = prev[i];
+
 	return 0;
 }
...
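Why this matters on NUMA: the old code kept one scalar prev and wrote it back into new_count[i] inside the per-node loop, so every node after the first had its limit set to the first node's old value rather than the requested one, and the bound/unbound classes bled into each other through the shared scalar. The sketch below is a stand-alone user-space model of the fixed loop, not kernel code; the two-node array, the ACCT_NR/NR_NODES constants, and the set_max_workers() helper are invented here purely for illustration.

/*
 * Stand-alone model of the fixed io_wq_max_workers() loop above.
 * The data layout is simplified: one limits array per "node".
 */
#include <stdio.h>

#define ACCT_NR   2          /* bound / unbound worker classes */
#define NR_NODES  2          /* pretend two NUMA nodes */

static int max_workers[NR_NODES][ACCT_NR] = {
	{ 4, 8 },            /* node 0 current limits */
	{ 4, 8 },            /* node 1 current limits */
};

/*
 * Mirrors the fixed logic: remember the old limits from the first node
 * only, apply the caller's non-zero requests to every node, then hand
 * the old limits back through new_count[].
 */
static void set_max_workers(int *new_count)
{
	int prev[ACCT_NR] = { 0, 0 };
	int node, i;

	for (node = 0; node < NR_NODES; node++) {
		for (i = 0; i < ACCT_NR; i++) {
			if (node == 0 && max_workers[node][i] > prev[i])
				prev[i] = max_workers[node][i];
			if (new_count[i])
				max_workers[node][i] = new_count[i];
		}
	}
	for (i = 0; i < ACCT_NR; i++)
		new_count[i] = prev[i];
}

int main(void)
{
	int req[ACCT_NR] = { 16, 32 };

	set_max_workers(req);
	/* req[] now reports the limits that were in effect before the call */
	printf("previous limits: bound=%d unbound=%d\n", req[0], req[1]);
	/* with the fix, the second node also gets the requested values */
	printf("node1 limits now: bound=%d unbound=%d\n",
	       max_workers[1][0], max_workers[1][1]);
	return 0;
}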
fs/io_uring.c:

@@ -6950,10 +6950,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 
 	switch (io_arm_poll_handler(req)) {
 	case IO_APOLL_READY:
-		if (linked_timeout) {
-			io_queue_linked_timeout(linked_timeout);
-			linked_timeout = NULL;
-		}
 		io_req_task_queue(req);
 		break;
 	case IO_APOLL_ABORTED:
@@ -10144,7 +10140,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 	for (i = 0; i < sq_entries; i++) {
 		unsigned int entry = i + sq_head;
 		unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
-		struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx];
+		struct io_uring_sqe *sqe;
 
 		if (sq_idx > sq_mask)
 			continue;
@@ -10795,10 +10791,11 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
 
 	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
 
-	memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+	for (i = 0; i < ARRAY_SIZE(new_count); i++)
+		if (new_count[i])
+			ctx->iowq_limits[i] = new_count[i];
 	ctx->iowq_limits_set = true;
 
-	ret = -EINVAL;
 	if (tctx && tctx->io_wq) {
 		ret = io_wq_max_workers(tctx->io_wq, new_count);
 		if (ret)
...
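Taken together with the io-wq change, the register path now treats a zero entry as "leave this class alone and report the current value back". A minimal user-space sketch of that query-only call follows; it assumes liburing (>= 2.1) for the io_uring_register_iowq_max_workers() wrapper around IORING_REGISTER_IOWQ_MAX_WORKERS, and trims error handling for brevity.

/*
 * Query the current io-wq worker limits by passing all zeroes.
 * Requires liburing; link with -luring.
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	unsigned int vals[2] = { 0, 0 };   /* all zeroes: query, don't change */
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	/*
	 * With the fix above, zero entries leave the stored limits untouched
	 * and the current limits are reported back through vals[].  On a ring
	 * whose io-wq has not been created yet, the values may still be zero.
	 */
	ret = io_uring_register_iowq_max_workers(&ring, vals);
	if (ret)
		fprintf(stderr, "register_iowq_max_workers: %d\n", ret);
	else
		printf("current limits: bound=%u unbound=%u\n", vals[0], vals[1]);

	io_uring_queue_exit(&ring);
	return ret ? 1 : 0;
}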