Commit 576a347b authored by Jens Axboe

io-wq: have io_wq_create() take a 'data' argument

We currently pass in four arguments besides the bounded size. In
preparation for adding one more argument, let's bundle them up in
a struct to make the call more readable.

No functional changes in this patch.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 311ae9e1
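
The change is a straightforward parameter-object refactor: callers fill in a
struct and pass its address, so a future field touches only the struct
definition and the sites that set or read it, not every call site's
signature. Below is a minimal user-space sketch of the same shape (stand-in
names and types for illustration, not the kernel code itself):

#include <stdio.h>

/* Simplified stand-ins for the fields io_wq_create() consumes. */
struct wq_data {
	void *mm;
	void *user;
	void (*get_work)(void);
	void (*put_work)(void);
};

/*
 * The constructor copies what it needs out of 'data', so the caller
 * may keep the struct as a short-lived local, as the io_uring hunk
 * below does with 'struct io_wq_data data;'.
 */
static int wq_create(unsigned int bounded, const struct wq_data *data)
{
	printf("bounded=%u\n", bounded);
	data->get_work();
	data->put_work();
	return 0;
}

static void get_work(void) { puts("get_work"); }
static void put_work(void) { puts("put_work"); }

int main(void)
{
	struct wq_data data = {
		.mm = NULL,
		.user = NULL,
		.get_work = get_work,
		.put_work = put_work,
	};
	return wq_create(4, &data);
}

The same property is visible in the diff: io_wq_create() copies data->mm,
data->user, and the two callbacks into struct io_wq, so nothing requires the
caller's struct to outlive the call.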
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -974,9 +974,7 @@ void io_wq_flush(struct io_wq *wq)
 	}
 }
 
-struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
-			   struct user_struct *user, get_work_fn *get_work,
-			   put_work_fn *put_work)
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 {
 	int ret = -ENOMEM, i, node;
 	struct io_wq *wq;
@@ -992,11 +990,11 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	wq->get_work = get_work;
-	wq->put_work = put_work;
+	wq->get_work = data->get_work;
+	wq->put_work = data->put_work;
 
 	/* caller must already hold a reference to this */
-	wq->user = user;
+	wq->user = data->user;
 
 	i = 0;
 	for_each_online_node(node) {
@@ -1009,7 +1007,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		wqe->node = node;
 		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
 		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
-		if (user) {
+		if (wq->user) {
 			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
 					task_rlimit(current, RLIMIT_NPROC);
 		}
@@ -1031,7 +1029,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		goto err;
 
 	/* caller must have already done mmgrab() on this mm */
-	wq->mm = mm;
+	wq->mm = data->mm;
 
 	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
 	if (!IS_ERR(wq->manager)) {
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -42,9 +42,15 @@ struct io_wq_work {
 typedef void (get_work_fn)(struct io_wq_work *);
 typedef void (put_work_fn)(struct io_wq_work *);
 
-struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
-				struct user_struct *user,
-				get_work_fn *get_work, put_work_fn *put_work);
+struct io_wq_data {
+	struct mm_struct *mm;
+	struct user_struct *user;
+
+	get_work_fn *get_work;
+	put_work_fn *put_work;
+};
+
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
 void io_wq_destroy(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3962,6 +3962,7 @@ static void io_get_work(struct io_wq_work *work)
 static int io_sq_offload_start(struct io_ring_ctx *ctx,
 			       struct io_uring_params *p)
 {
+	struct io_wq_data data;
 	unsigned concurrency;
 	int ret;
 
@@ -4006,10 +4007,14 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 		goto err;
 	}
 
+	data.mm = ctx->sqo_mm;
+	data.user = ctx->user;
+	data.get_work = io_get_work;
+	data.put_work = io_put_work;
+
 	/* Do QD, or 4 * CPUS, whatever is smallest */
 	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-	ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
-					io_get_work, io_put_work);
+	ctx->io_wq = io_wq_create(concurrency, &data);
 	if (IS_ERR(ctx->io_wq)) {
 		ret = PTR_ERR(ctx->io_wq);
 		ctx->io_wq = NULL;