Commit dca9cf8b authored by Pavel Begunkov, committed by Jens Axboe

io_uring: inline io_req_work_grab_env()

The only caller of io_req_work_grab_env() is io_prep_async_work(), and
both of them initialise req->work. Inline grab_env(): the code is easier
to keep correct this way, and there have already been bugs caused by
misplacing io_req_init_async().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0f7e466b
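
For orientation, here is a rough sketch of what io_prep_async_work() looks like once the patch is applied, stitched together from the hunks below rather than copied from the tree. The io_req_init_async() call at the top is implied by the commit message but falls outside the hunk context, and the REQ_F_ISREG/hash_reg_file branch of the real function is omitted for brevity:

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	/* runs first: marks req->work as initialised (REQ_F_WORK_INITIALIZED) */
	io_req_init_async(req);

	if (def->unbound_nonreg_file)
		req->work.flags |= IO_WQ_WORK_UNBOUND;

	/* body of the former io_req_work_grab_env(), now inline */
	if (!req->work.mm && def->needs_mm) {
		mmgrab(current->mm);
		req->work.mm = current->mm;
	}
	if (!req->work.creds)
		req->work.creds = get_current_cred();
	if (!req->work.fs && def->needs_fs) {
		spin_lock(&current->fs->lock);
		if (!current->fs->in_exec) {
			req->work.fs = current->fs;
			req->work.fs->users++;
		} else {
			/* task is exec'ing: mark for cancellation instead of grabbing fs */
			req->work.flags |= IO_WQ_WORK_CANCEL;
		}
		spin_unlock(&current->fs->lock);
	}
}

With grab_env() gone, the single io_req_init_async() call sits before any req->work field is touched, which is the ordering the old split made easy to get wrong.
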
@@ -1115,31 +1115,7 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 	}
 }
 
-static void io_req_work_grab_env(struct io_kiocb *req)
-{
-	const struct io_op_def *def = &io_op_defs[req->opcode];
-
-	io_req_init_async(req);
-
-	if (!req->work.mm && def->needs_mm) {
-		mmgrab(current->mm);
-		req->work.mm = current->mm;
-	}
-	if (!req->work.creds)
-		req->work.creds = get_current_cred();
-	if (!req->work.fs && def->needs_fs) {
-		spin_lock(&current->fs->lock);
-		if (!current->fs->in_exec) {
-			req->work.fs = current->fs;
-			req->work.fs->users++;
-		} else {
-			req->work.flags |= IO_WQ_WORK_CANCEL;
-		}
-		spin_unlock(&current->fs->lock);
-	}
-}
-
-static inline void io_req_work_drop_env(struct io_kiocb *req)
+static void io_req_clean_work(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_WORK_INITIALIZED))
 		return;
@@ -1177,8 +1153,22 @@ static void io_prep_async_work(struct io_kiocb *req)
 		if (def->unbound_nonreg_file)
 			req->work.flags |= IO_WQ_WORK_UNBOUND;
 	}
-
-	io_req_work_grab_env(req);
+	if (!req->work.mm && def->needs_mm) {
+		mmgrab(current->mm);
+		req->work.mm = current->mm;
+	}
+	if (!req->work.creds)
+		req->work.creds = get_current_cred();
+	if (!req->work.fs && def->needs_fs) {
+		spin_lock(&current->fs->lock);
+		if (!current->fs->in_exec) {
+			req->work.fs = current->fs;
+			req->work.fs->users++;
+		} else {
+			req->work.flags |= IO_WQ_WORK_CANCEL;
+		}
+		spin_unlock(&current->fs->lock);
+	}
 }
 
 static void io_prep_async_link(struct io_kiocb *req)
@@ -1547,7 +1537,7 @@ static void io_dismantle_req(struct io_kiocb *req)
 	if (req->file)
 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
 	__io_put_req_task(req);
-	io_req_work_drop_env(req);
+	io_req_clean_work(req);
 
 	if (req->flags & REQ_F_INFLIGHT) {
 		struct io_ring_ctx *ctx = req->ctx;
@@ -4825,7 +4815,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 		io_put_req(req);
 		/*
 		 * restore ->work because we will call
-		 * io_req_work_drop_env below when dropping the
+		 * io_req_clean_work below when dropping the
 		 * final reference.
 		 */
 		if (req->flags & REQ_F_WORK_INITIALIZED)
@@ -4965,7 +4955,7 @@ static int io_poll_add(struct io_kiocb *req)
 	__poll_t mask;
 
 	/* ->work is in union with hash_node and others */
-	io_req_work_drop_env(req);
+	io_req_clean_work(req);
 	req->flags &= ~REQ_F_WORK_INITIALIZED;
 	INIT_HLIST_NODE(&req->hash_node);