Commit 00ddff43 authored by Jens Axboe

io-wq: ensure task is running before processing task_work

Mark the current task as running if we need to run task_work from the
io-wq threads as part of work handling. If that is the case, then return
as such so that the caller can appropriately loop back and reset if it
was part of a going-to-sleep flush.

Fixes: 3bfe6106 ("io-wq: fork worker threads from original task")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4db4b1a0
@@ -386,13 +386,16 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 	return NULL;
 }
 
-static void io_flush_signals(void)
+static bool io_flush_signals(void)
 {
 	if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
+		__set_current_state(TASK_RUNNING);
 		if (current->task_works)
 			task_work_run();
 		clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+		return true;
 	}
+	return false;
 }
 
 static void io_assign_current_work(struct io_worker *worker,
@@ -499,7 +502,8 @@ static int io_wqe_worker(void *data)
 		}
 		__io_worker_idle(wqe, worker);
 		raw_spin_unlock_irq(&wqe->lock);
-		io_flush_signals();
+		if (io_flush_signals())
+			continue;
 		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
 		if (try_to_freeze() || ret)
 			continue;
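For context, the return value matters because of where io_flush_signals() sits in the worker's idle path: io_wqe_worker() prepares to sleep (a non-running task state) before flushing, and task_work_run() may block, so the task must first be put back into TASK_RUNNING. The sketch below is a simplified, non-verbatim condensation of that loop as it looks after this patch; the function name io_wqe_worker_idle_sketch is hypothetical, and locking, exit conditions, and work dispatch are elided.

/*
 * Hypothetical, simplified sketch of the io_wqe_worker() idle path after
 * this change.  Illustration only, not the verbatim kernel implementation.
 */
static void io_wqe_worker_idle_sketch(struct io_worker *worker, struct io_wqe *wqe)
{
	for (;;) {
		long ret;

		/* Prepare to sleep before checking for more work. */
		set_current_state(TASK_INTERRUPTIBLE);

		/* ... dequeue and handle pending work under wqe->lock ... */

		/*
		 * io_flush_signals() may run task_work, which can block and
		 * therefore needs the task in TASK_RUNNING.  If it ran
		 * anything, the prepared sleep state is gone, so loop back
		 * and set it up again instead of calling schedule_timeout()
		 * with a stale task state.
		 */
		if (io_flush_signals())
			continue;

		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (try_to_freeze() || ret)
			continue;

		/* ... idle timeout handling / exit checks elided ... */
	}
}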