Commit c9178087 authored by Tejun Heo

workqueue: perform non-reentrancy test when queueing to unbound workqueues too

Because per-cpu workqueues have multiple pwqs (pool_workqueues) to
serve the CPUs, to guarantee that a single work item isn't queued on
one pwq while still executing on another, __queue_work() takes a look
at the previous pool the target work item was on and, if it's still
executing there, queues the work item on that pool.
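
To make the guarantee concrete, here is a minimal user-space sketch of
the same idea. It is an illustration only; every name in it (struct
pool, struct work, select_pool()) is invented for the example and is
not the kernel API:

    #include <stdio.h>

    /* Model, not kernel code: a work item remembers the pool it last
     * ran on; queueing follows the item back to that pool while it is
     * still executing there, so it never runs on two pools at once. */
    struct pool {
            int id;
    };

    struct work {
            struct pool *last_pool;  /* pool the item last ran on */
            int running;             /* still executing there? */
    };

    /* Prefer the requested pool unless the work is still running on a
     * different one. */
    static struct pool *select_pool(struct work *w, struct pool *requested)
    {
            if (w->last_pool && w->last_pool != requested && w->running)
                    return w->last_pool;  /* non-reentrancy: follow the work */
            return requested;
    }

    int main(void)
    {
            struct pool a = { .id = 0 }, b = { .id = 1 };
            struct work w = { .last_pool = &a, .running = 1 };

            /* Still running on pool a, so it is queued there even
             * though pool b was requested. */
            printf("queued on pool %d\n", select_pool(&w, &b)->id);

            w.running = 0;  /* finished: the requested pool wins */
            printf("queued on pool %d\n", select_pool(&w, &b)->id);
            return 0;
    }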

To support changing workqueue_attrs on the fly, unbound workqueues too
will have multiple pwqs and thus need the non-reentrancy test when
queueing.  This patch modifies __queue_work() such that the reentrancy
test is performed regardless of the workqueue type.

per_cpu_ptr(wq->cpu_pwqs, cpu) was previously used to determine the
matching pwq for the last pool.  This can't be used for unbound
workqueues and is replaced with worker->current_pwq, which also
happens to be simpler.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent 75ccf595
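
For reference, the non-reentrancy test as it reads after this patch,
condensed from __queue_work() in the diff below (locking elided for
brevity):

    last_pool = get_work_pool(work);
    if (last_pool && last_pool != pwq->pool) {
            struct worker *worker;

            worker = find_worker_executing_work(last_pool, work);
            if (worker && worker->current_pwq->wq == wq)
                    pwq = worker->current_pwq;  /* per-cpu or unbound alike */
    }

Because find_worker_executing_work() only returns a worker currently
executing @work, and the worker->current_pwq->wq == wq check confirms
that the pwq belongs to the workqueue being queued on,
worker->current_pwq can be reused directly; unlike
per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu), it also exists for unbound
workqueues.
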
@@ -1209,6 +1209,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                          struct work_struct *work)
 {
         struct pool_workqueue *pwq;
+        struct worker_pool *last_pool;
         struct list_head *worklist;
         unsigned int work_flags;
         unsigned int req_cpu = cpu;
@@ -1228,22 +1229,21 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
             WARN_ON_ONCE(!is_chained_work(wq)))
                 return;
 
-        /* determine the pwq to use */
+        /* pwq which will be used unless @work is executing elsewhere */
         if (!(wq->flags & WQ_UNBOUND)) {
-                struct worker_pool *last_pool;
-
                 if (cpu == WORK_CPU_UNBOUND)
                         cpu = raw_smp_processor_id();
+                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+        } else {
+                pwq = first_pwq(wq);
+        }
 
-        /*
-         * It's multi cpu.  If @work was previously on a different
-         * cpu, it might still be running there, in which case the
-         * work needs to be queued on that cpu to guarantee
-         * non-reentrancy.
-         */
-        pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+        /*
+         * If @work was previously on a different pool, it might still be
+         * running there, in which case the work needs to be queued on that
+         * pool to guarantee non-reentrancy.
+         */
         last_pool = get_work_pool(work);
-
         if (last_pool && last_pool != pwq->pool) {
                 struct worker *worker;
 
@@ -1252,7 +1252,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                 worker = find_worker_executing_work(last_pool, work);
 
                 if (worker && worker->current_pwq->wq == wq) {
-                        pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
+                        pwq = worker->current_pwq;
                 } else {
                         /* meh... not running there, queue here */
                         spin_unlock(&last_pool->lock);
@@ -1261,10 +1261,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         } else {
                 spin_lock(&pwq->pool->lock);
         }
-        } else {
-                pwq = first_pwq(wq);
-                spin_lock(&pwq->pool->lock);
-        }
 
         /* pwq determined, queue */
         trace_workqueue_queue_work(req_cpu, pwq, work);
...