Commit bc2ae0f5 authored by Tejun Heo

workqueue: drop @bind from create_worker()

Currently, create_worker()'s callers are responsible for deciding
whether the newly created worker should be bound to the associated CPU,
and create_worker() sets WORKER_UNBOUND only for workers of the
unbound global_cwq.  Creation during normal operation is always via
maybe_create_worker() with @bind true.  For workers created during
hotplug, @bind is false.

The normal operation path is planned to be used even while the CPU is
going through hotplug operations or is offline, so this static decision
won't work.

Drop @bind from create_worker() and decide whether to bind by looking
at GCWQ_DISASSOCIATED.  create_worker() will also set WORKER_UNBOUND
automatically if disassociated.  To avoid flipping GCWQ_DISASSOCIATED
while create_worker() is in progress, the flag is now allowed to be
changed only while holding all manager_mutexes on the global_cwq.

This requires that GCWQ_DISASSOCIATED is not cleared behind trustee's
back.  CPU_ONLINE no longer clears DISASSOCIATED before flushing
trustee, which clears DISASSOCIATED before rebinding remaining workers
if asked to release.  For cases where trustee isn't around, CPU_ONLINE
clears DISASSOCIATED after flushing trustee.  Also, now, first_idle
has UNBOUND set on creation which is explicitly cleared by CPU_ONLINE
while binding it.  These convolutions will soon be removed by further
simplification of CPU hotplug path.
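In short, the binding decision moves from the caller's @bind argument
into create_worker() itself.  A condensed sketch of the resulting
logic, simplified from the create_worker() hunk in the diff below
(allocation, naming, and error handling omitted):

	/*
	 * Simplified illustration only, not the full function.  The
	 * caller must keep GCWQ_DISASSOCIATED stable while this runs,
	 * i.e. hold all manager_mutexes on the gcwq.
	 */
	if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
		/* associated gcwq: bind the new worker to its CPU */
		kthread_bind(worker->task, gcwq->cpu);
	} else {
		/* disassociated or unbound gcwq: leave worker unbound */
		worker->task->flags |= PF_THREAD_BOUND;
		worker->flags |= WORKER_UNBOUND;
	}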
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
parent 60373152
@@ -45,7 +45,22 @@
 #include "workqueue_sched.h"
 
 enum {
-	/* global_cwq flags */
+	/*
+	 * global_cwq flags
+	 *
+	 * A bound gcwq is either associated or disassociated with its CPU.
+	 * While associated (!DISASSOCIATED), all workers are bound to the
+	 * CPU and none has %WORKER_UNBOUND set and concurrency management
+	 * is in effect.
+	 *
+	 * While DISASSOCIATED, the cpu may be offline and all workers have
+	 * %WORKER_UNBOUND set and concurrency management disabled, and may
+	 * be executing on any CPU.  The gcwq behaves as an unbound one.
+	 *
+	 * Note that DISASSOCIATED can be flipped only while holding
+	 * managership of all pools on the gcwq to avoid changing binding
+	 * state while create_worker() is in progress.
+	 */
 	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
 	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */
@@ -1334,7 +1349,6 @@ static struct worker *alloc_worker(void)
 /**
  * create_worker - create a new workqueue worker
  * @pool: pool the new worker will belong to
- * @bind: whether to set affinity to @cpu or not
  *
  * Create a new worker which is bound to @pool.  The returned worker
  * can be started by calling start_worker() or destroyed using
@@ -1346,10 +1360,9 @@ static struct worker *alloc_worker(void)
  * RETURNS:
  * Pointer to the newly created worker.
  */
-static struct worker *create_worker(struct worker_pool *pool, bool bind)
+static struct worker *create_worker(struct worker_pool *pool)
 {
 	struct global_cwq *gcwq = pool->gcwq;
-	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
 	const char *pri = worker_pool_pri(pool) ? "H" : "";
 	struct worker *worker = NULL;
 	int id = -1;
@@ -1370,7 +1383,7 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
 	worker->pool = pool;
 	worker->id = id;
 
-	if (!on_unbound_cpu)
+	if (gcwq->cpu != WORK_CPU_UNBOUND)
 		worker->task = kthread_create_on_node(worker_thread,
 					worker, cpu_to_node(gcwq->cpu),
 					"kworker/%u:%d%s", gcwq->cpu, id, pri);
@@ -1384,14 +1397,18 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
 		set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 
 	/*
-	 * An unbound worker will become a regular one if CPU comes online
-	 * later on.  Make sure every worker has PF_THREAD_BOUND set.
+	 * Determine CPU binding of the new worker depending on
+	 * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
+	 * flag remains stable across this function.  See the comments
+	 * above the flag definition for details.
+	 *
+	 * As an unbound worker may later become a regular one if CPU comes
+	 * online, make sure every worker has %PF_THREAD_BOUND set.
 	 */
-	if (bind && !on_unbound_cpu)
+	if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
 		kthread_bind(worker->task, gcwq->cpu);
-	else {
+	} else {
 		worker->task->flags |= PF_THREAD_BOUND;
-		if (on_unbound_cpu)
-			worker->flags |= WORKER_UNBOUND;
+		worker->flags |= WORKER_UNBOUND;
 	}
@@ -1568,7 +1585,7 @@ __acquires(&gcwq->lock)
 	while (true) {
 		struct worker *worker;
 
-		worker = create_worker(pool, true);
+		worker = create_worker(pool);
 		if (worker) {
 			del_timer_sync(&pool->mayday_timer);
 			spin_lock_irq(&gcwq->lock);
@@ -3420,14 +3437,12 @@ static int __cpuinit trustee_thread(void *__gcwq)
 			if (need_to_create_worker(pool)) {
 				spin_unlock_irq(&gcwq->lock);
-				worker = create_worker(pool, false);
+				worker = create_worker(pool);
 				spin_lock_irq(&gcwq->lock);
-				if (worker) {
-					worker->flags |= WORKER_UNBOUND;
+				if (worker)
 					start_worker(worker);
-				}
 			}
 		}
 
 		/* give a breather */
 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
@@ -3463,6 +3478,10 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	for_each_worker_pool(pool, gcwq)
 		WARN_ON(!list_empty(&pool->idle_list));
 
+	/* if we're reassociating, clear DISASSOCIATED */
+	if (gcwq->trustee_state == TRUSTEE_RELEASE)
+		gcwq->flags &= ~GCWQ_DISASSOCIATED;
+
 	for_each_busy_worker(worker, i, pos, gcwq) {
 		struct work_struct *rebind_work = &worker->rebind_work;
@@ -3546,7 +3565,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		i = 0;
 		for_each_worker_pool(pool, gcwq) {
 			BUG_ON(pool->first_idle);
-			new_workers[i] = create_worker(pool, false);
+			new_workers[i] = create_worker(pool);
 			if (!new_workers[i++])
 				goto err_destroy;
 		}
@@ -3584,13 +3603,19 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		gcwq->flags &= ~GCWQ_DISASSOCIATED;
 		if (gcwq->trustee_state != TRUSTEE_DONE) {
 			gcwq->trustee_state = TRUSTEE_RELEASE;
 			wake_up_process(gcwq->trustee);
 			wait_trustee_state(gcwq, TRUSTEE_DONE);
 		}
 
+		/*
+		 * Either DISASSOCIATED is already cleared or no worker is
+		 * left on the gcwq.  Safe to clear DISASSOCIATED without
+		 * claiming managers.
+		 */
+		gcwq->flags &= ~GCWQ_DISASSOCIATED;
+
 		/*
 		 * Trustee is done and there might be no worker left.
 		 * Put the first_idle in and request a real manager to
@@ -3601,6 +3626,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			kthread_bind(pool->first_idle->task, cpu);
 			spin_lock_irq(&gcwq->lock);
 			pool->flags |= POOL_MANAGE_WORKERS;
+			pool->first_idle->flags &= ~WORKER_UNBOUND;
 			start_worker(pool->first_idle);
 			pool->first_idle = NULL;
 		}
@@ -3899,7 +3925,7 @@ static int __init init_workqueues(void)
 		for_each_worker_pool(pool, gcwq) {
 			struct worker *worker;
 
-			worker = create_worker(pool, true);
+			worker = create_worker(pool);
 			BUG_ON(!worker);
 			spin_lock_irq(&gcwq->lock);
 			start_worker(worker);