Commit bc35f7ef authored by Lai Jiangshan, committed by Tejun Heo

workqueue: Convert the type of pool->nr_running to int

It is only modified on the associated CPU, so it doesn't need to be atomic.

tj: Comment updated.
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent cc5bff38
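
For context, a condensed view of the update sites after the conversion (not part of the patch): every writer runs on the pool's associated CPU, either with preemption disabled or with pool->lock held, so plain int operations cannot race with one another.

	/* writer: wq_worker_running(), on the pool's CPU */
	preempt_disable();
	pool->nr_running++;
	preempt_enable();

	/* writer: wq_worker_sleeping(), same CPU, under the pool lock */
	raw_spin_lock_irq(&pool->lock);
	pool->nr_running--;
	raw_spin_unlock_irq(&pool->lock);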
@@ -154,8 +154,13 @@ struct worker_pool {
 	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
 
-	/* The current concurrency level. */
-	atomic_t		nr_running;
+	/*
+	 * The counter is incremented in a process context on the associated CPU
+	 * w/ preemption disabled, and decremented or reset in the same context
+	 * but w/ pool->lock held. The readers grab pool->lock and are
+	 * guaranteed to see if the counter reached zero.
+	 */
+	int			nr_running;
 
 	struct list_head	worklist;	/* L: list of pending works */
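
A sketch of the reader side that the new comment relies on (not part of the patch): a reader takes pool->lock, which orders it against any decrement or reset done under the same lock, so a counter that reached zero is reliably observed.

	/* reader, e.g. a caller of need_more_worker() */
	raw_spin_lock_irq(&pool->lock);
	if (!pool->nr_running && !list_empty(&pool->worklist))
		wake_up_worker(pool);
	raw_spin_unlock_irq(&pool->lock);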
@@ -777,7 +782,7 @@ static bool work_is_canceling(struct work_struct *work)
 
 static bool __need_more_worker(struct worker_pool *pool)
 {
-	return !atomic_read(&pool->nr_running);
+	return !pool->nr_running;
 }
 
 /*
@@ -802,8 +807,7 @@ static bool may_start_working(struct worker_pool *pool)
 /* Do I need to keep working?  Called from currently running workers. */
 static bool keep_working(struct worker_pool *pool)
 {
-	return !list_empty(&pool->worklist) &&
-		atomic_read(&pool->nr_running) <= 1;
+	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
 }
 
 /* Do we need a new worker?  Called from manager. */
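
Why the threshold is one rather than zero (not part of the patch, inferred from the comment above keep_working()): the caller is itself a running worker and is counted in nr_running, so a value of 1 means "only me".

	/* called from a running worker: nr_running includes the caller */
	bool keep = !list_empty(&pool->worklist) &&
		    (pool->nr_running <= 1);	/* no other worker running */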
@@ -873,7 +877,7 @@ void wq_worker_running(struct task_struct *task)
 	 */
 	preempt_disable();
 	if (!(worker->flags & WORKER_NOT_RUNNING))
-		atomic_inc(&worker->pool->nr_running);
+		worker->pool->nr_running++;
 	preempt_enable();
 	worker->sleeping = 0;
 }
@@ -917,8 +921,8 @@ void wq_worker_sleeping(struct task_struct *task)
 		return;
 	}
 
-	if (atomic_dec_and_test(&pool->nr_running) &&
-	    !list_empty(&pool->worklist))
+	pool->nr_running--;
+	if (need_more_worker(pool))
 		wake_up_worker(pool);
 	raw_spin_unlock_irq(&pool->lock);
 }
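
The new form is equivalent to the old test (not part of the patch, expanded for reference): need_more_worker() is !list_empty(&pool->worklist) && __need_more_worker(pool), i.e. !list_empty() && !pool->nr_running, so after the open-coded decrement the wake-up condition matches the old atomic_dec_and_test() && !list_empty() check.

	pool->nr_running--;
	/* need_more_worker(pool), expanded: */
	if (!list_empty(&pool->worklist) && !pool->nr_running)
		wake_up_worker(pool);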
@@ -973,7 +977,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 	/* If transitioning into NOT_RUNNING, adjust nr_running. */
 	if ((flags & WORKER_NOT_RUNNING) &&
 	    !(worker->flags & WORKER_NOT_RUNNING)) {
-		atomic_dec(&pool->nr_running);
+		pool->nr_running--;
 	}
 
 	worker->flags |= flags;
@@ -1005,7 +1009,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
-			atomic_inc(&pool->nr_running);
+			pool->nr_running++;
 }
 
 /**
@@ -1806,8 +1810,7 @@ static void worker_enter_idle(struct worker *worker)
 	mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/* Sanity check nr_running. */
-	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle &&
-		     atomic_read(&pool->nr_running));
+	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
 }
 
 /**
@@ -4985,7 +4988,7 @@ static void unbind_workers(int cpu)
 		 * an unbound (in terms of concurrency management) pool which
 		 * are served by workers tied to the pool.
 		 */
-		atomic_set(&pool->nr_running, 0);
+		pool->nr_running = 0;
 
 		/*
 		 * With concurrency management just turned off, a busy