Commit 24acfb71 authored by Thomas Gleixner's avatar Thomas Gleixner Committed by Tejun Heo

workqueue: Use normal rcu

There is no need for sched_rcu. The undocumented reason why sched_rcu
is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
the fact that sched_rcu reader side critical sections are also protected
by preempt or irq disabled regions.

Replace rcu_read_lock_sched with rcu_read_lock and acquire the RCU lock
where it is not yet explicitly acquired. Replace local_irq_disable() with
rcu_read_lock(). Update asserts.
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>
[bigeasy: mangle changelog a little]
Signed-off-by: default avatarSebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: default avatarTejun Heo <tj@kernel.org>
parent 8194fe94
...@@ -127,16 +127,16 @@ enum { ...@@ -127,16 +127,16 @@ enum {
* *
* PL: wq_pool_mutex protected. * PL: wq_pool_mutex protected.
* *
* PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. * PR: wq_pool_mutex protected for writes. RCU protected for reads.
* *
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
* *
* PWR: wq_pool_mutex and wq->mutex protected for writes. Either or * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
* sched-RCU for reads. * RCU for reads.
* *
* WQ: wq->mutex protected. * WQ: wq->mutex protected.
* *
* WR: wq->mutex protected for writes. Sched-RCU protected for reads. * WR: wq->mutex protected for writes. RCU protected for reads.
* *
* MD: wq_mayday_lock protected. * MD: wq_mayday_lock protected.
*/ */
...@@ -183,7 +183,7 @@ struct worker_pool { ...@@ -183,7 +183,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp; atomic_t nr_running ____cacheline_aligned_in_smp;
/* /*
* Destruction of pool is sched-RCU protected to allow dereferences * Destruction of pool is RCU protected to allow dereferences
* from get_work_pool(). * from get_work_pool().
*/ */
struct rcu_head rcu; struct rcu_head rcu;
...@@ -212,7 +212,7 @@ struct pool_workqueue { ...@@ -212,7 +212,7 @@ struct pool_workqueue {
/* /*
* Release of unbound pwq is punted to system_wq. See put_pwq() * Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue * and pwq_unbound_release_workfn() for details. pool_workqueue
* itself is also sched-RCU protected so that the first pwq can be * itself is also RCU protected so that the first pwq can be
* determined without grabbing wq->mutex. * determined without grabbing wq->mutex.
*/ */
struct work_struct unbound_release_work; struct work_struct unbound_release_work;
...@@ -266,8 +266,8 @@ struct workqueue_struct { ...@@ -266,8 +266,8 @@ struct workqueue_struct {
char name[WQ_NAME_LEN]; /* I: workqueue name */ char name[WQ_NAME_LEN]; /* I: workqueue name */
/* /*
* Destruction of workqueue_struct is sched-RCU protected to allow * Destruction of workqueue_struct is RCU protected to allow walking
* walking the workqueues list without grabbing wq_pool_mutex. * the workqueues list without grabbing wq_pool_mutex.
* This is used to dump all workqueues from sysrq. * This is used to dump all workqueues from sysrq.
*/ */
struct rcu_head rcu; struct rcu_head rcu;
...@@ -359,20 +359,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); ...@@ -359,20 +359,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
#include <trace/events/workqueue.h> #include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \ #define assert_rcu_or_pool_mutex() \
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq_pool_mutex), \ !lockdep_is_held(&wq_pool_mutex), \
"sched RCU or wq_pool_mutex should be held") "RCU or wq_pool_mutex should be held")
#define assert_rcu_or_wq_mutex(wq) \ #define assert_rcu_or_wq_mutex(wq) \
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq->mutex), \ !lockdep_is_held(&wq->mutex), \
"sched RCU or wq->mutex should be held") "RCU or wq->mutex should be held")
#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq->mutex) && \ !lockdep_is_held(&wq->mutex) && \
!lockdep_is_held(&wq_pool_mutex), \ !lockdep_is_held(&wq_pool_mutex), \
"sched RCU, wq->mutex or wq_pool_mutex should be held") "RCU, wq->mutex or wq_pool_mutex should be held")
#define for_each_cpu_worker_pool(pool, cpu) \ #define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
...@@ -384,7 +384,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); ...@@ -384,7 +384,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pool: iteration cursor * @pool: iteration cursor
* @pi: integer used for iteration * @pi: integer used for iteration
* *
* This must be called either with wq_pool_mutex held or sched RCU read * This must be called either with wq_pool_mutex held or RCU read
* locked. If the pool needs to be used beyond the locking in effect, the * locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online. * caller is responsible for guaranteeing that the pool stays online.
* *
...@@ -416,7 +416,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); ...@@ -416,7 +416,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pwq: iteration cursor * @pwq: iteration cursor
* @wq: the target workqueue * @wq: the target workqueue
* *
* This must be called either with wq->mutex held or sched RCU read locked. * This must be called either with wq->mutex held or RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is * If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online. * responsible for guaranteeing that the pwq stays online.
* *
...@@ -552,7 +552,7 @@ static int worker_pool_assign_id(struct worker_pool *pool) ...@@ -552,7 +552,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* @wq: the target workqueue * @wq: the target workqueue
* @node: the node ID * @node: the node ID
* *
* This must be called with any of wq_pool_mutex, wq->mutex or sched RCU * This must be called with any of wq_pool_mutex, wq->mutex or RCU
* read locked. * read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is * If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online. * responsible for guaranteeing that the pwq stays online.
...@@ -696,8 +696,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) ...@@ -696,8 +696,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest * @work: the work item of interest
* *
* Pools are created and destroyed under wq_pool_mutex, and allows read * Pools are created and destroyed under wq_pool_mutex, and allows read
* access under sched-RCU read lock. As such, this function should be * access under RCU read lock. As such, this function should be
* called under wq_pool_mutex or with preemption disabled. * called under wq_pool_mutex or inside of a rcu_read_lock() region.
* *
* All fields of the returned pool are accessible as long as the above * All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used * mentioned locking is in effect. If the returned pool needs to be used
...@@ -1133,7 +1133,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) ...@@ -1133,7 +1133,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{ {
if (pwq) { if (pwq) {
/* /*
* As both pwqs and pools are sched-RCU protected, the * As both pwqs and pools are RCU protected, the
* following lock operations are safe. * following lock operations are safe.
*/ */
spin_lock_irq(&pwq->pool->lock); spin_lock_irq(&pwq->pool->lock);
...@@ -1261,6 +1261,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, ...@@ -1261,6 +1261,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0; return 0;
rcu_read_lock();
/* /*
* The queueing is in progress, or it is already queued. Try to * The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING. * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
...@@ -1299,10 +1300,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, ...@@ -1299,10 +1300,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
set_work_pool_and_keep_pending(work, pool->id); set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock); spin_unlock(&pool->lock);
rcu_read_unlock();
return 1; return 1;
} }
spin_unlock(&pool->lock); spin_unlock(&pool->lock);
fail: fail:
rcu_read_unlock();
local_irq_restore(*flags); local_irq_restore(*flags);
if (work_is_canceling(work)) if (work_is_canceling(work))
return -ENOENT; return -ENOENT;
...@@ -1416,6 +1419,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, ...@@ -1416,6 +1419,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) && if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq))) WARN_ON_ONCE(!is_chained_work(wq)))
return; return;
rcu_read_lock();
retry: retry:
if (req_cpu == WORK_CPU_UNBOUND) if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id()); cpu = wq_select_unbound_cpu(raw_smp_processor_id());
...@@ -1472,10 +1476,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, ...@@ -1472,10 +1476,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
/* pwq determined, queue */ /* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work); trace_workqueue_queue_work(req_cpu, pwq, work);
if (WARN_ON(!list_empty(&work->entry))) { if (WARN_ON(!list_empty(&work->entry)))
spin_unlock(&pwq->pool->lock); goto out;
return;
}
pwq->nr_in_flight[pwq->work_color]++; pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color); work_flags = work_color_to_flags(pwq->work_color);
...@@ -1493,7 +1495,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, ...@@ -1493,7 +1495,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
insert_work(pwq, work, worklist, work_flags); insert_work(pwq, work, worklist, work_flags);
out:
spin_unlock(&pwq->pool->lock); spin_unlock(&pwq->pool->lock);
rcu_read_unlock();
} }
/** /**
...@@ -2975,14 +2979,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, ...@@ -2975,14 +2979,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
might_sleep(); might_sleep();
local_irq_disable(); rcu_read_lock();
pool = get_work_pool(work); pool = get_work_pool(work);
if (!pool) { if (!pool) {
local_irq_enable(); rcu_read_unlock();
return false; return false;
} }
spin_lock(&pool->lock); spin_lock_irq(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */ /* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work); pwq = get_work_pwq(work);
if (pwq) { if (pwq) {
...@@ -3014,10 +3018,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, ...@@ -3014,10 +3018,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
lock_map_acquire(&pwq->wq->lockdep_map); lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map); lock_map_release(&pwq->wq->lockdep_map);
} }
rcu_read_unlock();
return true; return true;
already_gone: already_gone:
spin_unlock_irq(&pool->lock); spin_unlock_irq(&pool->lock);
rcu_read_unlock();
return false; return false;
} }
...@@ -3504,7 +3509,7 @@ static void rcu_free_pool(struct rcu_head *rcu) ...@@ -3504,7 +3509,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool * put_unbound_pool - put a worker_pool
* @pool: worker_pool to put * @pool: worker_pool to put
* *
* Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
* safe manner. get_unbound_pool() calls this function on its failure path * safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through, * and this function should be able to release pools which went through,
* successfully or not, init_worker_pool(). * successfully or not, init_worker_pool().
...@@ -3558,7 +3563,7 @@ static void put_unbound_pool(struct worker_pool *pool) ...@@ -3558,7 +3563,7 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer); del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer); del_timer_sync(&pool->mayday_timer);
/* sched-RCU protected to allow dereferences from get_work_pool() */ /* RCU protected to allow dereferences from get_work_pool() */
call_rcu(&pool->rcu, rcu_free_pool); call_rcu(&pool->rcu, rcu_free_pool);
} }
...@@ -4472,7 +4477,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) ...@@ -4472,7 +4477,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq; struct pool_workqueue *pwq;
bool ret; bool ret;
rcu_read_lock_sched(); rcu_read_lock();
preempt_disable();
if (cpu == WORK_CPU_UNBOUND) if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id(); cpu = smp_processor_id();
...@@ -4483,7 +4489,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) ...@@ -4483,7 +4489,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works); ret = !list_empty(&pwq->delayed_works);
rcu_read_unlock_sched(); preempt_enable();
rcu_read_unlock();
return ret; return ret;
} }
...@@ -4509,15 +4516,15 @@ unsigned int work_busy(struct work_struct *work) ...@@ -4509,15 +4516,15 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work)) if (work_pending(work))
ret |= WORK_BUSY_PENDING; ret |= WORK_BUSY_PENDING;
local_irq_save(flags); rcu_read_lock();
pool = get_work_pool(work); pool = get_work_pool(work);
if (pool) { if (pool) {
spin_lock(&pool->lock); spin_lock_irqsave(&pool->lock, flags);
if (find_worker_executing_work(pool, work)) if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING; ret |= WORK_BUSY_RUNNING;
spin_unlock(&pool->lock); spin_unlock_irqrestore(&pool->lock, flags);
} }
local_irq_restore(flags); rcu_read_unlock();
return ret; return ret;
} }
...@@ -4701,7 +4708,7 @@ void show_workqueue_state(void) ...@@ -4701,7 +4708,7 @@ void show_workqueue_state(void)
unsigned long flags; unsigned long flags;
int pi; int pi;
rcu_read_lock_sched(); rcu_read_lock();
pr_info("Showing busy workqueues and worker pools:\n"); pr_info("Showing busy workqueues and worker pools:\n");
...@@ -4766,7 +4773,7 @@ void show_workqueue_state(void) ...@@ -4766,7 +4773,7 @@ void show_workqueue_state(void)
touch_nmi_watchdog(); touch_nmi_watchdog();
} }
rcu_read_unlock_sched(); rcu_read_unlock();
} }
/* used to show worker information through /proc/PID/{comm,stat,status} */ /* used to show worker information through /proc/PID/{comm,stat,status} */
...@@ -5153,16 +5160,16 @@ bool freeze_workqueues_busy(void) ...@@ -5153,16 +5160,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe * nr_active is monotonically decreasing. It's safe
* to peek without lock. * to peek without lock.
*/ */
rcu_read_lock_sched(); rcu_read_lock();
for_each_pwq(pwq, wq) { for_each_pwq(pwq, wq) {
WARN_ON_ONCE(pwq->nr_active < 0); WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) { if (pwq->nr_active) {
busy = true; busy = true;
rcu_read_unlock_sched(); rcu_read_unlock();
goto out_unlock; goto out_unlock;
} }
} }
rcu_read_unlock_sched(); rcu_read_unlock();
} }
out_unlock: out_unlock:
mutex_unlock(&wq_pool_mutex); mutex_unlock(&wq_pool_mutex);
...@@ -5357,7 +5364,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, ...@@ -5357,7 +5364,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
const char *delim = ""; const char *delim = "";
int node, written = 0; int node, written = 0;
rcu_read_lock_sched(); get_online_cpus();
rcu_read_lock();
for_each_node(node) { for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written, written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node, "%s%d:%d", delim, node,
...@@ -5365,7 +5373,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, ...@@ -5365,7 +5373,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
delim = " "; delim = " ";
} }
written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
rcu_read_unlock_sched(); rcu_read_unlock();
put_online_cpus();
return written; return written;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment