Commit 0adb8bc0 authored by Linus Torvalds

Merge branch 'for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue updates from Tejun Heo:
 "Nothing too interesting. Just two trivial patches"

* 'for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Mark up unlocked access to wq->first_flusher
  workqueue: Make workqueue_init*() return void
parents d8836005 00d5d15b
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -665,7 +665,7 @@ int workqueue_online_cpu(unsigned int cpu);
 int workqueue_offline_cpu(unsigned int cpu);
 #endif
 
-int __init workqueue_init_early(void);
-int __init workqueue_init(void);
+void __init workqueue_init_early(void);
+void __init workqueue_init(void);
 
 #endif
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2834,7 +2834,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	 * First flushers are responsible for cascading flushes and
 	 * handling overflow.  Non-first flushers can simply return.
 	 */
-	if (wq->first_flusher != &this_flusher)
+	if (READ_ONCE(wq->first_flusher) != &this_flusher)
 		return;
 
 	mutex_lock(&wq->mutex);
@@ -2843,7 +2843,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	if (wq->first_flusher != &this_flusher)
 		goto out_unlock;
 
-	wq->first_flusher = NULL;
+	WRITE_ONCE(wq->first_flusher, NULL);
 
 	WARN_ON_ONCE(!list_empty(&this_flusher.list));
 	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
@@ -5898,7 +5898,7 @@ static void __init wq_numa_init(void)
  * items.  Actual work item execution starts only after kthreads can be
  * created and scheduled right before early initcalls.
  */
-int __init workqueue_init_early(void)
+void __init workqueue_init_early(void)
 {
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
@@ -5965,8 +5965,6 @@ int __init workqueue_init_early(void)
 	       !system_unbound_wq || !system_freezable_wq ||
 	       !system_power_efficient_wq ||
 	       !system_freezable_power_efficient_wq);
-
-	return 0;
 }
 
 /**
@@ -5978,7 +5976,7 @@ int __init workqueue_init_early(void)
  * are no kworkers executing the work items yet.  Populate the worker pools
  * with the initial workers and enable future kworker creations.
  */
-int __init workqueue_init(void)
+void __init workqueue_init(void)
 {
 	struct workqueue_struct *wq;
 	struct worker_pool *pool;
@@ -6025,6 +6023,4 @@ int __init workqueue_init(void)
 
 	wq_online = true;
 	wq_watchdog_init();
-
-	return 0;
 }
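A note on the first patch: flush_workqueue() deliberately reads wq->first_flusher without holding wq->mutex as a fast path, then re-checks the field under the mutex before clearing it. The READ_ONCE()/WRITE_ONCE() annotations mark that lockless access as intentional, so the compiler cannot tear, refetch, or fuse the access, and race detectors such as KCSAN do not flag it. Below is a minimal userspace sketch of the same check-then-recheck pattern; the macro definitions mimic the kernel's volatile-access idiom, and the names (finish_flush, first_flusher) are illustrative stand-ins, not kernel code.

/*
 * Illustrative sketch (not kernel code): a locked writer with a
 * lockless reader fast path, annotated with READ_ONCE/WRITE_ONCE.
 */
#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct flusher { int color; };

static struct flusher *first_flusher;	/* shared between threads */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void finish_flush(struct flusher *me)
{
	/* Lockless fast path: non-first flushers bail out early. */
	if (READ_ONCE(first_flusher) != me)
		return;

	pthread_mutex_lock(&lock);

	/* Re-check under the lock; the answer may have changed. */
	if (first_flusher == me)
		WRITE_ONCE(first_flusher, NULL);

	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct flusher me = { .color = 0 };

	WRITE_ONCE(first_flusher, &me);
	finish_flush(&me);
	printf("first_flusher is %s\n", first_flusher ? "set" : "clear");
	return 0;
}

Note that the re-check inside the critical section stays a plain read, just as in the patch: once wq->mutex is held, no writer can race with it, so only the unlocked accesses need annotating.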
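On the second patch: both init functions unconditionally returned 0, and their callers in init/main.c ignored the value, so the int return type was dead weight. The general pattern is that an early-init routine which cannot meaningfully fail is better off returning void and asserting internally, as workqueue_init_early() already does with BUG_ON(). A hypothetical sketch of that shape (subsystem_init_early() is an invented name, not a kernel API; assert() stands in for BUG_ON()):

/*
 * Illustrative sketch: an init routine that cannot meaningfully fail.
 * Returning int invites callers to check a value that is always 0;
 * returning void and asserting internally is clearer.
 */
#include <assert.h>
#include <stdlib.h>

static void *table;	/* stand-in for the subsystem's static state */

static void subsystem_init_early(void)	/* was: int, always returning 0 */
{
	table = calloc(64, sizeof(long));
	assert(table != NULL);	/* kernel equivalent: BUG_ON(!table) */
}

int main(void)
{
	subsystem_init_early();	/* caller has no return value to ignore */
	return 0;
}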