Commit 112202d9 authored by Tejun Heo

workqueue: rename cpu_workqueue to pool_workqueue

workqueue has moved away from global_cwqs to worker_pools and with the
scheduled custom worker pools, workqueues will be associated with
pools which don't have anything to do with CPUs.  The workqueue code
went through a significant amount of change recently, and a mass
renaming isn't likely to hurt much additionally.  Let's replace 'cpu' with
'pool' so that it reflects the current design.

* s/struct cpu_workqueue_struct/struct pool_workqueue/
* s/cpu_wq/pool_wq/
* s/cwq/pwq/

This patch is purely cosmetic.
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 8d03ecfe
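
For reference, the structure being renamed looks roughly as follows. This is a simplified sketch assembled from the fields visible in the hunks below, not the full in-tree definition, which carries per-field locking annotations and is aligned to 1 << WORK_STRUCT_FLAG_BITS:

/*
 * Simplified sketch of the renamed structure, based on the fields that
 * appear in this diff; the real definition has locking annotations and
 * enforced alignment.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* the associated worker pool */
	struct workqueue_struct	*wq;		/* the owning workqueue */
	int			work_color;	/* current work color */
	int			flush_color;	/* flushing color, -1 if idle */
	int			nr_in_flight[WORK_NR_COLORS];
						/* nr of in-flight works, per color */
	int			nr_active;	/* nr of active works */
	int			max_active;	/* max active works */
	struct list_head	delayed_works;	/* works delayed by max_active */
};

Apart from the struct tag and the cwq/pwq naming, every member keeps its meaning; the patch below is purely mechanical.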
...@@ -27,7 +27,7 @@ void delayed_work_timer_fn(unsigned long __data); ...@@ -27,7 +27,7 @@ void delayed_work_timer_fn(unsigned long __data);
enum { enum {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */ WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK #ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
...@@ -40,7 +40,7 @@ enum { ...@@ -40,7 +40,7 @@ enum {
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK #ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
...@@ -60,14 +60,14 @@ enum { ...@@ -60,14 +60,14 @@ enum {
WORK_CPU_END = NR_CPUS + 1, WORK_CPU_END = NR_CPUS + 1,
/* /*
* Reserve 7 bits off of cwq pointer w/ debugobjects turned * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
* off. This makes cwqs aligned to 256 bytes and allows 15 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
* workqueue flush colors. * flush colors.
*/ */
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS, WORK_STRUCT_COLOR_BITS,
/* data contains off-queue information when !WORK_STRUCT_CWQ */ /* data contains off-queue information when !WORK_STRUCT_PWQ */
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS, WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS,
WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
......
...@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(workqueue_work, ...@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(workqueue_work,
/** /**
* workqueue_queue_work - called when a work gets queued * workqueue_queue_work - called when a work gets queued
* @req_cpu: the requested cpu * @req_cpu: the requested cpu
* @cwq: pointer to struct cpu_workqueue_struct * @pwq: pointer to struct pool_workqueue
* @work: pointer to struct work_struct * @work: pointer to struct work_struct
* *
* This event occurs when a work is queued immediately or once a * This event occurs when a work is queued immediately or once a
...@@ -36,10 +36,10 @@ DECLARE_EVENT_CLASS(workqueue_work, ...@@ -36,10 +36,10 @@ DECLARE_EVENT_CLASS(workqueue_work,
*/ */
TRACE_EVENT(workqueue_queue_work, TRACE_EVENT(workqueue_queue_work,
TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq, TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
struct work_struct *work), struct work_struct *work),
TP_ARGS(req_cpu, cwq, work), TP_ARGS(req_cpu, pwq, work),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( void *, work ) __field( void *, work )
...@@ -52,9 +52,9 @@ TRACE_EVENT(workqueue_queue_work, ...@@ -52,9 +52,9 @@ TRACE_EVENT(workqueue_queue_work,
TP_fast_assign( TP_fast_assign(
__entry->work = work; __entry->work = work;
__entry->function = work->func; __entry->function = work->func;
__entry->workqueue = cwq->wq; __entry->workqueue = pwq->wq;
__entry->req_cpu = req_cpu; __entry->req_cpu = req_cpu;
__entry->cpu = cwq->pool->cpu; __entry->cpu = pwq->pool->cpu;
), ),
TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
......
...@@ -154,11 +154,12 @@ struct worker_pool { ...@@ -154,11 +154,12 @@ struct worker_pool {
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
/* /*
* The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
* work_struct->data are used for flags and thus cwqs need to be * of work_struct->data are used for flags and the remaining high bits
* aligned at two's power of the number of flag bits. * point to the pwq; thus, pwqs need to be aligned at two's power of the
* number of flag bits.
*/ */
struct cpu_workqueue_struct { struct pool_workqueue {
struct worker_pool *pool; /* I: the associated pool */ struct worker_pool *pool; /* I: the associated pool */
struct workqueue_struct *wq; /* I: the owning workqueue */ struct workqueue_struct *wq; /* I: the owning workqueue */
int work_color; /* L: current color */ int work_color; /* L: current color */
...@@ -207,16 +208,16 @@ typedef unsigned long mayday_mask_t; ...@@ -207,16 +208,16 @@ typedef unsigned long mayday_mask_t;
struct workqueue_struct { struct workqueue_struct {
unsigned int flags; /* W: WQ_* flags */ unsigned int flags; /* W: WQ_* flags */
union { union {
struct cpu_workqueue_struct __percpu *pcpu; struct pool_workqueue __percpu *pcpu;
struct cpu_workqueue_struct *single; struct pool_workqueue *single;
unsigned long v; unsigned long v;
} cpu_wq; /* I: cwq's */ } pool_wq; /* I: pwq's */
struct list_head list; /* W: list of all workqueues */ struct list_head list; /* W: list of all workqueues */
struct mutex flush_mutex; /* protects wq flushing */ struct mutex flush_mutex; /* protects wq flushing */
int work_color; /* F: current work color */ int work_color; /* F: current work color */
int flush_color; /* F: current flush color */ int flush_color; /* F: current flush color */
atomic_t nr_cwqs_to_flush; /* flush in progress */ atomic_t nr_pwqs_to_flush; /* flush in progress */
struct wq_flusher *first_flusher; /* F: first flusher */ struct wq_flusher *first_flusher; /* F: first flusher */
struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_queue; /* F: flush waiters */
struct list_head flusher_overflow; /* F: flush overflow list */ struct list_head flusher_overflow; /* F: flush overflow list */
...@@ -225,7 +226,7 @@ struct workqueue_struct { ...@@ -225,7 +226,7 @@ struct workqueue_struct {
struct worker *rescuer; /* I: rescue worker */ struct worker *rescuer; /* I: rescue worker */
int nr_drainers; /* W: drain in progress */ int nr_drainers; /* W: drain in progress */
int saved_max_active; /* W: saved cwq max_active */ int saved_max_active; /* W: saved pwq max_active */
#ifdef CONFIG_LOCKDEP #ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map; struct lockdep_map lockdep_map;
#endif #endif
...@@ -268,7 +269,7 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, ...@@ -268,7 +269,7 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
return WORK_CPU_END; return WORK_CPU_END;
} }
static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask, static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
struct workqueue_struct *wq) struct workqueue_struct *wq)
{ {
return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
...@@ -284,7 +285,7 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask, ...@@ -284,7 +285,7 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
* *
* for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND * for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND
* for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND * for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND
* for_each_cwq_cpu() : possible CPUs for bound workqueues, * for_each_pwq_cpu() : possible CPUs for bound workqueues,
* WORK_CPU_UNBOUND for unbound workqueues * WORK_CPU_UNBOUND for unbound workqueues
*/ */
#define for_each_wq_cpu(cpu) \ #define for_each_wq_cpu(cpu) \
...@@ -297,10 +298,10 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask, ...@@ -297,10 +298,10 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
(cpu) < WORK_CPU_END; \ (cpu) < WORK_CPU_END; \
(cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3)) (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
#define for_each_cwq_cpu(cpu, wq) \ #define for_each_pwq_cpu(cpu, wq) \
for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq)); \ for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq)); \
(cpu) < WORK_CPU_END; \ (cpu) < WORK_CPU_END; \
(cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq))) (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
#ifdef CONFIG_DEBUG_OBJECTS_WORK #ifdef CONFIG_DEBUG_OBJECTS_WORK
...@@ -479,14 +480,14 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri) ...@@ -479,14 +480,14 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
return &pools[highpri]; return &pools[highpri];
} }
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, static struct pool_workqueue *get_pwq(unsigned int cpu,
struct workqueue_struct *wq) struct workqueue_struct *wq)
{ {
if (!(wq->flags & WQ_UNBOUND)) { if (!(wq->flags & WQ_UNBOUND)) {
if (likely(cpu < nr_cpu_ids)) if (likely(cpu < nr_cpu_ids))
return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
} else if (likely(cpu == WORK_CPU_UNBOUND)) } else if (likely(cpu == WORK_CPU_UNBOUND))
return wq->cpu_wq.single; return wq->pool_wq.single;
return NULL; return NULL;
} }
...@@ -507,18 +508,18 @@ static int work_next_color(int color) ...@@ -507,18 +508,18 @@ static int work_next_color(int color)
} }
/* /*
* While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
* contain the pointer to the queued cwq. Once execution starts, the flag * contain the pointer to the queued pwq. Once execution starts, the flag
* is cleared and the high bits contain OFFQ flags and pool ID. * is cleared and the high bits contain OFFQ flags and pool ID.
* *
* set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling() * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
* and clear_work_data() can be used to set the cwq, pool or clear * and clear_work_data() can be used to set the pwq, pool or clear
* work->data. These functions should only be called while the work is * work->data. These functions should only be called while the work is
* owned - ie. while the PENDING bit is set. * owned - ie. while the PENDING bit is set.
* *
* get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
* corresponding to a work. Pool is available once the work has been * corresponding to a work. Pool is available once the work has been
* queued anywhere after initialization until it is sync canceled. cwq is * queued anywhere after initialization until it is sync canceled. pwq is
* available only while the work item is queued. * available only while the work item is queued.
* *
* %WORK_OFFQ_CANCELING is used to mark a work item which is being * %WORK_OFFQ_CANCELING is used to mark a work item which is being
...@@ -533,12 +534,11 @@ static inline void set_work_data(struct work_struct *work, unsigned long data, ...@@ -533,12 +534,11 @@ static inline void set_work_data(struct work_struct *work, unsigned long data,
atomic_long_set(&work->data, data | flags | work_static(work)); atomic_long_set(&work->data, data | flags | work_static(work));
} }
static void set_work_cwq(struct work_struct *work, static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
struct cpu_workqueue_struct *cwq,
unsigned long extra_flags) unsigned long extra_flags)
{ {
set_work_data(work, (unsigned long)cwq, set_work_data(work, (unsigned long)pwq,
WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
} }
static void set_work_pool_and_keep_pending(struct work_struct *work, static void set_work_pool_and_keep_pending(struct work_struct *work,
...@@ -567,11 +567,11 @@ static void clear_work_data(struct work_struct *work) ...@@ -567,11 +567,11 @@ static void clear_work_data(struct work_struct *work)
set_work_data(work, WORK_STRUCT_NO_POOL, 0); set_work_data(work, WORK_STRUCT_NO_POOL, 0);
} }
static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{ {
unsigned long data = atomic_long_read(&work->data); unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_CWQ) if (data & WORK_STRUCT_PWQ)
return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
else else
return NULL; return NULL;
...@@ -589,8 +589,8 @@ static struct worker_pool *get_work_pool(struct work_struct *work) ...@@ -589,8 +589,8 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
struct worker_pool *pool; struct worker_pool *pool;
int pool_id; int pool_id;
if (data & WORK_STRUCT_CWQ) if (data & WORK_STRUCT_PWQ)
return ((struct cpu_workqueue_struct *) return ((struct pool_workqueue *)
(data & WORK_STRUCT_WQ_DATA_MASK))->pool; (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
pool_id = data >> WORK_OFFQ_POOL_SHIFT; pool_id = data >> WORK_OFFQ_POOL_SHIFT;
...@@ -613,8 +613,8 @@ static int get_work_pool_id(struct work_struct *work) ...@@ -613,8 +613,8 @@ static int get_work_pool_id(struct work_struct *work)
{ {
unsigned long data = atomic_long_read(&work->data); unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_CWQ) if (data & WORK_STRUCT_PWQ)
return ((struct cpu_workqueue_struct *) return ((struct pool_workqueue *)
(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
return data >> WORK_OFFQ_POOL_SHIFT; return data >> WORK_OFFQ_POOL_SHIFT;
...@@ -632,7 +632,7 @@ static bool work_is_canceling(struct work_struct *work) ...@@ -632,7 +632,7 @@ static bool work_is_canceling(struct work_struct *work)
{ {
unsigned long data = atomic_long_read(&work->data); unsigned long data = atomic_long_read(&work->data);
return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING); return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
} }
/* /*
...@@ -961,67 +961,67 @@ static void move_linked_works(struct work_struct *work, struct list_head *head, ...@@ -961,67 +961,67 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
*nextp = n; *nextp = n;
} }
static void cwq_activate_delayed_work(struct work_struct *work) static void pwq_activate_delayed_work(struct work_struct *work)
{ {
struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct pool_workqueue *pwq = get_work_pwq(work);
trace_workqueue_activate_work(work); trace_workqueue_activate_work(work);
move_linked_works(work, &cwq->pool->worklist, NULL); move_linked_works(work, &pwq->pool->worklist, NULL);
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
cwq->nr_active++; pwq->nr_active++;
} }
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{ {
struct work_struct *work = list_first_entry(&cwq->delayed_works, struct work_struct *work = list_first_entry(&pwq->delayed_works,
struct work_struct, entry); struct work_struct, entry);
cwq_activate_delayed_work(work); pwq_activate_delayed_work(work);
} }
/** /**
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
* @cwq: cwq of interest * @pwq: pwq of interest
* @color: color of work which left the queue * @color: color of work which left the queue
* *
* A work either has completed or is removed from pending queue, * A work either has completed or is removed from pending queue,
* decrement nr_in_flight of its cwq and handle workqueue flushing. * decrement nr_in_flight of its pwq and handle workqueue flushing.
* *
* CONTEXT: * CONTEXT:
* spin_lock_irq(pool->lock). * spin_lock_irq(pool->lock).
*/ */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{ {
/* ignore uncolored works */ /* ignore uncolored works */
if (color == WORK_NO_COLOR) if (color == WORK_NO_COLOR)
return; return;
cwq->nr_in_flight[color]--; pwq->nr_in_flight[color]--;
cwq->nr_active--; pwq->nr_active--;
if (!list_empty(&cwq->delayed_works)) { if (!list_empty(&pwq->delayed_works)) {
/* one down, submit a delayed one */ /* one down, submit a delayed one */
if (cwq->nr_active < cwq->max_active) if (pwq->nr_active < pwq->max_active)
cwq_activate_first_delayed(cwq); pwq_activate_first_delayed(pwq);
} }
/* is flush in progress and are we at the flushing tip? */ /* is flush in progress and are we at the flushing tip? */
if (likely(cwq->flush_color != color)) if (likely(pwq->flush_color != color))
return; return;
/* are there still in-flight works? */ /* are there still in-flight works? */
if (cwq->nr_in_flight[color]) if (pwq->nr_in_flight[color])
return; return;
/* this cwq is done, clear flush_color */ /* this pwq is done, clear flush_color */
cwq->flush_color = -1; pwq->flush_color = -1;
/* /*
* If this was the last cwq, wake up the first flusher. It * If this was the last pwq, wake up the first flusher. It
* will handle the rest. * will handle the rest.
*/ */
if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
complete(&cwq->wq->first_flusher->done); complete(&pwq->wq->first_flusher->done);
} }
/** /**
...@@ -1053,7 +1053,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, ...@@ -1053,7 +1053,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
unsigned long *flags) unsigned long *flags)
{ {
struct worker_pool *pool; struct worker_pool *pool;
struct cpu_workqueue_struct *cwq; struct pool_workqueue *pwq;
local_irq_save(*flags); local_irq_save(*flags);
...@@ -1084,31 +1084,31 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, ...@@ -1084,31 +1084,31 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
spin_lock(&pool->lock); spin_lock(&pool->lock);
/* /*
* work->data is guaranteed to point to cwq only while the work * work->data is guaranteed to point to pwq only while the work
* item is queued on cwq->wq, and both updating work->data to point * item is queued on pwq->wq, and both updating work->data to point
* to cwq on queueing and to pool on dequeueing are done under * to pwq on queueing and to pool on dequeueing are done under
* cwq->pool->lock. This in turn guarantees that, if work->data * pwq->pool->lock. This in turn guarantees that, if work->data
* points to cwq which is associated with a locked pool, the work * points to pwq which is associated with a locked pool, the work
* item is currently queued on that pool. * item is currently queued on that pool.
*/ */
cwq = get_work_cwq(work); pwq = get_work_pwq(work);
if (cwq && cwq->pool == pool) { if (pwq && pwq->pool == pool) {
debug_work_deactivate(work); debug_work_deactivate(work);
/* /*
* A delayed work item cannot be grabbed directly because * A delayed work item cannot be grabbed directly because
* it might have linked NO_COLOR work items which, if left * it might have linked NO_COLOR work items which, if left
* on the delayed_list, will confuse cwq->nr_active * on the delayed_list, will confuse pwq->nr_active
* management later on and cause stall. Make sure the work * management later on and cause stall. Make sure the work
* item is activated before grabbing. * item is activated before grabbing.
*/ */
if (*work_data_bits(work) & WORK_STRUCT_DELAYED) if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
cwq_activate_delayed_work(work); pwq_activate_delayed_work(work);
list_del_init(&work->entry); list_del_init(&work->entry);
cwq_dec_nr_in_flight(get_work_cwq(work), get_work_color(work)); pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
/* work->data points to cwq iff queued, point to pool */ /* work->data points to pwq iff queued, point to pool */
set_work_pool_and_keep_pending(work, pool->id); set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock); spin_unlock(&pool->lock);
...@@ -1125,25 +1125,24 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, ...@@ -1125,25 +1125,24 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
/** /**
* insert_work - insert a work into a pool * insert_work - insert a work into a pool
* @cwq: cwq @work belongs to * @pwq: pwq @work belongs to
* @work: work to insert * @work: work to insert
* @head: insertion point * @head: insertion point
* @extra_flags: extra WORK_STRUCT_* flags to set * @extra_flags: extra WORK_STRUCT_* flags to set
* *
* Insert @work which belongs to @cwq after @head. @extra_flags is or'd to * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
* work_struct flags. * work_struct flags.
* *
* CONTEXT: * CONTEXT:
* spin_lock_irq(pool->lock). * spin_lock_irq(pool->lock).
*/ */
static void insert_work(struct cpu_workqueue_struct *cwq, static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
struct work_struct *work, struct list_head *head, struct list_head *head, unsigned int extra_flags)
unsigned int extra_flags)
{ {
struct worker_pool *pool = cwq->pool; struct worker_pool *pool = pwq->pool;
/* we own @work, set data and link */ /* we own @work, set data and link */
set_work_cwq(work, cwq, extra_flags); set_work_pwq(work, pwq, extra_flags);
list_add_tail(&work->entry, head); list_add_tail(&work->entry, head);
/* /*
...@@ -1170,13 +1169,13 @@ static bool is_chained_work(struct workqueue_struct *wq) ...@@ -1170,13 +1169,13 @@ static bool is_chained_work(struct workqueue_struct *wq)
* Return %true iff I'm a worker execuing a work item on @wq. If * Return %true iff I'm a worker execuing a work item on @wq. If
* I'm @worker, it's safe to dereference it without locking. * I'm @worker, it's safe to dereference it without locking.
*/ */
return worker && worker->current_cwq->wq == wq; return worker && worker->current_pwq->wq == wq;
} }
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
struct work_struct *work) struct work_struct *work)
{ {
struct cpu_workqueue_struct *cwq; struct pool_workqueue *pwq;
struct list_head *worklist; struct list_head *worklist;
unsigned int work_flags; unsigned int work_flags;
unsigned int req_cpu = cpu; unsigned int req_cpu = cpu;
...@@ -1196,7 +1195,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, ...@@ -1196,7 +1195,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
WARN_ON_ONCE(!is_chained_work(wq))) WARN_ON_ONCE(!is_chained_work(wq)))
return; return;
/* determine the cwq to use */ /* determine the pwq to use */
if (!(wq->flags & WQ_UNBOUND)) { if (!(wq->flags & WQ_UNBOUND)) {
struct worker_pool *last_pool; struct worker_pool *last_pool;
...@@ -1209,54 +1208,54 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, ...@@ -1209,54 +1208,54 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
* work needs to be queued on that cpu to guarantee * work needs to be queued on that cpu to guarantee
* non-reentrancy. * non-reentrancy.
*/ */
cwq = get_cwq(cpu, wq); pwq = get_pwq(cpu, wq);
last_pool = get_work_pool(work); last_pool = get_work_pool(work);
if (last_pool && last_pool != cwq->pool) { if (last_pool && last_pool != pwq->pool) {
struct worker *worker; struct worker *worker;
spin_lock(&last_pool->lock); spin_lock(&last_pool->lock);
worker = find_worker_executing_work(last_pool, work); worker = find_worker_executing_work(last_pool, work);
if (worker && worker->current_cwq->wq == wq) { if (worker && worker->current_pwq->wq == wq) {
cwq = get_cwq(last_pool->cpu, wq); pwq = get_pwq(last_pool->cpu, wq);
} else { } else {
/* meh... not running there, queue here */ /* meh... not running there, queue here */
spin_unlock(&last_pool->lock); spin_unlock(&last_pool->lock);
spin_lock(&cwq->pool->lock); spin_lock(&pwq->pool->lock);
} }
} else { } else {
spin_lock(&cwq->pool->lock); spin_lock(&pwq->pool->lock);
} }
} else { } else {
cwq = get_cwq(WORK_CPU_UNBOUND, wq); pwq = get_pwq(WORK_CPU_UNBOUND, wq);
spin_lock(&cwq->pool->lock); spin_lock(&pwq->pool->lock);
} }
/* cwq determined, queue */ /* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, cwq, work); trace_workqueue_queue_work(req_cpu, pwq, work);
if (WARN_ON(!list_empty(&work->entry))) { if (WARN_ON(!list_empty(&work->entry))) {
spin_unlock(&cwq->pool->lock); spin_unlock(&pwq->pool->lock);
return; return;
} }
cwq->nr_in_flight[cwq->work_color]++; pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(cwq->work_color); work_flags = work_color_to_flags(pwq->work_color);
if (likely(cwq->nr_active < cwq->max_active)) { if (likely(pwq->nr_active < pwq->max_active)) {
trace_workqueue_activate_work(work); trace_workqueue_activate_work(work);
cwq->nr_active++; pwq->nr_active++;
worklist = &cwq->pool->worklist; worklist = &pwq->pool->worklist;
} else { } else {
work_flags |= WORK_STRUCT_DELAYED; work_flags |= WORK_STRUCT_DELAYED;
worklist = &cwq->delayed_works; worklist = &pwq->delayed_works;
} }
insert_work(cwq, work, worklist, work_flags); insert_work(pwq, work, worklist, work_flags);
spin_unlock(&cwq->pool->lock); spin_unlock(&pwq->pool->lock);
} }
/** /**
...@@ -1661,14 +1660,14 @@ static void rebind_workers(struct worker_pool *pool) ...@@ -1661,14 +1660,14 @@ static void rebind_workers(struct worker_pool *pool)
/* /*
* wq doesn't really matter but let's keep @worker->pool * wq doesn't really matter but let's keep @worker->pool
* and @cwq->pool consistent for sanity. * and @pwq->pool consistent for sanity.
*/ */
if (std_worker_pool_pri(worker->pool)) if (std_worker_pool_pri(worker->pool))
wq = system_highpri_wq; wq = system_highpri_wq;
else else
wq = system_wq; wq = system_wq;
insert_work(get_cwq(pool->cpu, wq), rebind_work, insert_work(get_pwq(pool->cpu, wq), rebind_work,
worker->scheduled.next, worker->scheduled.next,
work_color_to_flags(WORK_NO_COLOR)); work_color_to_flags(WORK_NO_COLOR));
} }
...@@ -1845,15 +1844,15 @@ static void idle_worker_timeout(unsigned long __pool) ...@@ -1845,15 +1844,15 @@ static void idle_worker_timeout(unsigned long __pool)
static bool send_mayday(struct work_struct *work) static bool send_mayday(struct work_struct *work)
{ {
struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct pool_workqueue *pwq = get_work_pwq(work);
struct workqueue_struct *wq = cwq->wq; struct workqueue_struct *wq = pwq->wq;
unsigned int cpu; unsigned int cpu;
if (!(wq->flags & WQ_RESCUER)) if (!(wq->flags & WQ_RESCUER))
return false; return false;
/* mayday mayday mayday */ /* mayday mayday mayday */
cpu = cwq->pool->cpu; cpu = pwq->pool->cpu;
/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
if (cpu == WORK_CPU_UNBOUND) if (cpu == WORK_CPU_UNBOUND)
cpu = 0; cpu = 0;
...@@ -2082,9 +2081,9 @@ static void process_one_work(struct worker *worker, struct work_struct *work) ...@@ -2082,9 +2081,9 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock) __releases(&pool->lock)
__acquires(&pool->lock) __acquires(&pool->lock)
{ {
struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct pool_workqueue *pwq = get_work_pwq(work);
struct worker_pool *pool = worker->pool; struct worker_pool *pool = worker->pool;
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
int work_color; int work_color;
struct worker *collision; struct worker *collision;
#ifdef CONFIG_LOCKDEP #ifdef CONFIG_LOCKDEP
...@@ -2125,7 +2124,7 @@ __acquires(&pool->lock) ...@@ -2125,7 +2124,7 @@ __acquires(&pool->lock)
hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
worker->current_work = work; worker->current_work = work;
worker->current_func = work->func; worker->current_func = work->func;
worker->current_cwq = cwq; worker->current_pwq = pwq;
work_color = get_work_color(work); work_color = get_work_color(work);
list_del_init(&work->entry); list_del_init(&work->entry);
...@@ -2154,7 +2153,7 @@ __acquires(&pool->lock) ...@@ -2154,7 +2153,7 @@ __acquires(&pool->lock)
spin_unlock_irq(&pool->lock); spin_unlock_irq(&pool->lock);
lock_map_acquire_read(&cwq->wq->lockdep_map); lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map); lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work); trace_workqueue_execute_start(work);
worker->current_func(work); worker->current_func(work);
...@@ -2164,7 +2163,7 @@ __acquires(&pool->lock) ...@@ -2164,7 +2163,7 @@ __acquires(&pool->lock)
*/ */
trace_workqueue_execute_end(work); trace_workqueue_execute_end(work);
lock_map_release(&lockdep_map); lock_map_release(&lockdep_map);
lock_map_release(&cwq->wq->lockdep_map); lock_map_release(&pwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
...@@ -2185,8 +2184,8 @@ __acquires(&pool->lock) ...@@ -2185,8 +2184,8 @@ __acquires(&pool->lock)
hash_del(&worker->hentry); hash_del(&worker->hentry);
worker->current_work = NULL; worker->current_work = NULL;
worker->current_func = NULL; worker->current_func = NULL;
worker->current_cwq = NULL; worker->current_pwq = NULL;
cwq_dec_nr_in_flight(cwq, work_color); pwq_dec_nr_in_flight(pwq, work_color);
} }
/** /**
...@@ -2353,8 +2352,8 @@ static int rescuer_thread(void *__rescuer) ...@@ -2353,8 +2352,8 @@ static int rescuer_thread(void *__rescuer)
*/ */
for_each_mayday_cpu(cpu, wq->mayday_mask) { for_each_mayday_cpu(cpu, wq->mayday_mask) {
unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); struct pool_workqueue *pwq = get_pwq(tcpu, wq);
struct worker_pool *pool = cwq->pool; struct worker_pool *pool = pwq->pool;
struct work_struct *work, *n; struct work_struct *work, *n;
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
...@@ -2370,7 +2369,7 @@ static int rescuer_thread(void *__rescuer) ...@@ -2370,7 +2369,7 @@ static int rescuer_thread(void *__rescuer)
*/ */
BUG_ON(!list_empty(&rescuer->scheduled)); BUG_ON(!list_empty(&rescuer->scheduled));
list_for_each_entry_safe(work, n, &pool->worklist, entry) list_for_each_entry_safe(work, n, &pool->worklist, entry)
if (get_work_cwq(work) == cwq) if (get_work_pwq(work) == pwq)
move_linked_works(work, scheduled, &n); move_linked_works(work, scheduled, &n);
process_scheduled_works(rescuer); process_scheduled_works(rescuer);
...@@ -2405,7 +2404,7 @@ static void wq_barrier_func(struct work_struct *work) ...@@ -2405,7 +2404,7 @@ static void wq_barrier_func(struct work_struct *work)
/** /**
* insert_wq_barrier - insert a barrier work * insert_wq_barrier - insert a barrier work
* @cwq: cwq to insert barrier into * @pwq: pwq to insert barrier into
* @barr: wq_barrier to insert * @barr: wq_barrier to insert
* @target: target work to attach @barr to * @target: target work to attach @barr to
* @worker: worker currently executing @target, NULL if @target is not executing * @worker: worker currently executing @target, NULL if @target is not executing
...@@ -2422,12 +2421,12 @@ static void wq_barrier_func(struct work_struct *work) ...@@ -2422,12 +2421,12 @@ static void wq_barrier_func(struct work_struct *work)
* after a work with LINKED flag set. * after a work with LINKED flag set.
* *
* Note that when @worker is non-NULL, @target may be modified * Note that when @worker is non-NULL, @target may be modified
* underneath us, so we can't reliably determine cwq from @target. * underneath us, so we can't reliably determine pwq from @target.
* *
* CONTEXT: * CONTEXT:
* spin_lock_irq(pool->lock). * spin_lock_irq(pool->lock).
*/ */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr, struct wq_barrier *barr,
struct work_struct *target, struct worker *worker) struct work_struct *target, struct worker *worker)
{ {
...@@ -2460,23 +2459,23 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, ...@@ -2460,23 +2459,23 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
} }
debug_work_activate(&barr->work); debug_work_activate(&barr->work);
insert_work(cwq, &barr->work, head, insert_work(pwq, &barr->work, head,
work_color_to_flags(WORK_NO_COLOR) | linked); work_color_to_flags(WORK_NO_COLOR) | linked);
} }
/** /**
* flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
* @wq: workqueue being flushed * @wq: workqueue being flushed
* @flush_color: new flush color, < 0 for no-op * @flush_color: new flush color, < 0 for no-op
* @work_color: new work color, < 0 for no-op * @work_color: new work color, < 0 for no-op
* *
* Prepare cwqs for workqueue flushing. * Prepare pwqs for workqueue flushing.
* *
* If @flush_color is non-negative, flush_color on all cwqs should be * If @flush_color is non-negative, flush_color on all pwqs should be
* -1. If no cwq has in-flight commands at the specified color, all * -1. If no pwq has in-flight commands at the specified color, all
* cwq->flush_color's stay at -1 and %false is returned. If any cwq * pwq->flush_color's stay at -1 and %false is returned. If any pwq
* has in flight commands, its cwq->flush_color is set to * has in flight commands, its pwq->flush_color is set to
* @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
* wakeup logic is armed and %true is returned. * wakeup logic is armed and %true is returned.
* *
* The caller should have initialized @wq->first_flusher prior to * The caller should have initialized @wq->first_flusher prior to
...@@ -2484,7 +2483,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, ...@@ -2484,7 +2483,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
* @flush_color is negative, no flush color update is done and %false * @flush_color is negative, no flush color update is done and %false
* is returned. * is returned.
* *
* If @work_color is non-negative, all cwqs should have the same * If @work_color is non-negative, all pwqs should have the same
* work_color which is previous to @work_color and all will be * work_color which is previous to @work_color and all will be
* advanced to @work_color. * advanced to @work_color.
* *
...@@ -2495,42 +2494,42 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, ...@@ -2495,42 +2494,42 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
* %true if @flush_color >= 0 and there's something to flush. %false * %true if @flush_color >= 0 and there's something to flush. %false
* otherwise. * otherwise.
*/ */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
int flush_color, int work_color) int flush_color, int work_color)
{ {
bool wait = false; bool wait = false;
unsigned int cpu; unsigned int cpu;
if (flush_color >= 0) { if (flush_color >= 0) {
BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
atomic_set(&wq->nr_cwqs_to_flush, 1); atomic_set(&wq->nr_pwqs_to_flush, 1);
} }
for_each_cwq_cpu(cpu, wq) { for_each_pwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
struct worker_pool *pool = cwq->pool; struct worker_pool *pool = pwq->pool;
spin_lock_irq(&pool->lock); spin_lock_irq(&pool->lock);
if (flush_color >= 0) { if (flush_color >= 0) {
BUG_ON(cwq->flush_color != -1); BUG_ON(pwq->flush_color != -1);
if (cwq->nr_in_flight[flush_color]) { if (pwq->nr_in_flight[flush_color]) {
cwq->flush_color = flush_color; pwq->flush_color = flush_color;
atomic_inc(&wq->nr_cwqs_to_flush); atomic_inc(&wq->nr_pwqs_to_flush);
wait = true; wait = true;
} }
} }
if (work_color >= 0) { if (work_color >= 0) {
BUG_ON(work_color != work_next_color(cwq->work_color)); BUG_ON(work_color != work_next_color(pwq->work_color));
cwq->work_color = work_color; pwq->work_color = work_color;
} }
spin_unlock_irq(&pool->lock); spin_unlock_irq(&pool->lock);
} }
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
complete(&wq->first_flusher->done); complete(&wq->first_flusher->done);
return wait; return wait;
...@@ -2581,7 +2580,7 @@ void flush_workqueue(struct workqueue_struct *wq) ...@@ -2581,7 +2580,7 @@ void flush_workqueue(struct workqueue_struct *wq)
wq->first_flusher = &this_flusher; wq->first_flusher = &this_flusher;
if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
wq->work_color)) { wq->work_color)) {
/* nothing to flush, done */ /* nothing to flush, done */
wq->flush_color = next_color; wq->flush_color = next_color;
...@@ -2592,7 +2591,7 @@ void flush_workqueue(struct workqueue_struct *wq) ...@@ -2592,7 +2591,7 @@ void flush_workqueue(struct workqueue_struct *wq)
/* wait in queue */ /* wait in queue */
BUG_ON(wq->flush_color == this_flusher.flush_color); BUG_ON(wq->flush_color == this_flusher.flush_color);
list_add_tail(&this_flusher.list, &wq->flusher_queue); list_add_tail(&this_flusher.list, &wq->flusher_queue);
flush_workqueue_prep_cwqs(wq, -1, wq->work_color); flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
} }
} else { } else {
/* /*
...@@ -2659,7 +2658,7 @@ void flush_workqueue(struct workqueue_struct *wq) ...@@ -2659,7 +2658,7 @@ void flush_workqueue(struct workqueue_struct *wq)
list_splice_tail_init(&wq->flusher_overflow, list_splice_tail_init(&wq->flusher_overflow,
&wq->flusher_queue); &wq->flusher_queue);
flush_workqueue_prep_cwqs(wq, -1, wq->work_color); flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
} }
if (list_empty(&wq->flusher_queue)) { if (list_empty(&wq->flusher_queue)) {
...@@ -2669,7 +2668,7 @@ void flush_workqueue(struct workqueue_struct *wq) ...@@ -2669,7 +2668,7 @@ void flush_workqueue(struct workqueue_struct *wq)
/* /*
* Need to flush more colors. Make the next flusher * Need to flush more colors. Make the next flusher
* the new first flusher and arm cwqs. * the new first flusher and arm pwqs.
*/ */
BUG_ON(wq->flush_color == wq->work_color); BUG_ON(wq->flush_color == wq->work_color);
BUG_ON(wq->flush_color != next->flush_color); BUG_ON(wq->flush_color != next->flush_color);
...@@ -2677,7 +2676,7 @@ void flush_workqueue(struct workqueue_struct *wq) ...@@ -2677,7 +2676,7 @@ void flush_workqueue(struct workqueue_struct *wq)
list_del_init(&next->list); list_del_init(&next->list);
wq->first_flusher = next; wq->first_flusher = next;
if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
break; break;
/* /*
...@@ -2720,13 +2719,13 @@ void drain_workqueue(struct workqueue_struct *wq) ...@@ -2720,13 +2719,13 @@ void drain_workqueue(struct workqueue_struct *wq)
reflush: reflush:
flush_workqueue(wq); flush_workqueue(wq);
for_each_cwq_cpu(cpu, wq) { for_each_pwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
bool drained; bool drained;
spin_lock_irq(&cwq->pool->lock); spin_lock_irq(&pwq->pool->lock);
drained = !cwq->nr_active && list_empty(&cwq->delayed_works); drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
spin_unlock_irq(&cwq->pool->lock); spin_unlock_irq(&pwq->pool->lock);
if (drained) if (drained)
continue; continue;
...@@ -2749,7 +2748,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) ...@@ -2749,7 +2748,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
{ {
struct worker *worker = NULL; struct worker *worker = NULL;
struct worker_pool *pool; struct worker_pool *pool;
struct cpu_workqueue_struct *cwq; struct pool_workqueue *pwq;
might_sleep(); might_sleep();
pool = get_work_pool(work); pool = get_work_pool(work);
...@@ -2758,18 +2757,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) ...@@ -2758,18 +2757,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
spin_lock_irq(&pool->lock); spin_lock_irq(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */ /* see the comment in try_to_grab_pending() with the same code */
cwq = get_work_cwq(work); pwq = get_work_pwq(work);
if (cwq) { if (pwq) {
if (unlikely(cwq->pool != pool)) if (unlikely(pwq->pool != pool))
goto already_gone; goto already_gone;
} else { } else {
worker = find_worker_executing_work(pool, work); worker = find_worker_executing_work(pool, work);
if (!worker) if (!worker)
goto already_gone; goto already_gone;
cwq = worker->current_cwq; pwq = worker->current_pwq;
} }
insert_wq_barrier(cwq, barr, work, worker); insert_wq_barrier(pwq, barr, work, worker);
spin_unlock_irq(&pool->lock); spin_unlock_irq(&pool->lock);
/* /*
...@@ -2778,11 +2777,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) ...@@ -2778,11 +2777,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
* flusher is not running on the same workqueue by verifying write * flusher is not running on the same workqueue by verifying write
* access. * access.
*/ */
if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER)
lock_map_acquire(&cwq->wq->lockdep_map); lock_map_acquire(&pwq->wq->lockdep_map);
else else
lock_map_acquire_read(&cwq->wq->lockdep_map); lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&cwq->wq->lockdep_map); lock_map_release(&pwq->wq->lockdep_map);
return true; return true;
already_gone: already_gone:
...@@ -3092,46 +3091,46 @@ int keventd_up(void) ...@@ -3092,46 +3091,46 @@ int keventd_up(void)
return system_wq != NULL; return system_wq != NULL;
} }
static int alloc_cwqs(struct workqueue_struct *wq) static int alloc_pwqs(struct workqueue_struct *wq)
{ {
/* /*
* cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
* Make sure that the alignment isn't lower than that of * Make sure that the alignment isn't lower than that of
* unsigned long long. * unsigned long long.
*/ */
const size_t size = sizeof(struct cpu_workqueue_struct); const size_t size = sizeof(struct pool_workqueue);
const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
__alignof__(unsigned long long)); __alignof__(unsigned long long));
if (!(wq->flags & WQ_UNBOUND)) if (!(wq->flags & WQ_UNBOUND))
wq->cpu_wq.pcpu = __alloc_percpu(size, align); wq->pool_wq.pcpu = __alloc_percpu(size, align);
else { else {
void *ptr; void *ptr;
/* /*
* Allocate enough room to align cwq and put an extra * Allocate enough room to align pwq and put an extra
* pointer at the end pointing back to the originally * pointer at the end pointing back to the originally
* allocated pointer which will be used for free. * allocated pointer which will be used for free.
*/ */
ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
if (ptr) { if (ptr) {
wq->cpu_wq.single = PTR_ALIGN(ptr, align); wq->pool_wq.single = PTR_ALIGN(ptr, align);
*(void **)(wq->cpu_wq.single + 1) = ptr; *(void **)(wq->pool_wq.single + 1) = ptr;
} }
} }
/* just in case, make sure it's actually aligned */ /* just in case, make sure it's actually aligned */
BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
return wq->cpu_wq.v ? 0 : -ENOMEM; return wq->pool_wq.v ? 0 : -ENOMEM;
} }
static void free_cwqs(struct workqueue_struct *wq) static void free_pwqs(struct workqueue_struct *wq)
{ {
if (!(wq->flags & WQ_UNBOUND)) if (!(wq->flags & WQ_UNBOUND))
free_percpu(wq->cpu_wq.pcpu); free_percpu(wq->pool_wq.pcpu);
else if (wq->cpu_wq.single) { else if (wq->pool_wq.single) {
/* the pointer to free is stored right after the cwq */ /* the pointer to free is stored right after the pwq */
kfree(*(void **)(wq->cpu_wq.single + 1)); kfree(*(void **)(wq->pool_wq.single + 1));
} }
} }
...@@ -3185,25 +3184,25 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, ...@@ -3185,25 +3184,25 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
wq->flags = flags; wq->flags = flags;
wq->saved_max_active = max_active; wq->saved_max_active = max_active;
mutex_init(&wq->flush_mutex); mutex_init(&wq->flush_mutex);
atomic_set(&wq->nr_cwqs_to_flush, 0); atomic_set(&wq->nr_pwqs_to_flush, 0);
INIT_LIST_HEAD(&wq->flusher_queue); INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow); INIT_LIST_HEAD(&wq->flusher_overflow);
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
INIT_LIST_HEAD(&wq->list); INIT_LIST_HEAD(&wq->list);
if (alloc_cwqs(wq) < 0) if (alloc_pwqs(wq) < 0)
goto err; goto err;
for_each_cwq_cpu(cpu, wq) { for_each_pwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
cwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI); pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
cwq->wq = wq; pwq->wq = wq;
cwq->flush_color = -1; pwq->flush_color = -1;
cwq->max_active = max_active; pwq->max_active = max_active;
INIT_LIST_HEAD(&cwq->delayed_works); INIT_LIST_HEAD(&pwq->delayed_works);
} }
if (flags & WQ_RESCUER) { if (flags & WQ_RESCUER) {
...@@ -3234,8 +3233,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, ...@@ -3234,8 +3233,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
spin_lock(&workqueue_lock); spin_lock(&workqueue_lock);
if (workqueue_freezing && wq->flags & WQ_FREEZABLE) if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
for_each_cwq_cpu(cpu, wq) for_each_pwq_cpu(cpu, wq)
get_cwq(cpu, wq)->max_active = 0; get_pwq(cpu, wq)->max_active = 0;
list_add(&wq->list, &workqueues); list_add(&wq->list, &workqueues);
...@@ -3244,7 +3243,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, ...@@ -3244,7 +3243,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
return wq; return wq;
err: err:
if (wq) { if (wq) {
free_cwqs(wq); free_pwqs(wq);
free_mayday_mask(wq->mayday_mask); free_mayday_mask(wq->mayday_mask);
kfree(wq->rescuer); kfree(wq->rescuer);
kfree(wq); kfree(wq);
...@@ -3275,14 +3274,14 @@ void destroy_workqueue(struct workqueue_struct *wq) ...@@ -3275,14 +3274,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
spin_unlock(&workqueue_lock); spin_unlock(&workqueue_lock);
/* sanity check */ /* sanity check */
for_each_cwq_cpu(cpu, wq) { for_each_pwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
int i; int i;
for (i = 0; i < WORK_NR_COLORS; i++) for (i = 0; i < WORK_NR_COLORS; i++)
BUG_ON(cwq->nr_in_flight[i]); BUG_ON(pwq->nr_in_flight[i]);
BUG_ON(cwq->nr_active); BUG_ON(pwq->nr_active);
BUG_ON(!list_empty(&cwq->delayed_works)); BUG_ON(!list_empty(&pwq->delayed_works));
} }
if (wq->flags & WQ_RESCUER) { if (wq->flags & WQ_RESCUER) {
...@@ -3291,29 +3290,29 @@ void destroy_workqueue(struct workqueue_struct *wq) ...@@ -3291,29 +3290,29 @@ void destroy_workqueue(struct workqueue_struct *wq)
kfree(wq->rescuer); kfree(wq->rescuer);
} }
free_cwqs(wq); free_pwqs(wq);
kfree(wq); kfree(wq);
} }
EXPORT_SYMBOL_GPL(destroy_workqueue); EXPORT_SYMBOL_GPL(destroy_workqueue);
/** /**
* cwq_set_max_active - adjust max_active of a cwq * pwq_set_max_active - adjust max_active of a pwq
* @cwq: target cpu_workqueue_struct * @pwq: target pool_workqueue
* @max_active: new max_active value. * @max_active: new max_active value.
* *
* Set @cwq->max_active to @max_active and activate delayed works if * Set @pwq->max_active to @max_active and activate delayed works if
* increased. * increased.
* *
* CONTEXT: * CONTEXT:
* spin_lock_irq(pool->lock). * spin_lock_irq(pool->lock).
*/ */
static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active) static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
{ {
cwq->max_active = max_active; pwq->max_active = max_active;
while (!list_empty(&cwq->delayed_works) && while (!list_empty(&pwq->delayed_works) &&
cwq->nr_active < cwq->max_active) pwq->nr_active < pwq->max_active)
cwq_activate_first_delayed(cwq); pwq_activate_first_delayed(pwq);
} }
/** /**
...@@ -3336,15 +3335,15 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) ...@@ -3336,15 +3335,15 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
wq->saved_max_active = max_active; wq->saved_max_active = max_active;
for_each_cwq_cpu(cpu, wq) { for_each_pwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
struct worker_pool *pool = cwq->pool; struct worker_pool *pool = pwq->pool;
spin_lock_irq(&pool->lock); spin_lock_irq(&pool->lock);
if (!(wq->flags & WQ_FREEZABLE) || if (!(wq->flags & WQ_FREEZABLE) ||
!(pool->flags & POOL_FREEZING)) !(pool->flags & POOL_FREEZING))
cwq_set_max_active(cwq, max_active); pwq_set_max_active(pwq, max_active);
spin_unlock_irq(&pool->lock); spin_unlock_irq(&pool->lock);
} }
...@@ -3367,9 +3366,9 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active); ...@@ -3367,9 +3366,9 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
*/ */
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
{ {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
return !list_empty(&cwq->delayed_works); return !list_empty(&pwq->delayed_works);
} }
EXPORT_SYMBOL_GPL(workqueue_congested); EXPORT_SYMBOL_GPL(workqueue_congested);
...@@ -3408,7 +3407,7 @@ EXPORT_SYMBOL_GPL(work_busy); ...@@ -3408,7 +3407,7 @@ EXPORT_SYMBOL_GPL(work_busy);
* CPU hotplug. * CPU hotplug.
* *
* There are two challenges in supporting CPU hotplug. Firstly, there * There are two challenges in supporting CPU hotplug. Firstly, there
* are a lot of assumptions on strong associations among work, cwq and * are a lot of assumptions on strong associations among work, pwq and
* pool which make migrating pending and scheduled works very * pool which make migrating pending and scheduled works very
* difficult to implement without impacting hot paths. Secondly, * difficult to implement without impacting hot paths. Secondly,
* worker pools serve mix of short, long and very long running works making * worker pools serve mix of short, long and very long running works making
...@@ -3612,11 +3611,11 @@ void freeze_workqueues_begin(void) ...@@ -3612,11 +3611,11 @@ void freeze_workqueues_begin(void)
pool->flags |= POOL_FREEZING; pool->flags |= POOL_FREEZING;
list_for_each_entry(wq, &workqueues, list) { list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
if (cwq && cwq->pool == pool && if (pwq && pwq->pool == pool &&
(wq->flags & WQ_FREEZABLE)) (wq->flags & WQ_FREEZABLE))
cwq->max_active = 0; pwq->max_active = 0;
} }
spin_unlock_irq(&pool->lock); spin_unlock_irq(&pool->lock);
...@@ -3655,13 +3654,13 @@ bool freeze_workqueues_busy(void) ...@@ -3655,13 +3654,13 @@ bool freeze_workqueues_busy(void)
* to peek without lock. * to peek without lock.
*/ */
list_for_each_entry(wq, &workqueues, list) { list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
if (!cwq || !(wq->flags & WQ_FREEZABLE)) if (!pwq || !(wq->flags & WQ_FREEZABLE))
continue; continue;
BUG_ON(cwq->nr_active < 0); BUG_ON(pwq->nr_active < 0);
if (cwq->nr_active) { if (pwq->nr_active) {
busy = true; busy = true;
goto out_unlock; goto out_unlock;
} }
...@@ -3701,14 +3700,14 @@ void thaw_workqueues(void) ...@@ -3701,14 +3700,14 @@ void thaw_workqueues(void)
pool->flags &= ~POOL_FREEZING; pool->flags &= ~POOL_FREEZING;
list_for_each_entry(wq, &workqueues, list) { list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
if (!cwq || cwq->pool != pool || if (!pwq || pwq->pool != pool ||
!(wq->flags & WQ_FREEZABLE)) !(wq->flags & WQ_FREEZABLE))
continue; continue;
/* restore max_active and repopulate worklist */ /* restore max_active and repopulate worklist */
cwq_set_max_active(cwq, wq->saved_max_active); pwq_set_max_active(pwq, wq->saved_max_active);
} }
wake_up_worker(pool); wake_up_worker(pool);
......
...@@ -28,7 +28,7 @@ struct worker { ...@@ -28,7 +28,7 @@ struct worker {
struct work_struct *current_work; /* L: work being processed */ struct work_struct *current_work; /* L: work being processed */
work_func_t current_func; /* L: current_work's fn */ work_func_t current_func; /* L: current_work's fn */
struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */ struct pool_workqueue *current_pwq; /* L: current_work's pwq */
struct list_head scheduled; /* L: scheduled works */ struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */ struct task_struct *task; /* I: worker task */
struct worker_pool *pool; /* I: the associated pool */ struct worker_pool *pool; /* I: the associated pool */
......