Commit 55df0933 authored by Imran Khan, committed by Tejun Heo

workqueue: Introduce show_one_worker_pool and show_one_workqueue.

Currently, show_workqueue_state() shows the state of all workqueues and of
all worker pools. In certain cases we may need to dump the state of only a
specific workqueue or worker pool. For example, destroy_workqueue() only
needs to show the state of the workqueue that is being destroyed.

So rename show_workqueue_state() to show_all_workqueues() (to signify that
it dumps the state of all busy workqueues) and split it into the more
granular functions show_one_workqueue() and show_one_worker_pool(), which
show the state of an individual workqueue or worker pool and can be used
in cases such as the one mentioned above.

Also, as mentioned above, make destroy_workqueue() dump data pertaining
only to the workqueue being destroyed, and convert the users of the
earlier interface (show_workqueue_state()) to the new interface
(show_all_workqueues()).
Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent d25302e4
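
For callers, the reorganized interface boils down to the two entry points sketched below. The dump_stuck_wq() helper is a hypothetical illustration of how built-in code might use the granular function; it is not part of this commit:

    #include <linux/workqueue.h>
    #include <linux/printk.h>

    /*
     * Added by this commit in include/linux/workqueue.h:
     *   show_all_workqueues()  - dump every busy workqueue and worker pool
     *   show_one_workqueue(wq) - dump a single workqueue, silent if idle
     * (show_one_worker_pool() remains static inside kernel/workqueue.c.)
     */

    /* Hypothetical built-in debug helper, not part of this commit: dump
     * only the workqueue of interest instead of the whole system state. */
    static void dump_stuck_wq(struct workqueue_struct *wq)
    {
            pr_warn("suspect workqueue appears stuck, dumping its state\n");
            show_one_workqueue(wq);  /* prints nothing if the workqueue is idle */
    }
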
drivers/tty/sysrq.c
@@ -296,7 +296,7 @@ static const struct sysrq_key_op sysrq_showregs_op = {
 static void sysrq_handle_showstate(int key)
 {
         show_state();
-        show_workqueue_state();
+        show_all_workqueues();
 }
 
 static const struct sysrq_key_op sysrq_showstate_op = {
         .handler = sysrq_handle_showstate,
include/linux/workqueue.h
@@ -469,7 +469,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
 extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
 
 /**
kernel/power/process.c
@@ -94,7 +94,7 @@ static int try_to_freeze_tasks(bool user_only)
                todo - wq_busy, wq_busy);
 
         if (wq_busy)
-                show_workqueue_state();
+                show_all_workqueues();
 
         if (!wakeup || pm_debug_messages_on) {
                 read_lock(&tasklist_lock);
kernel/workqueue.c
@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                         raw_spin_unlock_irq(&pwq->pool->lock);
                         mutex_unlock(&wq->mutex);
                         mutex_unlock(&wq_pool_mutex);
-                        show_workqueue_state();
+                        show_one_workqueue(wq);
                         return;
                 }
                 raw_spin_unlock_irq(&pwq->pool->lock);
@@ -4797,97 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq)
 }
 
 /**
- * show_workqueue_state - dump workqueue state
- *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
- */
-void show_workqueue_state(void)
-{
-        struct workqueue_struct *wq;
-        struct worker_pool *pool;
-        unsigned long flags;
-        int pi;
-
-        rcu_read_lock();
-
-        pr_info("Showing busy workqueues and worker pools:\n");
-
-        list_for_each_entry_rcu(wq, &workqueues, list) {
-                struct pool_workqueue *pwq;
-                bool idle = true;
-
-                for_each_pwq(pwq, wq) {
-                        if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-                                idle = false;
-                                break;
-                        }
-                }
-                if (idle)
-                        continue;
-
-                pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
-
-                for_each_pwq(pwq, wq) {
-                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                        if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-                                /*
-                                 * Defer printing to avoid deadlocks in console
-                                 * drivers that queue work while holding locks
-                                 * also taken in their write paths.
-                                 */
-                                printk_deferred_enter();
-                                show_pwq(pwq);
-                                printk_deferred_exit();
-                        }
-                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
-                        /*
-                         * We could be printing a lot from atomic context, e.g.
-                         * sysrq-t -> show_workqueue_state(). Avoid triggering
-                         * hard lockup.
-                         */
-                        touch_nmi_watchdog();
-                }
-        }
-
-        for_each_pool(pool, pi) {
-                struct worker *worker;
-                bool first = true;
-
-                raw_spin_lock_irqsave(&pool->lock, flags);
-                if (pool->nr_workers == pool->nr_idle)
-                        goto next_pool;
-                /*
-                 * Defer printing to avoid deadlocks in console drivers that
-                 * queue work while holding locks also taken in their write
-                 * paths.
-                 */
-                printk_deferred_enter();
-                pr_info("pool %d:", pool->id);
-                pr_cont_pool_info(pool);
-                pr_cont(" hung=%us workers=%d",
-                        jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-                        pool->nr_workers);
-                if (pool->manager)
-                        pr_cont(" manager: %d",
-                                task_pid_nr(pool->manager->task));
-                list_for_each_entry(worker, &pool->idle_list, entry) {
-                        pr_cont(" %s%d", first ? "idle: " : "",
-                                task_pid_nr(worker->task));
-                        first = false;
-                }
-                pr_cont("\n");
-                printk_deferred_exit();
-next_pool:
-                raw_spin_unlock_irqrestore(&pool->lock, flags);
-                /*
-                 * We could be printing a lot from atomic context, e.g.
-                 * sysrq-t -> show_workqueue_state(). Avoid triggering
-                 * hard lockup.
-                 */
-                touch_nmi_watchdog();
-        }
-
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
+ */
+void show_one_workqueue(struct workqueue_struct *wq)
+{
+        struct pool_workqueue *pwq;
+        bool idle = true;
+        unsigned long flags;
+
+        for_each_pwq(pwq, wq) {
+                if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                        idle = false;
+                        break;
+                }
+        }
+        if (idle) /* Nothing to print for idle workqueue */
+                return;
+
+        pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+
+        for_each_pwq(pwq, wq) {
+                raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+                if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                        /*
+                         * Defer printing to avoid deadlocks in console
+                         * drivers that queue work while holding locks
+                         * also taken in their write paths.
+                         */
+                        printk_deferred_enter();
+                        show_pwq(pwq);
+                        printk_deferred_exit();
+                }
+                raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+                /*
+                 * We could be printing a lot from atomic context, e.g.
+                 * sysrq-t -> show_all_workqueues(). Avoid triggering
+                 * hard lockup.
+                 */
+                touch_nmi_watchdog();
+        }
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+        struct worker *worker;
+        bool first = true;
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&pool->lock, flags);
+        if (pool->nr_workers == pool->nr_idle)
+                goto next_pool;
+        /*
+         * Defer printing to avoid deadlocks in console drivers that
+         * queue work while holding locks also taken in their write
+         * paths.
+         */
+        printk_deferred_enter();
+        pr_info("pool %d:", pool->id);
+        pr_cont_pool_info(pool);
+        pr_cont(" hung=%us workers=%d",
+                jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+                pool->nr_workers);
+        if (pool->manager)
+                pr_cont(" manager: %d",
+                        task_pid_nr(pool->manager->task));
+        list_for_each_entry(worker, &pool->idle_list, entry) {
+                pr_cont(" %s%d", first ? "idle: " : "",
+                        task_pid_nr(worker->task));
+                first = false;
+        }
+        pr_cont("\n");
+        printk_deferred_exit();
+next_pool:
+        raw_spin_unlock_irqrestore(&pool->lock, flags);
+        /*
+         * We could be printing a lot from atomic context, e.g.
+         * sysrq-t -> show_all_workqueues(). Avoid triggering
+         * hard lockup.
+         */
+        touch_nmi_watchdog();
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
+ *
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
+ */
+void show_all_workqueues(void)
+{
+        struct workqueue_struct *wq;
+        struct worker_pool *pool;
+        int pi;
+
+        rcu_read_lock();
+
+        pr_info("Showing busy workqueues and worker pools:\n");
+
+        list_for_each_entry_rcu(wq, &workqueues, list)
+                show_one_workqueue(wq);
+
+        for_each_pool(pool, pi)
+                show_one_worker_pool(pool);
+
         rcu_read_unlock();
 }
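
Stripped of diff markers, the top-level dump path after this hunk reads as follows (the new show_all_workqueues() from above, with comments added here for orientation):

    void show_all_workqueues(void)
    {
            struct workqueue_struct *wq;
            struct worker_pool *pool;
            int pi;

            rcu_read_lock();

            pr_info("Showing busy workqueues and worker pools:\n");

            /* Locking, printk deferral and NMI-watchdog touches now live
             * inside the two helpers, so these loop bodies stay trivial. */
            list_for_each_entry_rcu(wq, &workqueues, list)
                    show_one_workqueue(wq);         /* returns early when idle */

            for_each_pool(pool, pi)
                    show_one_worker_pool(pool);     /* skips fully idle pools */

            rcu_read_unlock();
    }
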
@@ -5876,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
         rcu_read_unlock();
 
         if (lockup_detected)
-                show_workqueue_state();
+                show_all_workqueues();
 
         wq_watchdog_reset_touched();
         mod_timer(&wq_watchdog_timer, jiffies + thresh);