Commit 1c6fd599 authored by Petr Mladek

Merge branch 'rework/kthreads' into for-linus

parents a5c7a39f 701850dc
@@ -578,6 +578,7 @@ void __handle_sysrq(int key, bool check_mask)
 	rcu_sysrq_start();
 	rcu_read_lock();
+	printk_prefer_direct_enter();
 	/*
 	 * Raise the apparent loglevel to maximum so that the sysrq header
 	 * is shown to provide the user with positive feedback. We do not
@@ -619,6 +620,7 @@ void __handle_sysrq(int key, bool check_mask)
 		pr_cont("\n");
 		console_loglevel = orig_log_level;
 	}
+	printk_prefer_direct_exit();
 	rcu_read_unlock();
 	rcu_sysrq_end();
...
@@ -16,6 +16,7 @@
 #include <linux/atomic.h>
 #include <linux/types.h>
+#include <linux/mutex.h>
 
 struct vc_data;
 struct console_font_op;
@@ -151,6 +152,24 @@ struct console {
 	int	cflag;
 	uint	ispeed;
 	uint	ospeed;
+	u64	seq;
+	unsigned long dropped;
+	struct task_struct *thread;
+	bool	blocked;
+
+	/*
+	 * The per-console lock is used by printing kthreads to synchronize
+	 * this console with callers of console_lock(). This is necessary in
+	 * order to allow printing kthreads to run in parallel to each other,
+	 * while each safely accesses the @blocked field and synchronizes
+	 * against direct printing via console_lock/console_unlock.
+	 *
+	 * Note: For synchronizing against direct printing via
+	 * console_trylock/console_unlock, see the static global
+	 * variable @console_kthreads_active.
+	 */
+	struct mutex lock;
+
 	void	*data;
 	struct console *next;
 };
...
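The new struct console fields and the comment above describe a handshake: each printing kthread takes its console's mutex and prints only while @blocked is clear, and a direct printer sets @blocked under that same mutex to pause the kthread. As a rough model of that handshake only — not the kernel implementation; fake_console, emit_record, printer_thread_fn and direct_print below are names invented for this sketch — a small userspace C program using pthreads could look like this:

/* Userspace model of the per-console locking described above (illustrative only). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_console {
	pthread_mutex_t lock;	/* stands in for the new "struct mutex lock" */
	bool blocked;		/* stands in for console->blocked */
	unsigned long seq;	/* stands in for console->seq */
};

/* Hypothetical stand-in for the real console write path. */
static void emit_record(struct fake_console *con)
{
	printf("record %lu\n", con->seq++);
}

/* Models one printing kthread: print only while the console is not blocked. */
static void *printer_thread_fn(void *arg)
{
	struct fake_console *con = arg;

	for (int i = 0; i < 5; i++) {
		pthread_mutex_lock(&con->lock);
		if (!con->blocked)	/* direct printers have priority */
			emit_record(con);
		pthread_mutex_unlock(&con->lock);
		usleep(1000);
	}
	return NULL;
}

/* Models a console_lock()/console_unlock() section in a direct printer. */
static void direct_print(struct fake_console *con)
{
	pthread_mutex_lock(&con->lock);
	con->blocked = true;	/* park the printing kthread */
	pthread_mutex_unlock(&con->lock);

	emit_record(con);	/* print directly; the kthread stays out */

	pthread_mutex_lock(&con->lock);
	con->blocked = false;	/* let the kthread resume */
	pthread_mutex_unlock(&con->lock);
}

int main(void)
{
	struct fake_console con = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t tid;

	pthread_create(&tid, NULL, printer_thread_fn, &con);
	direct_print(&con);
	pthread_join(tid, NULL);
	return 0;
}

Because the lock and the flag live in each console, printing kthreads for different consoles can run in parallel, while each still synchronizes safely against direct printing — which is what the comment in the struct states.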
@@ -170,6 +170,11 @@ extern void __printk_safe_exit(void);
 #define printk_deferred_enter __printk_safe_enter
 #define printk_deferred_exit __printk_safe_exit
 
+extern void printk_prefer_direct_enter(void);
+extern void printk_prefer_direct_exit(void);
+
+extern bool pr_flush(int timeout_ms, bool reset_on_progress);
+
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites. Instead use
@@ -220,6 +225,19 @@ static inline void printk_deferred_exit(void)
 {
 }
 
+static inline void printk_prefer_direct_enter(void)
+{
+}
+
+static inline void printk_prefer_direct_exit(void)
+{
+}
+
+static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
+{
+	return true;
+}
+
 static inline int printk_ratelimit(void)
 {
 	return 0;
@@ -277,45 +295,57 @@ static inline void printk_trigger_flush(void)
 #endif
 
 #ifdef CONFIG_SMP
-extern int __printk_cpu_trylock(void);
-extern void __printk_wait_on_cpu_lock(void);
-extern void __printk_cpu_unlock(void);
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);
+
+#else
+
+#define __printk_cpu_sync_try_get() true
+#define __printk_cpu_sync_wait()
+#define __printk_cpu_sync_put()
+
+#endif /* CONFIG_SMP */
 
 /**
- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
- *                             lock and disable interrupts.
+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
+ *                                 cpu-reentrant spinning lock.
  * @flags: Stack-allocated storage for saving local interrupt state,
- *         to be passed to printk_cpu_unlock_irqrestore().
+ *         to be passed to printk_cpu_sync_put_irqrestore().
  *
  * If the lock is owned by another CPU, spin until it becomes available.
  * Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ *     * This function is reentrant on the same CPU. Therefore the calling
+ *       code must not assume exclusive access to data if code accessing the
+ *       data can run reentrant or within NMI context on the same CPU.
+ *
+ *     * If there exists usage of this function from NMI context, it becomes
+ *       unsafe to perform any type of locking or spinning to wait for other
+ *       CPUs after calling this function from any context. This includes
+ *       using spinlocks or any other busy-waiting synchronization methods.
  */
-#define printk_cpu_lock_irqsave(flags)		\
+#define printk_cpu_sync_get_irqsave(flags)	\
 	for (;;) {				\
 		local_irq_save(flags);		\
-		if (__printk_cpu_trylock())	\
+		if (__printk_cpu_sync_try_get())	\
 			break;			\
 		local_irq_restore(flags);	\
-		__printk_wait_on_cpu_lock();	\
+		__printk_cpu_sync_wait();	\
 	}
 
 /**
- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
  *                                  lock and restore interrupts.
- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
  */
-#define printk_cpu_unlock_irqrestore(flags)	\
+#define printk_cpu_sync_put_irqrestore(flags)	\
 	do {					\
-		__printk_cpu_unlock();		\
+		__printk_cpu_sync_put();	\
 		local_irq_restore(flags);	\
-	} while (0)				\
-
-#else
-
-#define printk_cpu_lock_irqsave(flags) ((void)flags)
-#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
-
-#endif /* CONFIG_SMP */
+	} while (0)
 
 extern int kptr_restrict;
...
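Taken together, the declarations above cover the two interfaces used throughout the rest of this merge: printk_prefer_direct_enter()/printk_prefer_direct_exit() bracket emergency output so it bypasses the printing kthreads, and the renamed printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() pair keeps a multi-line block from interleaving with other CPUs. A hedged sketch of a caller combining them — example_emergency_report() and its messages are invented for illustration; only the printk interfaces come from this patch:

#include <linux/printk.h>
#include <linux/smp.h>

/* Illustrative only; not part of this commit. */
static void example_emergency_report(void)
{
	unsigned long flags;

	/* Prefer direct printing over the kthread printers for this report. */
	printk_prefer_direct_enter();

	/* Keep the lines of this report together against other CPUs. */
	printk_cpu_sync_get_irqsave(flags);
	pr_emerg("CPU%d: example emergency report start\n", raw_smp_processor_id());
	pr_emerg("CPU%d: example emergency report end\n", raw_smp_processor_id());
	printk_cpu_sync_put_irqrestore(flags);

	printk_prefer_direct_exit();

	/* Optionally wait up to 1s for consoles to catch up with the messages. */
	pr_flush(1000, true);
}

The dump_stack_lvl() and nmi_cpu_backtrace() hunks at the end of this diff show the cpu-sync half of this pattern in real callers. Note the CAUTION in the kernel-doc above: the cpu-sync lock is reentrant on the same CPU and must not be combined with further spinning or locking once NMI callers exist.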
@@ -127,6 +127,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	 * complain:
 	 */
 	if (sysctl_hung_task_warnings) {
+		printk_prefer_direct_enter();
+
 		if (sysctl_hung_task_warnings > 0)
 			sysctl_hung_task_warnings--;
 		pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
@@ -142,6 +144,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
 		if (sysctl_hung_task_all_cpu_backtrace)
 			hung_task_show_all_bt = true;
+
+		printk_prefer_direct_exit();
 	}
 
 	touch_nmi_watchdog();
@@ -204,12 +208,17 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	}
  unlock:
 	rcu_read_unlock();
-	if (hung_task_show_lock)
+	if (hung_task_show_lock) {
+		printk_prefer_direct_enter();
 		debug_show_all_locks();
+		printk_prefer_direct_exit();
+	}
 
 	if (hung_task_show_all_bt) {
 		hung_task_show_all_bt = false;
+		printk_prefer_direct_enter();
 		trigger_all_cpu_backtrace();
+		printk_prefer_direct_exit();
 	}
 
 	if (hung_task_call_panic)
...
@@ -560,6 +560,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 {
 	disable_trace_on_warning();
 
+	printk_prefer_direct_enter();
+
 	if (file)
 		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
 			raw_smp_processor_id(), current->pid, file, line,
@@ -597,6 +599,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 
 	/* Just a warning, don't kill lockdep. */
 	add_taint(taint, LOCKDEP_STILL_OK);
+
+	printk_prefer_direct_exit();
 }
 
 #ifndef __WARN_FLAGS
...
The diff for one file is collapsed and not shown.
@@ -619,6 +619,7 @@ static void print_cpu_stall(unsigned long gps)
 	 * See Documentation/RCU/stallwarn.rst for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
+	printk_prefer_direct_enter();
 	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
 	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
 	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
@@ -656,6 +657,7 @@ static void print_cpu_stall(unsigned long gps)
 	 */
 	set_tsk_need_resched(current);
 	set_preempt_need_resched();
+	printk_prefer_direct_exit();
 }
 
 static void check_cpu_stall(struct rcu_data *rdp)
...
@@ -447,9 +447,11 @@ static int __orderly_reboot(void)
 	ret = run_cmd(reboot_cmd);
 
 	if (ret) {
+		printk_prefer_direct_enter();
 		pr_warn("Failed to start orderly reboot: forcing the issue\n");
 		emergency_sync();
 		kernel_restart(NULL);
+		printk_prefer_direct_exit();
 	}
 
 	return ret;
@@ -462,6 +464,7 @@ static int __orderly_poweroff(bool force)
 	ret = run_cmd(poweroff_cmd);
 
 	if (ret && force) {
+		printk_prefer_direct_enter();
 		pr_warn("Failed to start orderly shutdown: forcing the issue\n");
 
 		/*
@@ -471,6 +474,7 @@ static int __orderly_poweroff(bool force)
 		 */
 		emergency_sync();
 		kernel_power_off();
+		printk_prefer_direct_exit();
 	}
 
 	return ret;
@@ -528,6 +532,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
  */
 static void hw_failure_emergency_poweroff_func(struct work_struct *work)
 {
+	printk_prefer_direct_enter();
+
 	/*
 	 * We have reached here after the emergency shutdown waiting period has
 	 * expired. This means orderly_poweroff has not been able to shut off
@@ -544,6 +550,8 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
 	 */
 	pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
 	emergency_restart();
+
+	printk_prefer_direct_exit();
 }
 
 static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
@@ -582,11 +590,13 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
 {
 	static atomic_t allow_proceed = ATOMIC_INIT(1);
 
+	printk_prefer_direct_enter();
+
 	pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
 
 	/* Shutdown should be initiated only once. */
 	if (!atomic_dec_and_test(&allow_proceed))
-		return;
+		goto out;
 
 	/*
 	 * Queue a backup emergency shutdown in the event of
@@ -594,6 +604,8 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
 	 */
 	hw_failure_emergency_poweroff(ms_until_forced);
 	orderly_poweroff(true);
+out:
+	printk_prefer_direct_exit();
 }
 EXPORT_SYMBOL_GPL(hw_protection_shutdown);
...
@@ -424,6 +424,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		/* Start period for the next softlockup warning. */
 		update_report_ts();
 
+		printk_prefer_direct_enter();
+
 		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 			smp_processor_id(), duration,
 			current->comm, task_pid_nr(current));
@@ -442,6 +444,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
+
+		printk_prefer_direct_exit();
 	}
 
 	return HRTIMER_RESTART;
...
@@ -135,6 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
 		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
+		printk_prefer_direct_enter();
+
 		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
 			 this_cpu);
 		print_modules();
@@ -155,6 +157,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
 		if (hardlockup_panic)
 			nmi_panic(regs, "Hard LOCKUP");
 
+		printk_prefer_direct_exit();
+
 		__this_cpu_write(hard_watchdog_warn, true);
 		return;
 	}
...
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-	printk_cpu_lock_irqsave(flags);
+	printk_cpu_sync_get_irqsave(flags);
 	__dump_stack(log_lvl);
-	printk_cpu_unlock_irqrestore(flags);
+	printk_cpu_sync_put_irqrestore(flags);
 }
 EXPORT_SYMBOL(dump_stack_lvl);
...
@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 		 * Allow nested NMI backtraces while serializing
 		 * against other CPUs.
 		 */
-		printk_cpu_lock_irqsave(flags);
+		printk_cpu_sync_get_irqsave(flags);
 		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
 				cpu, (void *)instruction_pointer(regs));
@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 			else
 				dump_stack();
 		}
-		printk_cpu_unlock_irqrestore(flags);
+		printk_cpu_sync_put_irqrestore(flags);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
...