Kirill Smelkov / linux / Commits

Commit 1c6fd599, authored May 23, 2022 by Petr Mladek

    Merge branch 'rework/kthreads' into for-linus

Parents: a5c7a39f, 701850dc

Showing 12 changed files with 1034 additions and 321 deletions (+1034 -321):
  drivers/tty/sysrq.c       +2    -0
  include/linux/console.h   +19   -0
  include/linux/printk.h    +56   -26
  kernel/hung_task.c        +10   -1
  kernel/panic.c            +4    -0
  kernel/printk/printk.c    +916  -289
  kernel/rcu/tree_stall.h   +2    -0
  kernel/reboot.c           +13   -1
  kernel/watchdog.c         +4    -0
  kernel/watchdog_hld.c     +4    -0
  lib/dump_stack.c          +2    -2
  lib/nmi_backtrace.c       +2    -2
drivers/tty/sysrq.c

@@ -578,6 +578,7 @@ void __handle_sysrq(int key, bool check_mask)
 	rcu_sysrq_start();
 	rcu_read_lock();
+	printk_prefer_direct_enter();
 	/*
 	 * Raise the apparent loglevel to maximum so that the sysrq header
 	 * is shown to provide the user with positive feedback. We do not
@@ -619,6 +620,7 @@ void __handle_sysrq(int key, bool check_mask)
 		pr_cont("\n");
 		console_loglevel = orig_log_level;
 	}
+	printk_prefer_direct_exit();
 	rcu_read_unlock();
 	rcu_sysrq_end();
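The pattern introduced here, bracketing an emergency or diagnostic code path with printk_prefer_direct_enter()/printk_prefer_direct_exit(), recurs in several of the files below (hung_task, panic, the watchdogs). A minimal sketch of a caller, with a made-up handler name purely for illustration:

	/* Hypothetical diagnostic path: ask printk() to try writing directly
	 * to the consoles instead of deferring to the printing kthreads for
	 * the duration of the dump. The enter/exit calls must be balanced. */
	static void example_emergency_dump(void)
	{
		printk_prefer_direct_enter();

		pr_err("example: dumping emergency state\n");
		/* ... emit further diagnostics ... */

		printk_prefer_direct_exit();
	}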
include/linux/console.h

@@ -16,6 +16,7 @@
 #include <linux/atomic.h>
 #include <linux/types.h>
+#include <linux/mutex.h>

 struct vc_data;
 struct console_font_op;
@@ -151,6 +152,24 @@ struct console {
 	int	cflag;
 	uint	ispeed;
 	uint	ospeed;
+	u64	seq;
+	unsigned long dropped;
+	struct task_struct *thread;
+	bool	blocked;
+
+	/*
+	 * The per-console lock is used by printing kthreads to synchronize
+	 * this console with callers of console_lock(). This is necessary in
+	 * order to allow printing kthreads to run in parallel to each other,
+	 * while each safely accessing the @blocked field and synchronizing
+	 * against direct printing via console_lock/console_unlock.
+	 *
+	 * Note: For synchronizing against direct printing via
+	 * console_trylock/console_unlock, see the static global
+	 * variable @console_kthreads_active.
+	 */
+	struct mutex lock;
+
 	void	*data;
 	struct	 console *next;
 };
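A rough sketch of the protocol the new fields and the comment above describe: each printing kthread takes its console's mutex and checks @blocked before writing, while console_lock() marks every console blocked under that same mutex (this mirrors console_kthreads_block() later in this diff; the *_example names are not kernel APIs):

	/* Printing kthread side: print only while this console is not blocked. */
	static bool kthread_try_print_example(struct console *con)
	{
		bool printed = false;

		mutex_lock(&con->lock);
		if (!con->blocked) {
			/* safe to call con->write() here */
			printed = true;
		}
		mutex_unlock(&con->lock);
		return printed;
	}

	/* console_lock() side: stop every kthread printer before printing directly. */
	static void block_all_printers_example(void)
	{
		struct console *con;

		for_each_console(con) {
			mutex_lock(&con->lock);
			con->blocked = true;
			mutex_unlock(&con->lock);
		}
	}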
include/linux/printk.h

@@ -170,6 +170,11 @@ extern void __printk_safe_exit(void);
 #define printk_deferred_enter __printk_safe_enter
 #define printk_deferred_exit __printk_safe_exit

+extern void printk_prefer_direct_enter(void);
+extern void printk_prefer_direct_exit(void);
+
+extern bool pr_flush(int timeout_ms, bool reset_on_progress);
+
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites. Instead use
@@ -220,6 +225,19 @@ static inline void printk_deferred_exit(void)
 {
 }

+static inline void printk_prefer_direct_enter(void)
+{
+}
+
+static inline void printk_prefer_direct_exit(void)
+{
+}
+
+static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
+{
+	return true;
+}
+
 static inline int printk_ratelimit(void)
 {
 	return 0;
@@ -277,45 +295,57 @@ static inline void printk_trigger_flush(void)
 #endif

 #ifdef CONFIG_SMP
-extern int __printk_cpu_trylock(void);
-extern void __printk_wait_on_cpu_lock(void);
-extern void __printk_cpu_unlock(void);
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);
+
+#else
+
+#define __printk_cpu_sync_try_get() true
+#define __printk_cpu_sync_wait()
+#define __printk_cpu_sync_put()
+#endif /* CONFIG_SMP */

 /**
- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
- *                             lock and disable interrupts.
+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
+ *                                 cpu-reentrant spinning lock.
  * @flags: Stack-allocated storage for saving local interrupt state,
- *         to be passed to printk_cpu_unlock_irqrestore().
+ *         to be passed to printk_cpu_sync_put_irqrestore().
  *
  * If the lock is owned by another CPU, spin until it becomes available.
  * Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ *     * This function is reentrant on the same CPU. Therefore the calling
+ *       code must not assume exclusive access to data if code accessing the
+ *       data can run reentrant or within NMI context on the same CPU.
+ *
+ *     * If there exists usage of this function from NMI context, it becomes
+ *       unsafe to perform any type of locking or spinning to wait for other
+ *       CPUs after calling this function from any context. This includes
+ *       using spinlocks or any other busy-waiting synchronization methods.
  */
-#define printk_cpu_lock_irqsave(flags)		\
-	for (;;) {				\
-		local_irq_save(flags);		\
-		if (__printk_cpu_trylock())	\
-			break;			\
-		local_irq_restore(flags);	\
-		__printk_wait_on_cpu_lock();	\
+#define printk_cpu_sync_get_irqsave(flags)		\
+	for (;;) {					\
+		local_irq_save(flags);			\
+		if (__printk_cpu_sync_try_get())	\
+			break;				\
+		local_irq_restore(flags);		\
+		__printk_cpu_sync_wait();		\
 	}

 /**
- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
- *                                  lock and restore interrupts.
- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
+ *                                    lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
  */
-#define printk_cpu_unlock_irqrestore(flags)	\
+#define printk_cpu_sync_put_irqrestore(flags)	\
 	do {					\
-		__printk_cpu_unlock();		\
+		__printk_cpu_sync_put();	\
 		local_irq_restore(flags);	\
-	} while (0)				\
-
-#else
-
-#define printk_cpu_lock_irqsave(flags) ((void)flags)
-#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
-
-#endif /* CONFIG_SMP */
+	} while (0)

 extern int kptr_restrict;
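The renamed printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() pair is meant for keeping multi-line dumps from different CPUs from interleaving. A minimal sketch of a caller (the function and message are illustrative only):

	#include <linux/printk.h>
	#include <linux/smp.h>

	/* Hypothetical dump helper: serialize a multi-line report against
	 * other CPUs using the printk cpu-reentrant spinning lock. Note the
	 * CAUTION above: the lock is reentrant on the same CPU. */
	static void example_dump_report(void)
	{
		unsigned long flags;

		printk_cpu_sync_get_irqsave(flags);

		pr_info("example: begin report on CPU %d\n", raw_smp_processor_id());
		/* ... print the rest of the report line by line ... */

		printk_cpu_sync_put_irqrestore(flags);
	}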
kernel/hung_task.c

@@ -127,6 +127,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	 * complain:
 	 */
 	if (sysctl_hung_task_warnings) {
+		printk_prefer_direct_enter();
+
 		if (sysctl_hung_task_warnings > 0)
 			sysctl_hung_task_warnings--;
 		pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
@@ -142,6 +144,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 		if (sysctl_hung_task_all_cpu_backtrace)
 			hung_task_show_all_bt = true;
+
+		printk_prefer_direct_exit();
 	}

 	touch_nmi_watchdog();
@@ -204,12 +208,17 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	}
  unlock:
 	rcu_read_unlock();
-	if (hung_task_show_lock)
+	if (hung_task_show_lock) {
+		printk_prefer_direct_enter();
 		debug_show_all_locks();
+		printk_prefer_direct_exit();
+	}

 	if (hung_task_show_all_bt) {
 		hung_task_show_all_bt = false;
+		printk_prefer_direct_enter();
 		trigger_all_cpu_backtrace();
+		printk_prefer_direct_exit();
 	}

 	if (hung_task_call_panic)
kernel/panic.c

@@ -560,6 +560,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 {
 	disable_trace_on_warning();

+	printk_prefer_direct_enter();
+
 	if (file)
 		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
 			raw_smp_processor_id(), current->pid, file, line,
@@ -597,6 +599,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 	/* Just a warning, don't kill lockdep. */
 	add_taint(taint, LOCKDEP_STILL_OK);
+
+	printk_prefer_direct_exit();
 }

 #ifndef __WARN_FLAGS
kernel/printk/printk.c

@@ -223,6 +223,33 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
 /* Number of registered extended console drivers. */
 static int nr_ext_console_drivers;

+/*
+ * Used to synchronize printing kthreads against direct printing via
+ * console_trylock/console_unlock.
+ *
+ * Values:
+ * -1 = console kthreads atomically blocked (via global trylock)
+ *  0 = no kthread printing, console not locked (via trylock)
+ * >0 = kthread(s) actively printing
+ *
+ * Note: For synchronizing against direct printing via
+ * console_lock/console_unlock, see the @lock variable in
+ * struct console.
+ */
+static atomic_t console_kthreads_active = ATOMIC_INIT(0);
+
+#define console_kthreads_atomic_tryblock() \
+	(atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0)
+#define console_kthreads_atomic_unblock() \
+	atomic_cmpxchg(&console_kthreads_active, -1, 0)
+#define console_kthreads_atomically_blocked() \
+	(atomic_read(&console_kthreads_active) == -1)
+
+#define console_kthread_printing_tryenter() \
+	atomic_inc_unless_negative(&console_kthreads_active)
+#define console_kthread_printing_exit() \
+	atomic_dec(&console_kthreads_active)
+
 /*
  * Helper macros to handle lockdep when locking/unlocking console_sem. We use
  * macros instead of functions so that _RET_IP_ contains useful information.
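The invariant encoded by @console_kthreads_active (-1 blocks all kthread printers, 0 is idle, a positive value counts printers currently inside a driver) can be seen in a small user-space C11 analogue; this illustrates the counting scheme only and is not kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int active;	/* -1 blocked, 0 idle, >0 printers active */

	static bool tryblock(void)		/* like console_kthreads_atomic_tryblock() */
	{
		int expected = 0;
		return atomic_compare_exchange_strong(&active, &expected, -1);
	}

	static bool printing_tryenter(void)	/* like console_kthread_printing_tryenter() */
	{
		int v = atomic_load(&active);

		while (v >= 0) {
			/* on failure, v is reloaded and the test repeats */
			if (atomic_compare_exchange_weak(&active, &v, v + 1))
				return true;	/* blocking can no longer succeed */
		}
		return false;			/* a trylock owner holds the consoles */
	}

	static void printing_exit(void)		/* like console_kthread_printing_exit() */
	{
		atomic_fetch_sub(&active, 1);
	}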
@@ -271,19 +298,49 @@ static bool panic_in_progress(void)
 }

 /*
- * This is used for debugging the mess that is the VT code by
- * keeping track if we have the console semaphore held. It's
- * definitely not the perfect debug tool (we don't know if _WE_
- * hold it and are racing, but it helps tracking those weird code
- * paths in the console code where we end up in places I want
- * locked without the console semaphore held).
+ * Tracks whether kthread printers are all blocked. A value of true implies
+ * that the console is locked via console_lock() or the console is suspended.
+ * Writing to this variable requires holding @console_sem.
  */
-static int console_locked, console_suspended;
+static bool console_kthreads_blocked;
+
+/*
+ * Block all kthread printers from a schedulable context.
+ *
+ * Requires holding @console_sem.
+ */
+static void console_kthreads_block(void)
+{
+	struct console *con;
+
+	for_each_console(con) {
+		mutex_lock(&con->lock);
+		con->blocked = true;
+		mutex_unlock(&con->lock);
+	}
+
+	console_kthreads_blocked = true;
+}

 /*
- * If exclusive_console is non-NULL then only this console is to be printed to.
+ * Unblock all kthread printers from a schedulable context.
+ *
+ * Requires holding @console_sem.
  */
-static struct console *exclusive_console;
+static void console_kthreads_unblock(void)
+{
+	struct console *con;
+
+	for_each_console(con) {
+		mutex_lock(&con->lock);
+		con->blocked = false;
+		mutex_unlock(&con->lock);
+	}
+
+	console_kthreads_blocked = false;
+}
+
+static int console_suspended;

 /*
  * Array of consoles built from command line options (console=)
@@ -366,7 +423,75 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
 /* syslog_lock protects syslog_* variables and write access to clear_seq. */
 static DEFINE_MUTEX(syslog_lock);

+/*
+ * A flag to signify if printk_activate_kthreads() has already started the
+ * kthread printers. If true, any later registered consoles must start their
+ * own kthread directly. The flag is write protected by the console_lock.
+ */
+static bool printk_kthreads_available;
+
 #ifdef CONFIG_PRINTK
+static atomic_t printk_prefer_direct = ATOMIC_INIT(0);
+
+/**
+ * printk_prefer_direct_enter - cause printk() calls to attempt direct
+ *                              printing to all enabled consoles
+ *
+ * Since it is not possible to call into the console printing code from any
+ * context, there is no guarantee that direct printing will occur.
+ *
+ * This globally effects all printk() callers.
+ *
+ * Context: Any context.
+ */
+void printk_prefer_direct_enter(void)
+{
+	atomic_inc(&printk_prefer_direct);
+}
+
+/**
+ * printk_prefer_direct_exit - restore printk() behavior
+ *
+ * Context: Any context.
+ */
+void printk_prefer_direct_exit(void)
+{
+	WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0);
+}
+
+/*
+ * Calling printk() always wakes kthread printers so that they can
+ * flush the new message to their respective consoles. Also, if direct
+ * printing is allowed, printk() tries to flush the messages directly.
+ *
+ * Direct printing is allowed in situations when the kthreads
+ * are not available or the system is in a problematic state.
+ *
+ * See the implementation about possible races.
+ */
+static inline bool allow_direct_printing(void)
+{
+	/*
+	 * Checking kthread availability is a possible race because the
+	 * kthread printers can become permanently disabled during runtime.
+	 * However, doing that requires holding the console_lock, so any
+	 * pending messages will be direct printed by console_unlock().
+	 */
+	if (!printk_kthreads_available)
+		return true;
+
+	/*
+	 * Prefer direct printing when the system is in a problematic state.
+	 * The context that sets this state will always see the updated value.
+	 * The other contexts do not care. Anyway, direct printing is just a
+	 * best effort. The direct output is only possible when console_lock
+	 * is not already taken and no kthread printers are actively printing.
+	 */
+	return (system_state > SYSTEM_RUNNING ||
+		oops_in_progress ||
+		atomic_read(&printk_prefer_direct));
+}
+
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* All 3 protected by @syslog_lock. */
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
@@ -374,12 +499,6 @@ static u64 syslog_seq;
 static size_t syslog_partial;
 static bool syslog_time;

-/* All 3 protected by @console_sem. */
-/* the next printk record to write to the console */
-static u64 console_seq;
-static u64 exclusive_console_stop_seq;
-static unsigned long console_dropped;
-
 struct latched_seq {
 	seqcount_latch_t latch;
 	u64 val[2];
@@ -405,6 +524,9 @@ static struct latched_seq clear_seq = {
 /* the maximum size of a formatted record (i.e. with prefix added per line) */
 #define CONSOLE_LOG_MAX		1024

+/* the maximum size for a dropped text message */
+#define DROPPED_TEXT_MAX	64
+
 /* the maximum size allowed to be reserved for a record */
 #define LOG_LINE_MAX		(CONSOLE_LOG_MAX - PREFIX_MAX)
@@ -746,8 +868,19 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 			goto out;
 		}

+		/*
+		 * Guarantee this task is visible on the waitqueue before
+		 * checking the wake condition.
+		 *
+		 * The full memory barrier within set_current_state() of
+		 * prepare_to_wait_event() pairs with the full memory barrier
+		 * within wq_has_sleeper().
+		 *
+		 * This pairs with __wake_up_klogd:A.
+		 */
 		ret = wait_event_interruptible(log_wait,
-				prb_read_valid(prb, atomic64_read(&user->seq), r));
+				prb_read_valid(prb,
					atomic64_read(&user->seq), r)); /* LMM(devkmsg_read:A) */
 		if (ret)
 			goto out;
 	}
@@ -1513,7 +1646,18 @@ static int syslog_print(char __user *buf, int size)
 			seq = syslog_seq;

 			mutex_unlock(&syslog_lock);
-			len = wait_event_interruptible(log_wait,
-					prb_read_valid(prb, seq, NULL));
+			/*
+			 * Guarantee this task is visible on the waitqueue before
+			 * checking the wake condition.
+			 *
+			 * The full memory barrier within set_current_state() of
+			 * prepare_to_wait_event() pairs with the full memory barrier
+			 * within wq_has_sleeper().
+			 *
+			 * This pairs with __wake_up_klogd:A.
+			 */
+			len = wait_event_interruptible(log_wait,
+					prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
 			mutex_lock(&syslog_lock);

 			if (len)
@@ -1911,47 +2055,24 @@ static int console_trylock_spinning(void)
 }

 /*
- * Call the console drivers, asking them to write out
- * log_buf[start] to log_buf[end - 1].
- * The console_lock must be held.
+ * Call the specified console driver, asking it to write out the specified
+ * text and length. If @dropped_text is non-NULL and any records have been
+ * dropped, a dropped message will be written out first.
  */
-static void call_console_drivers(const char *ext_text, size_t ext_len,
-				 const char *text, size_t len)
+static void call_console_driver(struct console *con, const char *text, size_t len,
+				char *dropped_text)
 {
-	static char dropped_text[64];
-	size_t dropped_len = 0;
-	struct console *con;
-
-	trace_console_rcuidle(text, len);
-
-	if (!console_drivers)
-		return;
+	size_t dropped_len;

-	if (console_dropped) {
-		dropped_len = snprintf(dropped_text, sizeof(dropped_text),
+	if (con->dropped && dropped_text) {
+		dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
 				       "** %lu printk messages dropped **\n",
-				       console_dropped);
-		console_dropped = 0;
+				       con->dropped);
+		con->dropped = 0;
+		con->write(con, dropped_text, dropped_len);
 	}

-	for_each_console(con) {
-		if (exclusive_console && con != exclusive_console)
-			continue;
-		if (!(con->flags & CON_ENABLED))
-			continue;
-		if (!con->write)
-			continue;
-		if (!cpu_online(smp_processor_id()) &&
-		    !(con->flags & CON_ANYTIME))
-			continue;
-		if (con->flags & CON_EXTENDED)
-			con->write(con, ext_text, ext_len);
-		else {
-			if (dropped_len)
-				con->write(con, dropped_text, dropped_len);
-			con->write(con, text, len);
-		}
-	}
+	con->write(con, text, len);
 }

 /*
@@ -2026,8 +2147,10 @@ static u8 *__printk_recursion_counter(void)
 int printk_delay_msec __read_mostly;

-static inline void printk_delay(void)
+static inline void printk_delay(int level)
 {
+	boot_delay_msec(level);
+
 	if (unlikely(printk_delay_msec)) {
 		int m = printk_delay_msec;
@@ -2041,7 +2164,7 @@ static inline void printk_delay(void)
 static inline u32 printk_caller_id(void)
 {
 	return in_task() ? task_pid_nr(current) :
-		0x80000000 + raw_smp_processor_id();
+		0x80000000 + smp_processor_id();
 }

 /**
@@ -2115,6 +2238,8 @@ static u16 printk_sprint(char *text, u16 size, int facility,
 		}
 	}

+	trace_console_rcuidle(text, text_len);
+
 	return text_len;
 }
@@ -2123,7 +2248,6 @@ int vprintk_store(int facility, int level,
 		  const struct dev_printk_info *dev_info,
 		  const char *fmt, va_list args)
 {
-	const u32 caller_id = printk_caller_id();
 	struct prb_reserved_entry e;
 	enum printk_info_flags flags = 0;
 	struct printk_record r;
@@ -2133,10 +2257,14 @@ int vprintk_store(int facility, int level,
 	u8 *recursion_ptr;
 	u16 reserve_size;
 	va_list args2;
+	u32 caller_id;
 	u16 text_len;
 	int ret = 0;
 	u64 ts_nsec;

+	if (!printk_enter_irqsave(recursion_ptr, irqflags))
+		return 0;
+
 	/*
 	 * Since the duration of printk() can vary depending on the message
 	 * and state of the ringbuffer, grab the timestamp now so that it is
@@ -2145,8 +2273,7 @@ int vprintk_store(int facility, int level,
 	 */
 	ts_nsec = local_clock();

-	if (!printk_enter_irqsave(recursion_ptr, irqflags))
-		return 0;
+	caller_id = printk_caller_id();

 	/*
 	 * The sprintf needs to come first since the syslog prefix might be
@@ -2250,23 +2377,25 @@ asmlinkage int vprintk_emit(int facility, int level,
 		in_sched = true;
 	}

-	boot_delay_msec(level);
-	printk_delay();
+	printk_delay(level);

 	printed_len = vprintk_store(facility, level, dev_info, fmt, args);

 	/* If called from the scheduler, we can not call up(). */
-	if (!in_sched) {
+	if (!in_sched && allow_direct_printing()) {
 		/*
-		 * Disable preemption to avoid being preempted while holding
-		 * console_sem which would prevent anyone from printing to
-		 * console
+		 * The caller may be holding system-critical or
+		 * timing-sensitive locks. Disable preemption during direct
+		 * printing of all remaining records to all consoles so that
+		 * this context can return as soon as possible. Hopefully
+		 * another printk() caller will take over the printing.
 		 */
 		preempt_disable();
 		/*
 		 * Try to acquire and then immediately release the console
-		 * semaphore. The release will print out buffers and wake up
-		 * /dev/kmsg and syslog() users.
+		 * semaphore. The release will print out buffers. With the
+		 * spinning variant, this context tries to take over the
+		 * printing from another printing context.
 		 */
 		if (console_trylock_spinning())
 			console_unlock();
@@ -2297,18 +2426,21 @@ asmlinkage __visible int _printk(const char *fmt, ...)
 }
 EXPORT_SYMBOL(_printk);

+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
+
+static void printk_start_kthread(struct console *con);
+
 #else /* CONFIG_PRINTK */

 #define CONSOLE_LOG_MAX		0
+#define DROPPED_TEXT_MAX	0
 #define printk_time		false

 #define prb_read_valid(rb, seq, r)	false
 #define prb_first_valid_seq(rb)		0
+#define prb_next_seq(rb)		0

 static u64 syslog_seq;
-static u64 console_seq;
-static u64 exclusive_console_stop_seq;
-static unsigned long console_dropped;

 static size_t record_print_text(const struct printk_record *r,
 				bool syslog, bool time)
@@ -2325,9 +2457,14 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
 			  struct dev_printk_info *dev_info) { return 0; }
 static void console_lock_spinning_enable(void) { }
 static int console_lock_spinning_disable_and_check(void) { return 0; }
-static void call_console_drivers(const char *ext_text, size_t ext_len,
-				 const char *text, size_t len) {}
+static void call_console_driver(struct console *con, const char *text, size_t len,
+				char *dropped_text)
+{
+}
 static bool suppress_message_printing(int level) { return false; }
+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
+static void printk_start_kthread(struct console *con) { }
+static bool allow_direct_printing(void) { return true; }

 #endif /* CONFIG_PRINTK */
@@ -2515,6 +2652,7 @@ void suspend_console(void)
 	if (!console_suspend_enabled)
 		return;
 	pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
+	pr_flush(1000, true);
 	console_lock();
 	console_suspended = 1;
 	up_console_sem();
@@ -2527,6 +2665,7 @@ void resume_console(void)
 	down_console_sem();
 	console_suspended = 0;
 	console_unlock();
+	pr_flush(1000, true);
 }

 /**
@@ -2544,6 +2683,14 @@ static int console_cpu_notify(unsigned int cpu)
 		/* If trylock fails, someone else is doing the printing */
 		if (console_trylock())
 			console_unlock();
+		else {
+			/*
+			 * If a new CPU comes online, the conditions for
+			 * printer_should_wake() may have changed for some
+			 * kthread printer with !CON_ANYTIME.
+			 */
+			wake_up_klogd();
+		}
 	}
 	return 0;
 }
@@ -2563,7 +2710,7 @@ void console_lock(void)
 	down_console_sem();
 	if (console_suspended)
 		return;
-	console_locked = 1;
+	console_kthreads_block();
 	console_may_schedule = 1;
 }
 EXPORT_SYMBOL(console_lock);
@@ -2584,33 +2731,32 @@ int console_trylock(void)
 		up_console_sem();
 		return 0;
 	}
-	console_locked = 1;
+	if (!console_kthreads_atomic_tryblock()) {
+		up_console_sem();
+		return 0;
+	}
 	console_may_schedule = 0;
 	return 1;
 }
 EXPORT_SYMBOL(console_trylock);

-int is_console_locked(void)
-{
-	return console_locked;
-}
-EXPORT_SYMBOL(is_console_locked);
-
 /*
- * Check if we have any console that is capable of printing while cpu is
- * booting or shutting down. Requires console_sem.
+ * This is used to help to make sure that certain paths within the VT code are
+ * running with the console lock held. It is definitely not the perfect debug
+ * tool (it is not known if the VT code is the task holding the console lock),
+ * but it helps tracking those weird code paths in the console code such as
+ * when the console is suspended: where the console is not locked but no
+ * console printing may occur.
+ *
+ * Note: This returns true when the console is suspended but is not locked.
+ *       This is intentional because the VT code must consider that situation
+ *       the same as if the console was locked.
  */
-static int have_callable_console(void)
+int is_console_locked(void)
 {
-	struct console *con;
-
-	for_each_console(con)
-		if ((con->flags & CON_ENABLED) &&
-				(con->flags & CON_ANYTIME))
-			return 1;
-
-	return 0;
+	return (console_kthreads_blocked || atomic_read(&console_kthreads_active));
 }
+EXPORT_SYMBOL(is_console_locked);

 /*
  * Return true when this CPU should unlock console_sem without pushing all
@@ -2631,132 +2777,125 @@ static bool abandon_console_lock_in_panic(void)
 	return atomic_read(&panic_cpu) != raw_smp_processor_id();
 }

-/*
- * Can we actually use the console at this time on this cpu?
- *
- * Console drivers may assume that per-cpu resources have been allocated. So
- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
- * call them until this CPU is officially up.
- */
-static inline int can_use_console(void)
+static inline bool __console_is_usable(short flags)
 {
-	return cpu_online(raw_smp_processor_id()) || have_callable_console();
+	if (!(flags & CON_ENABLED))
+		return false;
+
+	/*
+	 * Console drivers may assume that per-cpu resources have been
+	 * allocated. So unless they're explicitly marked as being able to
+	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+	 */
+	if (!cpu_online(raw_smp_processor_id()) &&
+	    !(flags & CON_ANYTIME))
+		return false;
+
+	return true;
 }

-/**
- * console_unlock - unlock the console system
- *
- * Releases the console_lock which the caller holds on the console system
- * and the console driver list.
- *
- * While the console_lock was held, console output may have been buffered
- * by printk().  If this is the case, console_unlock(); emits
- * the output prior to releasing the lock.
- *
- * If there is output waiting, we wake /dev/kmsg and syslog() users.
- *
- * console_unlock(); may be called from any context.
+/*
+ * Check if the given console is currently capable and allowed to print
+ * records.
+ *
+ * Requires holding the console_lock.
  */
-void console_unlock(void)
+static inline bool console_is_usable(struct console *con)
 {
-	static char ext_text[CONSOLE_EXT_LOG_MAX];
-	static char text[CONSOLE_LOG_MAX];
-	static int panic_console_dropped;
-	unsigned long flags;
-	bool do_cond_resched, retry;
-	struct printk_info info;
-	struct printk_record r;
-	u64 __maybe_unused next_seq;
+	if (!con->write)
+		return false;

-	if (console_suspended) {
-		up_console_sem();
-		return;
-	}
+	return __console_is_usable(con->flags);
+}

-	prb_rec_init_rd(&r, &info, text, sizeof(text));
-
+static void __console_unlock(void)
+{
 	/*
-	 * Console drivers are called with interrupts disabled, so
-	 * @console_may_schedule should be cleared before; however, we may
-	 * end up dumping a lot of lines, for example, if called from
-	 * console registration path, and should invoke cond_resched()
-	 * between lines if allowable.  Not doing so can cause a very long
-	 * scheduling stall on a slow console leading to RCU stall and
-	 * softlockup warnings which exacerbate the issue with more
-	 * messages practically incapacitating the system.
-	 *
-	 * console_trylock() is not able to detect the preemptive
-	 * context reliably. Therefore the value must be stored before
-	 * and cleared after the "again" goto label.
+	 * Depending on whether console_lock() or console_trylock() was used,
+	 * appropriately allow the kthread printers to continue.
 	 */
-	do_cond_resched = console_may_schedule;
-again:
-	console_may_schedule = 0;
+	if (console_kthreads_blocked)
+		console_kthreads_unblock();
+	else
+		console_kthreads_atomic_unblock();

 	/*
-	 * We released the console_sem lock, so we need to recheck if
-	 * cpu is online and (if not) is there at least one CON_ANYTIME
-	 * console.
+	 * New records may have arrived while the console was locked.
+	 * Wake the kthread printers to print them.
 	 */
-	if (!can_use_console()) {
-		console_locked = 0;
-		up_console_sem();
-		return;
-	}
+	wake_up_klogd();

-	for (;;) {
-		size_t ext_len = 0;
-		int handover;
-		size_t len;
+	up_console_sem();
+}

-skip:
-		if (!prb_read_valid(prb, console_seq, &r))
-			break;
+/*
+ * Print one record for the given console. The record printed is whatever
+ * record is the next available record for the given console.
+ *
+ * @text is a buffer of size CONSOLE_LOG_MAX.
+ *
+ * If extended messages should be printed, @ext_text is a buffer of size
+ * CONSOLE_EXT_LOG_MAX. Otherwise @ext_text must be NULL.
+ *
+ * If dropped messages should be printed, @dropped_text is a buffer of size
+ * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock. Otherwise it is set to false. A NULL pointer may be provided
+ * to disable allowing the console_lock to be taken over by a printk waiter.
+ *
+ * Returns false if the given console has no next record to print, otherwise
+ * true.
+ *
+ * Requires the console_lock if @handover is non-NULL.
+ * Requires con->lock otherwise.
+ */
+static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
+				       char *dropped_text, bool *handover)
+{
+	static atomic_t panic_console_dropped = ATOMIC_INIT(0);
+	struct printk_info info;
+	struct printk_record r;
+	unsigned long flags;
+	char *write_text;
+	size_t len;
+
+	prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
+
+	if (handover)
+		*handover = false;

-		if (console_seq != r.info->seq) {
-			console_dropped += r.info->seq - console_seq;
-			console_seq = r.info->seq;
-			if (panic_in_progress() && panic_console_dropped++ > 10) {
-				suppress_panic_printk = 1;
-				pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
-			}
-		}
+	if (!prb_read_valid(prb, con->seq, &r))
+		return false;

-		if (suppress_message_printing(r.info->level)) {
-			/*
-			 * Skip record we have buffered and already printed
-			 * directly to the console when we received it, and
-			 * record that has level above the console loglevel.
-			 */
-			console_seq++;
-			goto skip;
-		}
+	if (con->seq != r.info->seq) {
+		con->dropped += r.info->seq - con->seq;
+		con->seq = r.info->seq;
+		if (panic_in_progress() &&
+		    atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
+			suppress_panic_printk = 1;
+			pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
+		}
+	}

-		/* Output to all consoles once old messages replayed. */
-		if (unlikely(exclusive_console &&
-			     console_seq >= exclusive_console_stop_seq)) {
-			exclusive_console = NULL;
-		}
+	/* Skip record that has level above the console loglevel. */
+	if (suppress_message_printing(r.info->level)) {
+		con->seq++;
+		goto skip;
+	}

-		/*
-		 * Handle extended console text first because later
-		 * record_print_text() will modify the record buffer in-place.
-		 */
-		if (nr_ext_console_drivers) {
-			ext_len = info_print_ext_header(ext_text, sizeof(ext_text), r.info);
-			ext_len += msg_print_ext_body(ext_text + ext_len,
-						sizeof(ext_text) - ext_len,
-						&r.text_buf[0], r.info->text_len,
-						&r.info->dev_info);
-		}
-		len = record_print_text(&r,
-				console_msg_format & MSG_FORMAT_SYSLOG,
-				printk_time);
-		console_seq++;
+	if (ext_text) {
+		write_text = ext_text;
+		len = info_print_ext_header(ext_text, CONSOLE_EXT_LOG_MAX, r.info);
+		len += msg_print_ext_body(ext_text + len, CONSOLE_EXT_LOG_MAX - len,
+					  &r.text_buf[0], r.info->text_len,
+					  &r.info->dev_info);
+	} else {
+		write_text = text;
+		len = record_print_text(&r,
+				console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
+	}

+	if (handover) {
 		/*
 		 * While actively printing out messages, if another printk()
 		 * were to occur on another CPU, it may wait for this one to
@@ -2770,38 +2909,187 @@ void console_unlock(void)
 		printk_safe_enter_irqsave(flags);
 		console_lock_spinning_enable();

-		stop_critical_timings();	/* don't trace print latency */
-		call_console_drivers(ext_text, ext_len, text, len);
-		start_critical_timings();
+		/* don't trace irqsoff print latency */
+		stop_critical_timings();
+	}

-		handover = console_lock_spinning_disable_and_check();
-		printk_safe_exit_irqrestore(flags);
-		if (handover)
-			return;
+	call_console_driver(con, write_text, len, dropped_text);

+	con->seq++;
+
+	if (handover) {
+		start_critical_timings();
+		*handover = console_lock_spinning_disable_and_check();
+		printk_safe_exit_irqrestore(flags);
+	}
+skip:
+	return true;
+}
+
+/*
+ * Print a record for a given console, but allow another printk() caller to
+ * take over the console_lock and continue printing.
+ *
+ * Requires the console_lock, but depending on @handover after the call, the
+ * caller may no longer have the console_lock.
+ *
+ * See __console_emit_next_record() for argument and return details.
+ */
+static bool console_emit_next_record_transferable(struct console *con, char *text,
+			char *ext_text, char *dropped_text, bool *handover)
+{
+	/*
+	 * Handovers are only supported if threaded printers are atomically
+	 * blocked. The context taking over the console_lock may be atomic.
+	 */
+	if (!console_kthreads_atomically_blocked()) {
+		*handover = false;
+		handover = NULL;
+	}
+
+	return __console_emit_next_record(con, text, ext_text, dropped_text, handover);
+}
+
+/*
+ * Print out all remaining records to all consoles.
+ *
+ * @do_cond_resched is set by the caller. It can be true only in schedulable
+ * context.
+ *
+ * @next_seq is set to the sequence number after the last available record.
+ * The value is valid only when this function returns true. It means that all
+ * usable consoles are completely flushed.
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock. Otherwise it is set to false.
+ *
+ * Returns true when there was at least one usable console and all messages
+ * were flushed to all usable consoles. A returned false informs the caller
+ * that everything was not flushed (either there were no usable consoles or
+ * another context has taken over printing or it is a panic situation and this
+ * is not the panic CPU or direct printing is not preferred). Regardless the
+ * reason, the caller should assume it is not useful to immediately try again.
+ *
+ * Requires the console_lock.
+ */
+static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
+{
+	static char dropped_text[DROPPED_TEXT_MAX];
+	static char ext_text[CONSOLE_EXT_LOG_MAX];
+	static char text[CONSOLE_LOG_MAX];
+	bool any_usable = false;
+	struct console *con;
+	bool any_progress;
+
+	*next_seq = 0;
+	*handover = false;
+
+	do {
+		/* Let the kthread printers do the work if they can. */
+		if (!allow_direct_printing())
+			return false;
+
+		any_progress = false;
+
+		for_each_console(con) {
+			bool progress;
+
+			if (!console_is_usable(con))
+				continue;
+			any_usable = true;
+
+			if (con->flags & CON_EXTENDED) {
+				/* Extended consoles do not print "dropped messages". */
+				progress = console_emit_next_record_transferable(con, &text[0],
+								&ext_text[0], NULL, handover);
+			} else {
+				progress = console_emit_next_record_transferable(con, &text[0],
+								NULL, &dropped_text[0], handover);
+			}
+			if (*handover)
+				return false;
+
+			/* Track the next of the highest seq flushed. */
+			if (con->seq > *next_seq)
+				*next_seq = con->seq;
+
+			if (!progress)
+				continue;
+			any_progress = true;
+
+			/* Allow panic_cpu to take over the consoles safely. */
+			if (abandon_console_lock_in_panic())
+				return false;
+
+			if (do_cond_resched)
+				cond_resched();
+		}
+	} while (any_progress);
+
+	return any_usable;
+}
+
+/**
+ * console_unlock - unlock the console system
+ *
+ * Releases the console_lock which the caller holds on the console system
+ * and the console driver list.
+ *
+ * While the console_lock was held, console output may have been buffered
+ * by printk().  If this is the case, console_unlock(); emits
+ * the output prior to releasing the lock.
+ *
+ * console_unlock(); may be called from any context.
+ */
+void console_unlock(void)
+{
+	bool do_cond_resched;
+	bool handover;
+	bool flushed;
+	u64 next_seq;
+
+	if (console_suspended) {
+		up_console_sem();
+		return;
+	}
+
+	/*
+	 * Console drivers are called with interrupts disabled, so
+	 * @console_may_schedule should be cleared before; however, we may
+	 * end up dumping a lot of lines, for example, if called from
+	 * console registration path, and should invoke cond_resched()
+	 * between lines if allowable. Not doing so can cause a very long
+	 * scheduling stall on a slow console leading to RCU stall and
+	 * softlockup warnings which exacerbate the issue with more
+	 * messages practically incapacitating the system. Therefore, create
+	 * a local to use for the printing loop.
+	 */
+	do_cond_resched = console_may_schedule;
+
+	do {
+		console_may_schedule = 0;
+
+		flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
+		if (!handover)
+			__console_unlock();

 		/*
-		 * Allow panic_cpu to take over the consoles safely
+		 * Abort if there was a failure to flush all messages to all
+		 * usable consoles. Either it is not possible to flush (in
+		 * which case it would be an infinite loop of retrying) or
+		 * another context has taken over printing.
 		 */
-		if (abandon_console_lock_in_panic())
-			break;
+		if (!flushed)
+			break;

-		if (do_cond_resched)
-			cond_resched();
-	}
-
-	/* Get consistent value of the next-to-be-used sequence number. */
-	next_seq = console_seq;
-
-	console_locked = 0;
-	up_console_sem();
-
-	/*
-	 * Someone could have filled up the buffer again, so re-check if there's
-	 * something to flush. In case we cannot trylock the console_sem again,
-	 * there's a new owner and the console_unlock() from them will do the
-	 * flush, no worries.
-	 */
-	retry = prb_read_valid(prb, next_seq, NULL);
-	if (retry && !abandon_console_lock_in_panic() && console_trylock())
-		goto again;
+		/*
+		 * Some context may have added new records after
+		 * console_flush_all() but before unlocking the console.
+		 * Re-check if there is a new record to flush. If the trylock
+		 * fails, another context is already handling the printing.
+		 */
+	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
 }
 EXPORT_SYMBOL(console_unlock);
@@ -2832,15 +3120,21 @@ void console_unblank(void)
 	if (oops_in_progress) {
 		if (down_trylock_console_sem() != 0)
 			return;
+		if (!console_kthreads_atomic_tryblock()) {
+			up_console_sem();
+			return;
+		}
 	} else
 		console_lock();

-	console_locked = 1;
 	console_may_schedule = 0;
 	for_each_console(c)
 		if ((c->flags & CON_ENABLED) && c->unblank)
 			c->unblank();
 	console_unlock();
+
+	if (!oops_in_progress)
+		pr_flush(1000, true);
 }

 /**
@@ -2861,8 +3155,14 @@ void console_flush_on_panic(enum con_flush_mode mode)
 	console_trylock();
 	console_may_schedule = 0;

-	if (mode == CONSOLE_REPLAY_ALL)
-		console_seq = prb_first_valid_seq(prb);
+	if (mode == CONSOLE_REPLAY_ALL) {
+		struct console *c;
+		u64 seq;
+
+		seq = prb_first_valid_seq(prb);
+		for_each_console(c)
+			c->seq = seq;
+	}
 	console_unlock();
 }
@@ -2893,6 +3193,7 @@ struct tty_driver *console_device(int *index)
 */
 void console_stop(struct console *console)
 {
+	__pr_flush(console, 1000, true);
 	console_lock();
 	console->flags &= ~CON_ENABLED;
 	console_unlock();
@@ -2904,6 +3205,7 @@ void console_start(struct console *console)
 	console_lock();
 	console->flags |= CON_ENABLED;
 	console_unlock();
+	__pr_flush(console, 1000, true);
 }
 EXPORT_SYMBOL(console_start);
@@ -2990,6 +3292,11 @@ static void try_enable_default_console(struct console *newcon)
 		newcon->flags |= CON_CONSDEV;
 }

+#define con_printk(lvl, con, fmt, ...)			\
+	printk(lvl pr_fmt("%sconsole [%s%d] " fmt),	\
+	       (con->flags & CON_BOOT) ? "boot" : "",	\
+	       con->name, con->index, ##__VA_ARGS__)
+
 /*
  * The console driver calls this routine during kernel initialization
  * to register the console printing procedure with printk() and to
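For reference, a sketch of what the new helper produces. For a hypothetical console driver with name "ttyS" and index 0, the call below prints "console [ttyS0] enabled", or "bootconsole [ttyS0] enabled" if CON_BOOT is set:

	/* Illustrative call only; the hunks below use the same pattern. */
	con_printk(KERN_INFO, newcon, "enabled\n");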
@@ -3097,27 +3404,24 @@ void register_console(struct console *newcon)
 	if (newcon->flags & CON_EXTENDED)
 		nr_ext_console_drivers++;

+	newcon->dropped = 0;
+	newcon->thread = NULL;
+	newcon->blocked = true;
+	mutex_init(&newcon->lock);
+
 	if (newcon->flags & CON_PRINTBUFFER) {
-		/*
-		 * console_unlock(); will print out the buffered messages
-		 * for us.
-		 *
-		 * We're about to replay the log buffer.  Only do this to the
-		 * just-registered console to avoid excessive message spam to
-		 * the already-registered consoles.
-		 *
-		 * Set exclusive_console with disabled interrupts to reduce
-		 * race window with eventual console_flush_on_panic() that
-		 * ignores console_lock.
-		 */
-		exclusive_console = newcon;
-		exclusive_console_stop_seq = console_seq;
-
 		/* Get a consistent copy of @syslog_seq. */
 		mutex_lock(&syslog_lock);
-		console_seq = syslog_seq;
+		newcon->seq = syslog_seq;
 		mutex_unlock(&syslog_lock);
+	} else {
+		/* Begin with next message. */
+		newcon->seq = prb_next_seq(prb);
 	}
+
+	if (printk_kthreads_available)
+		printk_start_kthread(newcon);
+
 	console_unlock();
 	console_sysfs_notify();
@@ -3128,9 +3432,7 @@ void register_console(struct console *newcon)
 	 * users know there might be something in the kernel's log buffer that
 	 * went to the bootconsole (that they do not see on the real console)
 	 */
-	pr_info("%sconsole [%s%d] enabled\n",
-		(newcon->flags & CON_BOOT) ? "boot" : "" ,
-		newcon->name, newcon->index);
+	con_printk(KERN_INFO, newcon, "enabled\n");
 	if (bootcon_enabled &&
 	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
 	    !keep_bootcon) {
@@ -3146,12 +3448,11 @@ EXPORT_SYMBOL(register_console);
 int unregister_console(struct console *console)
 {
+	struct task_struct *thd;
 	struct console *con;
 	int res;

-	pr_info("%sconsole [%s%d] disabled\n",
-		(console->flags & CON_BOOT) ? "boot" : "" ,
-		console->name, console->index);
+	con_printk(KERN_INFO, console, "disabled\n");

 	res = _braille_unregister_console(console);
 	if (res < 0)
@@ -3188,7 +3489,20 @@ int unregister_console(struct console *console)
 		console_drivers->flags |= CON_CONSDEV;

 	console->flags &= ~CON_ENABLED;
+
+	/*
+	 * console->thread can only be cleared under the console lock. But
+	 * stopping the thread must be done without the console lock. The
+	 * task that clears @thread is the task that stops the kthread.
+	 */
+	thd = console->thread;
+	console->thread = NULL;
+
 	console_unlock();
+
+	if (thd)
+		kthread_stop(thd);
+
 	console_sysfs_notify();

 	if (console->exit)
@@ -3284,12 +3598,294 @@ static int __init printk_late_init(void)
 }
 late_initcall(printk_late_init);

+static int __init printk_activate_kthreads(void)
+{
+	struct console *con;
+
+	console_lock();
+	printk_kthreads_available = true;
+	for_each_console(con)
+		printk_start_kthread(con);
+	console_unlock();
+
+	return 0;
+}
+early_initcall(printk_activate_kthreads);
+
 #if defined CONFIG_PRINTK
+/* If @con is specified, only wait for that console. Otherwise wait for all. */
+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
+{
+	int remaining = timeout_ms;
+	struct console *c;
+	u64 last_diff = 0;
+	u64 printk_seq;
+	u64 diff;
+	u64 seq;
+
+	might_sleep();
+
+	seq = prb_next_seq(prb);
+
+	for (;;) {
+		diff = 0;
+
+		console_lock();
+		for_each_console(c) {
+			if (con && con != c)
+				continue;
+			if (!console_is_usable(c))
+				continue;
+			printk_seq = c->seq;
+			if (printk_seq < seq)
+				diff += seq - printk_seq;
+		}
+		console_unlock();
+
+		if (diff != last_diff && reset_on_progress)
+			remaining = timeout_ms;
+
+		if (diff == 0 || remaining == 0)
+			break;
+
+		if (remaining < 0) {
+			/* no timeout limit */
+			msleep(100);
+		} else if (remaining < 100) {
+			msleep(remaining);
+			remaining = 0;
+		} else {
+			msleep(100);
+			remaining -= 100;
+		}
+
+		last_diff = diff;
+	}
+
+	return (diff == 0);
+}
+
+/**
+ * pr_flush() - Wait for printing threads to catch up.
+ *
+ * @timeout_ms:        The maximum time (in ms) to wait.
+ * @reset_on_progress: Reset the timeout if forward progress is seen.
+ *
+ * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
+ * represents infinite waiting.
+ *
+ * If @reset_on_progress is true, the timeout will be reset whenever any
+ * printer has been seen to make some forward progress.
+ *
+ * Context: Process context. May sleep while acquiring console lock.
+ * Return: true if all enabled printers are caught up.
+ */
+bool pr_flush(int timeout_ms, bool reset_on_progress)
+{
+	return __pr_flush(NULL, timeout_ms, reset_on_progress);
+}
+EXPORT_SYMBOL(pr_flush);
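A caller that needs its messages on the consoles before proceeding (for example, just before suspending them) can use the new pr_flush() like this; the surrounding function is illustrative only:

	/* Give the printing threads up to one second to catch up, restarting
	 * the timeout whenever forward progress is observed. */
	static void example_quiesce_logging(void)
	{
		if (!pr_flush(1000, true))
			pr_warn("example: consoles still lagging after 1s\n");
	}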
static
void
__printk_fallback_preferred_direct
(
void
)
{
printk_prefer_direct_enter
();
pr_err
(
"falling back to preferred direct printing
\n
"
);
printk_kthreads_available
=
false
;
}
/*
* Enter preferred direct printing, but never exit. Mark console threads as
* unavailable. The system is then forever in preferred direct printing and
* any printing threads will exit.
*
* Must *not* be called under console_lock. Use
* __printk_fallback_preferred_direct() if already holding console_lock.
*/
static
void
printk_fallback_preferred_direct
(
void
)
{
console_lock
();
__printk_fallback_preferred_direct
();
console_unlock
();
}
/*
* Print a record for a given console, not allowing another printk() caller
* to take over. This is appropriate for contexts that do not have the
* console_lock.
*
* See __console_emit_next_record() for argument and return details.
*/
static
bool
console_emit_next_record
(
struct
console
*
con
,
char
*
text
,
char
*
ext_text
,
char
*
dropped_text
)
{
return
__console_emit_next_record
(
con
,
text
,
ext_text
,
dropped_text
,
NULL
);
}
static
bool
printer_should_wake
(
struct
console
*
con
,
u64
seq
)
{
short
flags
;
if
(
kthread_should_stop
()
||
!
printk_kthreads_available
)
return
true
;
if
(
con
->
blocked
||
console_kthreads_atomically_blocked
())
{
return
false
;
}
/*
* This is an unsafe read from con->flags, but a false positive is
* not a problem. Worst case it would allow the printer to wake up
* although it is disabled. But the printer will notice that when
* attempting to print and instead go back to sleep.
*/
flags
=
data_race
(
READ_ONCE
(
con
->
flags
));
if
(
!
__console_is_usable
(
flags
))
return
false
;
return
prb_read_valid
(
prb
,
seq
,
NULL
);
}
static int printk_kthread_func(void *data)
{
	struct console *con = data;
	char *dropped_text = NULL;
	char *ext_text = NULL;
	u64 seq = 0;
	char *text;
	int error;

	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
	if (!text) {
		con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
		printk_fallback_preferred_direct();
		goto out;
	}

	if (con->flags & CON_EXTENDED) {
		ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
		if (!ext_text) {
			con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n");
			printk_fallback_preferred_direct();
			goto out;
		}
	} else {
		dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
		if (!dropped_text) {
			con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n");
			printk_fallback_preferred_direct();
			goto out;
		}
	}

	con_printk(KERN_INFO, con, "printing thread started\n");

	for (;;) {
		/*
		 * Guarantee this task is visible on the waitqueue before
		 * checking the wake condition.
		 *
		 * The full memory barrier within set_current_state() of
		 * prepare_to_wait_event() pairs with the full memory barrier
		 * within wq_has_sleeper().
		 *
		 * This pairs with __wake_up_klogd:A.
		 */
		error = wait_event_interruptible(log_wait,
				printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */

		if (kthread_should_stop() || !printk_kthreads_available)
			break;

		if (error)
			continue;

		error = mutex_lock_interruptible(&con->lock);
		if (error)
			continue;

		if (con->blocked || !console_kthread_printing_tryenter()) {
			/* Another context has locked the console_lock. */
			mutex_unlock(&con->lock);
			continue;
		}

		/*
		 * Although this context has not locked the console_lock, it
		 * is known that the console_lock is not locked and it is not
		 * possible for any other context to lock the console_lock.
		 * Therefore it is safe to read con->flags.
		 */

		if (!__console_is_usable(con->flags)) {
			console_kthread_printing_exit();
			mutex_unlock(&con->lock);
			continue;
		}

		/*
		 * Even though the printk kthread is always preemptible, it is
		 * still not allowed to call cond_resched() from within
		 * console drivers. The task may become non-preemptible in the
		 * console driver call chain. For example, vt_console_print()
		 * takes a spinlock and then can call into fbcon_redraw(),
		 * which can conditionally invoke cond_resched().
		 */
		console_may_schedule = 0;
		console_emit_next_record(con, text, ext_text, dropped_text);

		seq = con->seq;

		console_kthread_printing_exit();

		mutex_unlock(&con->lock);
	}

	con_printk(KERN_INFO, con, "printing thread stopped\n");
out:
	kfree(dropped_text);
	kfree(ext_text);
	kfree(text);

	console_lock();
	/*
	 * If this kthread is being stopped by another task, con->thread will
	 * already be NULL. That is fine. The important thing is that it is
	 * NULL after the kthread exits.
	 */
	con->thread = NULL;
	console_unlock();

	return 0;
}
/* Must be called under console_lock. */
static void printk_start_kthread(struct console *con)
{
	/*
	 * Do not start a kthread if there is no write() callback. The
	 * kthreads assume the write() callback exists.
	 */
	if (!con->write)
		return;

	con->thread = kthread_run(printk_kthread_func, con,
				  "pr/%s%d", con->name, con->index);
	if (IS_ERR(con->thread)) {
		con->thread = NULL;
		con_printk(KERN_ERR, con, "unable to start printing thread\n");
		__printk_fallback_preferred_direct();
		return;
	}
}
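As noted above, this helper must run under console_lock. A minimal caller sketch, with newcon standing in for a hypothetical console that is being brought up (illustrative only):

	console_lock();
	printk_start_kthread(newcon);	/* newcon: hypothetical struct console * */
	console_unlock();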
/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP		0x01
-#define PRINTK_PENDING_OUTPUT		0x02
+#define PRINTK_PENDING_DIRECT_OUTPUT	0x02

static DEFINE_PER_CPU(int, printk_pending);
...
@@ -3297,41 +3893,71 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	int pending = this_cpu_xchg(printk_pending, 0);

-	if (pending & PRINTK_PENDING_OUTPUT) {
+	if (pending & PRINTK_PENDING_DIRECT_OUTPUT) {
+		printk_prefer_direct_enter();

		/* If trylock fails, someone else is doing the printing */
		if (console_trylock())
			console_unlock();

+		printk_prefer_direct_exit();
	}

	if (pending & PRINTK_PENDING_WAKEUP)
-		wake_up_interruptible(&log_wait);
+		wake_up_interruptible_all(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);

-void wake_up_klogd(void)
+static void __wake_up_klogd(int val)
{
	if (!printk_percpu_data_ready())
		return;

	preempt_disable();
-	if (waitqueue_active(&log_wait)) {
-		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+	/*
+	 * Guarantee any new records can be seen by tasks preparing to wait
+	 * before this context checks if the wait queue is empty.
+	 *
+	 * The full memory barrier within wq_has_sleeper() pairs with the full
+	 * memory barrier within set_current_state() of
+	 * prepare_to_wait_event(), which is called after ___wait_event() adds
+	 * the waiter but before it has checked the wait condition.
+	 *
+	 * This pairs with devkmsg_read:A, syslog_print:A, and
+	 * printk_kthread_func:A.
+	 */
+	if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
+	    (val & PRINTK_PENDING_DIRECT_OUTPUT)) {
+		this_cpu_or(printk_pending, val);
		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	}
	preempt_enable();
}

+void wake_up_klogd(void)
+{
+	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
+}

void defer_console_output(void)
{
-	if (!printk_percpu_data_ready())
-		return;
+	/*
+	 * New messages may have been added directly to the ringbuffer
+	 * using vprintk_store(), so wake any waiters as well.
+	 */
+	int val = PRINTK_PENDING_WAKEUP;

-	preempt_disable();
-	this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
-	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
-	preempt_enable();
+	/*
+	 * Make sure that some context will print the messages when direct
+	 * printing is allowed. This happens in situations when the kthreads
+	 * may not be as reliable or perhaps unusable.
+	 */
+	if (allow_direct_printing())
+		val |= PRINTK_PENDING_DIRECT_OUTPUT;

+	__wake_up_klogd(val);
}
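A brief sketch of how the two public wrappers above differ (hypothetical call sites, for illustration only): wake_up_klogd() only wakes tasks sleeping on log_wait, while defer_console_output() additionally requests deferred console output via irq_work when direct printing is allowed.

	wake_up_klogd();	/* a new record is available to readers and printer threads */
	defer_console_output();	/* also schedule printing, without printing from this context */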
void printk_trigger_flush(void)
...
@@ -3667,25 +4293,25 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#endif

#ifdef CONFIG_SMP
-static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
-static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);

/**
- * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
 * spinning lock is not owned by any CPU.
 *
 * Context: Any context.
 */
-void __printk_wait_on_cpu_lock(void)
+void __printk_cpu_sync_wait(void)
{
	do {
		cpu_relax();
-	} while (atomic_read(&printk_cpulock_owner) != -1);
+	} while (atomic_read(&printk_cpu_sync_owner) != -1);
}
-EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+EXPORT_SYMBOL(__printk_cpu_sync_wait);

/**
- * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
 * spinning lock.
 *
 * If no processor has the lock, the calling processor takes the lock and
...
@@ -3695,7 +4321,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
 * Context: Any context. Expects interrupts to be disabled.
 * Return: 1 on success, otherwise 0.
 */
-int __printk_cpu_trylock(void)
+int __printk_cpu_sync_try_get(void)
{
	int cpu;
	int old;
...
@@ -3705,79 +4331,80 @@ int __printk_cpu_trylock(void)
	/*
	 * Guarantee loads and stores from this CPU when it is the lock owner
	 * are _not_ visible to the previous lock owner. This pairs with
-	 * __printk_cpu_unlock:B.
+	 * __printk_cpu_sync_put:B.
	 *
	 * Memory barrier involvement:
	 *
-	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
-	 * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
+	 * then __printk_cpu_sync_put:A can never read from
+	 * __printk_cpu_sync_try_get:B.
	 *
	 * Relies on:
	 *
-	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
	 * of the previous CPU
	 * matching
-	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
-	 * of this CPU
+	 * ACQUIRE from __printk_cpu_sync_try_get:A to
+	 * __printk_cpu_sync_try_get:B of this CPU
	 */
-	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
-				     cpu); /* LMM(__printk_cpu_trylock:A) */
+	old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
+				     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
	if (old == -1) {
		/*
		 * This CPU is now the owner and begins loading/storing
-		 * data: LMM(__printk_cpu_trylock:B)
+		 * data: LMM(__printk_cpu_sync_try_get:B)
		 */
		return 1;
	} else if (old == cpu) {
		/* This CPU is already the owner. */
-		atomic_inc(&printk_cpulock_nested);
+		atomic_inc(&printk_cpu_sync_nested);
		return 1;
	}

	return 0;
}
-EXPORT_SYMBOL(__printk_cpu_trylock);
+EXPORT_SYMBOL(__printk_cpu_sync_try_get);

/**
- * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
 *
 * The calling processor must be the owner of the lock.
 *
 * Context: Any context. Expects interrupts to be disabled.
 */
-void __printk_cpu_unlock(void)
+void __printk_cpu_sync_put(void)
{
-	if (atomic_read(&printk_cpulock_nested)) {
-		atomic_dec(&printk_cpulock_nested);
+	if (atomic_read(&printk_cpu_sync_nested)) {
+		atomic_dec(&printk_cpu_sync_nested);
		return;
	}

	/*
	 * This CPU is finished loading/storing data:
-	 * LMM(__printk_cpu_unlock:A)
+	 * LMM(__printk_cpu_sync_put:A)
	 */

	/*
	 * Guarantee loads and stores from this CPU when it was the
	 * lock owner are visible to the next lock owner. This pairs
-	 * with __printk_cpu_trylock:A.
+	 * with __printk_cpu_sync_try_get:A.
	 *
	 * Memory barrier involvement:
	 *
-	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
-	 * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
+	 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
	 *
	 * Relies on:
	 *
-	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
	 * of this CPU
	 * matching
-	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
-	 * of the next CPU
+	 * ACQUIRE from __printk_cpu_sync_try_get:A to
+	 * __printk_cpu_sync_try_get:B of the next CPU
	 */
-	atomic_set_release(&printk_cpulock_owner,
-			   -1); /* LMM(__printk_cpu_unlock:B) */
+	atomic_set_release(&printk_cpu_sync_owner,
			   -1); /* LMM(__printk_cpu_sync_put:B) */
}
-EXPORT_SYMBOL(__printk_cpu_unlock);
+EXPORT_SYMBOL(__printk_cpu_sync_put);
#endif /* CONFIG_SMP */
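The lib/dump_stack.c and lib/nmi_backtrace.c hunks further down switch to the printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() wrappers built on these primitives. A minimal usage sketch of that pattern (hypothetical dump code, not part of this commit):

	unsigned long flags;

	printk_cpu_sync_get_irqsave(flags);
	pr_emerg("first line of a dump that should not interleave with other CPUs\n");
	pr_emerg("second line of the same dump\n");
	printk_cpu_sync_put_irqrestore(flags);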
kernel/rcu/tree_stall.h
View file @ 1c6fd599
...
@@ -619,6 +619,7 @@ static void print_cpu_stall(unsigned long gps)
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
+	printk_prefer_direct_enter();
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
...
@@ -656,6 +657,7 @@ static void print_cpu_stall(unsigned long gps)
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
+	printk_prefer_direct_exit();
}

static void check_cpu_stall(struct rcu_data *rdp)
...
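The hunks that follow in kernel/reboot.c, kernel/watchdog.c and kernel/watchdog_hld.c apply the same bracketing: emergency diagnostics are wrapped in a preferred-direct section so they are printed immediately rather than handed off to the printing kthreads. A generic sketch of the pattern (hypothetical warning path, for illustration only):

	printk_prefer_direct_enter();
	pr_emerg("hypothetical: critical condition detected\n");
	dump_stack();
	printk_prefer_direct_exit();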
kernel/reboot.c
View file @ 1c6fd599
...
@@ -447,9 +447,11 @@ static int __orderly_reboot(void)
	ret = run_cmd(reboot_cmd);

	if (ret) {
+		printk_prefer_direct_enter();
		pr_warn("Failed to start orderly reboot: forcing the issue\n");
		emergency_sync();
		kernel_restart(NULL);
+		printk_prefer_direct_exit();
	}

	return ret;
...
@@ -462,6 +464,7 @@ static int __orderly_poweroff(bool force)
	ret = run_cmd(poweroff_cmd);

	if (ret && force) {
+		printk_prefer_direct_enter();
		pr_warn("Failed to start orderly shutdown: forcing the issue\n");

		/*
...
@@ -471,6 +474,7 @@ static int __orderly_poweroff(bool force)
		 */
		emergency_sync();
		kernel_power_off();
+		printk_prefer_direct_exit();
	}

	return ret;
...
@@ -528,6 +532,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
 */
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
{
+	printk_prefer_direct_enter();

	/*
	 * We have reached here after the emergency shutdown waiting period has
	 * expired. This means orderly_poweroff has not been able to shut off
...
@@ -544,6 +550,8 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
	 */
	pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
	emergency_restart();

+	printk_prefer_direct_exit();
}

static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
...
@@ -582,11 +590,13 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
{
	static atomic_t allow_proceed = ATOMIC_INIT(1);

+	printk_prefer_direct_enter();

	pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);

	/* Shutdown should be initiated only once. */
	if (!atomic_dec_and_test(&allow_proceed))
-		return;
+		goto out;

	/*
	 * Queue a backup emergency shutdown in the event of
...
@@ -594,6 +604,8 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
	 */
	hw_failure_emergency_poweroff(ms_until_forced);
	orderly_poweroff(true);

+out:
+	printk_prefer_direct_exit();
}
EXPORT_SYMBOL_GPL(hw_protection_shutdown);
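A hedged caller sketch for hw_protection_shutdown() above: a driver that detects an unrecoverable hardware fault could request the protected shutdown with a forced-poweroff backstop (the reason string and timeout are illustrative, not taken from this commit):

	hw_protection_shutdown("Temperature too high", 3000);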
...
kernel/watchdog.c
View file @ 1c6fd599
...
@@ -424,6 +424,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
		/* Start period for the next softlockup warning. */
		update_report_ts();

+		printk_prefer_direct_enter();

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
...
@@ -442,6 +444,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");

+		printk_prefer_direct_exit();
	}

	return HRTIMER_RESTART;
...
kernel/watchdog_hld.c
View file @ 1c6fd599
...
@@ -135,6 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

+		printk_prefer_direct_enter();

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
			 this_cpu);
		print_modules();
...
@@ -155,6 +157,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

+		printk_prefer_direct_exit();

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}
...
lib/dump_stack.c
View file @ 1c6fd599
...
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
	 * Permit this cpu to perform nested stack dumps while serialising
	 * against other CPUs
	 */
-	printk_cpu_lock_irqsave(flags);
+	printk_cpu_sync_get_irqsave(flags);
	__dump_stack(log_lvl);
-	printk_cpu_unlock_irqrestore(flags);
+	printk_cpu_sync_put_irqrestore(flags);
}
EXPORT_SYMBOL(dump_stack_lvl);
...
lib/nmi_backtrace.c
View file @ 1c6fd599
...
@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
		 * Allow nested NMI backtraces while serializing
		 * against other CPUs.
		 */
-		printk_cpu_lock_irqsave(flags);
+		printk_cpu_sync_get_irqsave(flags);
		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
				cpu, (void *)instruction_pointer(regs));
...
@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
			else
				dump_stack();
		}
-		printk_cpu_unlock_irqrestore(flags);
+		printk_cpu_sync_put_irqrestore(flags);

		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}
...