Commit 8d0968cc authored by Juergen Gross, committed by Ingo Molnar

locking/csd_lock: Add boot parameter for controlling CSD lock debugging

Currently, CSD lock debugging can be switched on and off only via a
kernel config option. Unfortunately, at least one problem with CSD
lock handling has been pending for about two years now; it has been
seen in different environments (mostly when running virtualized under
KVM or Xen, and at least once on bare metal). Multiple attempts to
catch this issue finally led to the introduction of the CSD lock debug
code, but that code is not in use in most distros, as it has some
performance impact.

In order to be able to ship kernels with CONFIG_CSD_LOCK_WAIT_DEBUG
enabled even for production use, add a boot parameter that switches
the debug functionality on. This reduces the performance impact of
the debug code to a bare minimum when it is not being used.
Signed-off-by: Juergen Gross <jgross@suse.com>
[ Minor edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210301101336.7797-2-jgross@suse.com
parent 50bf8080
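
For readers new to jump labels: the patch gates every debug path behind a
static key that defaults to off, so the disabled case costs only a
patched-out branch. The following is a condensed view of the mechanism,
assembled from the hunks below (not additional code); __csd_lock_record()
is the existing out-of-line debug slow path:

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/smp.h>

static void __csd_lock_record(call_single_data_t *csd); /* existing slow path */

/* Key defaults to false: debug branches compile to a no-op jump. */
static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);

/* Parse "csdlock_debug=<n>"; any nonzero value enables debugging. */
static int __init csdlock_debug(char *str)
{
	unsigned int val = 0;

	get_option(&str, &val);
	if (val)
		static_branch_enable(&csdlock_debug_enabled);

	return 0;
}
early_param("csdlock_debug", csdlock_debug);

/* Inlined fast-path wrapper: nearly free while the key is false. */
static __always_inline void csd_lock_record(call_single_data_t *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}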
Documentation/admin-guide/kernel-parameters.txt
@@ -784,6 +784,12 @@
 	cs89x0_media=	[HW,NET]
 			Format: { rj45 | aui | bnc }
 
+	csdlock_debug=	[KNL] Enable debug add-ons of cross-CPU function call
+			handling. When switched on, additional debug data is
+			printed to the console in case a hanging CPU is
+			detected, and that CPU is pinged again in order to try
+			to resolve the hang situation.
+
 	dasd=		[HW,NET]
 			See header of drivers/s390/block/dasd_devmap.c.
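
Usage note (not part of the patch): on a kernel built with
CONFIG_CSD_LOCK_WAIT_DEBUG=y, the debug code is activated by appending the
parameter to the kernel command line, for example via GRUB (file location
varies by distro):

# /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="... csdlock_debug=1"

followed by update-grub (or grub2-mkconfig) and a reboot.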
kernel/smp.c
@@ -24,6 +24,7 @@
 #include <linux/sched/clock.h>
 #include <linux/nmi.h>
 #include <linux/sched/debug.h>
+#include <linux/jump_label.h>
 
 #include "smpboot.h"
 #include "sched/smp.h"
@@ -102,6 +103,20 @@ void __init call_function_init(void)
 
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 
+static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
+
+static int __init csdlock_debug(char *str)
+{
+	unsigned int val = 0;
+
+	get_option(&str, &val);
+	if (val)
+		static_branch_enable(&csdlock_debug_enabled);
+
+	return 0;
+}
+early_param("csdlock_debug", csdlock_debug);
+
 static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
 static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
 static DEFINE_PER_CPU(void *, cur_csd_info);
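
Parsing is deliberately minimal: get_option() converts the parameter value
to an integer, and any nonzero result flips the static key; with
csdlock_debug=0, or when no value can be parsed, val keeps its zero
initializer and the key stays in its default off state. Because this runs
as an early_param(), the key is set before any cross-CPU function calls of
interest can happen.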
@@ -110,7 +125,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void csd_lock_record(call_single_data_t *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
 	if (!csd) {
 		smp_mb(); /* NULL cur_csd after unlock. */
@@ -125,7 +140,13 @@ static void csd_lock_record(call_single_data_t *csd)
 	/* Or before unlock, as the case may be. */
 }
 
-static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
+{
+	if (static_branch_unlikely(&csdlock_debug_enabled))
+		__csd_lock_record(csd);
+}
+
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
 	unsigned int csd_type;
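
Note the split introduced here: the old csd_lock_record() body moves
verbatim to the out-of-line __csd_lock_record(), while the new
csd_lock_record() is an __always_inline wrapper whose only cost in the
disabled case is the patched no-op branch. The __always_inline annotations
on the pure debug helpers (csd_lock_wait_getcpu(), csd_lock_wait_toolong())
are dropped at the same time, since forced inlining no longer buys anything
on what is now a debug-only slow path.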
@@ -140,7 +161,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
  */
-static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
 	int cpu = -1;
 	int cpux;
@@ -204,7 +225,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
 	int bug_id = 0;
 	u64 ts0, ts1;
@@ -218,6 +239,15 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
 	smp_acquire__after_ctrl_dep();
 }
 
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
+{
+	if (static_branch_unlikely(&csdlock_debug_enabled)) {
+		__csd_lock_wait(csd);
+		return;
+	}
+
+	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
+}
 #else
 static void csd_lock_record(call_single_data_t *csd)
 {
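
With debugging disabled, csd_lock_wait() reduces to the pre-debug
behaviour: spin until the CSD_FLAG_LOCK bit clears, with acquire ordering
so the waiter sees everything the IPI handler wrote. A rough, illustrative
expansion of that smp_cond_load_acquire() call (the real macro is
architecture-optimized and may use smarter wait primitives):

/* Illustrative only: approximate expansion of the fast path. */
static void csd_lock_wait_sketch(call_single_data_t *csd)
{
	/* Busy-wait until the handler on the target CPU unlocks the csd. */
	while (READ_ONCE(csd->node.u_flags) & CSD_FLAG_LOCK)
		cpu_relax();

	/* Upgrade the control dependency to acquire ordering. */
	smp_acquire__after_ctrl_dep();
}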