Commit 3cd99ac3 authored by Douglas Anderson, committed by Daniel Thompson

kgdb: Fix kgdb_roundup_cpus() for arches who used smp_call_function()

When I had lockdep turned on and dropped into kgdb I got a nice splat
on my system.  Specifically it hit:
  DEBUG_LOCKS_WARN_ON(current->hardirq_context)

In full, the splat looked like this:
  sysrq: SysRq : DEBUG
  ------------[ cut here ]------------
  DEBUG_LOCKS_WARN_ON(current->hardirq_context)
  WARNING: CPU: 0 PID: 0 at .../kernel/locking/lockdep.c:2875 lockdep_hardirqs_on+0xf0/0x160
  CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.19.0 #27
  pstate: 604003c9 (nZCv DAIF +PAN -UAO)
  pc : lockdep_hardirqs_on+0xf0/0x160
  ...
  Call trace:
   lockdep_hardirqs_on+0xf0/0x160
   trace_hardirqs_on+0x188/0x1ac
   kgdb_roundup_cpus+0x14/0x3c
   kgdb_cpu_enter+0x53c/0x5cc
   kgdb_handle_exception+0x180/0x1d4
   kgdb_compiled_brk_fn+0x30/0x3c
   brk_handler+0x134/0x178
   do_debug_exception+0xfc/0x178
   el1_dbg+0x18/0x78
   kgdb_breakpoint+0x34/0x58
   sysrq_handle_dbg+0x54/0x5c
   __handle_sysrq+0x114/0x21c
   handle_sysrq+0x30/0x3c
   qcom_geni_serial_isr+0x2dc/0x30c
  ...
  ...
  irq event stamp: ...45
  hardirqs last  enabled at (...44): [...] __do_softirq+0xd8/0x4e4
  hardirqs last disabled at (...45): [...] el1_irq+0x74/0x130
  softirqs last  enabled at (...42): [...] _local_bh_enable+0x2c/0x34
  softirqs last disabled at (...43): [...] irq_exit+0xa8/0x100
  ---[ end trace adf21f830c46e638 ]---

Looking closely at it, calling local_irq_enable() from
kgdb_roundup_cpus() seems like a really bad idea.  If nothing else,
it could violate spinlock semantics and cause a deadlock.
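
To illustrate the spinlock problem (the lock and handler names below
are hypothetical; this snippet is not part of the patch), re-enabling
IRQs while a spinlock is held lets an interrupt handler run on the
same CPU and spin forever on that lock:

  #include <linux/interrupt.h>
  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(example_lock);          /* hypothetical */

  static irqreturn_t example_irq_handler(int irq, void *dev_id)
  {
          spin_lock(&example_lock);     /* never succeeds if this CPU */
          /* ... */                     /* already holds example_lock */
          spin_unlock(&example_lock);
          return IRQ_HANDLED;
  }

  static void example_caller(void)
  {
          unsigned long flags;

          spin_lock_irqsave(&example_lock, flags);
          local_irq_enable();   /* what the old roundup code did...   */
          /* example_irq_handler() can now fire on this CPU and spin  */
          /* forever on example_lock, which we still hold             */
          local_irq_disable();
          spin_unlock_irqrestore(&example_lock, flags);
  }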

Instead, let's use a private, per-CPU csd together with
smp_call_function_single_async() to round up the other CPUs.  Using
smp_call_function_single_async() doesn't require interrupts to be
enabled, so we can remove the offending bit of code.
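
For reference, the heart of the new round-up path looks like this
(the same code lands in the kernel/debug/debug_core.c hunk below):

  static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);

  void __weak kgdb_roundup_cpus(void)
  {
          call_single_data_t *csd;
          int this_cpu = raw_smp_processor_id();
          int cpu;

          for_each_online_cpu(cpu) {
                  /* No need to roundup ourselves */
                  if (cpu == this_cpu)
                          continue;

                  csd = &per_cpu(kgdb_roundup_csd, cpu);
                  csd->func = kgdb_call_nmi_hook;
                  smp_call_function_single_async(cpu, csd);
          }
  }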

In order to avoid duplicating this across all the architectures that
use the default kgdb_roundup_cpus(), we'll add a "weak" implementation
to debug_core.c.

Looking at all the architectures that previously had copies of this
code, there were a few variants.  I've attempted to keep those
variants working like they used to (see the sketch just after this
list).  Specifically:
* For arch/arc we passed NULL to kgdb_nmicallback() instead of
  get_irq_regs().
* For arch/mips there was a bit of extra code around
  kgdb_nmicallback().
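
To make the override mechanism concrete, here is roughly how the
"weak" default and an arch-specific variant fit together (both pieces
are taken from this patch; they live in different files, and the
non-weak definition wins at link time):

  /* kernel/debug/debug_core.c: the weak default. */
  void __weak kgdb_call_nmi_hook(void *ignored)
  {
          kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
  }

  /* arch/arc/kernel/kgdb.c: a plain definition with the same name
   * overrides the weak default. */
  void kgdb_call_nmi_hook(void *ignored)
  {
          /* Default implementation passes get_irq_regs() but we don't */
          kgdb_nmicallback(raw_smp_processor_id(), NULL);
  }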

NOTE: In this patch we will still get into trouble if we try to round
up a CPU that failed to round up before.  We'll try to round it up
again and potentially hang when we try to grab the csd lock.  That's
not new behavior but we'll still try to do better in a future patch.
Suggested-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
parent 9ef7fa50
arch/arc/kernel/kgdb.c
@@ -192,18 +192,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
         instruction_pointer(regs) = ip;
 }
 
-static void kgdb_call_nmi_hook(void *ignored)
+void kgdb_call_nmi_hook(void *ignored)
 {
+        /* Default implementation passes get_irq_regs() but we don't */
         kgdb_nmicallback(raw_smp_processor_id(), NULL);
 }
 
-void kgdb_roundup_cpus(void)
-{
-        local_irq_enable();
-        smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-        local_irq_disable();
-}
-
 struct kgdb_arch arch_kgdb_ops = {
         /* breakpoint instruction: TRAP_S 0x3 */
 #ifdef CONFIG_CPU_BIG_ENDIAN
arch/arm/kernel/kgdb.c
@@ -170,18 +170,6 @@ static struct undef_hook kgdb_compiled_brkpt_hook = {
         .fn = kgdb_compiled_brk_fn
 };
 
-static void kgdb_call_nmi_hook(void *ignored)
-{
-        kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(void)
-{
-        local_irq_enable();
-        smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-        local_irq_disable();
-}
-
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
         struct pt_regs *regs = args->regs;
arch/arm64/kernel/kgdb.c
@@ -284,18 +284,6 @@ static struct step_hook kgdb_step_hook = {
         .fn = kgdb_step_brk_fn
 };
 
-static void kgdb_call_nmi_hook(void *ignored)
-{
-        kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(void)
-{
-        local_irq_enable();
-        smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-        local_irq_disable();
-}
-
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
         struct pt_regs *regs = args->regs;
arch/hexagon/kernel/kgdb.c
@@ -115,33 +115,6 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
         instruction_pointer(regs) = pc;
 }
 
-#ifdef CONFIG_SMP
-
-/**
- * kgdb_roundup_cpus - Get other CPUs into a holding pattern
- *
- * On SMP systems, we need to get the attention of the other CPUs
- * and get them be in a known state. This should do what is needed
- * to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs.
- *
- * On non-SMP systems, this is not called.
- */
-static void hexagon_kgdb_nmi_hook(void *ignored)
-{
-        kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(void)
-{
-        local_irq_enable();
-        smp_call_function(hexagon_kgdb_nmi_hook, NULL, 0);
-        local_irq_disable();
-}
-#endif
-
 /* Not yet working */
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
arch/mips/kernel/kgdb.c
@@ -207,7 +207,7 @@ void arch_kgdb_breakpoint(void)
                 ".set\treorder");
 }
 
-static void kgdb_call_nmi_hook(void *ignored)
+void kgdb_call_nmi_hook(void *ignored)
 {
         mm_segment_t old_fs;
 
@@ -219,13 +219,6 @@ static void kgdb_call_nmi_hook(void *ignored)
         set_fs(old_fs);
 }
 
-void kgdb_roundup_cpus(void)
-{
-        local_irq_enable();
-        smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-        local_irq_disable();
-}
-
 static int compute_signal(int tt)
 {
         struct hard_trap_info *ht;
arch/powerpc/kernel/kgdb.c
@@ -117,7 +117,7 @@ int kgdb_skipexception(int exception, struct pt_regs *regs)
         return kgdb_isremovedbreak(regs->nip);
 }
 
-static int kgdb_call_nmi_hook(struct pt_regs *regs)
+static int kgdb_debugger_ipi(struct pt_regs *regs)
 {
         kgdb_nmicallback(raw_smp_processor_id(), regs);
         return 0;
@@ -502,7 +502,7 @@ int kgdb_arch_init(void)
         old__debugger_break_match = __debugger_break_match;
         old__debugger_fault_handler = __debugger_fault_handler;
 
-        __debugger_ipi = kgdb_call_nmi_hook;
+        __debugger_ipi = kgdb_debugger_ipi;
         __debugger = kgdb_debugger;
         __debugger_bpt = kgdb_handle_breakpoint;
         __debugger_sstep = kgdb_singlestep;
arch/sh/kernel/kgdb.c
@@ -314,18 +314,6 @@ BUILD_TRAP_HANDLER(singlestep)
         local_irq_restore(flags);
 }
 
-static void kgdb_call_nmi_hook(void *ignored)
-{
-        kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(void)
-{
-        local_irq_enable();
-        smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-        local_irq_disable();
-}
-
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
         int ret;
include/linux/kgdb.h
@@ -176,14 +176,25 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
                            char *remcom_out_buffer,
                            struct pt_regs *regs);
 
+/**
+ * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
+ * @ignored: This parameter is only here to match the prototype.
+ *
+ * If you're using the default implementation of kgdb_roundup_cpus()
+ * this function will be called per CPU.  If you don't implement
+ * kgdb_call_nmi_hook() a default will be used.
+ */
+extern void kgdb_call_nmi_hook(void *ignored);
+
 /**
  * kgdb_roundup_cpus - Get other CPUs into a holding pattern
  *
  * On SMP systems, we need to get the attention of the other CPUs
  * and get them into a known state. This should do what is needed
  * to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs.
+ * the NMI approach is not used for rounding up all the CPUs. Normally
+ * those architectures can just not implement this and get the default.
  *
  * On non-SMP systems, this is not called.
  */
kernel/debug/debug_core.c
@@ -55,6 +55,7 @@
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 #include <linux/rcupdate.h>
+#include <linux/irq.h>
 
 #include <asm/cacheflush.h>
 #include <asm/byteorder.h>
@@ -220,6 +221,46 @@ int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
         return 0;
 }
 
+#ifdef CONFIG_SMP
+
+/*
+ * Default (weak) implementation for kgdb_roundup_cpus
+ */
+
+static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);
+
+void __weak kgdb_call_nmi_hook(void *ignored)
+{
+        /*
+         * NOTE: get_irq_regs() is supposed to get the registers from
+         * before the IPI interrupt happened and so is supposed to
+         * show where the processor was.  In some situations it's
+         * possible we might be called without an IPI, so it might be
+         * safer to figure out how to make kgdb_breakpoint() work
+         * properly here.
+         */
+        kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void __weak kgdb_roundup_cpus(void)
+{
+        call_single_data_t *csd;
+        int this_cpu = raw_smp_processor_id();
+        int cpu;
+
+        for_each_online_cpu(cpu) {
+                /* No need to roundup ourselves */
+                if (cpu == this_cpu)
+                        continue;
+
+                csd = &per_cpu(kgdb_roundup_csd, cpu);
+                csd->func = kgdb_call_nmi_hook;
+                smp_call_function_single_async(cpu, csd);
+        }
+}
+
+#endif
+
 /*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint: