Commit fcf01044 authored by Linus Torvalds

Merge tag 'kgdb-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/danielt/linux

Pull kgdb updates from Daniel Thompson:
 "Mostly clean ups although while Doug's was chasing down a odd lockdep
  warning he also did some work to improved debugger resilience when
  some CPUs fail to respond to the round up request.

  The main changes are:

   - Fixing a lockdep warning on architectures that cannot use an NMI
     for the round up plus related changes to make CPU round up and all
     CPU backtrace more resilient.

   - Constify the arch ops tables

   - A couple of other small clean ups

  Two of the three patchsets here include changes that spill over into
  arch/. Changes in the arch space are relatively narrow in scope (and
   directly related to kgdb). I didn't get comprehensive acks, but all
   impacted maintainers were Cc:ed in good time"

* tag 'kgdb-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/danielt/linux:
  kgdb/treewide: constify struct kgdb_arch arch_kgdb_ops
  mips/kgdb: prepare arch_kgdb_ops for constness
  kdb: use bool for binary state indicators
  kdb: Don't back trace on a cpu that didn't round up
  kgdb: Don't round up a CPU that failed rounding up before
  kgdb: Fix kgdb_roundup_cpus() for arches who used smp_call_function()
  kgdb: Remove irq flags from roundup
parents fbea8c7c cc028297
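
The resilience work replaces each architecture's open-coded round up (which had to enable interrupts around smp_call_function() and had no way to cope with a CPU that never answered) with a weak default in the kgdb core (kernel/debug/debug_core.c) built on smp_call_function_single_async() and a per-CPU rounding_up flag, so a CPU still stuck from a previous request is simply skipped. The following is a condensed sketch of that flow, trimmed from the debug_core.c hunk in the diff below (declarations merged and error handling folded, so not a drop-in copy of the patch):

	/* Condensed sketch of the new weak default round up; see the full hunk below. */
	static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);

	void __weak kgdb_call_nmi_hook(void *ignored)
	{
		/* Runs on each rounded-up CPU and parks it in the debugger. */
		kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
	}

	void __weak kgdb_roundup_cpus(void)
	{
		int this_cpu = raw_smp_processor_id();
		int cpu;

		for_each_online_cpu(cpu) {
			call_single_data_t *csd = &per_cpu(kgdb_roundup_csd, cpu);

			/* No need to round up ourselves */
			if (cpu == this_cpu)
				continue;
			/* Skip CPUs still stuck from a previous round up attempt. */
			if (kgdb_info[cpu].rounding_up)
				continue;
			kgdb_info[cpu].rounding_up = true;

			csd->func = kgdb_call_nmi_hook;
			if (smp_call_function_single_async(cpu, csd))
				kgdb_info[cpu].rounding_up = false;
		}
	}

Architectures with a real debugger IPI or NMI (powerpc, sparc64, x86) keep their own kgdb_roundup_cpus() and only lose the unused flags argument; the others drop their copies and, where needed, just override kgdb_call_nmi_hook(), as the arc and mips hunks below do.
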
@@ -192,19 +192,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	instruction_pointer(regs) = ip;
 }
-static void kgdb_call_nmi_hook(void *ignored)
+void kgdb_call_nmi_hook(void *ignored)
 {
+	/* Default implementation passes get_irq_regs() but we don't */
 	kgdb_nmicallback(raw_smp_processor_id(), NULL);
 }
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* breakpoint instruction: TRAP_S 0x3 */
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	.gdb_bpt_instr = {0x78, 0x7e},
...
@@ -170,18 +170,6 @@ static struct undef_hook kgdb_compiled_brkpt_hook = {
 	.fn = kgdb_compiled_brk_fn
 };
-static void kgdb_call_nmi_hook(void *ignored)
-{
-	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
 	struct pt_regs *regs = args->regs;
@@ -274,7 +262,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
  * and we handle the normal undef case within the do_undefinstr
  * handler.
  */
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 #ifndef __ARMEB__
 	.gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
 #else /* ! __ARMEB__ */
...
@@ -284,18 +284,6 @@ static struct step_hook kgdb_step_hook = {
 	.fn = kgdb_step_brk_fn
 };
-static void kgdb_call_nmi_hook(void *ignored)
-{
-	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
 	struct pt_regs *regs = args->regs;
@@ -357,7 +345,7 @@ void kgdb_arch_exit(void)
 	unregister_die_notifier(&kgdb_notifier);
 }
-struct kgdb_arch arch_kgdb_ops;
+const struct kgdb_arch arch_kgdb_ops;
 int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
...
@@ -129,7 +129,7 @@ void kgdb_arch_exit(void)
 	/* Nothing to do */
 }
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: trapa #2 */
 	.gdb_bpt_instr = { 0x57, 0x20 },
 };
@@ -83,7 +83,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
 	{ "syscall_nr", GDB_SIZEOF_REG, offsetof(struct pt_regs, syscall_nr)},
 };
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* trap0(#0xDB) 0x0cdb0054 */
 	.gdb_bpt_instr = {0x54, 0x00, 0xdb, 0x0c},
 };
@@ -115,38 +115,6 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
 	instruction_pointer(regs) = pc;
 }
-#ifdef CONFIG_SMP
-/**
- * kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
- *
- * On SMP systems, we need to get the attention of the other CPUs
- * and get them be in a known state. This should do what is needed
- * to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
- *
- * On non-SMP systems, this is not called.
- */
-static void hexagon_kgdb_nmi_hook(void *ignored)
-{
-	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(hexagon_kgdb_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
-#endif
 /* Not yet working */
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
...
@@ -143,7 +143,7 @@ void kgdb_arch_exit(void)
 /*
  * Global data
  */
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 #ifdef __MICROBLAZEEL__
 	.gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
 #else
...
@@ -207,7 +207,7 @@ void arch_kgdb_breakpoint(void)
 		".set\treorder");
 }
-static void kgdb_call_nmi_hook(void *ignored)
+void kgdb_call_nmi_hook(void *ignored)
 {
 	mm_segment_t old_fs;
@@ -219,13 +219,6 @@ static void kgdb_call_nmi_hook(void *ignored)
 	set_fs(old_fs);
 }
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
 static int compute_signal(int tt)
 {
 	struct hard_trap_info *ht;
@@ -394,18 +387,16 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 	return -1;
 }
-struct kgdb_arch arch_kgdb_ops;
+const struct kgdb_arch arch_kgdb_ops = {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	.gdb_bpt_instr = { spec_op << 2, 0x00, 0x00, break_op },
+#else
+	.gdb_bpt_instr = { break_op, 0x00, 0x00, spec_op << 2 },
+#endif
+};
 int kgdb_arch_init(void)
 {
-	union mips_instruction insn = {
-		.r_format = {
-			.opcode = spec_op,
-			.func = break_op,
-		}
-	};
-	memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
 	register_die_notifier(&kgdb_notifier);
 	return 0;
...
@@ -165,7 +165,7 @@ void kgdb_arch_exit(void)
 	/* Nothing to do */
 }
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: trap 30 */
 	.gdb_bpt_instr = { 0xba, 0x6f, 0x3b, 0x00 },
 };
@@ -117,14 +117,14 @@ int kgdb_skipexception(int exception, struct pt_regs *regs)
 	return kgdb_isremovedbreak(regs->nip);
 }
-static int kgdb_call_nmi_hook(struct pt_regs *regs)
+static int kgdb_debugger_ipi(struct pt_regs *regs)
 {
 	kgdb_nmicallback(raw_smp_processor_id(), regs);
 	return 0;
 }
 #ifdef CONFIG_SMP
-void kgdb_roundup_cpus(unsigned long flags)
+void kgdb_roundup_cpus(void)
 {
 	smp_send_debugger_break();
 }
@@ -477,7 +477,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 /*
  * Global data
  */
-struct kgdb_arch arch_kgdb_ops;
+const struct kgdb_arch arch_kgdb_ops;
 static int kgdb_not_implemented(struct pt_regs *regs)
 {
@@ -502,7 +502,7 @@ int kgdb_arch_init(void)
 	old__debugger_break_match = __debugger_break_match;
 	old__debugger_fault_handler = __debugger_fault_handler;
-	__debugger_ipi = kgdb_call_nmi_hook;
+	__debugger_ipi = kgdb_debugger_ipi;
 	__debugger = kgdb_debugger;
 	__debugger_bpt = kgdb_handle_breakpoint;
 	__debugger_sstep = kgdb_singlestep;
...
@@ -311,18 +311,6 @@ BUILD_TRAP_HANDLER(singlestep)
 	local_irq_restore(flags);
 }
-static void kgdb_call_nmi_hook(void *ignored)
-{
-	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
 	int ret;
@@ -379,7 +367,7 @@ void kgdb_arch_exit(void)
 	unregister_die_notifier(&kgdb_notifier);
 }
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: trapa #0x3c */
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 	.gdb_bpt_instr = { 0x3c, 0xc3 },
...
@@ -166,7 +166,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	regs->npc = regs->pc + 4;
 }
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: ta 0x7d */
 	.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
 };
@@ -195,7 +195,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	regs->tnpc = regs->tpc + 4;
 }
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: ta 0x72 */
 	.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
 };
@@ -1014,7 +1014,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 }
 #ifdef CONFIG_KGDB
-void kgdb_roundup_cpus(unsigned long flags)
+void kgdb_roundup_cpus(void)
 {
 	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
 }
...
@@ -422,21 +422,16 @@ static void kgdb_disable_hw_debug(struct pt_regs *regs)
 #ifdef CONFIG_SMP
 /**
  * kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
  *
  * On SMP systems, we need to get the attention of the other CPUs
  * and get them be in a known state. This should do what is needed
  * to get the other CPUs to call kgdb_wait(). Note that on some arches,
  * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
+ * in case of MIPS, smp_call_function() is used to roundup CPUs.
  *
  * On non-SMP systems, this is not called.
  */
-void kgdb_roundup_cpus(unsigned long flags)
+void kgdb_roundup_cpus(void)
 {
 	apic->send_IPI_allbutself(APIC_DM_NMI);
 }
@@ -804,7 +799,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 				(char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: */
 	.gdb_bpt_instr = { 0xcc },
 	.flags = KGDB_HW_BREAKPOINT,
...
@@ -176,23 +176,29 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
 			   char *remcom_out_buffer,
 			   struct pt_regs *regs);
+/**
+ * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
+ * @ignored: This parameter is only here to match the prototype.
+ *
+ * If you're using the default implementation of kgdb_roundup_cpus()
+ * this function will be called per CPU. If you don't implement
+ * kgdb_call_nmi_hook() a default will be used.
+ */
+extern void kgdb_call_nmi_hook(void *ignored);
 /**
  * kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
  *
  * On SMP systems, we need to get the attention of the other CPUs
  * and get them into a known state. This should do what is needed
  * to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
+ * the NMI approach is not used for rounding up all the CPUs. Normally
+ * those architectures can just not implement this and get the default.
 *
 * On non-SMP systems, this is not called.
 */
-extern void kgdb_roundup_cpus(unsigned long flags);
+extern void kgdb_roundup_cpus(void);
 /**
  * kgdb_arch_set_pc - Generic call back to the program counter
@@ -281,7 +287,7 @@ struct kgdb_io {
 	int is_console;
 };
-extern struct kgdb_arch arch_kgdb_ops;
+extern const struct kgdb_arch arch_kgdb_ops;
 extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
...
@@ -55,6 +55,7 @@
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 #include <linux/rcupdate.h>
+#include <linux/irq.h>
 #include <asm/cacheflush.h>
 #include <asm/byteorder.h>
@@ -220,6 +221,62 @@ int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
 	return 0;
 }
+#ifdef CONFIG_SMP
+/*
+ * Default (weak) implementation for kgdb_roundup_cpus
+ */
+static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);
+void __weak kgdb_call_nmi_hook(void *ignored)
+{
+	/*
+	 * NOTE: get_irq_regs() is supposed to get the registers from
+	 * before the IPI interrupt happened and so is supposed to
+	 * show where the processor was. In some situations it's
+	 * possible we might be called without an IPI, so it might be
+	 * safer to figure out how to make kgdb_breakpoint() work
+	 * properly here.
+	 */
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+void __weak kgdb_roundup_cpus(void)
+{
+	call_single_data_t *csd;
+	int this_cpu = raw_smp_processor_id();
+	int cpu;
+	int ret;
+	for_each_online_cpu(cpu) {
+		/* No need to roundup ourselves */
+		if (cpu == this_cpu)
+			continue;
+		csd = &per_cpu(kgdb_roundup_csd, cpu);
+		/*
+		 * If it didn't round up last time, don't try again
+		 * since smp_call_function_single_async() will block.
+		 *
+		 * If rounding_up is false then we know that the
+		 * previous call must have at least started and that
+		 * means smp_call_function_single_async() won't block.
+		 */
+		if (kgdb_info[cpu].rounding_up)
+			continue;
+		kgdb_info[cpu].rounding_up = true;
+		csd->func = kgdb_call_nmi_hook;
+		ret = smp_call_function_single_async(cpu, csd);
+		if (ret)
+			kgdb_info[cpu].rounding_up = false;
+	}
+}
+#endif
 /*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
@@ -535,6 +592,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 				arch_kgdb_ops.correct_hw_break();
 			if (trace_on)
 				tracing_on();
+			kgdb_info[cpu].debuggerinfo = NULL;
+			kgdb_info[cpu].task = NULL;
 			kgdb_info[cpu].exception_state &=
 				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 			kgdb_info[cpu].enter_kgdb--;
@@ -593,7 +652,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 	/* Signal the other CPUs to enter kgdb_wait() */
 	else if ((!kgdb_single_step) && kgdb_do_roundup)
-		kgdb_roundup_cpus(flags);
+		kgdb_roundup_cpus();
 #endif
 	/*
@@ -667,6 +726,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 	if (trace_on)
 		tracing_on();
+	kgdb_info[cpu].debuggerinfo = NULL;
+	kgdb_info[cpu].task = NULL;
 	kgdb_info[cpu].exception_state &=
 		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 	kgdb_info[cpu].enter_kgdb--;
@@ -747,6 +808,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 	struct kgdb_state kgdb_var;
 	struct kgdb_state *ks = &kgdb_var;
+	kgdb_info[cpu].rounding_up = false;
 	memset(ks, 0, sizeof(struct kgdb_state));
 	ks->cpu = cpu;
 	ks->linux_regs = regs;
...
@@ -42,6 +42,7 @@ struct debuggerinfo_struct {
 	int ret_state;
 	int irq_depth;
 	int enter_kgdb;
+	bool rounding_up;
 };
 extern struct debuggerinfo_struct kgdb_info[];
...
@@ -186,7 +186,16 @@ kdb_bt(int argc, const char **argv)
 		kdb_printf("btc: cpu status: ");
 		kdb_parse("cpu\n");
 		for_each_online_cpu(cpu) {
-			sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
+			void *kdb_tsk = KDB_TSK(cpu);
+			/* If a CPU failed to round up we could be here */
+			if (!kdb_tsk) {
+				kdb_printf("WARNING: no task for cpu %ld\n",
+					   cpu);
+				continue;
+			}
+			sprintf(buf, "btt 0x%px\n", kdb_tsk);
 			kdb_parse(buf);
 			touch_nmi_watchdog();
 		}
...
@@ -118,13 +118,6 @@ int kdb_stub(struct kgdb_state *ks)
 	kdb_bp_remove();
 	KDB_STATE_CLEAR(DOING_SS);
 	KDB_STATE_SET(PAGER);
-	/* zero out any offline cpu data */
-	for_each_present_cpu(i) {
-		if (!cpu_online(i)) {
-			kgdb_info[i].debuggerinfo = NULL;
-			kgdb_info[i].task = NULL;
-		}
-	}
 	if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
 		ks->pass_exception = 1;
 		KDB_FLAG_SET(CATASTROPHIC);
...
@@ -658,7 +658,7 @@ static void kdb_cmderror(int diag)
 */
 struct defcmd_set {
 	int count;
-	int usable;
+	bool usable;
 	char *name;
 	char *usage;
 	char *help;
@@ -666,7 +666,7 @@ struct defcmd_set {
 };
 static struct defcmd_set *defcmd_set;
 static int defcmd_set_count;
-static int defcmd_in_progress;
+static bool defcmd_in_progress;
 /* Forward references */
 static int kdb_exec_defcmd(int argc, const char **argv);
@@ -676,9 +676,9 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
 	struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
 	char **save_command = s->command;
 	if (strcmp(argv0, "endefcmd") == 0) {
-		defcmd_in_progress = 0;
+		defcmd_in_progress = false;
 		if (!s->count)
-			s->usable = 0;
+			s->usable = false;
 		if (s->usable)
 			/* macros are always safe because when executed each
 			 * internal command re-enters kdb_parse() and is
@@ -695,7 +695,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
 	if (!s->command) {
 		kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
 			   cmdstr);
-		s->usable = 0;
+		s->usable = false;
 		return KDB_NOTIMP;
 	}
 	memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
@@ -737,7 +737,7 @@ static int kdb_defcmd(int argc, const char **argv)
 	       defcmd_set_count * sizeof(*defcmd_set));
 	s = defcmd_set + defcmd_set_count;
 	memset(s, 0, sizeof(*s));
-	s->usable = 1;
+	s->usable = true;
 	s->name = kdb_strdup(argv[1], GFP_KDB);
 	if (!s->name)
 		goto fail_name;
@@ -756,7 +756,7 @@ static int kdb_defcmd(int argc, const char **argv)
 		s->help[strlen(s->help)-1] = '\0';
 	}
 	++defcmd_set_count;
-	defcmd_in_progress = 1;
+	defcmd_in_progress = true;
 	kfree(save_defcmd_set);
 	return 0;
 fail_help:
...