Commit ac8bf564 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: Fix hardirq tracing in trap return path.
  sparc64: Use correct pt_regs in decode_access_size() error paths.
  sparc64: Fix PREEMPT_ACTIVE value.
  sparc64: Run NMIs on the hardirq stack.
  sparc64: Allocate sufficient stack space in ftrace stubs.
  sparc: Fix forgotten kmemleak headers inclusion
parents 34388d1c 28a1f533
arch/sparc/include/asm/thread_info_64.h
@@ -111,7 +111,7 @@ struct thread_info {
 #define THREAD_SHIFT PAGE_SHIFT
 #endif /* PAGE_SHIFT == 13 */
 
-#define PREEMPT_ACTIVE		0x4000000
+#define PREEMPT_ACTIVE		0x10000000
 
 /*
  * macros/functions for gaining access to the thread information structure
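Why the value moved: in this kernel's preempt-count layout, 0x4000000 is exactly the NMI bit that nmi_enter() sets, so once NMIs started going through nmi_enter()/nmi_exit() (see the nmi.c hunks below) the old PREEMPT_ACTIVE collided with it. A sketch of the layout, assuming the 2.6.34-era bit widths from include/linux/hardirq.h:

	/* preempt_count bit layout (a sketch; masks assume 8/8/10/1 bit widths) */
	#define SKETCH_PREEMPT_MASK  0x000000ff  /* bits  0-7              */
	#define SKETCH_SOFTIRQ_MASK  0x0000ff00  /* bits  8-15             */
	#define SKETCH_HARDIRQ_MASK  0x03ff0000  /* bits 16-25             */
	#define SKETCH_NMI_MASK      0x04000000  /* bit 26: set by nmi_enter() */
	/* old PREEMPT_ACTIVE 0x4000000 == the NMI bit: collision.
	 * new PREEMPT_ACTIVE 0x10000000 (bit 28) sits above every field. */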
arch/sparc/kernel/irq_64.c
@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/irq.h>
+#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -46,6 +47,7 @@
 #include "entry.h"
 #include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS	(IMAP_INR + 1)
@@ -712,24 +714,6 @@ void ack_bad_irq(unsigned int virt_irq)
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
 
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-	if (orig_sp < sp ||
-	    orig_sp > (sp + THREAD_SIZE)) {
-		sp += THREAD_SIZE - 192 - STACK_BIAS;
-		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-	}
-
-	return orig_sp;
-}
-
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
 void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
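The two helpers removed here are not dropped; they reappear in kstack.h below so that nmi.c can share them. A minimal sketch of the intended pairing, mirroring what handler_irq already does and what perfctr_irq gains in the nmi.c hunks further down:

	/* hypothetical handler body showing the intended pairing */
	void *orig_sp = set_hardirq_stack();  /* hop onto the per-cpu IRQ stack */
	/* ... dispatch the interrupt work on the dedicated stack ... */
	restore_hardirq_stack(orig_sp);       /* back to the interrupted stack  */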
arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
 	return false;
 }
 
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 #endif /* _KSTACK_H */
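Two details worth noting in set_hardirq_stack(): the range check switches stacks only when the current %sp is not already inside the hardirq stack, so a nested entry (an NMI landing while an interrupt handler runs, for example) keeps the existing frame chain instead of resetting %sp over live frames; and the new stack pointer is built up as follows (the line-by-line breakdown is an editorial sketch, the constants are sparc64 facts):

	sp += THREAD_SIZE   /* start from the top of the hardirq stack       */
	      - 192         /* leave room for one full initial stack frame   */
	      - STACK_BIAS; /* sparc64 keeps %sp biased by 2047 bytes        */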
arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
 #include <asm/ptrace.h>
 #include <asm/pcr.h>
 
+#include "kstack.h"
+
 /* We don't have a real NMI on sparc64, but we can fake one
  * up using profiling counter overflow interrupts and interrupt
  * levels.
@@ -92,6 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
+	void *orig_sp;
 
 	clear_softint(1 << irq);
 
@@ -99,6 +102,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 	nmi_enter();
 
+	orig_sp = set_hardirq_stack();
+
 	if (notify_die(DIE_NMI, "nmi", regs, 0,
 		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
 		touched = 1;
@@ -124,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		pcr_ops->write(pcr_enable);
 	}
 
+	restore_hardirq_stack(orig_sp);
+
 	nmi_exit();
 }
arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
 		nop
 		call			trace_hardirqs_on
 		 nop
-		wrpr			%l4, %pil
+		/* Do not actually set the %pil here.  We will do that
+		 * below after we clear PSTATE_IE in the %pstate register.
+		 * If we re-enable interrupts here, we can recurse down
+		 * the hardirq stack potentially endlessly, causing a
+		 * stack overflow.
+		 *
+		 * It is tempting to put this test and trace_hardirqs_on
+		 * call at the 'rt_continue' label, but that will not work
+		 * as that path hits unconditionally and we do not want to
+		 * execute this in NMI return paths, for example.
+		 */
 #endif
 rtrap_no_irq_enable:
 		andcc			%l1, TSTATE_PRIV, %l3
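In other words, the return path now keeps this ordering (a sketch of the intent, not the literal assembly):

	/* 1. call trace_hardirqs_on      -- lockdep bookkeeping only        */
	/* 2. PSTATE_IE cleared           -- interrupts remain hard-disabled */
	/* 3. wrpr %l4, %pil (done below) -- priority level drops last       */

so a fresh interrupt can never arrive while we are still unwinding on the hardirq stack, which is exactly the recursion the comment warns about.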
arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
 }
 
 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
 {
 	unsigned int tmp;
 
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
 		return 2;
 	else {
 		printk("Impossible unaligned trap. insn=%08x\n", insn);
-		die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+		die_if_kernel("Byte sized unaligned access?!?!", regs);
 
 		/* GCC should never warn that control reaches the end
 		 * of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
 	enum direction dir = decode_direction(insn);
-	int size = decode_access_size(insn);
+	int size = decode_access_size(regs, insn);
 	int orig_asi, asi;
 
 	current_thread_info()->kern_una_regs = regs;
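The point of threading regs through: current_thread_info()->kregs points at the task's topmost saved trap frame, not necessarily the frame of the unaligned trap being decoded, so the old error path could die_if_kernel() against the wrong registers. The call chain after this change, as a sketch:

	kernel_unaligned_trap(regs, insn)
	    -> decode_access_size(regs, insn)
	        -> die_if_kernel("Byte sized unaligned access?!?!", regs); /* the real trap frame */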
arch/sparc/lib/mcount.S
@@ -34,7 +34,7 @@ mcount:
 		cmp		%g1, %g2
 		be,pn		%icc, 1f
 		 mov		%i7, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g3, %o1
 		jmpl		%g1, %o7
 		 mov		%i7, %o0
@@ -56,7 +56,7 @@ mcount:
 		 nop
 5:		mov		%i7, %g2
 		mov		%fp, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g2, %l0
 		ba,pt		%xcc, ftrace_graph_caller
 		 mov		%g3, %l1
@@ -85,7 +85,7 @@ ftrace_caller:
 		lduw		[%g1 + %lo(function_trace_stop)], %g1
 		brnz,pn		%g1, ftrace_stub
 		 mov		%fp, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g2, %o1
 		mov		%g2, %l0
 		mov		%g3, %l1
@@ -120,7 +120,7 @@ ENTRY(ftrace_graph_caller)
 END(ftrace_graph_caller)
 
 ENTRY(return_to_handler)
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		call		ftrace_return_to_handler
 		 mov		%fp, %o0
 		jmpl		%o0 + 8, %g0
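Why -176: a SPARC V9 stack frame must reserve the 16-register window spill area plus six mandatory outgoing argument slots, all 8 bytes wide; -128 covered only the register window, so anything these stubs called could scribble over the stub caller's frame. The arithmetic, as a sketch:

	/* 16 window registers (%l0-%l7, %i0-%i7) * 8 bytes = 128 */
	/*  6 mandatory outgoing argument slots   * 8 bytes =  48 */
	/*                                            total = 176 */
	/* hence: save %sp, -176, %sp                             */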