Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
10e26723
Commit
10e26723
authored
Nov 16, 2006
by
David S. Miller
Committed by
David S. Miller
Dec 10, 2006
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
[SPARC64]: Add irqtrace/stacktrace/lockdep support.
Signed-off-by:
David S. Miller
<
davem@davemloft.net
>
parent
af1713e0
Changes
13
Hide whitespace changes
Inline
Side-by-side
Showing
13 changed files
with
289 additions
and
66 deletions
+289
-66
arch/sparc64/Kconfig
arch/sparc64/Kconfig
+8
-0
arch/sparc64/Kconfig.debug
arch/sparc64/Kconfig.debug
+4
-0
arch/sparc64/kernel/Makefile
arch/sparc64/kernel/Makefile
+1
-0
arch/sparc64/kernel/entry.S
arch/sparc64/kernel/entry.S
+26
-1
arch/sparc64/kernel/head.S
arch/sparc64/kernel/head.S
+8
-0
arch/sparc64/kernel/rtrap.S
arch/sparc64/kernel/rtrap.S
+17
-6
arch/sparc64/kernel/stacktrace.c
arch/sparc64/kernel/stacktrace.c
+41
-0
arch/sparc64/kernel/sun4v_ivec.S
arch/sparc64/kernel/sun4v_ivec.S
+16
-4
arch/sparc64/mm/ultra.S
arch/sparc64/mm/ultra.S
+8
-0
include/asm-sparc64/irqflags.h
include/asm-sparc64/irqflags.h
+89
-0
include/asm-sparc64/rwsem.h
include/asm-sparc64/rwsem.h
+25
-7
include/asm-sparc64/system.h
include/asm-sparc64/system.h
+3
-46
include/asm-sparc64/ttable.h
include/asm-sparc64/ttable.h
+43
-2
No files found.
arch/sparc64/Kconfig
View file @
10e26723
...
...
@@ -26,6 +26,14 @@ config MMU
bool
default y
# Capability flags, always enabled: this architecture provides the
# hooks required by the generic stacktrace and lockdep infrastructure
# (save_stack_trace() is added in arch/sparc64/kernel/stacktrace.c by
# this same change).
config STACKTRACE_SUPPORT
	bool
	default y

config LOCKDEP_SUPPORT
	bool
	default y

# Pre-existing option; presumably enables the generic time
# interpolation framework -- confirm against kernel/time users.
config TIME_INTERPOLATION
	bool
	default y
...
...
arch/sparc64/Kconfig.debug
View file @
10e26723
menu "Kernel hacking"
# Advertise that the low-level entry code makes the trace_hardirqs_on/
# trace_hardirqs_off callbacks (added throughout entry.S, rtrap.S,
# sun4v_ivec.S, ultra.S and ttable.h in this change), as required for
# CONFIG_TRACE_IRQFLAGS / lockdep irq-state tracking.
config TRACE_IRQFLAGS_SUPPORT
	bool
	default y
source "lib/Kconfig.debug"
config DEBUG_STACK_USAGE
...
...
arch/sparc64/kernel/Makefile
View file @
10e26723
...
...
@@ -14,6 +14,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
\
visemul.o prom.o of_device.o
obj-$(CONFIG_STACKTRACE)
+=
stacktrace.o
obj-$(CONFIG_PCI)
+=
ebus.o isa.o pci_common.o pci_iommu.o
\
pci_psycho.o pci_sabre.o pci_schizo.o
\
pci_sun4v.o pci_sun4v_asm.o
...
...
arch/sparc64/kernel/entry.S
View file @
10e26723
...
...
@@ -597,7 +597,12 @@ __spitfire_cee_trap_continue:
1
:
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
2
:
mov
%
l4
,
%
o1
2
:
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
mov
%
l4
,
%
o1
mov
%
l5
,
%
o2
call
spitfire_access_error
add
%
sp
,
PTREGS_OFF
,
%
o0
...
...
@@ -824,6 +829,10 @@ do_cheetah_plus_data_parity:
wrpr
%
g0
,
15
,
%
pil
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
mov
0x0
,
%
o0
call
cheetah_plus_parity_error
add
%
sp
,
PTREGS_OFF
,
%
o1
...
...
@@ -855,6 +864,10 @@ do_cheetah_plus_insn_parity:
wrpr
%
g0
,
15
,
%
pil
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
mov
0x1
,
%
o0
call
cheetah_plus_parity_error
add
%
sp
,
PTREGS_OFF
,
%
o1
...
...
@@ -1183,6 +1196,10 @@ c_fast_ecc:
wrpr
%
g0
,
15
,
%
pil
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
mov
%
l4
,
%
o1
mov
%
l5
,
%
o2
call
cheetah_fecc_handler
...
...
@@ -1211,6 +1228,10 @@ c_cee:
wrpr
%
g0
,
15
,
%
pil
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
mov
%
l4
,
%
o1
mov
%
l5
,
%
o2
call
cheetah_cee_handler
...
...
@@ -1239,6 +1260,10 @@ c_deferred:
wrpr
%
g0
,
15
,
%
pil
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
mov
%
l4
,
%
o1
mov
%
l5
,
%
o2
call
cheetah_deferred_handler
...
...
arch/sparc64/kernel/head.S
View file @
10e26723
...
...
@@ -489,6 +489,14 @@ tlb_fixup_done:
call
__bzero
sub
%
o1
,
%
o0
,
%
o1
#ifdef CONFIG_LOCKDEP
/
*
We
have
this
call
this
super
early
,
as
even
prom_init
can
grab
*
spinlocks
and
thus
call
into
the
lockdep
code
.
*/
call
lockdep_init
nop
#endif
mov
%
l6
,
%
o1
!
OpenPROM
stack
call
prom_init
mov
%
l7
,
%
o0
!
OpenPROM
cif
handler
...
...
arch/sparc64/kernel/rtrap.S
View file @
10e26723
...
...
@@ -165,14 +165,26 @@ rtrap:
__handle_softirq_continue
:
rtrap_xcall
:
sethi
%
hi
(
0xf
<<
20
),
%
l4
andcc
%
l1
,
TSTATE_PRIV
,
%
l3
and
%
l1
,
%
l4
,
%
l4
andn
%
l1
,
%
l4
,
%
l1
srl
%
l4
,
20
,
%
l4
#ifdef CONFIG_TRACE_IRQFLAGS
brnz
,
pn
%
l4
,
rtrap_no_irq_enable
nop
call
trace_hardirqs_on
nop
wrpr
%
l4
,
%
pil
rtrap_no_irq_enable
:
#endif
andcc
%
l1
,
TSTATE_PRIV
,
%
l3
bne
,
pn
%
icc
,
to_kernel
andn
%
l1
,
%
l4
,
%
l1
nop
/
*
We
must
hold
IRQs
off
and
atomically
test
schedule
+
signal
*
state
,
then
hold
them
off
all
the
way
back
to
userspace
.
*
If
we
are
returning
to
kernel
,
none
of
this
matters
.
*
If
we
are
returning
to
kernel
,
none
of
this
matters
.
Note
*
that
we
are
disabling
interrupts
via
PSTATE_IE
,
not
using
*
%
pil
.
*
*
If
we
do
not
do
this
,
there
is
a
window
where
we
would
do
*
the
tests
,
later
the
signal
/
resched
event
arrives
but
we
do
...
...
@@ -256,7 +268,6 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
ld
[%
sp
+
PTREGS_OFF
+
PT_V9_Y
],
%
o3
wr
%
o3
,
%
g0
,
%
y
srl
%
l4
,
20
,
%
l4
wrpr
%
l4
,
0x0
,
%
pil
wrpr
%
g0
,
0x1
,
%
tl
wrpr
%
l1
,
%
g0
,
%
tstate
...
...
@@ -374,8 +385,8 @@ to_kernel:
ldx
[%
g6
+
TI_FLAGS
],
%
l5
andcc
%
l5
,
_TIF_NEED_RESCHED
,
%
g0
be
,
pt
%
xcc
,
kern_fpucheck
srl
%
l4
,
20
,
%
l5
cmp
%
l
5
,
0
nop
cmp
%
l
4
,
0
bne
,
pn
%
xcc
,
kern_fpucheck
sethi
%
hi
(
PREEMPT_ACTIVE
),
%
l6
stw
%
l6
,
[%
g6
+
TI_PRE_COUNT
]
...
...
arch/sparc64/kernel/stacktrace.c
0 → 100644
View file @
10e26723
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
/*
 * save_stack_trace() - record the kernel-mode call chain of a task.
 *
 * Walks the register-window save areas on the kernel stack, storing one
 * saved return address (the ins[7] / %i7 slot of each struct reg_window)
 * per frame into trace->entries, until trace->max_entries is reached or
 * the frame pointer leaves the task's kernel-stack region.  The first
 * trace->skip frames are dropped instead of recorded.
 *
 * @trace: collector; nr_entries is advanced, skip is consumed.
 * @task:  task to unwind, or NULL for the current task.
 */
void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
{
	unsigned long frame, stack_ptr, ti_base;
	struct thread_info *ti;

	if (!task)
		task = current;
	ti = task_thread_info(task);

	if (task == current) {
		/* Spill the live register windows so their save areas
		 * are visible in memory before we start walking.
		 */
		flushw_all();
		__asm__ __volatile__(
			"mov %%fp, %0"
			: "=r" (stack_ptr)
		);
	} else {
		/* A sleeping task's kernel stack pointer was saved in
		 * its thread_info at context-switch time.
		 */
		stack_ptr = ti->ksp;
	}

	/* sparc64 kernel stack pointers are biased; un-bias to get a
	 * usable address.
	 */
	frame = stack_ptr + STACK_BIAS;
	ti_base = (unsigned long) ti;

	for (;;) {
		struct reg_window *window;

		/* Stop on a frame pointer outside this task's stack. */
		if (frame < (ti_base + sizeof(struct thread_info)) ||
		    frame >= (ti_base + THREAD_SIZE))
			break;

		window = (struct reg_window *) frame;
		if (trace->skip > 0)
			trace->skip--;
		else
			trace->entries[trace->nr_entries++] = window->ins[7];

		/* The caller's frame pointer lives in the ins[6] (%i6)
		 * slot, again biased.
		 */
		frame = window->ins[6] + STACK_BIAS;

		if (trace->nr_entries >= trace->max_entries)
			break;
	}
}
arch/sparc64/kernel/sun4v_ivec.S
View file @
10e26723
...
...
@@ -190,7 +190,10 @@ sun4v_res_mondo:
mov
%
g1
,
%
g4
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
/
*
Log
the
event
.
*/
add
%
sp
,
PTREGS_OFF
,
%
o0
call
sun4v_resum_error
...
...
@@ -216,7 +219,10 @@ sun4v_res_mondo_queue_full:
wrpr
%
g0
,
15
,
%
pil
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
call
sun4v_resum_overflow
add
%
sp
,
PTREGS_OFF
,
%
o0
...
...
@@ -295,7 +301,10 @@ sun4v_nonres_mondo:
mov
%
g1
,
%
g4
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
/
*
Log
the
event
.
*/
add
%
sp
,
PTREGS_OFF
,
%
o0
call
sun4v_nonresum_error
...
...
@@ -321,7 +330,10 @@ sun4v_nonres_mondo_queue_full:
wrpr
%
g0
,
15
,
%
pil
ba
,
pt
%
xcc
,
etrap_irq
rd
%
pc
,
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
call
sun4v_nonresum_overflow
add
%
sp
,
PTREGS_OFF
,
%
o0
...
...
arch/sparc64/mm/ultra.S
View file @
10e26723
...
...
@@ -477,6 +477,10 @@ xcall_sync_tick:
sethi
%
hi
(
109
f
),
%
g7
b
,
pt
%
xcc
,
etrap_irq
109
:
or
%
g7
,
%
lo
(
109
b
),
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
call
smp_synchronize_tick_client
nop
clr
%
l6
...
...
@@ -508,6 +512,10 @@ xcall_report_regs:
sethi
%
hi
(
109
f
),
%
g7
b
,
pt
%
xcc
,
etrap_irq
109
:
or
%
g7
,
%
lo
(
109
b
),
%
g7
#ifdef CONFIG_TRACE_IRQFLAGS
call
trace_hardirqs_off
nop
#endif
call
__show_regs
add
%
sp
,
PTREGS_OFF
,
%
o0
clr
%
l6
...
...
include/asm-sparc64/irqflags.h
0 → 100644
View file @
10e26723
/*
* include/asm-sparc64/irqflags.h
*
* IRQ flags handling
*
* This file gets included from lowlevel asm headers too, to provide
* wrapped versions of the local_irq_*() APIs, based on the
* raw_local_irq_*() functions from the lowlevel headers.
*/
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#ifndef __ASSEMBLY__
static
inline
unsigned
long
__raw_local_save_flags
(
void
)
{
unsigned
long
flags
;
__asm__
__volatile__
(
"rdpr %%pil, %0"
:
"=r"
(
flags
)
);
return
flags
;
}
#define raw_local_save_flags(flags) \
do { (flags) = __raw_local_save_flags(); } while (0)
static
inline
void
raw_local_irq_restore
(
unsigned
long
flags
)
{
__asm__
__volatile__
(
"wrpr %0, %%pil"
:
/* no output */
:
"r"
(
flags
)
:
"memory"
);
}
static
inline
void
raw_local_irq_disable
(
void
)
{
__asm__
__volatile__
(
"wrpr 15, %%pil"
:
/* no outputs */
:
/* no inputs */
:
"memory"
);
}
static
inline
void
raw_local_irq_enable
(
void
)
{
__asm__
__volatile__
(
"wrpr 0, %%pil"
:
/* no outputs */
:
/* no inputs */
:
"memory"
);
}
/*
 * Classify a saved flags (%pil) value: zero means every interrupt
 * level is enabled; any greater value masks at least one level, which
 * the generic irq-flags code treats as "disabled".
 */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
	return flags != 0;
}
/*
 * Report whether interrupts are currently disabled, by sampling the
 * live %pil value and classifying it.
 */
static inline int raw_irqs_disabled(void)
{
	return raw_irqs_disabled_flags(__raw_local_save_flags());
}
/*
* For spinlocks, etc:
*/
static
inline
unsigned
long
__raw_local_irq_save
(
void
)
{
unsigned
long
flags
=
__raw_local_save_flags
();
raw_local_irq_disable
();
return
flags
;
}
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
#endif
/* (__ASSEMBLY__) */
#endif
/* !(_ASM_IRQFLAGS_H) */
include/asm-sparc64/rwsem.h
View file @
10e26723
...
...
@@ -23,20 +23,33 @@ struct rw_semaphore {
signed
int
count
;
spinlock_t
wait_lock
;
struct
list_head
wait_list
;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct
lockdep_map
dep_map
;
#endif
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
static
__inline__
void
init_rwsem
(
struct
rw_semaphore
*
sem
)
{
sem
->
count
=
RWSEM_UNLOCKED_VALUE
;
spin_lock_init
(
&
sem
->
wait_lock
);
INIT_LIST_HEAD
(
&
sem
->
wait_list
);
}
extern
void
__init_rwsem
(
struct
rw_semaphore
*
sem
,
const
char
*
name
,
struct
lock_class_key
*
key
);
#define init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
} while (0)
extern
void
__down_read
(
struct
rw_semaphore
*
sem
);
extern
int
__down_read_trylock
(
struct
rw_semaphore
*
sem
);
...
...
@@ -46,6 +59,11 @@ extern void __up_read(struct rw_semaphore *sem);
extern
void
__up_write
(
struct
rw_semaphore
*
sem
);
extern
void
__downgrade_write
(
struct
rw_semaphore
*
sem
);
static
inline
void
__down_write_nested
(
struct
rw_semaphore
*
sem
,
int
subclass
)
{
__down_write
(
sem
);
}
static
inline
int
rwsem_atomic_update
(
int
delta
,
struct
rw_semaphore
*
sem
)
{
return
atomic_add_return
(
delta
,
(
atomic_t
*
)(
&
sem
->
count
));
...
...
include/asm-sparc64/system.h
View file @
10e26723
...
...
@@ -7,6 +7,9 @@
#include <asm/visasm.h>
#ifndef __ASSEMBLY__
#include <linux/irqflags.h>
/*
* Sparc (general) CPU types
*/
...
...
@@ -72,52 +75,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#endif
#define setipl(__new_ipl) \
__asm__ __volatile__("wrpr %0, %%pil" : : "r" (__new_ipl) : "memory")
#define local_irq_disable() \
__asm__ __volatile__("wrpr 15, %%pil" : : : "memory")
#define local_irq_enable() \
__asm__ __volatile__("wrpr 0, %%pil" : : : "memory")
#define getipl() \
({ unsigned long retval; __asm__ __volatile__("rdpr %%pil, %0" : "=r" (retval)); retval; })
#define swap_pil(__new_pil) \
({ unsigned long retval; \
__asm__ __volatile__("rdpr %%pil, %0\n\t" \
"wrpr %1, %%pil" \
: "=&r" (retval) \
: "r" (__new_pil) \
: "memory"); \
retval; \
})
#define read_pil_and_cli() \
({ unsigned long retval; \
__asm__ __volatile__("rdpr %%pil, %0\n\t" \
"wrpr 15, %%pil" \
: "=r" (retval) \
: : "memory"); \
retval; \
})
#define local_save_flags(flags) ((flags) = getipl())
#define local_irq_save(flags) ((flags) = read_pil_and_cli())
#define local_irq_restore(flags) setipl((flags))
/* On sparc64 IRQ flags are the PIL register. A value of zero
* means all interrupt levels are enabled, any other value means
* only IRQ levels greater than that value will be received.
* Consequently this means that the lowest IRQ level is one.
*/
#define irqs_disabled() \
({ unsigned long flags; \
local_save_flags(flags);\
(flags > 0); \
})
#define nop() __asm__ __volatile__ ("nop")
#define read_barrier_depends() do { } while(0)
...
...
include/asm-sparc64/ttable.h
View file @
10e26723
...
...
@@ -137,10 +137,49 @@
#endif
#define BREAKPOINT_TRAP TRAP(breakpoint_trap)
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * TRAP_IRQ(routine, level) - trap-table entry for a device interrupt,
 * CONFIG_TRACE_IRQFLAGS version.
 *
 * Saves the old %pil in %g2, raises %pil to 15, and branches to
 * etrap_irq; %g7 is set to "1f-4" so control resumes at the
 * out-of-line continuation at label 1 (etrap appears to return to
 * %g7 + 4 -- confirm against etrap.S).  The nops pad the sequence to
 * fill the trap-table slot.  The continuation lives in .subsection 2
 * so the C call to trace_hardirqs_off (needed before handling the
 * interrupt, since %pil was just raised) does not bloat the trap
 * table itself; it then invokes routine(level, regs) and leaves via
 * rtrap_irq.
 */
#define TRAP_IRQ(routine, level)			\
	rdpr	%pil, %g2;				\
	wrpr	%g0, 15, %pil;				\
	sethi	%hi(1f-4), %g7;				\
	ba,pt	%xcc, etrap_irq;			\
	 or	%g7, %lo(1f-4), %g7;			\
	nop;						\
	nop;						\
	nop;						\
	.subsection	2;				\
1:	call	trace_hardirqs_off;			\
	 nop;						\
	mov	level, %o0;				\
	call	routine;				\
	 add	%sp, PTREGS_OFF, %o1;			\
	ba,a,pt	%xcc, rtrap_irq;			\
	.previous;
/*
 * TICK_SMP_IRQ - trap-table entry for the SMP per-cpu timer tick,
 * CONFIG_TRACE_IRQFLAGS version.
 *
 * Same shape as the tracing TRAP_IRQ above: raise %pil to 15, enter
 * via etrap_irq with %g7 = 1f-4 so the out-of-line code in
 * .subsection 2 runs next, call trace_hardirqs_off first (a C call,
 * kept out of the trap table proper), then hand pt_regs to
 * smp_percpu_timer_interrupt and exit through rtrap_irq.
 */
#define TICK_SMP_IRQ					\
	rdpr	%pil, %g2;				\
	wrpr	%g0, 15, %pil;				\
	sethi	%hi(1f-4), %g7;				\
	ba,pt	%xcc, etrap_irq;			\
	 or	%g7, %lo(1f-4), %g7;			\
	nop;						\
	nop;						\
	nop;						\
	.subsection	2;				\
1:	call	trace_hardirqs_off;			\
	 nop;						\
	call	smp_percpu_timer_interrupt;		\
	 add	%sp, PTREGS_OFF, %o0;			\
	ba,a,pt	%xcc, rtrap_irq;			\
	.previous;
#else
#define TRAP_IRQ(routine, level) \
rdpr %pil, %g2; \
wrpr %g0, 15, %pil; \
b,pt %xcc, etrap_irq; \
b
a
,pt %xcc, etrap_irq; \
rd %pc, %g7; \
mov level, %o0; \
call routine; \
...
...
@@ -151,12 +190,14 @@
rdpr %pil, %g2; \
wrpr %g0, 15, %pil; \
sethi %hi(109f), %g7; \
b,pt %xcc, etrap_irq; \
b
a
,pt %xcc, etrap_irq; \
109: or %g7, %lo(109b), %g7; \
call smp_percpu_timer_interrupt; \
add %sp, PTREGS_OFF, %o0; \
ba,a,pt %xcc, rtrap_irq;
#endif
#define TRAP_IVEC TRAP_NOSAVE(do_ivec)
#define BTRAP(lvl) TRAP_ARG(bad_trap, lvl)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment