Commit 116ef0fc authored Oct 29, 2015 by Russell King
Merge branches 'fixes' and 'misc' into for-next
parents 9ffecb10 38850d78 a4283e41
Showing 28 changed files with 143 additions and 162 deletions
Documentation/arm/SA1100/Victor                  +0  -16
Documentation/arm/memory.txt                     +1  -1
Documentation/devicetree/bindings/arm/twd.txt    +5  -0
arch/arm/Kconfig                                 +6  -2
arch/arm/include/asm/cmpxchg.h                   +12 -0
arch/arm/include/asm/irqflags.h                  +10 -0
arch/arm/include/asm/mach/arch.h                 +1  -1
arch/arm/include/asm/memory.h                    +2  -0
arch/arm/include/asm/pgtable.h                   +1  -1
arch/arm/include/asm/smp.h                       +2  -2
arch/arm/include/asm/unistd.h                    +0  -7
arch/arm/kernel/devtree.c                        +9  -3
arch/arm/kernel/entry-armv.S                     +3  -30
arch/arm/kernel/hw_breakpoint.c                  +0  -1
arch/arm/kernel/kgdb.c                           +12 -19
arch/arm/kernel/smp.c                            +11 -1
arch/arm/kernel/smp_twd.c                        +5  -6
arch/arm/kernel/traps.c                          +0  -52
arch/arm/lib/clear_user.S                        +4  -0
arch/arm/mm/Kconfig                              +0  -12
arch/arm/mm/dma-mapping.c                        +7  -0
arch/arm/mm/fault.c                              +22 -0
arch/arm/mm/fault.h                              +1  -0
arch/arm/mm/mmu.c                                +4  -0
arch/arm/vdso/vdsomunge.c                        +13 -4
drivers/clk/clkdev.c                             +2  -1
include/linux/amba/bus.h                         +0  -2
lib/nmi_backtrace.c                              +10 -1

Documentation/arm/SA1100/Victor  (deleted, 100644 → 0)
Victor is known as a "digital talking book player" manufactured by
VisuAide, Inc. to be used by blind people.
For more information related to Victor, see:
http://www.humanware.com/en-usa/products
Of course Victor is using Linux as its main operating system.
The Victor implementation for Linux is maintained by Nicolas Pitre:
nico@visuaide.com
nico@fluxnic.net
For any comments, please feel free to contact me through the above
addresses.

Documentation/arm/memory.txt
@@ -54,7 +54,7 @@ VMALLOC_START	VMALLOC_END-1	vmalloc() / ioremap() space.
 				located here through iotable_init().
 				VMALLOC_START is based upon the value
 				of the high_memory variable, and VMALLOC_END
-				is equal to 0xff000000.
+				is equal to 0xff800000.

 PAGE_OFFSET	high_memory-1	Kernel direct-mapped RAM region.
 				This maps the platforms RAM, and typically
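
For reference, the documented VMALLOC_END moves up by 0xff800000 - 0xff000000 = 0x00800000, so the vmalloc()/ioremap() window gains 8 MiB; the matching code change is in arch/arm/include/asm/pgtable.h further down.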

Documentation/devicetree/bindings/arm/twd.txt
@@ -19,6 +19,11 @@ interrupts.
 - reg : Specify the base address and the size of the TWD timer
 	register window.

+Optional
+
+- always-on : a boolean property. If present, the timer is powered through
+  an always-on power domain, therefore it never loses context.
+
 Example:

 	twd-timer@2c000600 {

arch/arm/Kconfig
@@ -645,6 +645,7 @@ config ARCH_SHMOBILE_LEGACY

 config ARCH_RPC
 	bool "RiscPC"
+	depends on MMU
 	select ARCH_ACORN
 	select ARCH_MAY_HAVE_PC_FDC
 	select ARCH_SPARSEMEM_ENABLE
@@ -1410,7 +1411,6 @@ config HAVE_ARM_ARCH_TIMER

 config HAVE_ARM_TWD
 	bool
-	depends on SMP
 	select CLKSRC_OF if OF
 	help
 	  This options enables support for the ARM timer and watchdog unit
@@ -1470,6 +1470,8 @@ choice

 	config VMSPLIT_3G
 		bool "3G/1G user/kernel split"
+	config VMSPLIT_3G_OPT
+		bool "3G/1G user/kernel split (for full 1G low memory)"
 	config VMSPLIT_2G
 		bool "2G/2G user/kernel split"
 	config VMSPLIT_1G
@@ -1481,6 +1483,7 @@ config PAGE_OFFSET
 	default PHYS_OFFSET if !MMU
 	default 0x40000000 if VMSPLIT_1G
 	default 0x80000000 if VMSPLIT_2G
+	default 0xB0000000 if VMSPLIT_3G_OPT
 	default 0xC0000000

 config NR_CPUS
@@ -1695,8 +1698,9 @@ config HIGHMEM
 	  If unsure, say n.

 config HIGHPTE
-	bool "Allocate 2nd-level pagetables from highmem"
+	bool "Allocate 2nd-level pagetables from highmem" if EXPERT
 	depends on HIGHMEM
+	default y
 	help
 	  The VM uses one page of physical memory for each page table.
 	  For systems with a lot of processes, this can use a lot of
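
For reference, VMSPLIT_3G_OPT lowers PAGE_OFFSET from the default 0xC0000000 to 0xB0000000, i.e. by 0x10000000 = 256 MiB: user space shrinks from 3 GiB to 2.75 GiB, and the extra 256 MiB of kernel address space is what allows a full 1 GiB of RAM to stay direct-mapped as low memory, as the new prompt text says.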

arch/arm/include/asm/cmpxchg.h
@@ -39,6 +39,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
 	case 1:
 		asm volatile("@	__xchg1\n"
 		"1:	ldrexb	%0, [%3]\n"
@@ -49,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 			: "r" (x), "r" (ptr)
 			: "memory", "cc");
 		break;
+	case 2:
+		asm volatile("@	__xchg2\n"
+		"1:	ldrexh	%0, [%3]\n"
+		"	strexh	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+#endif
 	case 4:
 		asm volatile("@	__xchg4\n"
 		"1:	ldrex	%0, [%3]\n"
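
As an aside, the new 2-byte case has the same semantics as the existing 1- and 4-byte ones. Expressed as plain C, the ldrexh/strexh pair implements roughly the following (a sketch for illustration only, ignoring the exclusive-monitor retry that makes the real version atomic):

/* Sketch: what __xchg does for size == 2, without atomicity. */
static unsigned short xchg16_sketch(volatile unsigned short *ptr,
				    unsigned short x)
{
	unsigned short ret = *ptr;	/* 1: ldrexh %0, [%3] */
	*ptr = x;			/*    strexh %1, %2, [%3]; bne 1b */
	return ret;			/* the old value is returned */
}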

arch/arm/include/asm/irqflags.h
@@ -54,6 +54,14 @@ static inline void arch_local_irq_disable(void)
 #define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
 #define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")

+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable()  __asm__("cpsie a	@ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a	@ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable()	do { } while (0)
+#define local_abt_disable()	do { } while (0)
+#endif
+
 #else

 /*
@@ -136,6 +144,8 @@ static inline void arch_local_irq_disable(void)
 	: "memory", "cc");					\
 	})

+#define local_abt_enable()	do { } while (0)
+#define local_abt_disable()	do { } while (0)
 #endif

 /*

arch/arm/include/asm/mach/arch.h
@@ -47,7 +47,7 @@ struct machine_desc {
 	unsigned		l2c_aux_val;	/* L2 cache aux value	*/
 	unsigned		l2c_aux_mask;	/* L2 cache aux mask	*/
 	void			(*l2c_write_sec)(unsigned long, unsigned);
-	struct smp_operations	*smp;		/* SMP operations	*/
+	const struct smp_operations	*smp;	/* SMP operations	*/
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
 	void			(*dt_fixup)(void);

arch/arm/include/asm/memory.h
@@ -76,10 +76,12 @@
  */
 #define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))

+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Allow 16MB-aligned ioremap pages
  */
 #define IOREMAP_MAX_ORDER	24
+#endif

 #else /* CONFIG_MMU */

arch/arm/include/asm/pgtable.h
@@ -43,7 +43,7 @@
  */
 #define VMALLOC_OFFSET		(8*1024*1024)
 #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END		0xff000000UL
+#define VMALLOC_END		0xff800000UL

 #define LIBRARY_TEXT_START	0x0c000000

arch/arm/include/asm/smp.h
@@ -112,7 +112,7 @@ struct smp_operations {
 struct of_cpu_method {
 	const char *method;
-	struct smp_operations *ops;
+	const struct smp_operations *ops;
 };

 #define CPU_METHOD_OF_DECLARE(name, _method, _ops)			\
@@ -122,6 +122,6 @@ struct of_cpu_method {
 /*
  * set platform specific SMP operations
  */
-extern void smp_set_ops(struct smp_operations *);
+extern void smp_set_ops(const struct smp_operations *);

 #endif /* ifndef __ASM_ARM_SMP_H */
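
For context, a minimal sketch of how a platform can now keep its SMP operations in rodata and hand them to the core through the (now const-taking) smp_set_ops(). The platform name and boot hook are made up for illustration; only smp_set_ops() and struct smp_operations come from the header above:

#include <linux/init.h>
#include <asm/smp.h>

/* Hypothetical platform code, for illustration only. */
static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* kick the secondary CPU into the kernel here */
	return 0;
}

static const struct smp_operations foo_smp_ops = {
	.smp_boot_secondary	= foo_boot_secondary,
};

static void __init foo_init_machine(void)
{
	smp_set_ops(&foo_smp_ops);	/* accepts a const pointer after this change */
}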

arch/arm/include/asm/unistd.h
@@ -21,13 +21,6 @@
  */
 #define __NR_syscalls  (392)

-/*
- * *NOTE*: This is a ghost syscall private to the kernel.  Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence.  Don't ever use this from user code.
- */
-#define __ARM_NR_cmpxchg		(__ARM_NR_BASE+0x00fff0)
-
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE

arch/arm/kernel/devtree.c
@@ -101,6 +101,7 @@ void __init arm_dt_init_cpu_maps(void)
 		if (of_property_read_u32(cpu, "reg", &hwid)) {
 			pr_debug(" * %s missing reg property\n",
 				     cpu->full_name);
+			of_node_put(cpu);
 			return;
 		}
@@ -108,8 +109,10 @@ void __init arm_dt_init_cpu_maps(void)
 		 * 8 MSBs must be set to 0 in the DT since the reg property
 		 * defines the MPIDR[23:0].
 		 */
-		if (hwid & ~MPIDR_HWID_BITMASK)
+		if (hwid & ~MPIDR_HWID_BITMASK) {
+			of_node_put(cpu);
 			return;
+		}

 		/*
 		 * Duplicate MPIDRs are a recipe for disaster.
@@ -119,9 +122,11 @@ void __init arm_dt_init_cpu_maps(void)
 		 * to avoid matching valid MPIDR[23:0] values.
 		 */
 		for (j = 0; j < cpuidx; j++)
-			if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
-						     "properties in the DT\n"))
+			if (WARN(tmp_map[j] == hwid,
+				 "Duplicate /cpu reg properties in the DT\n")) {
+				of_node_put(cpu);
 				return;
+			}

 		/*
 		 * Build a stashed array of MPIDR values. Numbering scheme
@@ -143,6 +148,7 @@ void __init arm_dt_init_cpu_maps(void)
 			       "max cores %u, capping them\n",
 			       cpuidx, nr_cpu_ids)) {
 			cpuidx = nr_cpu_ids;
+			of_node_put(cpu);
 			break;
 		}

arch/arm/kernel/entry-armv.S
@@ -427,8 +427,7 @@ ENDPROC(__fiq_abt)
 	.endm

 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
-    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
@@ -859,20 +858,7 @@ __kuser_helper_start:

 __kuser_cmpxchg64:				@ 0xffff0f60

-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
-	/*
-	 * Poor you.  No fast solution possible...
-	 * The kernel itself must perform the operation.
-	 * A special ghost syscall is used for that (see traps.c).
-	 */
-	stmfd	sp!, {r7, lr}
-	ldr	r7, 1f			@ it's 20 bits
-	swi	__ARM_NR_cmpxchg64
-	ldmfd	sp!, {r7, pc}
-1:	.word	__ARM_NR_cmpxchg64
-
-#elif defined(CONFIG_CPU_32v6K)
+#if defined(CONFIG_CPU_32v6K)

 	stmfd	sp!, {r4, r5, r6, r7}
 	ldrd	r4, r5, [r0]			@ load old val
@@ -948,20 +934,7 @@ __kuser_memory_barrier:			@ 0xffff0fa0

 __kuser_cmpxchg:				@ 0xffff0fc0

-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
-	/*
-	 * Poor you.  No fast solution possible...
-	 * The kernel itself must perform the operation.
-	 * A special ghost syscall is used for that (see traps.c).
-	 */
-	stmfd	sp!, {r7, lr}
-	ldr	r7, 1f			@ it's 20 bits
-	swi	__ARM_NR_cmpxchg
-	ldmfd	sp!, {r7, pc}
-1:	.word	__ARM_NR_cmpxchg
-
-#elif __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6

 #ifdef CONFIG_MMU

arch/arm/kernel/hw_breakpoint.c
@@ -35,7 +35,6 @@
 #include <asm/cputype.h>
 #include <asm/current.h>
 #include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
 #include <asm/traps.h>

 /* Breakpoint currently in use for each BRP. */

arch/arm/kernel/kgdb.c
@@ -74,7 +74,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
 void
 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 {
-	struct pt_regs *thread_regs;
+	struct thread_info *ti;
 	int regno;

 	/* Just making sure... */
@@ -86,24 +86,17 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 		gdb_regs[regno] = 0;

 	/* Otherwise, we have only some registers from switch_to() */
-	thread_regs		= task_pt_regs(task);
-	gdb_regs[_R0]		= thread_regs->ARM_r0;
-	gdb_regs[_R1]		= thread_regs->ARM_r1;
-	gdb_regs[_R2]		= thread_regs->ARM_r2;
-	gdb_regs[_R3]		= thread_regs->ARM_r3;
-	gdb_regs[_R4]		= thread_regs->ARM_r4;
-	gdb_regs[_R5]		= thread_regs->ARM_r5;
-	gdb_regs[_R6]		= thread_regs->ARM_r6;
-	gdb_regs[_R7]		= thread_regs->ARM_r7;
-	gdb_regs[_R8]		= thread_regs->ARM_r8;
-	gdb_regs[_R9]		= thread_regs->ARM_r9;
-	gdb_regs[_R10]		= thread_regs->ARM_r10;
-	gdb_regs[_FP]		= thread_regs->ARM_fp;
-	gdb_regs[_IP]		= thread_regs->ARM_ip;
-	gdb_regs[_SPT]		= thread_regs->ARM_sp;
-	gdb_regs[_LR]		= thread_regs->ARM_lr;
-	gdb_regs[_PC]		= thread_regs->ARM_pc;
-	gdb_regs[_CPSR]		= thread_regs->ARM_cpsr;
+	ti			= task_thread_info(task);
+	gdb_regs[_R4]		= ti->cpu_context.r4;
+	gdb_regs[_R5]		= ti->cpu_context.r5;
+	gdb_regs[_R6]		= ti->cpu_context.r6;
+	gdb_regs[_R7]		= ti->cpu_context.r7;
+	gdb_regs[_R8]		= ti->cpu_context.r8;
+	gdb_regs[_R9]		= ti->cpu_context.r9;
+	gdb_regs[_R10]		= ti->cpu_context.sl;
+	gdb_regs[_FP]		= ti->cpu_context.fp;
+	gdb_regs[_SPT]		= ti->cpu_context.sp;
+	gdb_regs[_PC]		= ti->cpu_context.pc;
 }

 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)

arch/arm/kernel/smp.c
@@ -80,7 +80,7 @@ static DECLARE_COMPLETION(cpu_running);

 static struct smp_operations smp_ops;

-void __init smp_set_ops(struct smp_operations *ops)
+void __init smp_set_ops(const struct smp_operations *ops)
 {
 	if (ops)
 		smp_ops = *ops;
@@ -400,6 +400,7 @@ asmlinkage void secondary_start_kernel(void)

 	local_irq_enable();
 	local_fiq_enable();
+	local_abt_enable();

 	/*
 	 * OK, it's off to the idle thread for us
@@ -748,6 +749,15 @@ core_initcall(register_cpufreq_notifier);

 static void raise_nmi(cpumask_t *mask)
 {
+	/*
+	 * Generate the backtrace directly if we are running in a calling
+	 * context that is not preemptible by the backtrace IPI. Note
+	 * that nmi_cpu_backtrace() automatically removes the current cpu
+	 * from mask.
+	 */
+	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+		nmi_cpu_backtrace(NULL);
+
 	smp_cross_call(mask, IPI_CPU_BACKTRACE);
 }

arch/arm/kernel/smp_twd.c
@@ -23,7 +23,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>

-#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>

 /* set up by the platform code */
@@ -34,6 +33,8 @@ static unsigned long twd_timer_rate;
 static DEFINE_PER_CPU(bool, percpu_setup_called);

 static struct clock_event_device __percpu *twd_evt;
+static unsigned int twd_features =
+		CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 static int twd_ppi;

 static int twd_shutdown(struct clock_event_device *clk)
@@ -294,8 +295,7 @@ static void twd_timer_setup(void)
 	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);

 	clk->name = "local_timer";
-	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-			CLOCK_EVT_FEAT_C3STOP;
+	clk->features = twd_features;
 	clk->rating = 350;
 	clk->set_state_shutdown = twd_shutdown;
 	clk->set_state_periodic = twd_set_periodic;
@@ -350,6 +350,8 @@ static int __init twd_local_timer_common_register(struct device_node *np)
 		goto out_irq;

 	twd_get_clock(np);
+	if (!of_property_read_bool(np, "always-on"))
+		twd_features |= CLOCK_EVT_FEAT_C3STOP;

 	/*
 	 * Immediately configure the timer on the boot CPU, unless we need
@@ -392,9 +394,6 @@ static void __init twd_local_timer_of_register(struct device_node *np)
 {
 	int err;

-	if (!is_smp() || !setup_max_cpus)
-		return;
-
 	twd_ppi = irq_of_parse_and_map(np, 0);
 	if (!twd_ppi) {
 		err = -EINVAL;

arch/arm/kernel/traps.c
@@ -625,58 +625,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		set_tls(regs->ARM_r0);
 		return 0;

-#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
-	/*
-	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
-	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
-	 * happened.  Also set the user C flag accordingly.
-	 * If access permissions have to be fixed up then non-zero is
-	 * returned and the operation has to be re-attempted.
-	 *
-	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
-	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
-	 * existence.  Don't ever use this from user code.
-	 */
-	case NR(cmpxchg):
-	for (;;) {
-		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
-					 struct pt_regs *regs);
-		unsigned long val;
-		unsigned long addr = regs->ARM_r2;
-		struct mm_struct *mm = current->mm;
-		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
-		spinlock_t *ptl;
-
-		regs->ARM_cpsr &= ~PSR_C_BIT;
-		down_read(&mm->mmap_sem);
-		pgd = pgd_offset(mm, addr);
-		if (!pgd_present(*pgd))
-			goto bad_access;
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd_present(*pmd))
-			goto bad_access;
-		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-		if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
-			pte_unmap_unlock(pte, ptl);
-			goto bad_access;
-		}
-		val = *(unsigned long *)addr;
-		val -= regs->ARM_r0;
-		if (val == 0) {
-			*(unsigned long *)addr = regs->ARM_r1;
-			regs->ARM_cpsr |= PSR_C_BIT;
-		}
-		pte_unmap_unlock(pte, ptl);
-		up_read(&mm->mmap_sem);
-		return val;
-
-		bad_access:
-		up_read(&mm->mmap_sem);
-		/* simulate a write access fault */
-		do_DataAbort(addr, 15 + (1 << 11), regs);
-	}
-#endif
-
 	default:
 		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 		   if not implemented, rather than raising SIGILL. This

arch/arm/lib/clear_user.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>

 		.text
@@ -20,6 +21,8 @@
  */
ENTRY(__clear_user_std)
WEAK(arm_clear_user)
+UNWIND(.fnstart)
+UNWIND(.save {r1, lr})
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,6 +47,7 @@ WEAK(arm_clear_user)
USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
+UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)

arch/arm/mm/Kconfig
@@ -419,28 +419,24 @@ config CPU_THUMBONLY
 config CPU_32v3
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU

 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU

 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU

 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU
@@ -805,14 +801,6 @@ config TLS_REG_EMUL
 	  a few prototypes like that in existence) and therefore access to
 	  that required register must be emulated.

-config NEEDS_SYSCALL_FOR_CMPXCHG
-	bool
-	select NEED_KUSER_HELPERS
-	help
-	  SMP on a pre-ARMv6 processor?  Well OK then.
-	  Forget about fast user space cmpxchg support.
-	  It is just not possible.
-
 config NEED_KUSER_HELPERS
 	bool

arch/arm/mm/dma-mapping.c
@@ -1407,12 +1407,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	unsigned long uaddr = vma->vm_start;
 	unsigned long usize = vma->vm_end - vma->vm_start;
 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long off = vma->vm_pgoff;

 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

 	if (!pages)
 		return -ENXIO;

+	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+		return -ENXIO;
+
+	pages += off;
+
 	do {
 		int ret = vm_insert_page(vma, uaddr, *pages++);
 		if (ret) {
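
As a side note, the added check only validates that a partial mapping starting vm_pgoff pages into the buffer still fits inside the allocation. A standalone sketch of the same arithmetic, with made-up values:

/* Sketch of the new partial-mmap bounds check; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static bool mmap_range_ok(unsigned long nr_pages, unsigned long off,
			  unsigned long usize)
{
	/* Reject offsets past the buffer and requests that run off its end. */
	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
		return false;
	return true;
}

int main(void)
{
	/* 16-page buffer: 4 pages at page offset 12 fits (prints 1) ... */
	printf("%d\n", mmap_range_ok(16, 12, 4UL << PAGE_SHIFT));
	/* ... but 8 pages at offset 12 would overrun (prints 0). */
	printf("%d\n", mmap_range_ok(16, 12, 8UL << PAGE_SHIFT));
	return 0;
}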

arch/arm/mm/fault.c
@@ -593,6 +593,28 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	arm_notify_die("", regs, &info, ifsr, 0);
 }

+/*
+ * Abort handler to be used only during first unmasking of asynchronous aborts
+ * on the boot CPU. This makes sure that the machine will not die if the
+ * firmware/bootloader left an imprecise abort pending for us to trip over.
+ */
+static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
+				      struct pt_regs *regs)
+{
+	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
+		"first unmask, this is most likely caused by a "
+		"firmware/bootloader bug.\n", fsr);
+
+	return 0;
+}
+
+void __init early_abt_enable(void)
+{
+	fsr_info[22].fn = early_abort_handler;
+	local_abt_enable();
+	fsr_info[22].fn = do_bad;
+}
+
 #ifndef CONFIG_ARM_LPAE
 static int __init exceptions_init(void)
 {

arch/arm/mm/fault.h
@@ -24,5 +24,6 @@ static inline int fsr_fs(unsigned int fsr)
 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);

 unsigned long search_exception_table(unsigned long addr);
+void early_abt_enable(void);

 #endif	/* __ARCH_ARM_FAULT_H */

arch/arm/mm/mmu.c
@@ -38,6 +38,7 @@
 #include <asm/mach/pci.h>
 #include <asm/fixmap.h>

+#include "fault.h"
 #include "mm.h"
 #include "tcm.h"
@@ -1363,6 +1364,9 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 	 */
 	local_flush_tlb_all();
 	flush_cache_all();
+
+	/* Enable asynchronous aborts */
+	early_abt_enable();
 }

 static void __init kmap_init(void)

arch/arm/vdso/vdsomunge.c
@@ -45,7 +45,6 @@
  * it does.
  */

-#include <byteswap.h>
 #include <elf.h>
 #include <errno.h>
 #include <fcntl.h>
@@ -59,6 +58,16 @@
 #include <sys/types.h>
 #include <unistd.h>

+#define swab16(x) \
+	((((x) & 0x00ff) << 8) | \
+	 (((x) & 0xff00) >> 8))
+
+#define swab32(x) \
+	((((x) & 0x000000ff) << 24) | \
+	 (((x) & 0x0000ff00) << 8) | \
+	 (((x) & 0x00ff0000) >> 8) | \
+	 (((x) & 0xff000000) >> 24))
+
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define HOST_ORDER ELFDATA2LSB
 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
@@ -104,17 +113,17 @@ static void cleanup(void)
 static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
 {
-	return swap ? bswap_32(word) : word;
+	return swap ? swab32(word) : word;
 }

 static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
 {
-	return swap ? bswap_16(half) : half;
+	return swap ? swab16(half) : half;
 }

 static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
 {
-	*dst = swap ? bswap_32(val) : val;
+	*dst = swap ? swab32(val) : val;
 }

 int main(int argc, char **argv)
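
Since vdsomunge is a build-time host program, the new byte-swap macros are easy to sanity-check in isolation. A minimal standalone test (not part of the kernel tree), using the same macro definitions as above:

/* Quick host-side check of the swab16/swab32 macros (illustration only). */
#include <assert.h>
#include <stdint.h>

#define swab16(x) \
	((((x) & 0x00ff) << 8) | \
	 (((x) & 0xff00) >> 8))

#define swab32(x) \
	((((x) & 0x000000ff) << 24) | \
	 (((x) & 0x0000ff00) << 8) | \
	 (((x) & 0x00ff0000) >> 8) | \
	 (((x) & 0xff000000) >> 24))

int main(void)
{
	assert(swab16(0x1234u) == 0x3412u);		/* halfword swap */
	assert(swab32(0x12345678u) == 0x78563412u);	/* word swap */
	return 0;
}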

drivers/clk/clkdev.c
@@ -333,7 +333,8 @@ int clk_add_alias(const char *alias, const char *alias_dev_name,
 	if (IS_ERR(r))
 		return PTR_ERR(r);

-	l = clkdev_create(r, alias, "%s", alias_dev_name);
+	l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL,
+			  alias_dev_name);
 	clk_put(r);
 	return l ? 0 : -ENODEV;

include/linux/amba/bus.h
@@ -41,8 +41,6 @@ struct amba_driver {
 	int			(*probe)(struct amba_device *, const struct amba_id *);
 	int			(*remove)(struct amba_device *);
 	void			(*shutdown)(struct amba_device *);
-	int			(*suspend)(struct amba_device *, pm_message_t);
-	int			(*resume)(struct amba_device *);
 	const struct amba_id	*id_table;
 };

lib/nmi_backtrace.c
@@ -43,6 +43,12 @@ static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
 	printk("%.*s", (end - start) + 1, buf);
 }

+/*
+ * When raise() is called it will be is passed a pointer to the
+ * backtrace_mask. Architectures that call nmi_cpu_backtrace()
+ * directly from their raise() functions may rely on the mask
+ * they are passed being updated as a side effect of this call.
+ */
 void nmi_trigger_all_cpu_backtrace(bool include_self,
 				   void (*raise)(cpumask_t *mask))
 {
@@ -149,7 +155,10 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 		/* Replace printk to write into the NMI seq */
 		this_cpu_write(printk_func, nmi_vprintk);
 		pr_warn("NMI backtrace for cpu %d\n", cpu);
-		show_regs(regs);
+		if (regs)
+			show_regs(regs);
+		else
+			dump_stack();
 		this_cpu_write(printk_func, printk_func_save);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));