Commit ec1e20a0, authored Nov 12, 2013 by Russell King
Merge branch 'misc' into for-next
Parents: 959f5854, aed3a4ed
Showing 35 changed files with 467 additions and 279 deletions (+467, -279).
Changed files:

arch/arm/Kconfig (+1, -5)
arch/arm/Kconfig.debug (+36, -5)
arch/arm/common/mcpm_entry.c (+15, -0)
arch/arm/common/mcpm_platsmp.c (+23, -4)
arch/arm/common/timer-sp.c (+1, -1)
arch/arm/include/asm/atomic.h (+27, -49)
arch/arm/include/asm/cacheflush.h (+46, -0)
arch/arm/include/asm/cmpxchg.h (+46, -12)
arch/arm/include/asm/cputype.h (+1, -0)
arch/arm/include/asm/hardirq.h (+1, -1)
arch/arm/include/asm/mcpm.h (+31, -0)
arch/arm/include/asm/pgtable-2level.h (+7, -0)
arch/arm/include/asm/pgtable-3level.h (+3, -0)
arch/arm/include/asm/setup.h (+1, -1)
arch/arm/include/asm/spinlock.h (+6, -2)
arch/arm/include/asm/tlbflush.h (+17, -31)
arch/arm/include/debug/efm32.S (+45, -0)
arch/arm/include/debug/msm.S (+5, -0)
arch/arm/kernel/hw_breakpoint.c (+7, -7)
arch/arm/kernel/kprobes.c (+4, -4)
arch/arm/kernel/perf_event_cpu.c (+1, -1)
arch/arm/kernel/setup.c (+17, -7)
arch/arm/kernel/smp.c (+19, -0)
arch/arm/kernel/smp_tlb.c (+34, -2)
arch/arm/kvm/arm.c (+3, -3)
arch/arm/lib/uaccess_with_memcpy.c (+38, -3)
arch/arm/mach-footbridge/netwinder-hw.c (+4, -4)
arch/arm/mach-vexpress/dcscb.c (+4, -52)
arch/arm/mach-vexpress/tc2_pm.c (+2, -46)
arch/arm/mm/mmap.c (+2, -4)
arch/arm64/include/asm/atomic.h (+0, -14)
arch/arm64/kernel/debug-monitors.c (+6, -7)
arch/arm64/kernel/hw_breakpoint.c (+11, -11)
arch/arm64/kernel/perf_event.c (+2, -2)
include/linux/amba/bus.h (+1, -1)
arch/arm/Kconfig

@@ -5,6 +5,7 @@ config ARM
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CLONE_BACKWARDS

@@ -1091,11 +1092,6 @@ config IWMMXT
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.

-config XSCALE_PMU
-	bool
-	depends on CPU_XSCALE
-	default y
-
 config MULTI_IRQ_HANDLER
 	bool
 	help
arch/arm/Kconfig.debug

@@ -318,6 +318,7 @@ choice
 	config DEBUG_MSM_UART1
 		bool "Kernel low-level debugging messages via MSM UART1"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the first serial port on MSM devices.

@@ -325,6 +326,7 @@ choice
 	config DEBUG_MSM_UART2
 		bool "Kernel low-level debugging messages via MSM UART2"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the second serial port on MSM devices.

@@ -332,6 +334,7 @@ choice
 	config DEBUG_MSM_UART3
 		bool "Kernel low-level debugging messages via MSM UART3"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the third serial port on MSM devices.

@@ -340,6 +343,7 @@ choice
 		bool "Kernel low-level debugging messages via MSM 8660 UART"
 		depends on ARCH_MSM8X60
 		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the serial port on MSM 8660 devices.

@@ -348,10 +352,20 @@ choice
 		bool "Kernel low-level debugging messages via MSM 8960 UART"
 		depends on ARCH_MSM8960
 		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the serial port on MSM 8960 devices.

+	config DEBUG_MSM8974_UART
+		bool "Kernel low-level debugging messages via MSM 8974 UART"
+		depends on ARCH_MSM8974
+		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the serial port on MSM 8974 devices.
+
 	config DEBUG_MVEBU_UART
 		bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
 		depends on ARCH_MVEBU

@@ -834,6 +848,20 @@ choice
 		  options; the platform specific options are deprecated
 		  and will be soon removed.

+	config DEBUG_LL_UART_EFM32
+		bool "Kernel low-level debugging via efm32 UART"
+		depends on ARCH_EFM32
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to an UART or USART port on efm32 based
+		  machines. Use the following addresses for DEBUG_UART_PHYS:
+
+		    0x4000c000 | USART0
+		    0x4000c400 | USART1
+		    0x4000c800 | USART2
+		    0x4000e000 | UART0
+		    0x4000e400 | UART1
+
 	config DEBUG_LL_UART_PL01X
 		bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
 		help

@@ -880,11 +908,16 @@ config DEBUG_STI_UART
 	bool
 	depends on ARCH_STI

+config DEBUG_MSM_UART
+	bool
+	depends on ARCH_MSM
+
 config DEBUG_LL_INCLUDE
 	string
 	default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
 	default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
 	default "debug/exynos.S" if DEBUG_EXYNOS_UART
+	default "debug/efm32.S" if DEBUG_LL_UART_EFM32
 	default "debug/icedcc.S" if DEBUG_ICEDCC
 	default "debug/imx.S" if DEBUG_IMX1_UART || \
 				 DEBUG_IMX25_UART || \

@@ -895,11 +928,7 @@ config DEBUG_LL_INCLUDE
 				 DEBUG_IMX53_UART ||\
 				 DEBUG_IMX6Q_UART || \
 				 DEBUG_IMX6SL_UART
-	default "debug/msm.S" if DEBUG_MSM_UART1 || \
-				 DEBUG_MSM_UART2 || \
-				 DEBUG_MSM_UART3 || \
-				 DEBUG_MSM8660_UART || \
-				 DEBUG_MSM8960_UART
+	default "debug/msm.S" if DEBUG_MSM_UART
 	default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
 	default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
 	default "debug/sti.S" if DEBUG_STI_UART

@@ -951,6 +980,7 @@ config DEBUG_UART_PHYS
 	default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
 	default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
 	default 0x20201000 if DEBUG_BCM2835
+	default 0x4000e400 if DEBUG_LL_UART_EFM32
 	default 0x40090000 if ARCH_LPC32XX
 	default 0x40100000 if DEBUG_PXA_UART1
 	default 0x42000000 if ARCH_GEMINI

@@ -981,6 +1011,7 @@ config DEBUG_UART_PHYS
 	default 0xfff36000 if DEBUG_HIGHBANK_UART
 	default 0xfffff700 if ARCH_IOP33X
 	depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+		DEBUG_LL_UART_EFM32 || \
 		DEBUG_UART_8250 || DEBUG_UART_PL01X

 config DEBUG_UART_VIRT
arch/arm/common/mcpm_entry.c

@@ -90,6 +90,21 @@ void mcpm_cpu_power_down(void)
 	BUG();
 }

+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+	int ret;
+
+	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+		return -EUNATCH;
+
+	ret = platform_ops->power_down_finish(cpu, cluster);
+	if (ret)
+		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
+			__func__, cpu, cluster, ret);
+
+	return ret;
+}
+
 void mcpm_cpu_suspend(u64 expected_residency)
 {
 	phys_reset_t phys_reset;
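Note: mcpm_cpu_power_down_finish() above only delegates to the registered platform backend and turns failures into a warning. As a minimal sketch of what such a backend might look like (hypothetical my_soc_* names, not part of this commit), a power_down_finish hook typically polls the power controller until the target CPU is confirmed off:

static int my_soc_power_down_finish(unsigned int cpu, unsigned int cluster)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		if (my_soc_cpu_is_off(cpu, cluster))	/* assumed helper */
			return 0;			/* safely parked */
		msleep(1);
	}
	return -ETIMEDOUT;	/* mcpm_cpu_power_down_finish() will warn */
}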
arch/arm/common/mcpm_platsmp.c

@@ -19,14 +19,23 @@
 #include <asm/smp.h>
 #include <asm/smp_plat.h>

+static void cpu_to_pcpu(unsigned int cpu,
+			unsigned int *pcpu, unsigned int *pcluster)
+{
+	unsigned int mpidr;
+
+	mpidr = cpu_logical_map(cpu);
+	*pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	*pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
 static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	unsigned int mpidr, pcpu, pcluster, ret;
+	unsigned int pcpu, pcluster, ret;
 	extern void secondary_startup(void);

-	mpidr = cpu_logical_map(cpu);
-	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
 	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
 		 __func__, cpu, pcpu, pcluster);

@@ -47,6 +56,15 @@ static void mcpm_secondary_init(unsigned int cpu)

 #ifdef CONFIG_HOTPLUG_CPU

+static int mcpm_cpu_kill(unsigned int cpu)
+{
+	unsigned int pcpu, pcluster;
+
+	cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
+	return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+}
+
 static int mcpm_cpu_disable(unsigned int cpu)
 {
 	/*

@@ -73,6 +91,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
 	.smp_boot_secondary	= mcpm_boot_secondary,
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
+	.cpu_kill		= mcpm_cpu_kill,
 	.cpu_disable		= mcpm_cpu_disable,
 	.cpu_die		= mcpm_cpu_die,
 #endif
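Note the logical negation in mcpm_cpu_kill() above: the smp_operations cpu_kill hook reports success as 1 and failure as 0, whereas mcpm_cpu_power_down_finish() follows the usual 0/-errno convention. A simplified sketch of the generic consumer side:

/* Simplified consumer of the cpu_kill hook (illustrative only). */
if (!smp_ops.cpu_kill(cpu))		/* 1 = CPU confirmed dead */
	pr_err("CPU%u: unable to kill\n", cpu);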
arch/arm/common/timer-sp.c

@@ -175,7 +175,7 @@ static struct clock_event_device sp804_clockevent = {
 static struct irqaction sp804_timer_irq = {
 	.name		= "timer",
-	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
 	.handler	= sp804_timer_interrupt,
 	.dev_id		= &sp804_clockevent,
 };
arch/arm/include/asm/atomic.h

@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)

 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-	unsigned long oldval, res;
+	int oldval;
+	unsigned long res;

 	smp_mb();

@@ -134,21 +135,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }

-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-	: "r" (addr), "Ir" (mask)
-	: "cc");
-}
-
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP

@@ -197,15 +183,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }

-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	*addr &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))

@@ -238,15 +215,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)

 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;

 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;

 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"

@@ -257,7 +234,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }

-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"

@@ -266,9 +243,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;

 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"

@@ -279,9 +256,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }

-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	u64 tmp;
+	long long tmp;

 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"

@@ -294,9 +271,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif

-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	__asm__ __volatile__("@ atomic64_add\n"

@@ -311,9 +288,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	: "cc");
 }

-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	smp_mb();

@@ -334,9 +311,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	return result;
 }

-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	__asm__ __volatile__("@ atomic64_sub\n"

@@ -351,9 +328,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	: "cc");
 }

-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	smp_mb();

@@ -374,9 +351,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	return result;
 }

-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+					  long long new)
 {
-	u64 oldval;
+	long long oldval;
 	unsigned long res;

 	smp_mb();

@@ -398,9 +376,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 	return oldval;
 }

-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	smp_mb();

@@ -419,9 +397,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	return result;
 }

-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	smp_mb();

@@ -445,9 +423,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	return result;
 }

-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-	u64 val;
+	long long val;
 	unsigned long tmp;
 	int ret = 1;
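The u64 to long long conversion keeps ARM's atomic64_t signed, matching the cross-architecture atomic64 API; signedness matters for operations like atomic64_dec_if_positive(), whose result is compared against zero. A standalone illustration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned long long u = 0;	/* old u64-style counter */
	long long s = 0;		/* new long long counter */

	/* dec-if-positive semantics require a signed comparison: */
	printf("%d\n", (u - 1) > 0);	/* 1: unsigned wraps to 2^64 - 1 */
	printf("%d\n", (s - 1) > 0);	/* 0: signed result is -1 */
	return 0;
}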
arch/arm/include/asm/cacheflush.h

@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))

+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so without any intervening memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ *   however some exceptions may exist.  Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ *   fp is preserved to the stack explicitly prior disabling the cache
+ *   since adding it to the clobber list is incompatible with having
+ *   CONFIG_FRAME_POINTER=y.  ip is saved as well if ever r12-clobbering
+ *   trampolines are inserted by the linker and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+	asm volatile( \
+	"stmfd	sp!, {fp, ip} \n\t" \
+	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
+	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
+	"isb	\n\t" \
+	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
+	"clrex	\n\t" \
+	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
+	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
+	"isb	\n\t" \
+	"dsb	\n\t" \
+	"ldmfd	sp!, {fp, ip}" \
+	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+	      "r9","r10","lr","memory" )
+
 #endif
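v7_exit_coherency_flush() is consumed below by the vexpress dcscb and TC2 power-down paths. A minimal sketch of a caller, assuming a hypothetical platform power-down hook (louis flushes to the Level Of Unification Inner Shareable, typically L1 only; all flushes every level):

static void my_platform_power_down(void)	/* hypothetical caller */
{
	/* ... tell the power controller to cut power on WFI ... */

	v7_exit_coherency_flush(louis);	/* or (all) for the cluster's last man */

	/* From here on: no stack use, no ldrex/strex; just wait to die. */
	while (1)
		wfi();
}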
arch/arm/include/asm/cmpxchg.h

@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	return ret;
 }

+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	unsigned long long oldval;
+	unsigned long res;
+
+	__asm__ __volatile__(
+"1:	ldrexd		%1, %H1, [%3]\n"
+"	teq		%1, %4\n"
+"	teqeq		%H1, %H4\n"
+"	bne		2f\n"
+"	strexd		%0, %5, %H5, [%3]\n"
+"	teq		%0, #0\n"
+"	bne		1b\n"
+"2:"
+	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+	: "r" (ptr), "r" (old), "r" (new)
+	: "cc");
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
 #define cmpxchg_local(ptr,o,n)						\
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
 				       (unsigned long)(o),		\

@@ -230,19 +266,17 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       sizeof(*(ptr))))

 #define cmpxchg64(ptr, o, n)						\
-	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
-						atomic64_t,		\
-						counter),		\
-					      (unsigned long long)(o),	\
-					      (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
-						local64_t,		\
-						a),			\
-					     (unsigned long long)(o),	\
-					     (unsigned long long)(n)))
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)	cmpxchg64_relaxed((ptr), (o), (n))

 #endif	/* __LINUX_ARM_ARCH__ >= 6 */

 #endif /* __ASM_ARM_CMPXCHG_H */
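With __cmpxchg64()/__cmpxchg64_mb() in place, cmpxchg64() now operates on any naturally aligned 64-bit variable instead of routing through atomic64_cmpxchg(), and the new cmpxchg64_relaxed() variant skips the barriers. Typical retry-loop usage (illustrative sketch, not from this commit):

/* Illustrative lock-free 64-bit accumulator using cmpxchg64(). */
static u64 total;

static void add_sample(u64 delta)
{
	u64 old, new;

	do {
		old = total;		/* racy snapshot */
		new = old + delta;
	} while (cmpxchg64(&total, old, new) != old);
}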
arch/arm/include/asm/cputype.h

@@ -10,6 +10,7 @@
 #define CPUID_TLBTYPE	3
 #define CPUID_MPUIR	4
 #define CPUID_MPIDR	5
+#define CPUID_REVIDR	6

 #ifdef CONFIG_CPU_V7M
 #define CPUID_EXT_PFR0	0x40
arch/arm/include/asm/hardirq.h

@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>

-#define NR_IPI	6
+#define NR_IPI	7

 typedef struct {
 	unsigned int __softirq_pending;
arch/arm/include/asm/mcpm.h

@@ -81,9 +81,39 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  *
  * This will return if mcpm_platform_register() has not been called
  * previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_cpu_power_down_finish() subsequently returns non-zero for the
+ * specified cpu.  Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
  */
 void mcpm_cpu_power_down(void);

+/**
+ * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
+ *	make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ *	- zero if the CPU is in a safely parked state
+ *	- nonzero otherwise (e.g., timeout)
+ */
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+
 /**
  * mcpm_cpu_suspend - bring the calling CPU in a suspended state
  *

@@ -126,6 +156,7 @@ int mcpm_cpu_powered_up(void);
 struct mcpm_platform_ops {
 	int (*power_up)(unsigned int cpu, unsigned int cluster);
 	void (*power_down)(void);
+	int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
 	void (*suspend)(u64);
 	void (*powered_up)(void);
 };
arch/arm/include/asm/pgtable-2level.h

@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)

 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)

+/*
+ * We don't have huge page support for short descriptors, for the moment
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd)	(0)
+#define pmd_thp_or_huge(pmd)	(0)
+
 #endif /* __ASSEMBLY__ */

 #endif /* _ASM_PGTABLE_2LEVEL_H */
arch/arm/include/asm/pgtable-3level.h

@@ -206,6 +206,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))

+#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
arch/arm/include/asm/setup.h

@@ -49,7 +49,7 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size

-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
+extern int arm_add_memory(u64 start, u64 size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);
arch/arm/include/asm/spinlock.h

@@ -127,10 +127,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	dsb_sev();
 }

+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-	return tickets.owner != tickets.next;
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }

 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
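arch_spin_value_unlocked() tests a lock passed by value, which is exactly what the new ARCH_USE_CMPXCHG_LOCKREF select in arch/arm/Kconfig needs: the generic lockref code snapshots a combined {spinlock, count} word and must inspect the copied lock without dereferencing the live one. Roughly, paraphrasing lib/lockref.c (field names from the generic struct lockref; treat as a sketch):

struct lockref old, new;

old.lock_count = ACCESS_ONCE(lockref->lock_count);	/* 64-bit snapshot */
if (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
	new.lock_count = old.lock_count;
	new.count++;
	if (cmpxchg64_relaxed(&lockref->lock_count, old.lock_count,
			      new.lock_count) == old.lock_count)
		return 0;	/* reference count bumped, lock never taken */
}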
arch/arm/include/asm/tlbflush.h

@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
 		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
 }

-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
-	unsigned int midr = read_cpuid_id();
-
-	/* Cortex-A15 r0p0..r3p2 affected */
-	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-		return 0;
-	return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-	/*
-	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
-	 */
-	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
-	dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
-	return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
 /*
  *	flush_pmd_entry
  *

@@ -697,4 +666,21 @@ extern void flush_bp_all(void);
 #endif

 #ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+		erratum_a15_798181_handler))
+		return erratum_a15_798181_handler();
+	return false;
+}
+#endif
+
 #endif
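The rewritten erratum_a15_798181() is essentially free when the workaround is configured out: IS_ENABLED(CONFIG_ARM_ERRATA_798181) is a compile-time constant, so the handler test and call become dead code and the compiler reduces the inline to:

/* Effective body with CONFIG_ARM_ERRATA_798181=n (what the compiler sees). */
static inline bool erratum_a15_798181(void)
{
	return false;
}

When the option is enabled, the decision is made once per boot in erratum_a15_798181_init() (see arch/arm/kernel/smp_tlb.c below) and dispatched through the function pointer.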
arch/arm/include/debug/efm32.S (new file, mode 100644)

/*
 * Copyright (C) 2013 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define UARTn_CMD		0x000c
#define UARTn_CMD_TXEN		0x0004

#define UARTn_STATUS		0x0010
#define UARTn_STATUS_TXC	0x0020
#define UARTn_STATUS_TXBL	0x0040

#define UARTn_TXDATA		0x0034

		.macro	addruart, rx, tmp
		ldr	\rx, =(CONFIG_DEBUG_UART_PHYS)

		/*
		 * enable TX. The driver might disable it to save energy. We
		 * don't care about disabling at the end as during debug power
		 * consumption isn't that important.
		 */
		ldr	\tmp, =(UARTn_CMD_TXEN)
		str	\tmp, [\rx, #UARTn_CMD]
		.endm

		.macro	senduart, rd, rx
		strb	\rd, [\rx, #UARTn_TXDATA]
		.endm

		.macro	waituart, rd, rx
1001:		ldr	\rd, [\rx, #UARTn_STATUS]
		tst	\rd, #UARTn_STATUS_TXBL
		beq	1001b
		.endm

		.macro	busyuart, rd, rx
1001:		ldr	\rd, [\rx, UARTn_STATUS]
		tst	\rd, #UARTn_STATUS_TXC
		bne	1001b
		.endm
arch/arm/include/debug/msm.S

@@ -44,6 +44,11 @@
 #ifdef CONFIG_DEBUG_MSM8960_UART
 #define MSM_DEBUG_UART_BASE	0xF0040000
 #define MSM_DEBUG_UART_PHYS	0x16440000
 #endif
+
+#ifdef CONFIG_DEBUG_MSM8974_UART
+#define MSM_DEBUG_UART_BASE	0xFA71E000
+#define MSM_DEBUG_UART_PHYS	0xF991E000
+#endif

 	.macro	addruart, rp, rv, tmp
arch/arm/kernel/hw_breakpoint.c

@@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		/* Breakpoint */
 		ctrl_base = ARM_BASE_BCR;
 		val_base = ARM_BASE_BVR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		ctrl_base = ARM_BASE_WCR;
 		val_base = ARM_BASE_WVR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}

@@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = ARM_BASE_BCR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		base = ARM_BASE_WCR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}

@@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);

 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();

@@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;

-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);

 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();

@@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+	slots = this_cpu_ptr(bp_on_reg);

 	/* The exception entry code places the amended lr in the PC. */
 	addr = regs->ARM_pc;
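This file and the following per-cpu conversions are one mechanical pattern: __get_cpu_var() yields this CPU's instance as an lvalue (so callers cast it or take its address), while this_cpu_ptr() yields a pointer directly. Side by side, for a per-cpu array like the breakpoint slot table (the array size here is illustrative):

static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[16]);

struct perf_event **slots;
slots = (struct perf_event **)__get_cpu_var(bp_on_reg);	/* old: lvalue + cast */
slots = this_cpu_ptr(bp_on_reg);			/* new: direct pointer */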
arch/arm/kernel/kprobes.c

@@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)

 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }

 static void __kprobes set_current_kprobe(struct kprobe *p)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }

 static void __kprobes

@@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			continue;

 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}

 		orig_ret_address = (unsigned long)ri->ret_addr;
arch/arm/kernel/perf_event_cpu.c

@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(perf_num_counters);

 static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
 }

 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
arch/arm/kernel/setup.c

@@ -599,6 +599,8 @@ static void __init setup_processor(void)
 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif

+	erratum_a15_798181_init();
+
 	feat_v6_fixup();
 	cacheid_init();

@@ -619,9 +621,10 @@ void __init dump_machine_table(void)
 		/* can't use cpu_relax() here as it may require MMU setup */;
 }

-int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+int __init arm_add_memory(u64 start, u64 size)
 {
 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+	u64 aligned_start;

 	if (meminfo.nr_banks >= NR_BANKS) {
 		printk(KERN_CRIT "NR_BANKS too low, "

@@ -634,10 +637,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 	 * Size is appropriately rounded down, start is rounded up.
 	 */
 	size -= start & ~PAGE_MASK;
-	bank->start = PAGE_ALIGN(start);
+	aligned_start = PAGE_ALIGN(start);

-#ifndef CONFIG_ARM_LPAE
-	if (bank->start + size < bank->start) {
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+	if (aligned_start > ULONG_MAX) {
+		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+		       "32-bit physical address space\n", (long long)start);
+		return -EINVAL;
+	}
+
+	if (aligned_start + size > ULONG_MAX) {
 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 		       "32-bit physical address space\n", (long long)start);
 		/*

@@ -645,10 +654,11 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
 		 * This means we lose a page after masking.
 		 */
-		size = ULONG_MAX - bank->start;
+		size = ULONG_MAX - aligned_start;
 	}
 #endif

+	bank->start = aligned_start;
 	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

 	/*

@@ -669,8 +679,8 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
-	phys_addr_t size;
-	phys_addr_t start;
+	u64 size;
+	u64 start;
 	char *endp;

 	/*
arch/arm/kernel/smp.c

@@ -25,6 +25,7 @@
 #include <linux/clockchips.h>
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
+#include <linux/irq_work.h>

 #include <linux/atomic.h>
 #include <asm/smp.h>

@@ -66,6 +67,7 @@ enum ipi_msg_type {
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
 };

 static DECLARE_COMPLETION(cpu_running);

@@ -448,6 +450,14 @@ void arch_send_call_function_single_ipi(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }

+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	if (is_smp())
+		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
 static const char *ipi_types[NR_IPI] = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),

@@ -456,6 +466,7 @@ static const char *ipi_types[NR_IPI] = {
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_IRQ_WORK, "IRQ work interrupts"),
 };

 void show_ipi_list(struct seq_file *p, int prec)

@@ -565,6 +576,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		irq_exit();
 		break;

+#ifdef CONFIG_IRQ_WORK
+	case IPI_IRQ_WORK:
+		irq_enter();
+		irq_work_run();
+		irq_exit();
+		break;
+#endif
+
 	default:
 		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
 		       cpu, ipinr);
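IPI_IRQ_WORK is the seventh IPI type, which is why NR_IPI in asm/hardirq.h grew from 6 to 7 earlier in this diff. irq_work lets restricted contexts defer a callback into hard-IRQ context on the same CPU; arch_irq_work_raise() gives ARM a self-IPI so that work runs promptly instead of waiting for the next timer tick. Minimal usage sketch of the generic API this hooks into (my_deferred_fn is hypothetical):

#include <linux/irq_work.h>

static void my_deferred_fn(struct irq_work *work)
{
	/* runs in hard-IRQ context shortly after being queued */
}

static struct irq_work my_work = { .func = my_deferred_fn };

/* From a context where little else is safe: */
irq_work_queue(&my_work);	/* ends up in arch_irq_work_raise() */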
arch/arm/kernel/smp_tlb.c

@@ -70,6 +70,40 @@ static inline void ipi_flush_bp_all(void *ignored)
 	local_flush_bp_all();
 }

+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+	unsigned int midr = read_cpuid_id();
+	unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+	/* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+	    (revidr & 0x210) == 0x210) {
+		return;
+	}
+
+	if (revidr & 0x10)
+		erratum_a15_798181_handler = erratum_a15_798181_partial;
+	else
+		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
 	dmb();

@@ -80,7 +114,6 @@ static void broadcast_tlb_a15_erratum(void)
 	if (!erratum_a15_798181())
 		return;

-	dummy_flush_tlb_a15_erratum();
 	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }

@@ -92,7 +125,6 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 	if (!erratum_a15_798181())
 		return;

-	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
 	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
arch/arm/kvm/arm.c

@@ -65,7 +65,7 @@ static bool vgic_present;
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(preemptible());
-	__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
+	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
 }

 /**

@@ -75,7 +75,7 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
 {
 	BUG_ON(preemptible());
-	return __get_cpu_var(kvm_arm_running_vcpu);
+	return __this_cpu_read(kvm_arm_running_vcpu);
 }

 /**

@@ -815,7 +815,7 @@ static void cpu_init_hyp_mode(void *dummy)

 	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
 	pgd_ptr = kvm_mmu_get_httbr();
-	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;
arch/arm/lib/uaccess_with_memcpy.c

@@ -18,6 +18,7 @@
 #include <linux/hardirq.h> /* for in_atomic() */
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/hugetlb.h>
 #include <asm/current.h>
 #include <asm/page.h>

@@ -40,7 +41,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 		return 0;

 	pmd = pmd_offset(pud, addr);
-	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+	if (unlikely(pmd_none(*pmd)))
+		return 0;
+
+	/*
+	 * A pmd can be bad if it refers to a HugeTLB or THP page.
+	 *
+	 * Both THP and HugeTLB pages have the same pmd layout
+	 * and should not be manipulated by the pte functions.
+	 *
+	 * Lock the page table for the destination and check
+	 * to see that it's still huge and whether or not we will
+	 * need to fault on write, or if we have a splitting THP.
+	 */
+	if (unlikely(pmd_thp_or_huge(*pmd))) {
+		ptl = &current->mm->page_table_lock;
+		spin_lock(ptl);
+		if (unlikely(!pmd_thp_or_huge(*pmd)
+			|| pmd_hugewillfault(*pmd)
+			|| pmd_trans_splitting(*pmd))) {
+			spin_unlock(ptl);
+			return 0;
+		}
+
+		*ptep = NULL;
+		*ptlp = ptl;
+		return 1;
+	}
+
+	if (unlikely(pmd_bad(*pmd)))
 		return 0;

 	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);

@@ -94,7 +123,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 		from += tocopy;
 		n -= tocopy;

-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap_unlock(pte, ptl);
+		else
+			spin_unlock(ptl);
 	}
 	if (!atomic)
 		up_read(&current->mm->mmap_sem);

@@ -147,7 +179,10 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		addr += tocopy;
 		n -= tocopy;

-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap_unlock(pte, ptl);
+		else
+			spin_unlock(ptl);
 	}
 	up_read(&current->mm->mmap_sem);
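The return convention of pin_page_for_write() after this change is worth spelling out, since both callers above now have to distinguish two unlock paths (a summary of the diff, not new behaviour):

/*
 * pin_page_for_write() results:
 *   1, *ptep != NULL : small page pinned; release with pte_unmap_unlock()
 *   1, *ptep == NULL : huge/THP page pinned under mm->page_table_lock;
 *                      release with spin_unlock(ptl)
 *   0                : not pinnable; fall back to the regular user copy
 */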
arch/arm/mach-footbridge/netwinder-hw.c

@@ -692,14 +692,14 @@ static void netwinder_led_set(struct led_classdev *cdev,
 	unsigned long flags;
 	u32 reg;

-	spin_lock_irqsave(&nw_gpio_lock, flags);
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
 	reg = nw_gpio_read();
 	if (b != LED_OFF)
 		reg &= ~led->mask;
 	else
 		reg |= led->mask;
 	nw_gpio_modify_op(led->mask, reg);
-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
 }

 static enum led_brightness netwinder_led_get(struct led_classdev *cdev)

@@ -709,9 +709,9 @@ static enum led_brightness netwinder_led_get(struct led_classdev *cdev)
 	unsigned long flags;
 	u32 reg;

-	spin_lock_irqsave(&nw_gpio_lock, flags);
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
 	reg = nw_gpio_read();
-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);

 	return (reg & led->mask) ? LED_OFF : LED_FULL;
 }
arch/arm/mach-vexpress/dcscb.c

@@ -133,38 +133,8 @@ static void dcscb_power_down(void)
 	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 		arch_spin_unlock(&dcscb_lock);

-		/*
-		 * Flush all cache levels for this cluster.
-		 *
-		 * To do so we do:
-		 * - Clear the SCTLR.C bit to prevent further cache allocations
-		 * - Flush the whole cache
-		 * - Clear the ACTLR "SMP" bit to disable local coherency
-		 *
-		 * Let's do it in the safest possible way i.e. with
-		 * no memory access within the following sequence
-		 * including to the stack.
-		 *
-		 * Note: fp is preserved to the stack explicitly prior doing
-		 * this since adding it to the clobber list is incompatible
-		 * with having CONFIG_FRAME_POINTER=y.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]!		\n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)"	\n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_all \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		/* Flush all cache levels for this cluster. */
+		v7_exit_coherency_flush(all);

 		/*
 		 * This is a harmless no-op.  On platforms with a real

@@ -183,26 +153,8 @@ static void dcscb_power_down(void)
 	} else {
 		arch_spin_unlock(&dcscb_lock);

-		/*
-		 * Flush the local CPU cache.
-		 * Let's do it in the safest possible way as above.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]!		\n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)"	\n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_louis \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		/* Disable and flush the local CPU cache. */
+		v7_exit_coherency_flush(louis);
 	}

 	__mcpm_cpu_down(cpu, cluster);
arch/arm/mach-vexpress/tc2_pm.c

@@ -156,32 +156,7 @@ static void tc2_pm_down(u64 residency)
 			: : "r" (0x400) );
 		}

-		/*
-		 * We need to disable and flush the whole (L1 and L2) cache.
-		 * Let's do it in the safest possible way i.e. with
-		 * no memory access within the following sequence
-		 * including the stack.
-		 *
-		 * Note: fp is preserved to the stack explicitly prior doing
-		 * this since adding it to the clobber list is incompatible
-		 * with having CONFIG_FRAME_POINTER=y.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]!		\n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)"	\n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_all \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		v7_exit_coherency_flush(all);

 		cci_disable_port_by_cpu(mpidr);

@@ -197,26 +172,7 @@ static void tc2_pm_down(u64 residency)

 		arch_spin_unlock(&tc2_pm_lock);

-		/*
-		 * We need to disable and flush only the L1 cache.
-		 * Let's do it in the safest possible way as above.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]!		\n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)"	\n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_louis \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		v7_exit_coherency_flush(louis);
 	}

 	__mcpm_cpu_down(cpu, cluster);
arch/arm/mm/mmap.c

@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
 }

 /*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
  */
 int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 {
-	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
 }

 #ifdef CONFIG_STRICT_DEVMEM
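The old hard-coded bound 0x00100000 is pfn 2^20, i.e. exactly 4 GB with 4 KB pages; the new expression derives the limit from PHYS_MASK so it tracks the kernel configuration. Worked example with assumed LPAE values:

/*
 * Assuming PHYS_MASK = (1ULL << 40) - 1 (40-bit LPAE) and PAGE_SHIFT = 12:
 *
 *   PHYS_MASK >> PAGE_SHIFT       = 0x0fffffff   (highest valid pfn)
 *   1 + (PHYS_MASK >> PAGE_SHIFT) = 0x10000000   (2^28 pages)
 *
 * so a mapping is valid iff pfn + nr_pages <= 2^28, covering the whole
 * 1 TB physical range, versus the old fixed 4 GB cap.
 */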
arch/arm64/include/asm/atomic.h

@@ -126,20 +126,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }

-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	asm volatile("// atomic_clear_mask\n"
-"1:	ldxr	%0, %2\n"
-"	bic	%0, %0, %3\n"
-"	stxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
-	: "Ir" (mask)
-	: "cc");
-}
-
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))

 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
arch/arm64/kernel/debug-monitors.c

@@ -27,7 +27,6 @@
 #include <linux/uaccess.h>

 #include <asm/debug-monitors.h>
-#include <asm/local.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>

@@ -89,8 +88,8 @@ early_param("nodebugmon", early_debug_disable);
  * Keep track of debug users on each core.
  * The ref counts are per-cpu so we use a local_t type.
  */
-static DEFINE_PER_CPU(local_t, mde_ref_count);
-static DEFINE_PER_CPU(local_t, kde_ref_count);
+static DEFINE_PER_CPU(int, mde_ref_count);
+static DEFINE_PER_CPU(int, kde_ref_count);

 void enable_debug_monitors(enum debug_el el)
 {

@@ -98,11 +97,11 @@ void enable_debug_monitors(enum debug_el el)
 	WARN_ON(preemptible());

-	if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
+	if (this_cpu_inc_return(mde_ref_count) == 1)
 		enable = DBG_MDSCR_MDE;

 	if (el == DBG_ACTIVE_EL1 &&
-	    local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
+	    this_cpu_inc_return(kde_ref_count) == 1)
 		enable |= DBG_MDSCR_KDE;

 	if (enable && debug_enabled) {

@@ -118,11 +117,11 @@ void disable_debug_monitors(enum debug_el el)
 	WARN_ON(preemptible());

-	if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
+	if (this_cpu_dec_return(mde_ref_count) == 0)
 		disable = ~DBG_MDSCR_MDE;

 	if (el == DBG_ACTIVE_EL1 &&
-	    local_dec_and_test(&__get_cpu_var(kde_ref_count)))
+	    this_cpu_dec_return(kde_ref_count) == 0)
 		disable &= ~DBG_MDSCR_KDE;

 	if (disable) {
arch/arm64/kernel/hw_breakpoint.c

@@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		/* Breakpoint */
 		ctrl_reg = AARCH64_DBG_REG_BCR;
 		val_reg = AARCH64_DBG_REG_BVR;
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 		reg_enable = !debug_info->bps_disabled;
 	} else {
 		/* Watchpoint */
 		ctrl_reg = AARCH64_DBG_REG_WCR;
 		val_reg = AARCH64_DBG_REG_WVR;
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 		reg_enable = !debug_info->wps_disabled;
 	}

@@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = AARCH64_DBG_REG_BCR;
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		base = AARCH64_DBG_REG_WCR;
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}

@@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable)
 	switch (reg) {
 	case AARCH64_DBG_REG_BCR:
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 		break;
 	case AARCH64_DBG_REG_WCR:
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 		break;
 	default:

@@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
 	struct debug_info *debug_info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+	slots = this_cpu_ptr(bp_on_reg);
 	addr = instruction_pointer(regs);
 	debug_info = &current->thread.debug;

@@ -596,7 +596,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
 		user_enable_single_step(current);
 	} else {
 		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
-		kernel_step = &__get_cpu_var(stepping_kernel_bp);
+		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

 		if (*kernel_step != ARM_KERNEL_STEP_NONE)
 			return 0;

@@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);
 	debug_info = &current->thread.debug;

 	for (i = 0; i < core_num_wrps; ++i) {

@@ -698,7 +698,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 		user_enable_single_step(current);
 	} else {
 		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
-		kernel_step = &__get_cpu_var(stepping_kernel_bp);
+		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

 		if (*kernel_step != ARM_KERNEL_STEP_NONE)
 			return 0;

@@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
 	struct debug_info *debug_info = &current->thread.debug;
 	int handled_exception = 0, *kernel_step;

-	kernel_step = &__get_cpu_var(stepping_kernel_bp);
+	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

 	/*
 	 * Called from single-step exception handler.
arch/arm64/kernel/perf_event.c

@@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();

-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

@@ -1257,7 +1257,7 @@ device_initcall(register_pmu_driver);

 static struct pmu_hw_events *armpmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
 }

 static void __init cpu_pmu_init(struct arm_pmu *armpmu)
include/linux/amba/bus.h

@@ -21,7 +21,7 @@
 #include <linux/resource.h>
 #include <linux/regulator/consumer.h>

-#define AMBA_NR_IRQS	2
+#define AMBA_NR_IRQS	9

 #define AMBA_CID	0xb105f00d

 struct clk;
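AMBA_NR_IRQS sizes the per-device interrupt table in struct amba_device, so the bump from 2 to 9 makes room for primecell peripherals exposing more interrupt lines (e.g. per-channel DMA controller IRQs). For context, an abbreviated sketch of the structure (fields recalled from the header, may differ slightly in this kernel version):

struct amba_device {
	struct device	dev;
	struct resource	res;
	struct clk	*pclk;
	unsigned int	periphid;
	unsigned int	irq[AMBA_NR_IRQS];	/* now up to 9 entries */
};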