Commit c706c7eb authored by Linus Torvalds

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM development updates from Russell King:
 "Included in this update:

   - moving PSCI code from ARM64/ARM to drivers/

   - removal of some architecture internals from global kernel view

   - addition of software based "privileged no access" support using the
     old domains register to turn off the ability for kernel
     loads/stores to access userspace.  Only the proper accessors will
     be usable.

   - addition of early fixmap support for early console

   - re-addition (and reimplementation) of OMAP special interconnect
     barrier

   - removal of finish_arch_switch()

   - only expose cpuX/online in sysfs if hotpluggable

   - a number of code cleanups"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (41 commits)
  ARM: software-based priviledged-no-access support
  ARM: entry: provide uaccess assembly macro hooks
  ARM: entry: get rid of multiple macro definitions
  ARM: 8421/1: smp: Collapse arch_cpu_idle_dead() into cpu_die()
  ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore()
  ARM: mm: improve do_ldrd_abort macro
  ARM: entry: ensure that IRQs are enabled when calling syscall_trace_exit()
  ARM: entry: efficiency cleanups
  ARM: entry: get rid of asm_trace_hardirqs_on_cond
  ARM: uaccess: simplify user access assembly
  ARM: domains: remove DOMAIN_TABLE
  ARM: domains: keep vectors in separate domain
  ARM: domains: get rid of manager mode for user domain
  ARM: domains: move initial domain setting value to asm/domains.h
  ARM: domains: provide domain_mask()
  ARM: domains: switch to keeping domain value in register
  ARM: 8419/1: dma-mapping: harmonize definition of DMA_ERROR_CODE
  ARM: 8417/1: refactor bitops functions with BIT_MASK() and BIT_WORD()
  ARM: 8416/1: Feroceon: use of_iomap() to map register base
  ARM: 8415/1: early fixmap support for earlycon
  ...
parents 79b0691d 3ff32a0d
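
The "privileged no access" entry above comes down to one wrapping pattern, applied throughout the uaccess changes further down: save the current domain register, grant client access to the user domain around the raw user access, then restore it. A minimal sketch of that pattern, mirroring the new __copy_from_user() in the asm/uaccess.h hunk below (name and error handling simplified):

	static inline unsigned long
	example_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		/* open the user domain so the raw user accesses do not fault */
		unsigned int ua_flags = uaccess_save_and_enable();

		n = arm_copy_from_user(to, from, n);	/* raw copy routine */

		/* put the user domain back to "no access" */
		uaccess_restore(ua_flags);
		return n;
	}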
...@@ -67,6 +67,12 @@ Optional properties:
   disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
+- arm,shared-override : The default behavior of the pl310 cache controller with
+  respect to the shareable attribute is to transform "normal memory
+  non-cacheable transactions" into "cacheable no allocate" (for reads) or
+  "write through no write allocate" (for writes).
+  On systems where this may cause DMA buffer corruption, this property must be
+  specified to indicate that such transforms are precluded.
 - prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
   (forcibly enable), property absent (retain settings set by firmware)
 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
......
...@@ -26,13 +26,19 @@ Required properties:
 Optional properties:
-- interrupt-affinity : Valid only when using SPIs, specifies a list of phandles
-                       to CPU nodes corresponding directly to the affinity of
+- interrupt-affinity : When using SPIs, specifies a list of phandles to CPU
+                       nodes corresponding directly to the affinity of
                        the SPIs listed in the interrupts property.
-                       This property should be present when there is more than
+                       When using a PPI, specifies a list of phandles to CPU
+                       nodes corresponding to the set of CPUs which have
+                       a PMU of this type signalling the PPI listed in the
+                       interrupts property.
+                       This property should be present when there is more than
                        a single SPI.
 - qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
                      events.
......
...@@ -806,11 +806,13 @@ F: arch/arm/include/asm/floppy.h
 ARM PMU PROFILING AND DEBUGGING
 M: Will Deacon <will.deacon@arm.com>
 S: Maintained
-F: arch/arm/kernel/perf_event*
+F: arch/arm/kernel/perf_*
 F: arch/arm/oprofile/common.c
-F: arch/arm/include/asm/pmu.h
 F: arch/arm/kernel/hw_breakpoint.c
 F: arch/arm/include/asm/hw_breakpoint.h
+F: arch/arm/include/asm/perf_event.h
+F: drivers/perf/arm_pmu.c
+F: include/linux/perf/arm_pmu.h

 ARM PORT
 M: Russell King <linux@arm.linux.org.uk>
...@@ -8120,6 +8122,15 @@ F: include/linux/power_supply.h
 F: drivers/power/
 X: drivers/power/avs/

+POWER STATE COORDINATION INTERFACE (PSCI)
+M: Mark Rutland <mark.rutland@arm.com>
+M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: drivers/firmware/psci.c
+F: include/linux/psci.h
+F: include/uapi/linux/psci.h
+
 PNP SUPPORT
 M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 S: Maintained
......
...@@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64
 config ARCH_HAS_BANDGAP
 	bool

+config FIX_EARLYCON_MEM
+	def_bool y if MMU
+
 config GENERIC_HWEIGHT
 	bool
 	default y
...@@ -1496,6 +1499,7 @@ config HOTPLUG_CPU
 config ARM_PSCI
 	bool "Support for the ARM Power State Coordination Interface (PSCI)"
 	depends on CPU_V7
+	select ARM_PSCI_FW
 	help
 	  Say Y here if you want Linux to communicate with system firmware
 	  implementing the PSCI specification for CPU-centric power
...@@ -1700,13 +1704,24 @@ config HIGHPTE
 	  consumed by page tables. Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.

-config HW_PERF_EVENTS
-	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
 	default y
 	help
-	  Enable hardware performance counter support for perf events. If
-	  disabled, perf events will use software events only.
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses. This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-efforts implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
+config HW_PERF_EVENTS
+	def_bool y
+	depends on ARM_PMU

 config SYS_SUPPORTS_HUGETLBFS
 	def_bool y
......
...@@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu) ...@@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu)
return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster); return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
} }
static int mcpm_cpu_disable(unsigned int cpu) static bool mcpm_cpu_can_disable(unsigned int cpu)
{ {
/* /* We assume all CPUs may be shut down. */
* We assume all CPUs may be shut down. return true;
* This would be the hook to use for eventual Secure
* OS migration requests as described in the PSCI spec.
*/
return 0;
} }
static void mcpm_cpu_die(unsigned int cpu) static void mcpm_cpu_die(unsigned int cpu)
...@@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = { ...@@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
.smp_secondary_init = mcpm_secondary_init, .smp_secondary_init = mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
.cpu_kill = mcpm_cpu_kill, .cpu_kill = mcpm_cpu_kill,
.cpu_disable = mcpm_cpu_disable, .cpu_can_disable = mcpm_cpu_can_disable,
.cpu_die = mcpm_cpu_die, .cpu_die = mcpm_cpu_die,
#endif #endif
}; };
......
...@@ -12,7 +12,6 @@ generic-y += irq_regs.h ...@@ -12,7 +12,6 @@ generic-y += irq_regs.h
generic-y += kdebug.h generic-y += kdebug.h
generic-y += local.h generic-y += local.h
generic-y += local64.h generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h generic-y += mm-arch-hooks.h
generic-y += msgbuf.h generic-y += msgbuf.h
generic-y += param.h generic-y += param.h
......
...@@ -108,33 +108,37 @@ ...@@ -108,33 +108,37 @@
.endm .endm
#endif #endif
.macro asm_trace_hardirqs_off .macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS) #if defined(CONFIG_TRACE_IRQFLAGS)
.if \save
stmdb sp!, {r0-r3, ip, lr} stmdb sp!, {r0-r3, ip, lr}
.endif
bl trace_hardirqs_off bl trace_hardirqs_off
.if \save
ldmia sp!, {r0-r3, ip, lr} ldmia sp!, {r0-r3, ip, lr}
.endif
#endif #endif
.endm .endm
.macro asm_trace_hardirqs_on_cond, cond .macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS) #if defined(CONFIG_TRACE_IRQFLAGS)
/* /*
* actually the registers should be pushed and pop'd conditionally, but * actually the registers should be pushed and pop'd conditionally, but
* after bl the flags are certainly clobbered * after bl the flags are certainly clobbered
*/ */
.if \save
stmdb sp!, {r0-r3, ip, lr} stmdb sp!, {r0-r3, ip, lr}
.endif
bl\cond trace_hardirqs_on bl\cond trace_hardirqs_on
.if \save
ldmia sp!, {r0-r3, ip, lr} ldmia sp!, {r0-r3, ip, lr}
.endif
#endif #endif
.endm .endm
.macro asm_trace_hardirqs_on .macro disable_irq, save=1
asm_trace_hardirqs_on_cond al
.endm
.macro disable_irq
disable_irq_notrace disable_irq_notrace
asm_trace_hardirqs_off asm_trace_hardirqs_off \save
.endm .endm
.macro enable_irq .macro enable_irq
...@@ -173,7 +177,7 @@ ...@@ -173,7 +177,7 @@
.macro restore_irqs, oldcpsr .macro restore_irqs, oldcpsr
tst \oldcpsr, #PSR_I_BIT tst \oldcpsr, #PSR_I_BIT
asm_trace_hardirqs_on_cond eq asm_trace_hardirqs_on cond=eq
restore_irqs_notrace \oldcpsr restore_irqs_notrace \oldcpsr
.endm .endm
...@@ -445,6 +449,53 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) ...@@ -445,6 +449,53 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif #endif
.endm .endm
.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_DISABLE
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
.if \isb
instr_sync
.endif
#endif
.endm
.macro uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_ENABLE
mcr p15, 0, \tmp, c3, c0, 0
.if \isb
instr_sync
.endif
#endif
.endm
.macro uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
mrc p15, 0, \tmp, c3, c0, 0
str \tmp, [sp, #S_FRAME_SIZE]
#endif
.endm
.macro uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
ldr r0, [sp, #S_FRAME_SIZE]
mcr p15, 0, r0, c3, c0, 0
#endif
.endm
.macro uaccess_save_and_disable, tmp
uaccess_save \tmp
uaccess_disable \tmp
.endm
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg .macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6 #if __LINUX_ARM_ARCH__ < 6
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
#define __ASM_BARRIER_H #define __ASM_BARRIER_H
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/outercache.h>
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
...@@ -37,12 +36,20 @@ ...@@ -37,12 +36,20 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory") #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif #endif
#ifdef CONFIG_ARM_HEAVY_MB
extern void (*soc_mb)(void);
extern void arm_heavy_mb(void);
#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
#else
#define __arm_heavy_mb(x...) dsb(x)
#endif
#ifdef CONFIG_ARCH_HAS_BARRIERS #ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h> #include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb() do { dsb(); outer_sync(); } while (0) #define mb() __arm_heavy_mb()
#define rmb() dsb() #define rmb() dsb()
#define wmb() do { dsb(st); outer_sync(); } while (0) #define wmb() __arm_heavy_mb(st)
#define dma_rmb() dmb(osh) #define dma_rmb() dmb(osh)
#define dma_wmb() dmb(oshst) #define dma_wmb() dmb(oshst)
#else #else
......
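
The arm_heavy_mb()/soc_mb hook introduced above is what lets the "OMAP special interconnect barrier" item in the merge summary be re-added without pulling <asm/outercache.h> into barrier.h. A hypothetical SoC-side registration might look like the following (the function and initcall names here are made up for illustration; soc_mb itself is the pointer declared in the hunk above):

	/* Hypothetical SoC barrier hook consumed by arm_heavy_mb(). */
	static void my_soc_interconnect_barrier(void)
	{
		/* e.g. post a write to the interconnect and read it back */
	}

	static int __init my_soc_barrier_init(void)
	{
		soc_mb = my_soc_interconnect_barrier;
		return 0;
	}
	early_initcall(my_soc_barrier_init);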
...@@ -35,9 +35,9 @@ ...@@ -35,9 +35,9 @@
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{ {
unsigned long flags; unsigned long flags;
unsigned long mask = 1UL << (bit & 31); unsigned long mask = BIT_MASK(bit);
p += bit >> 5; p += BIT_WORD(bit);
raw_local_irq_save(flags); raw_local_irq_save(flags);
*p |= mask; *p |= mask;
...@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long * ...@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{ {
unsigned long flags; unsigned long flags;
unsigned long mask = 1UL << (bit & 31); unsigned long mask = BIT_MASK(bit);
p += bit >> 5; p += BIT_WORD(bit);
raw_local_irq_save(flags); raw_local_irq_save(flags);
*p &= ~mask; *p &= ~mask;
...@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long ...@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{ {
unsigned long flags; unsigned long flags;
unsigned long mask = 1UL << (bit & 31); unsigned long mask = BIT_MASK(bit);
p += bit >> 5; p += BIT_WORD(bit);
raw_local_irq_save(flags); raw_local_irq_save(flags);
*p ^= mask; *p ^= mask;
...@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) ...@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{ {
unsigned long flags; unsigned long flags;
unsigned int res; unsigned int res;
unsigned long mask = 1UL << (bit & 31); unsigned long mask = BIT_MASK(bit);
p += bit >> 5; p += BIT_WORD(bit);
raw_local_irq_save(flags); raw_local_irq_save(flags);
res = *p; res = *p;
...@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) ...@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{ {
unsigned long flags; unsigned long flags;
unsigned int res; unsigned int res;
unsigned long mask = 1UL << (bit & 31); unsigned long mask = BIT_MASK(bit);
p += bit >> 5; p += BIT_WORD(bit);
raw_local_irq_save(flags); raw_local_irq_save(flags);
res = *p; res = *p;
...@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) ...@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{ {
unsigned long flags; unsigned long flags;
unsigned int res; unsigned int res;
unsigned long mask = 1UL << (bit & 31); unsigned long mask = BIT_MASK(bit);
p += bit >> 5; p += BIT_WORD(bit);
raw_local_irq_save(flags); raw_local_irq_save(flags);
res = *p; res = *p;
......
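
For reference, the helpers used in the bitops refactor above are the generic ones from include/linux/bitops.h at the time of this merge; on 32-bit ARM they expand to exactly the open-coded forms they replace:

	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))	/* BITS_PER_LONG == 32: same as 1UL << (bit & 31) */
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)		/* same as bit >> 5 */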
...@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache; ...@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is * is visible to DMA, or data written by DMA to system memory is
* visible to the CPU. * visible to the CPU.
*/ */
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range #define dmac_flush_range cpu_cache.dma_flush_range
#else #else
...@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t); ...@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is * is visible to DMA, or data written by DMA to system memory is
* visible to the CPU. * visible to the CPU.
*/ */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *); extern void dmac_flush_range(const void *, const void *);
#endif #endif
...@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { } ...@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len); void *kaddr, unsigned long len);
/**
* secure_flush_area - ensure coherency across the secure boundary
* @addr: virtual address
* @size: size of region
*
* Ensure that the specified area of memory is coherent across the secure
* boundary from the non-secure side. This is used when calling secure
* firmware where the secure firmware does not ensure coherency.
*/
static inline void secure_flush_area(const void *addr, size_t size)
{
phys_addr_t phys = __pa(addr);
__cpuc_flush_dcache_area((void *)addr, size);
outer_flush_range(phys, phys + size);
}
#endif #endif
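
A hypothetical caller of the new secure_flush_area() helper might flush a parameter block before handing its physical address to secure firmware that does not snoop the non-secure caches (invoke_secure_fw() below is a made-up stand-in for an SMC wrapper, not an API from this merge):

	struct fw_params {
		u32 command;
		u32 arg;
	};

	extern void invoke_secure_fw(phys_addr_t pa);	/* hypothetical SMC wrapper */

	static void pass_params_to_secure_fw(struct fw_params *p)
	{
		secure_flush_area(p, sizeof(*p));	/* flush L1 dcache and outer cache */
		invoke_secure_fw(__pa(p));		/* firmware reads the block from RAM */
	}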
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <xen/xen.h> #include <xen/xen.h>
#include <asm/xen/hypervisor.h> #include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE (~0) #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops; extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops; extern struct dma_map_ops arm_coherent_dma_ops;
......
...@@ -34,15 +34,14 @@ ...@@ -34,15 +34,14 @@
*/ */
#ifndef CONFIG_IO_36 #ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0 #define DOMAIN_KERNEL 0
#define DOMAIN_TABLE 0
#define DOMAIN_USER 1 #define DOMAIN_USER 1
#define DOMAIN_IO 2 #define DOMAIN_IO 2
#else #else
#define DOMAIN_KERNEL 2 #define DOMAIN_KERNEL 2
#define DOMAIN_TABLE 2
#define DOMAIN_USER 1 #define DOMAIN_USER 1
#define DOMAIN_IO 0 #define DOMAIN_IO 0
#endif #endif
#define DOMAIN_VECTORS 3
/* /*
* Domain types * Domain types
...@@ -55,11 +54,46 @@ ...@@ -55,11 +54,46 @@
#define DOMAIN_MANAGER 1 #define DOMAIN_MANAGER 1
#endif #endif
#define domain_val(dom,type) ((type) << (2*(dom))) #define domain_mask(dom) ((3) << (2 * (dom)))
#define domain_val(dom,type) ((type) << (2 * (dom)))
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
#define DACR_INIT \
(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#else
#define DACR_INIT \
(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#endif
#define __DACR_DEFAULT \
domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
#define DACR_UACCESS_DISABLE \
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
#define DACR_UACCESS_ENABLE \
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_USE_DOMAINS static inline unsigned int get_domain(void)
{
unsigned int domain;
asm(
"mrc p15, 0, %0, c3, c0 @ get domain"
: "=r" (domain));
return domain;
}
static inline void set_domain(unsigned val) static inline void set_domain(unsigned val)
{ {
asm volatile( asm volatile(
...@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val) ...@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val)
isb(); isb();
} }
#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \ #define modify_domain(dom,type) \
do { \ do { \
struct thread_info *thread = current_thread_info(); \ unsigned int domain = get_domain(); \
unsigned int domain = thread->cpu_domain; \ domain &= ~domain_mask(dom); \
domain &= ~domain_val(dom, DOMAIN_MANAGER); \ domain = domain | domain_val(dom, type); \
thread->cpu_domain = domain | domain_val(dom, type); \ set_domain(domain); \
set_domain(thread->cpu_domain); \
} while (0) } while (0)
#else #else
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { } static inline void modify_domain(unsigned dom, unsigned type) { }
#endif #endif
......
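
Worked example of the helpers above: each domain is a 2-bit field in DACR, so flipping DOMAIN_USER between no-access and client only touches bits [3:2]. modify_domain(DOMAIN_USER, DOMAIN_CLIENT) therefore reduces to (sketch, equivalent to the macro in the hunk above):

	static inline void example_open_user_domain(void)
	{
		unsigned int dacr = get_domain();		/* read current DACR */

		dacr &= ~domain_mask(DOMAIN_USER);		/* clear bits 3:2 */
		dacr |= domain_val(DOMAIN_USER, DOMAIN_CLIENT);	/* client access = 0b01 */
		set_domain(dacr);				/* write back (includes isb) */
	}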
...@@ -6,9 +6,13 @@ ...@@ -6,9 +6,13 @@
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
#include <asm/pgtable.h>
enum fixed_addresses { enum fixed_addresses {
FIX_KMAP_BEGIN, FIX_EARLYCON_MEM_BASE,
__end_of_permanent_fixed_addresses,
FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
/* Support writing RO kernel text via kprobes, jump labels, etc. */ /* Support writing RO kernel text via kprobes, jump labels, etc. */
...@@ -18,7 +22,16 @@ enum fixed_addresses { ...@@ -18,7 +22,16 @@ enum fixed_addresses {
__end_of_fixed_addresses __end_of_fixed_addresses
}; };
#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
void __init early_fixmap_init(void);
#include <asm-generic/fixmap.h> #include <asm-generic/fixmap.h>
......
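
The FIX_EARLYCON_MEM_BASE slot and FIXMAP_PAGE_IO definition added above are what the generic earlycon code needs to map UART registers before ioremap() works. Roughly what that code does with them (paraphrased sketch, not a verbatim quote of drivers/tty/serial/earlycon.c):

	static void __iomem * __init example_earlycon_map(unsigned long paddr)
	{
		/* map the page containing the UART into the fixmap as device memory */
		set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK);
		return (void __iomem *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
					(paddr & ~PAGE_MASK));
	}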
...@@ -22,8 +22,11 @@ ...@@ -22,8 +22,11 @@
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
({ \
unsigned int __ua_flags; \
smp_mb(); \ smp_mb(); \
prefetchw(uaddr); \ prefetchw(uaddr); \
__ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \ "1: ldrex %1, [%3]\n" \
" " insn "\n" \ " " insn "\n" \
...@@ -34,12 +37,15 @@ ...@@ -34,12 +37,15 @@
__futex_atomic_ex_table("%5") \ __futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory") : "cc", "memory"); \
uaccess_restore(__ua_flags); \
})
static inline int static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval) u32 oldval, u32 newval)
{ {
unsigned int __ua_flags;
int ret; int ret;
u32 val; u32 val;
...@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
smp_mb(); smp_mb();
/* Prefetching cannot fault */ /* Prefetching cannot fault */
prefetchw(uaddr); prefetchw(uaddr);
__ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n" "1: ldrex %1, [%4]\n"
" teq %1, %2\n" " teq %1, %2\n"
...@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=&r" (ret), "=&r" (val) : "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory"); : "cc", "memory");
uaccess_restore(__ua_flags);
smp_mb(); smp_mb();
*uval = val; *uval = val;
...@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <asm/domain.h> #include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
({ \
unsigned int __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \ "1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \ " " insn "\n" \
...@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__futex_atomic_ex_table("%5") \ __futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory") : "cc", "memory"); \
uaccess_restore(__ua_flags); \
})
static inline int static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval) u32 oldval, u32 newval)
{ {
unsigned int __ua_flags;
int ret = 0; int ret = 0;
u32 val; u32 val;
...@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT; return -EFAULT;
preempt_disable(); preempt_disable();
__ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n" "1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n" " teq %1, %2\n"
...@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "+r" (ret), "=&r" (val) : "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory"); : "cc", "memory");
uaccess_restore(__ua_flags);
*uval = val; *uval = val;
preempt_enable(); preempt_enable();
......
...@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } ...@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) #define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) #define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range) #define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif #endif
......
...@@ -129,21 +129,4 @@ static inline void outer_resume(void) { } ...@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }
#endif #endif
#ifdef CONFIG_OUTER_CACHE_SYNC
/**
* outer_sync - perform a sync point for outer cache
*
* Ensure that all outer cache operations are complete and any store
* buffers are drained.
*/
static inline void outer_sync(void)
{
if (outer_cache.sync)
outer_cache.sync();
}
#else
static inline void outer_sync(void)
{ }
#endif
#endif /* __ASM_OUTERCACHE_H */ #endif /* __ASM_OUTERCACHE_H */
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ #define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4) #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/* /*
* - section * - section
......
...@@ -14,34 +14,11 @@ ...@@ -14,34 +14,11 @@
#ifndef __ASM_ARM_PSCI_H #ifndef __ASM_ARM_PSCI_H
#define __ASM_ARM_PSCI_H #define __ASM_ARM_PSCI_H
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
struct psci_power_state {
u16 id;
u8 type;
u8 affinity_level;
};
struct psci_operations {
int (*cpu_suspend)(struct psci_power_state state,
unsigned long entry_point);
int (*cpu_off)(struct psci_power_state state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int (*migrate)(unsigned long cpuid);
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
};
extern struct psci_operations psci_ops;
extern struct smp_operations psci_smp_ops; extern struct smp_operations psci_smp_ops;
#ifdef CONFIG_ARM_PSCI #ifdef CONFIG_ARM_PSCI
int psci_init(void);
bool psci_smp_available(void); bool psci_smp_available(void);
#else #else
static inline int psci_init(void) { return 0; }
static inline bool psci_smp_available(void) { return false; } static inline bool psci_smp_available(void) { return false; }
#endif #endif
......
...@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void); ...@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
extern int __cpu_disable(void); extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu); extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
...@@ -105,6 +104,7 @@ struct smp_operations { ...@@ -105,6 +104,7 @@ struct smp_operations {
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int (*cpu_kill)(unsigned int cpu); int (*cpu_kill)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu); void (*cpu_die)(unsigned int cpu);
bool (*cpu_can_disable)(unsigned int cpu);
int (*cpu_disable)(unsigned int cpu); int (*cpu_disable)(unsigned int cpu);
#endif #endif
#endif #endif
......
...@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
 extern int platform_can_secondary_boot(void);
 extern int platform_can_cpu_hotplug(void);

+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	return 0;
+}
+#endif
+
 #endif
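
This helper is behind the "only expose cpuX/online in sysfs if hotpluggable" item in the merge summary: the CPU registration path can use it to set the hotpluggable flag that the driver core checks before exposing the online attribute. A sketch of the intent (assumed shape, not a verbatim quote of the setup.c change):

	static void example_register_cpu(int i)
	{
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, i);

		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(i);
		register_cpu(&cpuinfo->cpu, i);	/* 'online' only exposed when hotpluggable */
	}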
...@@ -74,9 +74,6 @@ struct thread_info { ...@@ -74,9 +74,6 @@ struct thread_info {
.flags = 0, \ .flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \ .preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \ .addr_limit = KERNEL_DS, \
.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
} }
#define init_thread_info (init_thread_union.thread_info) #define init_thread_info (init_thread_union.thread_info)
...@@ -136,22 +133,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, ...@@ -136,22 +133,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/* /*
* thread information flags: * thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
* TIF_SYSCAL_AUDIT - syscall auditing active
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
* TIF_USEDFPU - FPU was used by this task this quantum (SMP) * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/ */
#define TIF_SIGPENDING 0 #define TIF_SIGPENDING 0 /* signal pending */
#define TIF_NEED_RESCHED 1 #define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_UPROBE 7 #define TIF_UPROBE 3 /* breakpointed or singlestepping */
#define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 10 #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ #define TIF_SECCOMP 7 /* seccomp syscall filtering active */
#define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17 #define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
......
...@@ -49,6 +49,35 @@ struct exception_table_entry ...@@ -49,6 +49,35 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs); extern int fixup_exception(struct pt_regs *regs);
/*
* These two functions allow hooking accesses to userspace to increase
* system integrity by ensuring that the kernel can not inadvertantly
* perform such accesses (eg, via list poison values) which could then
* be exploited for priviledge escalation.
*/
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
/* Set the current domain access to permit user accesses */
set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
domain_val(DOMAIN_USER, DOMAIN_CLIENT));
return old_domain;
#else
return 0;
#endif
}
static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
set_domain(flags);
#endif
}
/* /*
* These two are intentionally not defined anywhere - if the kernel * These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug. * code generates any references to them, that's a bug.
...@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *); ...@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
register typeof(x) __r2 asm("r2"); \ register typeof(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \ register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \ switch (sizeof(*(__p))) { \
case 1: \ case 1: \
if (sizeof((x)) >= 8) \ if (sizeof((x)) >= 8) \
...@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *); ...@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
break; \ break; \
default: __e = __get_user_bad(); break; \ default: __e = __get_user_bad(); break; \
} \ } \
uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \ x = (typeof(*(p))) __r2; \
__e; \ __e; \
}) })
...@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long); ...@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \ register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \ switch (sizeof(*(__p))) { \
case 1: \ case 1: \
__put_user_x(__r2, __p, __e, __l, 1); \ __put_user_x(__r2, __p, __e, __l, 1); \
...@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long); ...@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
break; \ break; \
default: __e = __put_user_bad(); break; \ default: __e = __put_user_bad(); break; \
} \ } \
uaccess_restore(__ua_flags); \
__e; \ __e; \
}) })
...@@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs) ...@@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs)
do { \ do { \
unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \ unsigned long __gu_val; \
unsigned int __ua_flags; \
__chk_user_ptr(ptr); \ __chk_user_ptr(ptr); \
might_fault(); \ might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
default: (__gu_val) = __get_user_bad(); \ default: (__gu_val) = __get_user_bad(); \
} \ } \
uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \ (x) = (__typeof__(*(ptr)))__gu_val; \
} while (0) } while (0)
#define __get_user_asm_byte(x, addr, err) \ #define __get_user_asm(x, addr, err, instr) \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: " TUSER(ldrb) " %1,[%2],#0\n" \ "1: " TUSER(instr) " %1, [%2], #0\n" \
"2:\n" \ "2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \ " .align 2\n" \
...@@ -329,6 +365,9 @@ do { \ ...@@ -329,6 +365,9 @@ do { \
: "r" (addr), "i" (-EFAULT) \ : "r" (addr), "i" (-EFAULT) \
: "cc") : "cc")
#define __get_user_asm_byte(x, addr, err) \
__get_user_asm(x, addr, err, ldrb)
#ifndef __ARMEB__ #ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \ #define __get_user_asm_half(x, __gu_addr, err) \
({ \ ({ \
...@@ -348,22 +387,7 @@ do { \ ...@@ -348,22 +387,7 @@ do { \
#endif #endif
#define __get_user_asm_word(x, addr, err) \ #define __get_user_asm_word(x, addr, err) \
__asm__ __volatile__( \ __get_user_asm(x, addr, err, ldr)
"1: " TUSER(ldr) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .popsection" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
#define __put_user(x, ptr) \ #define __put_user(x, ptr) \
({ \ ({ \
...@@ -381,9 +405,11 @@ do { \ ...@@ -381,9 +405,11 @@ do { \
#define __put_user_err(x, ptr, err) \ #define __put_user_err(x, ptr, err) \
do { \ do { \
unsigned long __pu_addr = (unsigned long)(ptr); \ unsigned long __pu_addr = (unsigned long)(ptr); \
unsigned int __ua_flags; \
__typeof__(*(ptr)) __pu_val = (x); \ __typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \ __chk_user_ptr(ptr); \
might_fault(); \ might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
...@@ -391,11 +417,12 @@ do { \ ...@@ -391,11 +417,12 @@ do { \
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \ default: __put_user_bad(); \
} \ } \
uaccess_restore(__ua_flags); \
} while (0) } while (0)
#define __put_user_asm_byte(x, __pu_addr, err) \ #define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: " TUSER(strb) " %1,[%2],#0\n" \ "1: " TUSER(instr) " %1, [%2], #0\n" \
"2:\n" \ "2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \ " .align 2\n" \
...@@ -410,6 +437,9 @@ do { \ ...@@ -410,6 +437,9 @@ do { \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc") : "cc")
#define __put_user_asm_byte(x, __pu_addr, err) \
__put_user_asm(x, __pu_addr, err, strb)
#ifndef __ARMEB__ #ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \ #define __put_user_asm_half(x, __pu_addr, err) \
({ \ ({ \
...@@ -427,21 +457,7 @@ do { \ ...@@ -427,21 +457,7 @@ do { \
#endif #endif
#define __put_user_asm_word(x, __pu_addr, err) \ #define __put_user_asm_word(x, __pu_addr, err) \
__asm__ __volatile__( \ __put_user_asm(x, __pu_addr, err, str)
"1: " TUSER(str) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .popsection" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__ #ifndef __ARMEB__
#define __reg_oper0 "%R2" #define __reg_oper0 "%R2"
...@@ -474,11 +490,46 @@ do { \ ...@@ -474,11 +490,46 @@ do { \
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); extern unsigned long __must_check
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); arm_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); static inline unsigned long __must_check
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); __copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned int __ua_flags = uaccess_save_and_enable();
n = arm_copy_from_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
}
extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
unsigned int __ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
}
extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);
static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
unsigned int __ua_flags = uaccess_save_and_enable();
n = arm_clear_user(addr, n);
uaccess_restore(__ua_flags);
return n;
}
#else #else
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
...@@ -511,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo ...@@ -511,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n; return n;
} }
/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count); extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str); extern __must_check long strlen_user(const char __user *str);
......
...@@ -71,8 +71,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o ...@@ -71,8 +71,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o \ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
perf_event_xscale.o perf_event_v6.o \
perf_event_v7.o perf_event_v7.o
CFLAGS_pj4-cp0.o := -marm CFLAGS_pj4-cp0.o := -marm
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
...@@ -89,7 +88,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o ...@@ -89,7 +88,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
ifeq ($(CONFIG_ARM_PSCI),y) ifeq ($(CONFIG_ARM_PSCI),y)
obj-y += psci.o psci-call.o obj-y += psci-call.o
obj-$(CONFIG_SMP) += psci_smp.o obj-$(CONFIG_SMP) += psci_smp.o
endif endif
......
...@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy); ...@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(arm_copy_from_user);
EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(arm_copy_to_user);
EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(arm_clear_user);
EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2); EXPORT_SYMBOL(__get_user_2);
......
...@@ -149,10 +149,10 @@ ENDPROC(__und_invalid) ...@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
#define SPFIX(code...) #define SPFIX(code...)
#endif #endif
.macro svc_entry, stack_hole=0, trace=1 .macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart ) UNWIND(.fnstart )
UNWIND(.save {r0 - pc} ) UNWIND(.save {r0 - pc} )
sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL #ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp ) SPFIX( mov r0, sp )
...@@ -167,7 +167,7 @@ ENDPROC(__und_invalid) ...@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
ldmia r0, {r3 - r5} ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" "" mov r6, #-1 @ "" "" "" ""
add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 ) SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack @ from the exception stack
...@@ -185,6 +185,11 @@ ENDPROC(__und_invalid) ...@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
@ @
stmia r7, {r2 - r6} stmia r7, {r2 - r6}
uaccess_save r0
.if \uaccess
uaccess_disable r0
.endif
.if \trace .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off bl trace_hardirqs_off
...@@ -194,7 +199,7 @@ ENDPROC(__und_invalid) ...@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
.align 5 .align 5
__dabt_svc: __dabt_svc:
svc_entry svc_entry uaccess=0
mov r2, sp mov r2, sp
dabt_helper dabt_helper
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
...@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt) ...@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
#error "sizeof(struct pt_regs) must be a multiple of 8" #error "sizeof(struct pt_regs) must be a multiple of 8"
#endif #endif
.macro usr_entry, trace=1 .macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart ) UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE sub sp, sp, #S_FRAME_SIZE
...@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt) ...@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
ARM( stmdb r0, {sp, lr}^ ) ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
.if \uaccess
uaccess_disable ip
.endif
@ Enable the alignment trap while in kernel mode @ Enable the alignment trap while in kernel mode
ATRAP( teq r8, r7) ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0) ATRAP( mcrne p15, 0, r8, c1, c0, 0)
...@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt) ...@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
.align 5 .align 5
__dabt_usr: __dabt_usr:
usr_entry usr_entry uaccess=0
kuser_cmpxchg_check kuser_cmpxchg_check
mov r2, sp mov r2, sp
dabt_helper dabt_helper
...@@ -458,7 +467,7 @@ ENDPROC(__irq_usr) ...@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
.align 5 .align 5
__und_usr: __und_usr:
usr_entry usr_entry uaccess=0
mov r2, r4 mov r2, r4
mov r3, r5 mov r3, r5
...@@ -484,6 +493,8 @@ __und_usr: ...@@ -484,6 +493,8 @@ __und_usr:
1: ldrt r0, [r4] 1: ldrt r0, [r4]
ARM_BE8(rev r0, r0) @ little endian instruction ARM_BE8(rev r0, r0) @ little endian instruction
uaccess_disable ip
@ r0 = 32-bit ARM instruction which caused the exception @ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc) @ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction @ r4 = PC value for the faulting instruction
...@@ -518,9 +529,10 @@ __und_usr_thumb: ...@@ -518,9 +529,10 @@ __und_usr_thumb:
2: ldrht r5, [r4] 2: ldrht r5, [r4]
ARM_BE8(rev16 r5, r5) @ little endian instruction ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0 cmp r5, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_fault_16 @ 16bit undefined instruction blo __und_usr_fault_16_pan @ 16bit undefined instruction
3: ldrht r0, [r2] 3: ldrht r0, [r2]
ARM_BE8(rev16 r0, r0) @ little endian instruction ARM_BE8(rev16 r0, r0) @ little endian instruction
uaccess_disable ip
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16 orr r0, r0, r5, lsl #16
...@@ -715,6 +727,8 @@ ENDPROC(no_fp) ...@@ -715,6 +727,8 @@ ENDPROC(no_fp)
__und_usr_fault_32: __und_usr_fault_32:
mov r1, #4 mov r1, #4
b 1f b 1f
__und_usr_fault_16_pan:
uaccess_disable ip
__und_usr_fault_16: __und_usr_fault_16:
mov r1, #2 mov r1, #2
1: mov r0, sp 1: mov r0, sp
...@@ -770,6 +784,8 @@ ENTRY(__switch_to) ...@@ -770,6 +784,8 @@ ENTRY(__switch_to)
ldr r4, [r2, #TI_TP_VALUE] ldr r4, [r2, #TI_TP_VALUE]
ldr r5, [r2, #TI_TP_VALUE + 4] ldr r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS #ifdef CONFIG_CPU_USE_DOMAINS
mrc p15, 0, r6, c3, c0, 0 @ Get domain register
str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
ldr r6, [r2, #TI_CPU_DOMAIN] ldr r6, [r2, #TI_CPU_DOMAIN]
#endif #endif
switch_tls r1, r4, r5, r3, r7 switch_tls r1, r4, r5, r3, r7
......
...@@ -24,35 +24,55 @@ ...@@ -24,35 +24,55 @@
.align 5 .align 5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/* /*
* This is the fast syscall return path. We do as little as * This is the fast syscall return path. We do as little as possible here,
* possible here, and this includes saving r0 back into the SVC * such as avoiding writing r0 to the stack. We only use this path if we
* stack. * have tracing and context tracking disabled - the overheads from those
* features make this path too inefficient.
*/ */
ret_fast_syscall: ret_fast_syscall:
UNWIND(.fnstart ) UNWIND(.fnstart )
UNWIND(.cantunwind ) UNWIND(.cantunwind )
disable_irq @ disable interrupts disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
bne __sys_trace_return
tst r1, #_TIF_WORK_MASK
bne fast_work_pending bne fast_work_pending
asm_trace_hardirqs_on
/* perform architecture specific actions before user return */ /* perform architecture specific actions before user return */
arch_ret_to_user r1, lr arch_ret_to_user r1, lr
ct_user_enter
restore_user_regs fast = 1, offset = S_OFF restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend ) UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
/* /* Ok, we need to do extra processing, enter the slow path. */
* Ok, we need to do extra processing, enter the slow path.
*/
fast_work_pending: fast_work_pending:
str r0, [sp, #S_R0+S_OFF]! @ returned r0 str r0, [sp, #S_R0+S_OFF]! @ returned r0
work_pending: /* fall through to work_pending */
#else
/*
* The "replacement" ret_fast_syscall for when tracing or context tracking
* is enabled. As we will need to call out to some C functions, we save
* r0 first to avoid needing to save registers around each C function call.
*/
ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
beq no_work_pending
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
/* Slower path - fall through to work_pending */
#endif
tst r1, #_TIF_SYSCALL_WORK
bne __sys_trace_return_nosave
slow_work_pending:
mov r0, sp @ 'regs' mov r0, sp @ 'regs'
mov r2, why @ 'syscall' mov r2, why @ 'syscall'
bl do_work_pending bl do_work_pending
...@@ -65,16 +85,19 @@ ENDPROC(ret_fast_syscall) ...@@ -65,16 +85,19 @@ ENDPROC(ret_fast_syscall)
/* /*
* "slow" syscall return path. "why" tells us if this was a real syscall. * "slow" syscall return path. "why" tells us if this was a real syscall.
* IRQs may be enabled here, so always disable them. Note that we use the
* "notrace" version to avoid calling into the tracing code unnecessarily.
* do_work_pending() will update this state if necessary.
*/ */
ENTRY(ret_to_user) ENTRY(ret_to_user)
ret_slow_syscall: ret_slow_syscall:
disable_irq @ disable interrupts disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq) ENTRY(ret_to_user_from_irq)
ldr r1, [tsk, #TI_FLAGS] ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK tst r1, #_TIF_WORK_MASK
bne work_pending bne slow_work_pending
no_work_pending: no_work_pending:
asm_trace_hardirqs_on asm_trace_hardirqs_on save = 0
/* perform architecture specific actions before user return */ /* perform architecture specific actions before user return */
arch_ret_to_user r1, lr arch_ret_to_user r1, lr
...@@ -174,6 +197,8 @@ ENTRY(vector_swi) ...@@ -174,6 +197,8 @@ ENTRY(vector_swi)
USER( ldr scno, [lr, #-4] ) @ get SWI instruction USER( ldr scno, [lr, #-4] ) @ get SWI instruction
#endif #endif
uaccess_disable tbl
adr tbl, sys_call_table @ load syscall table pointer adr tbl, sys_call_table @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT) #if defined(CONFIG_OABI_COMPAT)
...@@ -252,6 +277,12 @@ __sys_trace_return: ...@@ -252,6 +277,12 @@ __sys_trace_return:
bl syscall_trace_exit bl syscall_trace_exit
b ret_slow_syscall b ret_slow_syscall
__sys_trace_return_nosave:
enable_irq_notrace
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
.align 5 .align 5
#ifdef CONFIG_ALIGNMENT_TRAP #ifdef CONFIG_ALIGNMENT_TRAP
.type __cr_alignment, #object .type __cr_alignment, #object
......
...@@ -196,7 +196,7 @@ ...@@ -196,7 +196,7 @@
msr cpsr_c, \rtemp @ switch back to the SVC mode msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm .endm
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr, irq = 0 .macro svc_exit, rpsr, irq = 0
.if \irq != 0 .if \irq != 0
@ IRQs already off @ IRQs already off
...@@ -215,6 +215,10 @@ ...@@ -215,6 +215,10 @@
blne trace_hardirqs_off blne trace_hardirqs_off
#endif #endif
.endif .endif
uaccess_restore
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
msr spsr_cxsf, \rpsr msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321 @ We must avoid clrex due to Cortex-A15 erratum #830321
...@@ -222,6 +226,20 @@ ...@@ -222,6 +226,20 @@
strex r1, r2, [r0] @ clear the exclusive monitor strex r1, r2, [r0] @ clear the exclusive monitor
#endif #endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#else
@ Thumb mode SVC restore
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
@ We must avoid clrex due to Cortex-A15 erratum #830321
strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
ldmia sp, {r0 - r12}
mov sp, lr
ldr lr, [sp], #4
rfeia sp!
#endif
.endm .endm
@ @
...@@ -241,6 +259,9 @@ ...@@ -241,6 +259,9 @@
@ on the stack remains correct). @ on the stack remains correct).
@ @
.macro svc_exit_via_fiq .macro svc_exit_via_fiq
uaccess_restore
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp mov r0, sp
ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
@ clobber state restored below) @ clobber state restored below)
...@@ -250,9 +271,27 @@ ...@@ -250,9 +271,27 @@
msr spsr_cxsf, r9 msr spsr_cxsf, r9
ldr r0, [r0, #S_R0] ldr r0, [r0, #S_R0]
ldmia r8, {pc}^ ldmia r8, {pc}^
#else
@ Thumb mode restore
add r0, sp, #S_R2
ldr lr, [sp, #S_LR]
ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
@ clobber state restored below)
ldmia r0, {r2 - r12}
mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
msr cpsr_c, r1
sub r0, #S_R2
add r8, r0, #S_PC
ldmia r0, {r0 - r1}
rfeia r8
#endif
.endm .endm
.macro restore_user_regs, fast = 0, offset = 0 .macro restore_user_regs, fast = 0, offset = 0
uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r2, sp mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc ldr lr, [r2, #\offset + S_PC]! @ get pc
...@@ -270,72 +309,16 @@ ...@@ -270,72 +309,16 @@
@ after ldm {}^ @ after ldm {}^
add sp, sp, #\offset + S_FRAME_SIZE add sp, sp, #\offset + S_FRAME_SIZE
movs pc, lr @ return & move spsr_svc into cpsr movs pc, lr @ return & move spsr_svc into cpsr
.endm #elif defined(CONFIG_CPU_V7M)
@ V7M restore.
#else /* CONFIG_THUMB2_KERNEL */ @ Note that we don't need to do clrex here as clearing the local
.macro svc_exit, rpsr, irq = 0 @ monitor is part of the exception entry and exit sequence.
.if \irq != 0
@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
@ The parent context IRQs must have been enabled to get here in
@ the first place, so there's no point checking the PSR I bit.
bl trace_hardirqs_on
#endif
.else
@ IRQs off again before pulling preserved data off the stack
disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
tst \rpsr, #PSR_I_BIT
bleq trace_hardirqs_on
tst \rpsr, #PSR_I_BIT
blne trace_hardirqs_off
#endif
.endif
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
@ We must avoid clrex due to Cortex-A15 erratum #830321
strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
ldmia sp, {r0 - r12}
mov sp, lr
ldr lr, [sp], #4
rfeia sp!
.endm
@
@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
@
@ For full details see non-Thumb implementation above.
@
.macro svc_exit_via_fiq
add r0, sp, #S_R2
ldr lr, [sp, #S_LR]
ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
@ clobber state restored below)
ldmia r0, {r2 - r12}
mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
msr cpsr_c, r1
sub r0, #S_R2
add r8, r0, #S_PC
ldmia r0, {r0 - r1}
rfeia r8
.endm
#ifdef CONFIG_CPU_V7M
/*
* Note we don't need to do clrex here as clearing the local monitor is
* part of each exception entry and exit sequence.
*/
.macro restore_user_regs, fast = 0, offset = 0
.if \offset .if \offset
add sp, #\offset add sp, #\offset
.endif .endif
v7m_exception_slow_exit ret_r0 = \fast v7m_exception_slow_exit ret_r0 = \fast
.endm #else
#else /* ifdef CONFIG_CPU_V7M */ @ Thumb mode restore
.macro restore_user_regs, fast = 0, offset = 0
mov r2, sp mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
...@@ -353,9 +336,8 @@ ...@@ -353,9 +336,8 @@
.endif .endif
add sp, sp, #S_FRAME_SIZE - S_SP add sp, sp, #S_FRAME_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr movs pc, lr @ return & move spsr_svc into cpsr
.endm
#endif /* ifdef CONFIG_CPU_V7M / else */
#endif /* !CONFIG_THUMB2_KERNEL */ #endif /* !CONFIG_THUMB2_KERNEL */
.endm
/* /*
* Context tracking subsystem. Used to instrument transitions * Context tracking subsystem. Used to instrument transitions
......
...@@ -464,10 +464,7 @@ __enable_mmu: ...@@ -464,10 +464,7 @@ __enable_mmu:
#ifdef CONFIG_ARM_LPAE #ifdef CONFIG_ARM_LPAE
mcrr p15, 0, r4, r5, c2 @ load TTBR0 mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else #else
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ mov r5, #DACR_INIT
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif #endif
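For context (not part of this hunk): DACR_INIT condenses the initial per-domain access rights into the 32-bit Domain Access Control Register, two bits per domain. A minimal sketch of the helpers it builds on, assuming the conventional 2-bit-per-domain encoding; the real definitions, including the exact initial value chosen by this series, live in asm/domain.h:

	/* Illustrative sketch only -- see arch/arm/include/asm/domain.h.
	 * Each domain owns a 2-bit field in the DACR. */
	#define DOMAIN_NOACCESS		0	/* any access faults */
	#define DOMAIN_CLIENT		1	/* accesses checked against page permissions */
	#define DOMAIN_MANAGER		3	/* accesses bypass permission checks */

	#define domain_val(dom, type)	((type) << (2 * (dom)))
	#define domain_mask(dom)	(3 << (2 * (dom)))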
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/export.h> #include <linux/export.h>
#include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-l2x0.h>
#include <asm/outercache.h>
#include <asm/exception.h> #include <asm/exception.h>
#include <asm/mach/arch.h> #include <asm/mach/arch.h>
#include <asm/mach/irq.h> #include <asm/mach/irq.h>
......
...@@ -34,9 +34,9 @@ ...@@ -34,9 +34,9 @@
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/irq_regs.h> #include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
enum armv6_perf_types { enum armv6_perf_types {
......
...@@ -21,11 +21,11 @@ ...@@ -21,11 +21,11 @@
#include <asm/cp15.h> #include <asm/cp15.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/irq_regs.h> #include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/vfp.h> #include <asm/vfp.h>
#include "../vfp/vfpinstr.h" #include "../vfp/vfpinstr.h"
#include <linux/of.h> #include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
/* /*
......
...@@ -16,9 +16,9 @@ ...@@ -16,9 +16,9 @@
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/irq_regs.h> #include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
enum xscale_perf_types { enum xscale_perf_types {
......
...@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void) ...@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void)
ledtrig_cpu(CPU_LED_IDLE_END); ledtrig_cpu(CPU_LED_IDLE_END);
} }
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
cpu_die();
}
#endif
void __show_regs(struct pt_regs *regs) void __show_regs(struct pt_regs *regs)
{ {
unsigned long flags; unsigned long flags;
...@@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs) ...@@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs)
buf[4] = '\0'; buf[4] = '\0';
#ifndef CONFIG_CPU_V7M #ifndef CONFIG_CPU_V7M
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", {
buf, interrupts_enabled(regs) ? "n" : "ff", unsigned int domain = get_domain();
fast_interrupts_enabled(regs) ? "n" : "ff", const char *segment;
processor_modes[processor_mode(regs)],
isa_modes[isa_mode(regs)], #ifdef CONFIG_CPU_SW_DOMAIN_PAN
get_fs() == get_ds() ? "kernel" : "user"); /*
* Get the domain register for the parent context. In user
* mode, we don't save the DACR, so let's use what it should
* be. For other modes, we place it after the pt_regs struct.
*/
if (user_mode(regs))
domain = DACR_UACCESS_ENABLE;
else
domain = *(unsigned int *)(regs + 1);
#endif
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
else if (get_fs() == get_ds())
segment = "kernel";
else
segment = "user";
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
buf, interrupts_enabled(regs) ? "n" : "ff",
fast_interrupts_enabled(regs) ? "n" : "ff",
processor_modes[processor_mode(regs)],
isa_modes[isa_mode(regs)], segment);
}
#else #else
printk("xPSR: %08lx\n", regs->ARM_cpsr); printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif #endif
...@@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs) ...@@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs)
buf[0] = '\0'; buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU #ifdef CONFIG_CPU_CP15_MMU
{ {
unsigned int transbase, dac; unsigned int transbase, dac = get_domain();
asm("mrc p15, 0, %0, c2, c0\n\t" asm("mrc p15, 0, %0, c2, c0\n\t"
"mrc p15, 0, %1, c3, c0\n" : "=r" (transbase));
: "=r" (transbase), "=r" (dac));
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
transbase, dac); transbase, dac);
} }
...@@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, ...@@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
/*
* Copy the initial value of the domain access control register
* from the current thread: thread->addr_limit will have been
* copied from the current thread via setup_thread_stack() in
* kernel/fork.c
*/
thread->cpu_domain = get_domain();
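A rough idea of what get_domain()/set_domain() amount to: conceptually they access cp15 c3 (the DACR). This is a simplified sketch, not the kernel's exact implementation (which also keeps the per-thread copy used above):

	/* Simplified sketch only -- see arch/arm/include/asm/domain.h. */
	static inline unsigned int get_domain(void)
	{
		unsigned int domain;

		asm("mrc p15, 0, %0, c3, c0	@ read DACR" : "=r" (domain));
		return domain;
	}

	static inline void set_domain(unsigned int val)
	{
		asm volatile("mcr p15, 0, %0, c3, c0	@ write DACR"
			     : : "r" (val) : "memory");
		isb();
	}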
if (likely(!(p->flags & PF_KTHREAD))) { if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs(); *childregs = *current_pt_regs();
childregs->ARM_r0 = 0; childregs->ARM_r0 = 0;
......
...@@ -17,6 +17,8 @@ ...@@ -17,6 +17,8 @@
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/psci.h>
#include <uapi/linux/psci.h> #include <uapi/linux/psci.h>
#include <asm/psci.h> #include <asm/psci.h>
...@@ -51,22 +53,34 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) ...@@ -51,22 +53,34 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
{ {
if (psci_ops.cpu_on) if (psci_ops.cpu_on)
return psci_ops.cpu_on(cpu_logical_map(cpu), return psci_ops.cpu_on(cpu_logical_map(cpu),
__pa(secondary_startup)); virt_to_idmap(&secondary_startup));
return -ENODEV; return -ENODEV;
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
if (!psci_ops.cpu_off)
return -EOPNOTSUPP;
/* Trusted OS will deny CPU_OFF */
if (psci_tos_resident_on(cpu))
return -EPERM;
return 0;
}
void __ref psci_cpu_die(unsigned int cpu) void __ref psci_cpu_die(unsigned int cpu)
{ {
const struct psci_power_state ps = { u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
.type = PSCI_POWER_STATE_TYPE_POWER_DOWN, PSCI_0_2_POWER_STATE_TYPE_SHIFT;
};
if (psci_ops.cpu_off) if (psci_ops.cpu_off)
psci_ops.cpu_off(ps); psci_ops.cpu_off(state);
/* We should never return */ /* We should never return */
panic("psci: cpu %d failed to shutdown\n", cpu); panic("psci: cpu %d failed to shutdown\n", cpu);
} }
int __ref psci_cpu_kill(unsigned int cpu) int __ref psci_cpu_kill(unsigned int cpu)
...@@ -109,6 +123,7 @@ bool __init psci_smp_available(void) ...@@ -109,6 +123,7 @@ bool __init psci_smp_available(void)
struct smp_operations __initdata psci_smp_ops = { struct smp_operations __initdata psci_smp_ops = {
.smp_boot_secondary = psci_boot_secondary, .smp_boot_secondary = psci_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = psci_cpu_disable,
.cpu_die = psci_cpu_die, .cpu_die = psci_cpu_die,
.cpu_kill = psci_cpu_kill, .cpu_kill = psci_cpu_kill,
#endif #endif
......
...@@ -31,12 +31,14 @@ ...@@ -31,12 +31,14 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/sort.h> #include <linux/sort.h>
#include <linux/psci.h>
#include <asm/unified.h> #include <asm/unified.h>
#include <asm/cp15.h> #include <asm/cp15.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/elf.h> #include <asm/elf.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h> #include <asm/procinfo.h>
#include <asm/psci.h> #include <asm/psci.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -954,6 +956,9 @@ void __init setup_arch(char **cmdline_p) ...@@ -954,6 +956,9 @@ void __init setup_arch(char **cmdline_p)
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line; *cmdline_p = cmd_line;
if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
early_fixmap_init();
parse_early_param(); parse_early_param();
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
...@@ -972,7 +977,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -972,7 +977,7 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree(); unflatten_device_tree();
arm_dt_init_cpu_maps(); arm_dt_init_cpu_maps();
psci_init(); psci_dt_init();
xen_early_init(); xen_early_init();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (is_smp()) { if (is_smp()) {
...@@ -1015,7 +1020,7 @@ static int __init topology_init(void) ...@@ -1015,7 +1020,7 @@ static int __init topology_init(void)
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
cpuinfo->cpu.hotpluggable = 1; cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
register_cpu(&cpuinfo->cpu, cpu); register_cpu(&cpuinfo->cpu, cpu);
} }
......
...@@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall) ...@@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall)
asmlinkage int asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{ {
/*
* The assembly code enters us with IRQs off, but it hasn't
* informed the tracing code of that for efficiency reasons.
* Update the trace code with the current status.
*/
trace_hardirqs_off();
do { do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) { if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule(); schedule();
......
...@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu) ...@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu)
if (smp_ops.cpu_disable) if (smp_ops.cpu_disable)
return smp_ops.cpu_disable(cpu); return smp_ops.cpu_disable(cpu);
return 0;
}
int platform_can_hotplug_cpu(unsigned int cpu)
{
/* cpu_die must be specified to support hotplug */
if (!smp_ops.cpu_die)
return 0;
if (smp_ops.cpu_can_disable)
return smp_ops.cpu_can_disable(cpu);
/* /*
* By default, allow disabling all CPUs except the first one, * By default, allow disabling all CPUs except the first one,
* since this is special on a lot of platforms, e.g. because * since this is special on a lot of platforms, e.g. because
* of clock tick interrupts. * of clock tick interrupts.
*/ */
return cpu == 0 ? -EPERM : 0; return cpu != 0;
} }
/* /*
* __cpu_disable runs on the processor to be shutdown. * __cpu_disable runs on the processor to be shutdown.
*/ */
...@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu) ...@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu)
* of the other hotplug-cpu capable cores, so presumably coming * of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this. * out of idle fixes this.
*/ */
void __ref cpu_die(void) void arch_cpu_idle_dead(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
......
...@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data, ...@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
while (1) { while (1) {
unsigned long temp; unsigned long temp;
unsigned int __ua_flags;
__ua_flags = uaccess_save_and_enable();
if (type == TYPE_SWPB) if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp); __user_swpb_asm(*data, address, res, temp);
else else
__user_swp_asm(*data, address, res, temp); __user_swp_asm(*data, address, res, temp);
uaccess_restore(__ua_flags);
if (likely(res != -EAGAIN) || signal_pending(current)) if (likely(res != -EAGAIN) || signal_pending(current))
break; break;
......
...@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base) ...@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base); kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2); flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */ #else /* ifndef CONFIG_CPU_V7M */
/* /*
* on V7-M there is no need to copy the vector table to a dedicated * on V7-M there is no need to copy the vector table to a dedicated
......
...@@ -12,14 +12,14 @@ ...@@ -12,14 +12,14 @@
.text .text
/* Prototype: int __clear_user(void *addr, size_t sz) /* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory * Purpose : clear some user memory
* Params : addr - user memory address to clear * Params : addr - user memory address to clear
* : sz - number of bytes to clear * : sz - number of bytes to clear
* Returns : number of bytes NOT cleared * Returns : number of bytes NOT cleared
*/ */
ENTRY(__clear_user_std) ENTRY(__clear_user_std)
WEAK(__clear_user) WEAK(arm_clear_user)
stmfd sp!, {r1, lr} stmfd sp!, {r1, lr}
mov r2, #0 mov r2, #0
cmp r1, #4 cmp r1, #4
...@@ -44,7 +44,7 @@ WEAK(__clear_user) ...@@ -44,7 +44,7 @@ WEAK(__clear_user)
USER( strnebt r2, [r0]) USER( strnebt r2, [r0])
mov r0, #0 mov r0, #0
ldmfd sp!, {r1, pc} ldmfd sp!, {r1, pc}
ENDPROC(__clear_user) ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std) ENDPROC(__clear_user_std)
.pushsection .text.fixup,"ax" .pushsection .text.fixup,"ax"
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
/* /*
* Prototype: * Prototype:
* *
* size_t __copy_from_user(void *to, const void *from, size_t n) * size_t arm_copy_from_user(void *to, const void *from, size_t n)
* *
* Purpose: * Purpose:
* *
...@@ -89,11 +89,11 @@ ...@@ -89,11 +89,11 @@
.text .text
ENTRY(__copy_from_user) ENTRY(arm_copy_from_user)
#include "copy_template.S" #include "copy_template.S"
ENDPROC(__copy_from_user) ENDPROC(arm_copy_from_user)
.pushsection .fixup,"ax" .pushsection .fixup,"ax"
.align 0 .align 0
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
/* /*
* Prototype: * Prototype:
* *
* size_t __copy_to_user(void *to, const void *from, size_t n) * size_t arm_copy_to_user(void *to, const void *from, size_t n)
* *
* Purpose: * Purpose:
* *
...@@ -93,11 +93,11 @@ ...@@ -93,11 +93,11 @@
.text .text
ENTRY(__copy_to_user_std) ENTRY(__copy_to_user_std)
WEAK(__copy_to_user) WEAK(arm_copy_to_user)
#include "copy_template.S" #include "copy_template.S"
ENDPROC(__copy_to_user) ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std) ENDPROC(__copy_to_user_std)
.pushsection .text.fixup,"ax" .pushsection .text.fixup,"ax"
......
...@@ -17,6 +17,19 @@ ...@@ -17,6 +17,19 @@
.text .text
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
.macro save_regs
mrc p15, 0, ip, c3, c0, 0	@ save the current DACR
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
uaccess_enable ip		@ open userspace access for the copy
.endm
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
mcr p15, 0, ip, c3, c0, 0	@ restore the saved DACR
ret lr
.endm
#else
.macro save_regs .macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr} stmfd sp!, {r1, r2, r4 - r8, lr}
.endm .endm
...@@ -24,6 +37,7 @@ ...@@ -24,6 +37,7 @@
.macro load_regs .macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc} ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm .endm
#endif
.macro load1b, reg1 .macro load1b, reg1
ldrusr \reg1, r0, 1 ldrusr \reg1, r0, 1
......
...@@ -136,7 +136,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) ...@@ -136,7 +136,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
} }
unsigned long unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n) arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
/* /*
* This test is stubbed out of the main function above to keep * This test is stubbed out of the main function above to keep
...@@ -190,7 +190,7 @@ __clear_user_memset(void __user *addr, unsigned long n) ...@@ -190,7 +190,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
return n; return n;
} }
unsigned long __clear_user(void __user *addr, unsigned long n) unsigned long arm_clear_user(void __user *addr, unsigned long n)
{ {
/* See rational for this in __copy_to_user() above. */ /* See rational for this in __copy_to_user() above. */
if (n < 64) if (n < 64)
......
...@@ -28,8 +28,8 @@ ...@@ -28,8 +28,8 @@
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/amba/bus.h> #include <linux/amba/bus.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/psci.h>
#include <asm/psci.h>
#include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h> #include <asm/mach/arch.h>
#include <asm/mach/map.h> #include <asm/mach/map.h>
......
...@@ -16,19 +16,21 @@ ...@@ -16,19 +16,21 @@
#include <linux/cpu_pm.h> #include <linux/cpu_pm.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/psci.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <asm/suspend.h> #include <asm/suspend.h>
#include <asm/psci.h>
#include <uapi/linux/psci.h>
#define HIGHBANK_SUSPEND_PARAM \
((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
(1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
(PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
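Assuming the standard PSCI 0.2 power_state layout (state ID in bits [15:0], state type in bit 16, power/affinity level in bits [25:24]), the macro works out to:

	/* (0 << 0) | (1 << 24) | (1 << 16) == 0x01010000
	 * i.e. state ID 0, affinity level 1, type = power-down. */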
static int highbank_suspend_finish(unsigned long val) static int highbank_suspend_finish(unsigned long val)
{ {
const struct psci_power_state ps = { return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume));
.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
.affinity_level = 1,
};
return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
} }
static int highbank_pm_enter(suspend_state_t state) static int highbank_pm_enter(suspend_state_t state)
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
#include <asm/outercache.h>
#include <mach/hardware.h> #include <mach/hardware.h>
#include <mach/cputype.h> #include <mach/cputype.h>
#include <mach/addr-map.h> #include <mach/addr-map.h>
......
...@@ -29,6 +29,7 @@ config ARCH_OMAP4 ...@@ -29,6 +29,7 @@ config ARCH_OMAP4
select HAVE_ARM_SCU if SMP select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP select HAVE_ARM_TWD if SMP
select OMAP_INTERCONNECT select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
select PL310_ERRATA_588369 if CACHE_L2X0 select PL310_ERRATA_588369 if CACHE_L2X0
select PL310_ERRATA_727915 if CACHE_L2X0 select PL310_ERRATA_727915 if CACHE_L2X0
select PM_OPP if PM select PM_OPP if PM
...@@ -46,6 +47,7 @@ config SOC_OMAP5 ...@@ -46,6 +47,7 @@ config SOC_OMAP5
select HAVE_ARM_TWD if SMP select HAVE_ARM_TWD if SMP
select HAVE_ARM_ARCH_TIMER select HAVE_ARM_ARCH_TIMER
select ARM_ERRATA_798181 if SMP select ARM_ERRATA_798181 if SMP
select OMAP_INTERCONNECT_BARRIER
config SOC_AM33XX config SOC_AM33XX
bool "TI AM33XX" bool "TI AM33XX"
...@@ -71,6 +73,7 @@ config SOC_DRA7XX ...@@ -71,6 +73,7 @@ config SOC_DRA7XX
select HAVE_ARM_ARCH_TIMER select HAVE_ARM_ARCH_TIMER
select IRQ_CROSSBAR select IRQ_CROSSBAR
select ARM_ERRATA_798181 if SMP select ARM_ERRATA_798181 if SMP
select OMAP_INTERCONNECT_BARRIER
config ARCH_OMAP2PLUS config ARCH_OMAP2PLUS
bool bool
...@@ -92,6 +95,10 @@ config ARCH_OMAP2PLUS ...@@ -92,6 +95,10 @@ config ARCH_OMAP2PLUS
help help
Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
config OMAP_INTERCONNECT_BARRIER
bool
select ARM_HEAVY_MB
if ARCH_OMAP2PLUS if ARCH_OMAP2PLUS
......
...@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void) ...@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
void __init omap_reserve(void) void __init omap_reserve(void)
{ {
omap_secure_ram_reserve_memblock(); omap_secure_ram_reserve_memblock();
omap_barrier_reserve_memblock();
} }
...@@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd) ...@@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
} }
#endif #endif
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
void omap_barrier_reserve_memblock(void);
void omap_barriers_init(void);
#else
static inline void omap_barrier_reserve_memblock(void)
{
}
#endif
/* This gets called from mach-omap2/io.c, do not call this */ /* This gets called from mach-omap2/io.c, do not call this */
void __init omap2_set_globals_tap(u32 class, void __iomem *tap); void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
......
/*
* OMAP memory barrier header.
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __MACH_BARRIERS_H
#define __MACH_BARRIERS_H
#include <asm/outercache.h>
extern void omap_bus_sync(void);
#define rmb() dsb()
#define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
#define mb() wmb()
#endif /* __MACH_BARRIERS_H */
...@@ -352,6 +352,7 @@ void __init am33xx_map_io(void) ...@@ -352,6 +352,7 @@ void __init am33xx_map_io(void)
void __init omap4_map_io(void) void __init omap4_map_io(void)
{ {
iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc)); iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
omap_barriers_init();
} }
#endif #endif
...@@ -359,6 +360,7 @@ void __init omap4_map_io(void) ...@@ -359,6 +360,7 @@ void __init omap4_map_io(void)
void __init omap5_map_io(void) void __init omap5_map_io(void)
{ {
iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc)); iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
omap_barriers_init();
} }
#endif #endif
......
...@@ -51,6 +51,127 @@ static void __iomem *twd_base; ...@@ -51,6 +51,127 @@ static void __iomem *twd_base;
#define IRQ_LOCALTIMER 29 #define IRQ_LOCALTIMER 29
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
/* Used to implement memory barrier on DRAM path */
#define OMAP4_DRAM_BARRIER_VA 0xfe600000
static void __iomem *dram_sync, *sram_sync;
static phys_addr_t dram_sync_paddr;
static u32 dram_sync_size;
/*
* The OMAP4 bus structure contains asynchronous bridges which can buffer
* data writes from the MPU. These asynchronous bridges can be found on
* the paths from the MPU to the EMIF and from the MPU to the L3 interconnects.
*
* We need to be careful about re-ordering which can happen as a result
* of different accesses being performed via different paths, and
* therefore different asynchronous bridges.
*/
/*
* OMAP4 interconnect barrier which is called for each mb() and wmb().
* This is to ensure that normal paths to DRAM (normal memory, cacheable
* accesses) are properly synchronised with writes to DMA coherent memory
* (normal memory, uncacheable) and device writes.
*
* The mb() and wmb() barriers operate only on the MPU->MA->EMIF
* path, as we need to ensure that data is visible to other system
* masters prior to writes to those system masters being seen.
*
* Note: the SRAM path is not synchronised via mb() and wmb().
*/
static void omap4_mb(void)
{
if (dram_sync)
writel_relaxed(0, dram_sync);
}
/*
* OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
*
* If data is stalled inside an asynchronous bridge because of back
* pressure, it may be accepted multiple times, creating pointer
* misalignment that will corrupt subsequent transfers on that data path
* until the next reset of the system. There is no recovery procedure once
* the issue is hit; the path remains consistently broken.
*
* Async bridges can be found on paths between MPU to EMIF and MPU to L3
* interconnects.
*
* This situation can happen only when the idle is initiated by a Master
* Request Disconnection (which is triggered by software when executing WFI
* on the CPU).
*
* The work-around for this erratum requires all the initiators connected
* through an async bridge to ensure that the data path is properly drained
* before issuing WFI. This condition is met if one strongly-ordered
* access is performed to the target right before executing the WFI.
*
* In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO need to be
* drained. An I/O barrier ensures that there is no synchronisation loss for
* initiators operating on both interconnect ports simultaneously.
*
* This is a stronger version of the OMAP4 memory barrier above: it operates
* not only on the MPU->MA->EMIF path but also on the MPU->OCP path, and is
* necessary prior to executing a WFI.
*/
void omap_interconnect_sync(void)
{
if (dram_sync && sram_sync) {
writel_relaxed(readl_relaxed(dram_sync), dram_sync);
writel_relaxed(readl_relaxed(sram_sync), sram_sync);
isb();
}
}
static int __init omap4_sram_init(void)
{
struct device_node *np;
struct gen_pool *sram_pool;
np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
if (!np)
pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
__func__);
sram_pool = of_gen_pool_get(np, "sram", 0);
if (!sram_pool)
pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
__func__);
else
sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
return 0;
}
omap_arch_initcall(omap4_sram_init);
/* Steal one page of physical memory for the barrier implementation */
void __init omap_barrier_reserve_memblock(void)
{
dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
}
void __init omap_barriers_init(void)
{
struct map_desc dram_io_desc[1];
dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
dram_io_desc[0].length = dram_sync_size;
dram_io_desc[0].type = MT_MEMORY_RW_SO;
iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
dram_sync = (void __iomem *) dram_io_desc[0].virtual;
pr_info("OMAP4: Map %pa to %p for dram barrier\n",
&dram_sync_paddr, dram_sync);
soc_mb = omap4_mb;
}
#endif
void gic_dist_disable(void) void gic_dist_disable(void)
{ {
if (gic_dist_base_addr) if (gic_dist_base_addr)
......
...@@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume) ...@@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume)
#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */ #endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
ENTRY(omap_bus_sync)
ret lr
ENDPROC(omap_bus_sync)
ENTRY(omap_do_wfi) ENTRY(omap_do_wfi)
stmfd sp!, {lr} stmfd sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
/* Drain interconnect write buffers. */ /* Drain interconnect write buffers. */
bl omap_bus_sync bl omap_interconnect_sync
#endif
/* /*
* Execute an ISB instruction to ensure that all of the * Execute an ISB instruction to ensure that all of the
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/rtc/sirfsoc_rtciobrg.h> #include <linux/rtc/sirfsoc_rtciobrg.h>
#include <asm/outercache.h>
#include <asm/suspend.h> #include <asm/suspend.h>
#include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-l2x0.h>
......
...@@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void); ...@@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void);
extern void shmobile_smp_sleep(void); extern void shmobile_smp_sleep(void);
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn, extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
unsigned long arg); unsigned long arg);
extern int shmobile_smp_cpu_disable(unsigned int cpu); extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
extern void shmobile_boot_scu(void); extern void shmobile_boot_scu(void);
extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus); extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
extern void shmobile_smp_scu_cpu_die(unsigned int cpu); extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
......
...@@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg) ...@@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int shmobile_smp_cpu_disable(unsigned int cpu) bool shmobile_smp_cpu_can_disable(unsigned int cpu)
{ {
return 0; /* Hotplug of any CPU is supported */ return true; /* Hotplug of any CPU is supported */
} }
#endif #endif
...@@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = { ...@@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = {
.smp_prepare_cpus = r8a7790_smp_prepare_cpus, .smp_prepare_cpus = r8a7790_smp_prepare_cpus,
.smp_boot_secondary = shmobile_smp_apmu_boot_secondary, .smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = shmobile_smp_cpu_disable, .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_apmu_cpu_die, .cpu_die = shmobile_smp_apmu_cpu_die,
.cpu_kill = shmobile_smp_apmu_cpu_kill, .cpu_kill = shmobile_smp_apmu_cpu_kill,
#endif #endif
......
...@@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = { ...@@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = {
.smp_prepare_cpus = r8a7791_smp_prepare_cpus, .smp_prepare_cpus = r8a7791_smp_prepare_cpus,
.smp_boot_secondary = r8a7791_smp_boot_secondary, .smp_boot_secondary = r8a7791_smp_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = shmobile_smp_cpu_disable, .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_apmu_cpu_die, .cpu_die = shmobile_smp_apmu_cpu_die,
.cpu_kill = shmobile_smp_apmu_cpu_kill, .cpu_kill = shmobile_smp_apmu_cpu_kill,
#endif #endif
......
...@@ -60,7 +60,7 @@ struct smp_operations sh73a0_smp_ops __initdata = { ...@@ -60,7 +60,7 @@ struct smp_operations sh73a0_smp_ops __initdata = {
.smp_prepare_cpus = sh73a0_smp_prepare_cpus, .smp_prepare_cpus = sh73a0_smp_prepare_cpus,
.smp_boot_secondary = sh73a0_boot_secondary, .smp_boot_secondary = sh73a0_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = shmobile_smp_cpu_disable, .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_scu_cpu_die, .cpu_die = shmobile_smp_scu_cpu_die,
.cpu_kill = shmobile_smp_scu_cpu_kill, .cpu_kill = shmobile_smp_scu_cpu_kill,
#endif #endif
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <asm/outercache.h>
#include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-l2x0.h>
#include "db8500-regs.h" #include "db8500-regs.h"
......
...@@ -20,10 +20,10 @@ ...@@ -20,10 +20,10 @@
#include <linux/mfd/dbx500-prcmu.h> #include <linux/mfd/dbx500-prcmu.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/perf/arm_pmu.h>
#include <linux/regulator/machine.h> #include <linux/regulator/machine.h>
#include <linux/random.h> #include <linux/random.h>
#include <asm/pmu.h>
#include <asm/mach/map.h> #include <asm/mach/map.h>
#include "setup.h" #include "setup.h"
......
...@@ -883,6 +883,7 @@ config OUTER_CACHE ...@@ -883,6 +883,7 @@ config OUTER_CACHE
config OUTER_CACHE_SYNC config OUTER_CACHE_SYNC
bool bool
select ARM_HEAVY_MB
help help
The outer cache has a outer_cache_fns.sync function pointer The outer cache has a outer_cache_fns.sync function pointer
that can be used to drain the write buffer of the outer cache. that can be used to drain the write buffer of the outer cache.
...@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS ...@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS
This option allows the use of custom mandatory barriers This option allows the use of custom mandatory barriers
included via the mach/barriers.h file. included via the mach/barriers.h file.
config ARM_HEAVY_MB
bool
config ARCH_SUPPORTS_BIG_ENDIAN config ARCH_SUPPORTS_BIG_ENDIAN
bool bool
help help
......
...@@ -19,6 +19,7 @@ ENTRY(v4_early_abort) ...@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r3, [r4] @ read aborted ARM instruction ldr r3, [r4] @ read aborted ARM instruction
uaccess_disable ip @ disable userspace access
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write? tst r3, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes. orreq r1, r1, #1 << 11 @ yes.
......
...@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort) ...@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
mrc p15, 0, r0, c6, c0, 0 @ get FAR mrc p15, 0, r0, c6, c0, 0 @ get FAR
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction ldreq r3, [r4] @ read aborted ARM instruction
uaccess_disable ip @ disable user access
bic r1, r1, #1 << 11 @ clear bits 11 of FSR bic r1, r1, #1 << 11 @ clear bits 11 of FSR
do_ldrd_abort tmp=ip, insn=r3 teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq do_DataAbort @ yes
tst r3, #1 << 20 @ check write tst r3, #1 << 20 @ check write
orreq r1, r1, #1 << 11 orreq r1, r1, #1 << 11
b do_DataAbort b do_DataAbort
...@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort) ...@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
bne do_DataAbort bne do_DataAbort
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction ldreq r3, [r4] @ read aborted ARM instruction
do_ldrd_abort tmp=ip, insn=r3 uaccess_disable ip @ disable userspace access
teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq do_DataAbort @ yes
tst r3, #1 << 20 @ L = 0 -> write tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes. orreq r1, r1, #1 << 11 @ yes.
b do_DataAbort b do_DataAbort
...@@ -26,16 +26,18 @@ ENTRY(v6_early_abort) ...@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
ldr ip, =0x4107b36 ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ r0 ARM1136? teq ip, r3, lsr #4 @ r0 ARM1136?
bne do_DataAbort bne 1f
tst r5, #PSR_J_BIT @ Java? tst r5, #PSR_J_BIT @ Java?
tsteq r5, #PSR_T_BIT @ Thumb? tsteq r5, #PSR_T_BIT @ Thumb?
bne do_DataAbort bne 1f
bic r1, r1, #1 << 11 @ clear bit 11 of FSR bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction ldr r3, [r4] @ read aborted ARM instruction
ARM_BE8(rev r3, r3) ARM_BE8(rev r3, r3)
do_ldrd_abort tmp=ip, insn=r3 teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq 1f @ yes
tst r3, #1 << 20 @ L = 0 -> write tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes. orreq r1, r1, #1 << 11 @ yes.
#endif #endif
1: uaccess_disable ip @ disable userspace access
b do_DataAbort b do_DataAbort
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
ENTRY(v7_early_abort) ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR mrc p15, 0, r0, c6, c0, 0 @ get FAR
uaccess_disable ip @ disable userspace access
/* /*
* V6 code adjusts the returned DFSR. * V6 code adjusts the returned DFSR.
......
...@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort) ...@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
#endif #endif
bne .data_thumb_abort bne .data_thumb_abort
ldr r8, [r4] @ read arm instruction ldr r8, [r4] @ read arm instruction
uaccess_disable ip @ disable userspace access
tst r8, #1 << 20 @ L = 1 -> write? tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes. orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24 and r7, r8, #15 << 24
...@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort) ...@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
.data_thumb_abort: .data_thumb_abort:
ldrh r8, [r4] @ read instruction ldrh r8, [r4] @ read instruction
uaccess_disable ip @ disable userspace access
tst r8, #1 << 11 @ L = 1 -> write? tst r8, #1 << 11 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes orreq r1, r1, #1 << 8 @ yes
and r7, r8, #15 << 12 and r7, r8, #15 << 12
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
tst \psr, #PSR_T_BIT tst \psr, #PSR_T_BIT
beq not_thumb beq not_thumb
ldrh \tmp, [\pc] @ Read aborted Thumb instruction ldrh \tmp, [\pc] @ Read aborted Thumb instruction
uaccess_disable ip @ disable userspace access
and \tmp, \tmp, # 0xfe00 @ Mask opcode field and \tmp, \tmp, # 0xfe00 @ Mask opcode field
cmp \tmp, # 0x5600 @ Is it ldrsb? cmp \tmp, # 0x5600 @ Is it ldrsb?
orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
...@@ -29,12 +30,9 @@ not_thumb: ...@@ -29,12 +30,9 @@ not_thumb:
* [7:4] == 1101 * [7:4] == 1101
* [20] == 0 * [20] == 0
*/ */
.macro do_ldrd_abort, tmp, insn .macro teq_ldrd, tmp, insn
tst \insn, #0x0e100000 @ [27:25,20] == 0 mov \tmp, #0x0e100000
bne not_ldrd orr \tmp, #0x000000f0
and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 and \tmp, \insn, \tmp
cmp \tmp, #0x000000d0 teq \tmp, #0x000000d0
beq do_DataAbort
not_ldrd:
.endm .endm
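A C rendering of what teq_ldrd tests may make the masking clearer; the function name below is made up for illustration:

	/* Illustrative only: C equivalent of the teq_ldrd check above. */
	static inline bool insn_is_ldrd(u32 insn)
	{
		/* LDRD: bits [27:25] == 000, bit [20] == 0 and bits [7:4] == 1101 */
		return (insn & (0x0e100000 | 0x000000f0)) == 0x000000d0;
	}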
...@@ -368,7 +368,6 @@ int __init feroceon_of_init(void) ...@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
struct device_node *node; struct device_node *node;
void __iomem *base; void __iomem *base;
bool l2_wt_override = false; bool l2_wt_override = false;
struct resource res;
#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) #if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
l2_wt_override = true; l2_wt_override = true;
...@@ -376,10 +375,7 @@ int __init feroceon_of_init(void) ...@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)
node = of_find_matching_node(NULL, feroceon_ids); node = of_find_matching_node(NULL, feroceon_ids);
if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) { if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
if (of_address_to_resource(node, 0, &res)) base = of_iomap(node, 0);
return -ENODEV;
base = ioremap(res.start, resource_size(&res));
if (!base) if (!base)
return -ENOMEM; return -ENOMEM;
......
...@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np, ...@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
} }
} }
if (of_property_read_bool(np, "arm,shared-override")) {
*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
}
prefetch = l2x0_saved_regs.prefetch_ctrl; prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val); ret = of_property_read_u32(np, "arm,double-linefill", &val);
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <asm/system_info.h> #include <asm/system_info.h>
#include <asm/dma-contiguous.h> #include <asm/dma-contiguous.h>
#include "dma.h"
#include "mm.h" #include "mm.h"
/* /*
...@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, ...@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs); want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
if (is_coherent || nommu()) if (nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
addr = __alloc_from_contiguous(dev, size, prot, &page,
caller, want_vaddr);
else if (is_coherent)
addr = __alloc_simple_buffer(dev, size, gfp, &page); addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (!(gfp & __GFP_WAIT)) else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page); addr = __alloc_from_pool(size, &page);
else if (!dev_get_cma_area(dev))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
else else
addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr); addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
caller, want_vaddr);
if (page) if (page)
*handle = pfn_to_dma(dev, page_to_pfn(page)); *handle = pfn_to_dma(dev, page_to_pfn(page));
...@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, ...@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size, static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{ {
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory; void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory)) if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory; return memory;
return __dma_alloc(dev, size, handle, gfp, prot, true, return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
attrs, __builtin_return_address(0)); attrs, __builtin_return_address(0));
} }
...@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, ...@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
if (is_coherent || nommu()) { if (nommu()) {
__dma_free_buffer(page, size); __dma_free_buffer(page, size);
} else if (__free_from_pool(cpu_addr, size)) { } else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
return; return;
} else if (!dev_get_cma_area(dev)) { } else if (!dev_get_cma_area(dev)) {
if (want_vaddr) if (want_vaddr && !is_coherent)
__dma_free_remap(cpu_addr, size); __dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size); __dma_free_buffer(page, size);
} else { } else {
......
#ifndef DMA_H
#define DMA_H
#include <asm/glue-cache.h>
#ifndef MULTI_CACHE
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
#else
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#endif
#endif
...@@ -21,6 +21,21 @@ ...@@ -21,6 +21,21 @@
#include "mm.h" #include "mm.h"
#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
if (outer_cache.sync)
outer_cache.sync();
#endif
if (soc_mb)
soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif
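Not visible in this hunk is the consumer side: with CONFIG_ARM_HEAVY_MB set, the mandatory barrier macros end up calling arm_heavy_mb() after their dsb. A rough sketch of that wiring; the helper name here is illustrative, the authoritative definitions are in arch/arm/include/asm/barrier.h in this series:

	/* Sketch only -- see asm/barrier.h for the real wiring. */
	#ifdef CONFIG_ARM_HEAVY_MB
	extern void arm_heavy_mb(void);
	#define __arm_heavy_mb(x...)	do { dsb(x); arm_heavy_mb(); } while (0)
	#else
	#define __arm_heavy_mb(x...)	dsb(x)
	#endif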
#ifdef CONFIG_CPU_CACHE_VIPT #ifdef CONFIG_CPU_CACHE_VIPT
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
......
...@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page) ...@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
type = kmap_atomic_idx_push(); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id(); idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx); vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
/* /*
...@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
if (kvaddr >= (void *)FIXADDR_START) { if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx(); type = kmap_atomic_idx();
idx = type + KM_TYPE_NR * smp_processor_id(); idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
if (cache_is_vivt()) if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
...@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn) ...@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
return page_address(page); return page_address(page);
type = kmap_atomic_idx_push(); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id(); idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx); vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr))); BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
......
...@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = { ...@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY, L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE, .prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_USER, .domain = DOMAIN_VECTORS,
}, },
[MT_HIGH_VECTORS] = { [MT_HIGH_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_RDONLY, L_PTE_USER | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE, .prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_USER, .domain = DOMAIN_VECTORS,
}, },
[MT_MEMORY_RWX] = { [MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
...@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type) ...@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
} }
EXPORT_SYMBOL(get_mem_type); EXPORT_SYMBOL(get_mem_type);
static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
{
return &bm_pte[pte_index(addr)];
}
static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
{
return pte_offset_kernel(dir, addr);
}
static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
pud_t *pud = pud_offset(pgd, addr);
pmd_t *pmd = pmd_offset(pud, addr);
return pmd;
}
void __init early_fixmap_init(void)
{
pmd_t *pmd;
/*
* The early fixmap range spans multiple pmds, for which
* we are not prepared:
*/
BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
!= FIXADDR_TOP >> PMD_SHIFT);
pmd = fixmap_pmd(FIXADDR_TOP);
pmd_populate_kernel(&init_mm, pmd, bm_pte);
pte_offset_fixmap = pte_offset_early_fixmap;
}
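Once early_fixmap_init() has hooked bm_pte into the kernel pmd, early boot code (e.g. earlycon setup) can establish a device mapping long before paging_init(). A hedged sketch of such a use; set_fixmap_io(), fix_to_virt() and the FIX_EARLYCON_MEM_BASE slot are assumptions here, not taken from this hunk:

	/* Illustrative only: map an early console UART through the early fixmap. */
	static void __iomem *sketch_early_uart_map(phys_addr_t phys)
	{
		set_fixmap_io(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK);
		return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
					(phys & ~PAGE_MASK));
	}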
/* /*
* To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range(). * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
* As a result, this can only be called with preemption disabled, as under * As a result, this can only be called with preemption disabled, as under
...@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type); ...@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{ {
unsigned long vaddr = __fix_to_virt(idx); unsigned long vaddr = __fix_to_virt(idx);
pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr); pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
/* Make sure fixmap region does not exceed available allocation. */ /* Make sure fixmap region does not exceed available allocation. */
BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) > BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
...@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md) ...@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
} }
if ((md->type == MT_DEVICE || md->type == MT_ROM) && if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET && md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n", pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual); (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
...@@ -1219,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void) ...@@ -1219,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)
/* /*
* Set up the device mappings. Since we clear out the page tables for all * Set up the device mappings. Since we clear out the page tables for all
* mappings above VMALLOC_START, we will remove any debug device mappings. * mappings above VMALLOC_START, except the early fixmap, we might remove debug
* This means you have to be careful how you debug this function, or any * device mappings. This means earlycon can be used to debug this function.
* called function. This means you can't use any function or debugging * Any other function or debugging method which may touch any device _will_
* method which may touch any device, otherwise the kernel _will_ crash. * crash the kernel.
*/ */
static void __init devicemaps_init(const struct machine_desc *mdesc) static void __init devicemaps_init(const struct machine_desc *mdesc)
{ {
...@@ -1237,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) ...@@ -1237,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
early_trap_init(vectors); early_trap_init(vectors);
for (addr = VMALLOC_START; addr; addr += PMD_SIZE) /*
* Clear page table except top pmd used by early fixmaps
*/
for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr)); pmd_clear(pmd_off_k(addr));
/* /*
...@@ -1489,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc) ...@@ -1489,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
#endif #endif
static void __init early_fixmap_shutdown(void)
{
int i;
unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
pte_offset_fixmap = pte_offset_late_fixmap;
pmd_clear(fixmap_pmd(va));
local_flush_tlb_kernel_page(va);
for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
pte_t *pte;
struct map_desc map;
map.virtual = fix_to_virt(i);
pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
/* Only i/o device mappings are supported ATM */
if (pte_none(*pte) ||
(pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
continue;
map.pfn = pte_pfn(*pte);
map.type = MT_DEVICE;
map.length = PAGE_SIZE;
create_mapping(&map);
}
}
/* /*
* paging_init() sets up the page tables, initialises the zone memory * paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables. * maps, and sets up the zero page, bad page and bad page tables.
...@@ -1502,6 +1575,7 @@ void __init paging_init(const struct machine_desc *mdesc) ...@@ -1502,6 +1575,7 @@ void __init paging_init(const struct machine_desc *mdesc)
map_lowmem(); map_lowmem();
memblock_set_current_limit(arm_lowmem_limit); memblock_set_current_limit(arm_lowmem_limit);
dma_contiguous_remap(); dma_contiguous_remap();
early_fixmap_shutdown();
devicemaps_init(mdesc); devicemaps_init(mdesc);
kmap_init(); kmap_init();
tcm_init(); tcm_init();
......
...@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm) ...@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
if (!new_pte) if (!new_pte)
goto no_pte; goto no_pte;
#ifndef CONFIG_ARM_LPAE
/*
* Modify the PTE pointer to have the correct domain. This
* needs to be the vectors domain to avoid the low vectors
* being unmapped.
*/
pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif
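For reference, PMD_DOMAIN() places the domain number in bits [8:5] of a short-descriptor first-level entry, which is what the masking above manipulates; a sketch of the relevant definitions (the authoritative ones are in pgtable-2level-hwdef.h):

	/* Sketch only -- see arch/arm/include/asm/pgtable-2level-hwdef.h. */
	#define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)	/* bits [8:5] */
	#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)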
init_pud = pud_offset(init_pgd, 0); init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0); init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0); init_pte = pte_offset_map(init_pmd, 0);
......
...@@ -20,6 +20,7 @@ config ARM64 ...@@ -20,6 +20,7 @@ config ARM64
select ARM_GIC_V2M if PCI_MSI select ARM_GIC_V2M if PCI_MSI
select ARM_GIC_V3 select ARM_GIC_V3
select ARM_GIC_V3_ITS if PCI_MSI select ARM_GIC_V3_ITS if PCI_MSI
select ARM_PSCI_FW
select BUILDTIME_EXTABLE_SORT select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS select CLONE_BACKWARDS
select COMMON_CLK select COMMON_CLK
......
...@@ -12,11 +12,11 @@ ...@@ -12,11 +12,11 @@
#ifndef _ASM_ACPI_H #ifndef _ASM_ACPI_H
#define _ASM_ACPI_H #define _ASM_ACPI_H
#include <linux/mm.h>
#include <linux/irqchip/arm-gic-acpi.h> #include <linux/irqchip/arm-gic-acpi.h>
#include <linux/mm.h>
#include <linux/psci.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/psci.h>
#include <asm/smp_plat.h> #include <asm/smp_plat.h>
/* Macros for consistency checks of the GICC subtable of MADT */ /* Macros for consistency checks of the GICC subtable of MADT */
......
...@@ -18,23 +18,17 @@ ...@@ -18,23 +18,17 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/psci.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <uapi/linux/psci.h> #include <uapi/linux/psci.h>
#include <asm/compiler.h> #include <asm/compiler.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h> #include <asm/cpu_ops.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/psci.h>
#include <asm/smp_plat.h> #include <asm/smp_plat.h>
#include <asm/suspend.h> #include <asm/suspend.h>
#include <asm/system_misc.h>
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
static bool psci_power_state_loses_context(u32 state) static bool psci_power_state_loses_context(u32 state)
{ {
...@@ -50,122 +44,8 @@ static bool psci_power_state_is_valid(u32 state) ...@@ -50,122 +44,8 @@ static bool psci_power_state_is_valid(u32 state)
return !(state & ~valid_mask); return !(state & ~valid_mask);
} }
/*
* The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
* calls to its resident CPU, so we must avoid issuing those. We never migrate
* a Trusted OS even if it claims to be capable of migration -- doing so will
* require cooperation with a Trusted OS driver.
*/
static int resident_cpu = -1;
struct psci_operations {
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int (*migrate)(unsigned long cpuid);
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
};
static struct psci_operations psci_ops;
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
asmlinkage psci_fn __invoke_psci_fn_hvc;
asmlinkage psci_fn __invoke_psci_fn_smc;
static psci_fn *invoke_psci_fn;
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
PSCI_FN_MAX,
};
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
static u32 psci_function_id[PSCI_FN_MAX];
static int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
return 0;
case PSCI_RET_NOT_SUPPORTED:
return -EOPNOTSUPP;
case PSCI_RET_INVALID_PARAMS:
return -EINVAL;
case PSCI_RET_DENIED:
return -EPERM;
};
return -EINVAL;
}
static u32 psci_get_version(void)
{
return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
err = invoke_psci_fn(fn, state, entry_point, 0);
return psci_to_linux_errno(err);
}
static int psci_cpu_off(u32 state)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_CPU_OFF];
err = invoke_psci_fn(fn, state, 0, 0);
return psci_to_linux_errno(err);
}
static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_CPU_ON];
err = invoke_psci_fn(fn, cpuid, entry_point, 0);
return psci_to_linux_errno(err);
}
static int psci_migrate(unsigned long cpuid)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_MIGRATE];
err = invoke_psci_fn(fn, cpuid, 0, 0);
return psci_to_linux_errno(err);
}
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
lowest_affinity_level, 0);
}
static int psci_migrate_info_type(void)
{
return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
}
static unsigned long psci_migrate_info_up_cpu(void)
{
return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
}
static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu) static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
{ {
int i, ret, count = 0; int i, ret, count = 0;
...@@ -230,238 +110,6 @@ static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu) ...@@ -230,238 +110,6 @@ static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
return ret; return ret;
} }
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
return -ENXIO;
}
if (!strcmp("hvc", method)) {
invoke_psci_fn = __invoke_psci_fn_hvc;
} else if (!strcmp("smc", method)) {
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
}
return 0;
}
static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}
static void psci_sys_poweroff(void)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
/*
* Detect the presence of a resident Trusted OS which may cause CPU_OFF to
* return DENIED (which would be fatal).
*/
static void __init psci_init_migrate(void)
{
unsigned long cpuid;
int type, cpu;
type = psci_ops.migrate_info_type();
if (type == PSCI_0_2_TOS_MP) {
pr_info("Trusted OS migration not required\n");
return;
}
if (type == PSCI_RET_NOT_SUPPORTED) {
pr_info("MIGRATE_INFO_TYPE not supported.\n");
return;
}
if (type != PSCI_0_2_TOS_UP_MIGRATE &&
type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
return;
}
cpuid = psci_migrate_info_up_cpu();
if (cpuid & ~MPIDR_HWID_BITMASK) {
pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
cpuid);
return;
}
cpu = get_logical_index(cpuid);
resident_cpu = cpu >= 0 ? cpu : -1;
pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
psci_ops.cpu_suspend = psci_cpu_suspend;
psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
psci_ops.cpu_off = psci_cpu_off;
psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
psci_ops.cpu_on = psci_cpu_on;
psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
psci_ops.migrate = psci_migrate;
psci_ops.affinity_info = psci_affinity_info;
psci_ops.migrate_info_type = psci_migrate_info_type;
arm_pm_restart = psci_sys_reset;
pm_power_off = psci_sys_poweroff;
}
/*
* Probe function for PSCI firmware versions >= 0.2
*/
static int __init psci_probe(void)
{
u32 ver = psci_get_version();
pr_info("PSCIv%d.%d detected in firmware.\n",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
pr_err("Conflicting PSCI version detected.\n");
return -EINVAL;
}
psci_0_2_set_functions();
psci_init_migrate();
return 0;
}
typedef int (*psci_initcall_t)(const struct device_node *);
/*
* PSCI init function for PSCI versions >=0.2
*
* Probe based on PSCI PSCI_VERSION function
*/
static int __init psci_0_2_init(struct device_node *np)
{
int err;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
/*
* Starting with v0.2, the PSCI specification introduced a call
* (PSCI_VERSION) that allows probing the firmware version, so
* that PSCI function IDs and version specific initialization
* can be carried out according to the specific version reported
* by firmware
*/
err = psci_probe();
out_put_node:
of_node_put(np);
return err;
}
/*
* PSCI < v0.2 get PSCI Function IDs via DT.
*/
static int __init psci_0_1_init(struct device_node *np)
{
u32 id;
int err;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
pr_info("Using PSCI v0.1 Function IDs from DT\n");
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
psci_ops.cpu_suspend = psci_cpu_suspend;
}
if (!of_property_read_u32(np, "cpu_off", &id)) {
psci_function_id[PSCI_FN_CPU_OFF] = id;
psci_ops.cpu_off = psci_cpu_off;
}
if (!of_property_read_u32(np, "cpu_on", &id)) {
psci_function_id[PSCI_FN_CPU_ON] = id;
psci_ops.cpu_on = psci_cpu_on;
}
if (!of_property_read_u32(np, "migrate", &id)) {
psci_function_id[PSCI_FN_MIGRATE] = id;
psci_ops.migrate = psci_migrate;
}
out_put_node:
of_node_put(np);
return err;
}
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{},
};
int __init psci_dt_init(void)
{
struct device_node *np;
const struct of_device_id *matched_np;
psci_initcall_t init_fn;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
if (!np)
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
return init_fn(np);
}
#ifdef CONFIG_ACPI
/*
* We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
* explicitly clarified in SBBR
*/
int __init psci_acpi_init(void)
{
if (!acpi_psci_present()) {
pr_info("is not implemented in ACPI.\n");
return -EOPNOTSUPP;
}
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
invoke_psci_fn = __invoke_psci_fn_hvc;
else
invoke_psci_fn = __invoke_psci_fn_smc;
return psci_probe();
}
#endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static int __init cpu_psci_cpu_init(unsigned int cpu) static int __init cpu_psci_cpu_init(unsigned int cpu)
...@@ -489,11 +137,6 @@ static int cpu_psci_cpu_boot(unsigned int cpu) ...@@ -489,11 +137,6 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static bool psci_tos_resident_on(int cpu)
{
return cpu == resident_cpu;
}
static int cpu_psci_cpu_disable(unsigned int cpu) static int cpu_psci_cpu_disable(unsigned int cpu)
{ {
/* Fail early if we don't have CPU_OFF support */ /* Fail early if we don't have CPU_OFF support */
......
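The psci_tos_resident_on() helper removed from the arch code here reappears in the shared PSCI firmware driver further down. For reference, a minimal sketch of the resulting arm64 hotplug check, assuming the CPU_OFF guard shown at the top of this hunk (sketch only, not part of the diff itself):

static int cpu_psci_cpu_disable(unsigned int cpu)
{
	/* Fail early if we don't have CPU_OFF support */
	if (!psci_ops.cpu_off)
		return -EOPNOTSUPP;

	/* The Trusted OS may reject CPU_OFF for its resident CPU */
	if (psci_tos_resident_on(cpu))
		return -EPERM;

	return 0;
}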
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/efi.h> #include <linux/efi.h>
#include <linux/personality.h> #include <linux/personality.h>
#include <linux/psci.h>
#include <asm/acpi.h> #include <asm/acpi.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
...@@ -60,7 +61,6 @@ ...@@ -60,7 +61,6 @@
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/memblock.h> #include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h> #include <asm/efi.h>
#include <asm/virt.h> #include <asm/virt.h>
#include <asm/xen/hypervisor.h> #include <asm/xen/hypervisor.h>
......
...@@ -176,6 +176,8 @@ source "drivers/powercap/Kconfig" ...@@ -176,6 +176,8 @@ source "drivers/powercap/Kconfig"
source "drivers/mcb/Kconfig" source "drivers/mcb/Kconfig"
source "drivers/perf/Kconfig"
source "drivers/ras/Kconfig" source "drivers/ras/Kconfig"
source "drivers/thunderbolt/Kconfig" source "drivers/thunderbolt/Kconfig"
......
...@@ -161,6 +161,7 @@ obj-$(CONFIG_NTB) += ntb/ ...@@ -161,6 +161,7 @@ obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_FMC) += fmc/ obj-$(CONFIG_FMC) += fmc/
obj-$(CONFIG_POWERCAP) += powercap/ obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/ obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_PERF_EVENTS) += perf/
obj-$(CONFIG_RAS) += ras/ obj-$(CONFIG_RAS) += ras/
obj-$(CONFIG_THUNDERBOLT) += thunderbolt/ obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/ obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
......
...@@ -25,16 +25,21 @@ ...@@ -25,16 +25,21 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/psci.h>
#include <asm/cpuidle.h> #include <asm/cpuidle.h>
#include <asm/suspend.h> #include <asm/suspend.h>
#include <asm/psci.h>
#include <uapi/linux/psci.h>
#define CALXEDA_IDLE_PARAM \
((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
(0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
(PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
static int calxeda_idle_finish(unsigned long val) static int calxeda_idle_finish(unsigned long val)
{ {
const struct psci_power_state ps = { return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume));
.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
};
return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
} }
static int calxeda_pwrdown_idle(struct cpuidle_device *dev, static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
......
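After this series psci_ops.cpu_suspend() takes the PSCI power_state parameter as an already-packed u32 rather than a struct psci_power_state, so callers such as cpuidle-calxeda encode it themselves with the PSCI_0_2_POWER_STATE_* fields from <uapi/linux/psci.h>. A minimal sketch of that packing (the helper name is illustrative only, not part of this series):

static u32 pack_psci_power_state(u32 id, u32 type, u32 affinity_level)
{
	return ((id << PSCI_0_2_POWER_STATE_ID_SHIFT) &
		PSCI_0_2_POWER_STATE_ID_MASK) |
	       ((type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) &
		PSCI_0_2_POWER_STATE_TYPE_MASK) |
	       ((affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) &
		PSCI_0_2_POWER_STATE_AFFL_MASK);
}

/* CALXEDA_IDLE_PARAM above is equivalent to
 * pack_psci_power_state(0, PSCI_POWER_STATE_TYPE_POWER_DOWN, 0). */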
...@@ -5,6 +5,9 @@ ...@@ -5,6 +5,9 @@
menu "Firmware Drivers" menu "Firmware Drivers"
config ARM_PSCI_FW
bool
config EDD config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk" tristate "BIOS Enhanced Disk Drive calls determine boot disk"
depends on X86 depends on X86
......
# #
# Makefile for the linux kernel. # Makefile for the linux kernel.
# #
obj-$(CONFIG_ARM_PSCI_FW) += psci.o
obj-$(CONFIG_DMI) += dmi_scan.o obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o obj-$(CONFIG_EDD) += edd.o
......
...@@ -8,39 +8,63 @@ ...@@ -8,39 +8,63 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details. * GNU General Public License for more details.
* *
* Copyright (C) 2012 ARM Limited * Copyright (C) 2015 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
*/ */
#define pr_fmt(fmt) "psci: " fmt #define pr_fmt(fmt) "psci: " fmt
#include <linux/init.h> #include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/reboot.h>
#include <linux/pm.h> #include <linux/pm.h>
#include <linux/printk.h>
#include <linux/psci.h>
#include <linux/reboot.h>
#include <uapi/linux/psci.h> #include <uapi/linux/psci.h>
#include <asm/compiler.h> #include <asm/cputype.h>
#include <asm/errno.h>
#include <asm/psci.h>
#include <asm/system_misc.h> #include <asm/system_misc.h>
#include <asm/smp_plat.h>
struct psci_operations psci_ops; /*
* While a 64-bit OS can make calls with SMC32 calling conventions, for some
* calls it is necessary to use SMC64 to pass or return 64-bit values. For such
* calls PSCI_0_2_FN_NATIVE(x) will choose the appropriate (native-width)
* function ID.
*/
#ifdef CONFIG_64BIT
#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name
#else
#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN_##name
#endif
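For example, on a 64-bit build (CONFIG_64BIT) the macro above selects the SMC64 function IDs, while 32-bit ARM keeps the SMC32 ones, so the same driver code can pass or return 64-bit values where the firmware interface requires it:

	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON);
	/* arm64: PSCI_0_2_FN64_CPU_ON; 32-bit ARM: PSCI_0_2_FN_CPU_ON */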
static int (*invoke_psci_fn)(u32, u32, u32, u32); /*
typedef int (*psci_initcall_t)(const struct device_node *); * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
* calls to its resident CPU, so we must avoid issuing those. We never migrate
* a Trusted OS even if it claims to be capable of migration -- doing so will
* require cooperation with a Trusted OS driver.
*/
static int resident_cpu = -1;
bool psci_tos_resident_on(int cpu)
{
return cpu == resident_cpu;
}
struct psci_operations psci_ops;
asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32); typedef unsigned long (psci_fn)(unsigned long, unsigned long,
asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32); unsigned long, unsigned long);
asmlinkage psci_fn __invoke_psci_fn_hvc;
asmlinkage psci_fn __invoke_psci_fn_smc;
static psci_fn *invoke_psci_fn;
enum psci_function { enum psci_function {
PSCI_FN_CPU_SUSPEND, PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON, PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF, PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE, PSCI_FN_MIGRATE,
PSCI_FN_AFFINITY_INFO,
PSCI_FN_MIGRATE_INFO_TYPE,
PSCI_FN_MAX, PSCI_FN_MAX,
}; };
...@@ -62,44 +86,28 @@ static int psci_to_linux_errno(int errno) ...@@ -62,44 +86,28 @@ static int psci_to_linux_errno(int errno)
return -EINVAL; return -EINVAL;
} }
static u32 psci_power_state_pack(struct psci_power_state state) static u32 psci_get_version(void)
{ {
return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT) return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
& PSCI_0_2_POWER_STATE_ID_MASK) |
((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
& PSCI_0_2_POWER_STATE_TYPE_MASK) |
((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
& PSCI_0_2_POWER_STATE_AFFL_MASK);
} }
static int psci_get_version(void) static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{ {
int err; int err;
u32 fn;
err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
return err;
}
static int psci_cpu_suspend(struct psci_power_state state,
unsigned long entry_point)
{
int err;
u32 fn, power_state;
fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
power_state = psci_power_state_pack(state); err = invoke_psci_fn(fn, state, entry_point, 0);
err = invoke_psci_fn(fn, power_state, entry_point, 0);
return psci_to_linux_errno(err); return psci_to_linux_errno(err);
} }
static int psci_cpu_off(struct psci_power_state state) static int psci_cpu_off(u32 state)
{ {
int err; int err;
u32 fn, power_state; u32 fn;
fn = psci_function_id[PSCI_FN_CPU_OFF]; fn = psci_function_id[PSCI_FN_CPU_OFF];
power_state = psci_power_state_pack(state); err = invoke_psci_fn(fn, state, 0, 0);
err = invoke_psci_fn(fn, power_state, 0, 0);
return psci_to_linux_errno(err); return psci_to_linux_errno(err);
} }
...@@ -126,22 +134,19 @@ static int psci_migrate(unsigned long cpuid) ...@@ -126,22 +134,19 @@ static int psci_migrate(unsigned long cpuid)
static int psci_affinity_info(unsigned long target_affinity, static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level) unsigned long lowest_affinity_level)
{ {
int err; return invoke_psci_fn(PSCI_0_2_FN_NATIVE(AFFINITY_INFO),
u32 fn; target_affinity, lowest_affinity_level, 0);
fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
return err;
} }
static int psci_migrate_info_type(void) static int psci_migrate_info_type(void)
{ {
int err; return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
u32 fn; }
fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE]; static unsigned long psci_migrate_info_up_cpu(void)
err = invoke_psci_fn(fn, 0, 0, 0); {
return err; return invoke_psci_fn(PSCI_0_2_FN_NATIVE(MIGRATE_INFO_UP_CPU),
0, 0, 0);
} }
static int get_set_conduit_method(struct device_node *np) static int get_set_conduit_method(struct device_node *np)
...@@ -177,61 +182,115 @@ static void psci_sys_poweroff(void) ...@@ -177,61 +182,115 @@ static void psci_sys_poweroff(void)
} }
/* /*
* PSCI Function IDs for v0.2+ are well defined so use * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
* standard values. * return DENIED (which would be fatal).
*/ */
static int psci_0_2_init(struct device_node *np) static void __init psci_init_migrate(void)
{ {
int err, ver; unsigned long cpuid;
int type, cpu = -1;
err = get_set_conduit_method(np); type = psci_ops.migrate_info_type();
if (err) if (type == PSCI_0_2_TOS_MP) {
goto out_put_node; pr_info("Trusted OS migration not required\n");
return;
}
ver = psci_get_version(); if (type == PSCI_RET_NOT_SUPPORTED) {
pr_info("MIGRATE_INFO_TYPE not supported.\n");
return;
}
if (ver == PSCI_RET_NOT_SUPPORTED) { if (type != PSCI_0_2_TOS_UP_MIGRATE &&
/* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */ type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
pr_err("PSCI firmware does not comply with the v0.2 spec.\n"); pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
err = -EOPNOTSUPP; return;
goto out_put_node; }
} else {
pr_info("PSCIv%d.%d detected in firmware.\n", cpuid = psci_migrate_info_up_cpu();
PSCI_VERSION_MAJOR(ver), if (cpuid & ~MPIDR_HWID_BITMASK) {
PSCI_VERSION_MINOR(ver)); pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
cpuid);
if (PSCI_VERSION_MAJOR(ver) == 0 && return;
PSCI_VERSION_MINOR(ver) < 2) {
err = -EINVAL;
pr_err("Conflicting PSCI version detected.\n");
goto out_put_node;
}
} }
cpu = get_logical_index(cpuid);
resident_cpu = cpu >= 0 ? cpu : -1;
pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n"); pr_info("Using standard PSCI v0.2 function IDs\n");
psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND; psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_NATIVE(CPU_SUSPEND);
psci_ops.cpu_suspend = psci_cpu_suspend; psci_ops.cpu_suspend = psci_cpu_suspend;
psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
psci_ops.cpu_off = psci_cpu_off; psci_ops.cpu_off = psci_cpu_off;
psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON; psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON);
psci_ops.cpu_on = psci_cpu_on; psci_ops.cpu_on = psci_cpu_on;
psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE; psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_NATIVE(MIGRATE);
psci_ops.migrate = psci_migrate; psci_ops.migrate = psci_migrate;
psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
psci_ops.affinity_info = psci_affinity_info; psci_ops.affinity_info = psci_affinity_info;
psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
PSCI_0_2_FN_MIGRATE_INFO_TYPE;
psci_ops.migrate_info_type = psci_migrate_info_type; psci_ops.migrate_info_type = psci_migrate_info_type;
arm_pm_restart = psci_sys_reset; arm_pm_restart = psci_sys_reset;
pm_power_off = psci_sys_poweroff; pm_power_off = psci_sys_poweroff;
}
/*
* Probe function for PSCI firmware versions >= 0.2
*/
static int __init psci_probe(void)
{
u32 ver = psci_get_version();
pr_info("PSCIv%d.%d detected in firmware.\n",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
pr_err("Conflicting PSCI version detected.\n");
return -EINVAL;
}
psci_0_2_set_functions();
psci_init_migrate();
return 0;
}
typedef int (*psci_initcall_t)(const struct device_node *);
/*
* PSCI init function for PSCI versions >=0.2
*
* Probe based on PSCI PSCI_VERSION function
*/
static int __init psci_0_2_init(struct device_node *np)
{
int err;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
/*
* Starting with v0.2, the PSCI specification introduced a call
* (PSCI_VERSION) that allows probing the firmware version, so
* that PSCI function IDs and version specific initialization
* can be carried out according to the specific version reported
* by firmware
*/
err = psci_probe();
out_put_node: out_put_node:
of_node_put(np); of_node_put(np);
...@@ -241,7 +300,7 @@ static int psci_0_2_init(struct device_node *np) ...@@ -241,7 +300,7 @@ static int psci_0_2_init(struct device_node *np)
/* /*
* PSCI < v0.2 get PSCI Function IDs via DT. * PSCI < v0.2 get PSCI Function IDs via DT.
*/ */
static int psci_0_1_init(struct device_node *np) static int __init psci_0_1_init(struct device_node *np)
{ {
u32 id; u32 id;
int err; int err;
...@@ -279,21 +338,45 @@ static int psci_0_1_init(struct device_node *np) ...@@ -279,21 +338,45 @@ static int psci_0_1_init(struct device_node *np)
} }
static const struct of_device_id const psci_of_match[] __initconst = { static const struct of_device_id const psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init}, { .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init}, { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{}, {},
}; };
int __init psci_init(void) int __init psci_dt_init(void)
{ {
struct device_node *np; struct device_node *np;
const struct of_device_id *matched_np; const struct of_device_id *matched_np;
psci_initcall_t init_fn; psci_initcall_t init_fn;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
if (!np) if (!np)
return -ENODEV; return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data; init_fn = (psci_initcall_t)matched_np->data;
return init_fn(np); return init_fn(np);
} }
#ifdef CONFIG_ACPI
/*
* We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
* explicitly clarified in SBBR
*/
int __init psci_acpi_init(void)
{
if (!acpi_psci_present()) {
pr_info("is not implemented in ACPI.\n");
return -EOPNOTSUPP;
}
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
invoke_psci_fn = __invoke_psci_fn_hvc;
else
invoke_psci_fn = __invoke_psci_fn_smc;
return psci_probe();
}
#endif
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/qcom_scm.h> #include <linux/qcom_scm.h>
#include <asm/outercache.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include "qcom_scm.h" #include "qcom_scm.h"
...@@ -219,8 +218,7 @@ static int __qcom_scm_call(const struct qcom_scm_command *cmd) ...@@ -219,8 +218,7 @@ static int __qcom_scm_call(const struct qcom_scm_command *cmd)
* Flush the command buffer so that the secure world sees * Flush the command buffer so that the secure world sees
* the correct data. * the correct data.
*/ */
__cpuc_flush_dcache_area((void *)cmd, cmd->len); secure_flush_area(cmd, cmd->len);
outer_flush_range(cmd_addr, cmd_addr + cmd->len);
ret = smc(cmd_addr); ret = smc(cmd_addr);
if (ret < 0) if (ret < 0)
......
#
# Performance Monitor Drivers
#
menu "Performance monitor support"
config ARM_PMU
depends on PERF_EVENTS && ARM
bool "ARM PMU framework"
default y
help
Say y if you want to use CPU performance monitors on ARM-based
systems.
endmenu
obj-$(CONFIG_ARM_PMU) += arm_pmu.o
...@@ -15,7 +15,8 @@ ...@@ -15,7 +15,8 @@
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/of.h> #include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -24,7 +25,6 @@ ...@@ -24,7 +25,6 @@
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/irq_regs.h> #include <asm/irq_regs.h>
#include <asm/pmu.h>
static int static int
armpmu_map_cache_event(const unsigned (*cache_map) armpmu_map_cache_event(const unsigned (*cache_map)
...@@ -790,52 +790,77 @@ static int probe_current_pmu(struct arm_pmu *pmu, ...@@ -790,52 +790,77 @@ static int probe_current_pmu(struct arm_pmu *pmu,
static int of_pmu_irq_cfg(struct arm_pmu *pmu) static int of_pmu_irq_cfg(struct arm_pmu *pmu)
{ {
int i, irq, *irqs; int *irqs, i = 0;
bool using_spi = false;
struct platform_device *pdev = pmu->plat_device; struct platform_device *pdev = pmu->plat_device;
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
if (irq >= 0 && irq_is_percpu(irq))
return 0;
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs) if (!irqs)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < pdev->num_resources; ++i) { do {
struct device_node *dn; struct device_node *dn;
int cpu; int cpu, irq;
dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", /* See if we have an affinity entry */
i); dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
if (!dn) { if (!dn)
pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
of_node_full_name(pdev->dev.of_node), i);
break; break;
/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
irq = platform_get_irq(pdev, i);
if (irq >= 0) {
bool spi = !irq_is_percpu(irq);
if (i > 0 && spi != using_spi) {
pr_err("PPI/SPI IRQ type mismatch for %s!\n",
dn->name);
kfree(irqs);
return -EINVAL;
}
using_spi = spi;
} }
/* Now look up the logical CPU number */
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) if (dn == of_cpu_device_node_get(cpu))
break; break;
if (cpu >= nr_cpu_ids) { if (cpu >= nr_cpu_ids) {
pr_warn("Failed to find logical CPU for %s\n", pr_warn("Failed to find logical CPU for %s\n",
dn->name); dn->name);
of_node_put(dn); of_node_put(dn);
cpumask_setall(&pmu->supported_cpus);
break; break;
} }
of_node_put(dn); of_node_put(dn);
irqs[i] = cpu; /* For SPIs, we need to track the affinity per IRQ */
if (using_spi) {
if (i >= pdev->num_resources) {
of_node_put(dn);
break;
}
irqs[i] = cpu;
}
/* Keep track of the CPUs containing this PMU type */
cpumask_set_cpu(cpu, &pmu->supported_cpus); cpumask_set_cpu(cpu, &pmu->supported_cpus);
} of_node_put(dn);
i++;
} while (1);
/* If we didn't manage to parse anything, claim to support all CPUs */
if (cpumask_weight(&pmu->supported_cpus) == 0)
cpumask_setall(&pmu->supported_cpus);
if (i == pdev->num_resources) { /* If we matched up the IRQ affinities, use them to route the SPIs */
if (using_spi && i == pdev->num_resources)
pmu->irq_affinity = irqs; pmu->irq_affinity = irqs;
} else { else
kfree(irqs); kfree(irqs);
cpumask_setall(&pmu->supported_cpus);
}
return 0; return 0;
} }
......
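The parsed "interrupt-affinity" entries give, for each SPI listed in "interrupts", the logical CPU that interrupt is wired to; pmu->irq_affinity keeps that mapping when every entry resolves. Conceptually, when the PMU IRQs are later requested, each SPI can then be pinned to its CPU along these lines (a sketch only; the actual request path lives elsewhere in the PMU code):

	for (i = 0; i < pdev->num_resources; i++) {
		int irq = platform_get_irq(pdev, i);
		int cpu = pmu->irq_affinity ? pmu->irq_affinity[i] : i;

		if (irq >= 0)
			irq_set_affinity(irq, cpumask_of(cpu));
	}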
...@@ -30,7 +30,7 @@ struct arm_pmu_platdata { ...@@ -30,7 +30,7 @@ struct arm_pmu_platdata {
irq_handler_t pmu_handler); irq_handler_t pmu_handler);
}; };
#ifdef CONFIG_HW_PERF_EVENTS #ifdef CONFIG_ARM_PMU
/* /*
* The ARMv7 CPU PMU supports up to 32 event counters. * The ARMv7 CPU PMU supports up to 32 event counters.
...@@ -149,6 +149,6 @@ int arm_pmu_device_probe(struct platform_device *pdev, ...@@ -149,6 +149,6 @@ int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table, const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table); const struct pmu_probe_info *probe_table);
#endif /* CONFIG_HW_PERF_EVENTS */ #endif /* CONFIG_ARM_PMU */
#endif /* __ARM_PMU_H__ */ #endif /* __ARM_PMU_H__ */
...@@ -8,15 +8,39 @@ ...@@ -8,15 +8,39 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details. * GNU General Public License for more details.
* *
* Copyright (C) 2013 ARM Limited * Copyright (C) 2015 ARM Limited
*/ */
#ifndef __ASM_PSCI_H #ifndef __LINUX_PSCI_H
#define __ASM_PSCI_H #define __LINUX_PSCI_H
#include <linux/init.h>
#include <linux/types.h>
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
bool psci_tos_resident_on(int cpu);
struct psci_operations {
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int (*migrate)(unsigned long cpuid);
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
};
extern struct psci_operations psci_ops;
#if defined(CONFIG_ARM_PSCI_FW)
int __init psci_dt_init(void); int __init psci_dt_init(void);
#else
static inline int psci_dt_init(void) { return 0; }
#endif
#ifdef CONFIG_ACPI #if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
int __init psci_acpi_init(void); int __init psci_acpi_init(void);
bool __init acpi_psci_present(void); bool __init acpi_psci_present(void);
bool __init acpi_psci_use_hvc(void); bool __init acpi_psci_use_hvc(void);
...@@ -25,4 +49,4 @@ static inline int psci_acpi_init(void) { return 0; } ...@@ -25,4 +49,4 @@ static inline int psci_acpi_init(void) { return 0; }
static inline bool acpi_psci_present(void) { return false; } static inline bool acpi_psci_present(void) { return false; }
#endif #endif
#endif /* __ASM_PSCI_H */ #endif /* __LINUX_PSCI_H */
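With the client code now behind <linux/psci.h> and the ARM_PSCI_FW driver, architecture setup probes the firmware once (psci_dt_init() from setup_arch(), or psci_acpi_init() on ACPI systems) and everything else goes through psci_ops. A rough sketch of how SMP bring-up then starts a secondary CPU through the firmware, assuming the arm64 secondary_entry symbol and an illustrative wrapper name (not part of this diff):

static int boot_secondary_via_psci(unsigned int cpu)
{
	if (!psci_ops.cpu_on)
		return -ENODEV;

	/* physical (MPIDR) ID of the target CPU and physical entry address;
	 * the PSCI return value is already translated to a Linux errno */
	return psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
}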