Commit f698d314 authored by Russell King (Oracle)

Merge branches 'amba', 'cfi', 'clkdev' and 'misc' into for-linus

...
@@ -35,6 +35,7 @@ config ARM
 	select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_CFI_CLANG
 	select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
 	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_USE_BUILTIN_BSWAP
...
@@ -1233,9 +1234,9 @@ config HIGHPTE
 	  consumed by page tables.  Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.

-config CPU_SW_DOMAIN_PAN
-	bool "Enable use of CPU domains to implement privileged no-access"
-	depends on MMU && !ARM_LPAE
+config ARM_PAN
+	bool "Enable privileged no-access"
+	depends on MMU
 	default y
 	help
 	  Increase kernel security by ensuring that normal kernel accesses
@@ -1244,10 +1245,26 @@ config CPU_SW_DOMAIN_PAN
 	  by ensuring that magic values (such as LIST_POISON) will always
 	  fault when dereferenced.

+	  The implementation uses CPU domains when !CONFIG_ARM_LPAE and
+	  disabling of TTBR0 page table walks with CONFIG_ARM_LPAE.
+
+config CPU_SW_DOMAIN_PAN
+	def_bool y
+	depends on ARM_PAN && !ARM_LPAE
+	help
+	  Enable use of CPU domains to implement privileged no-access.
+
 	  CPUs with low-vector mappings use a best-efforts implementation.
 	  Their lower 1MB needs to remain accessible for the vectors, but
 	  the remainder of userspace will become appropriately inaccessible.

+config CPU_TTBR0_PAN
+	def_bool y
+	depends on ARM_PAN && ARM_LPAE
+	help
+	  Enable privileged no-access by disabling TTBR0 page table walks when
+	  running in kernel mode.
+
 config HW_PERF_EVENTS
 	def_bool y
 	depends on ARM_PMU
...
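For context (illustrative, not part of the commit): ARM_PAN is the new user-visible option, and the two def_bool sub-options are mutually exclusive through their ARM_LPAE dependency, so at most one PAN backend is built in. Code can branch on the selected backend with IS_ENABLED(), as in this hypothetical sketch (function name and messages are not from the merge):

	#include <linux/kernel.h>

	/* Hypothetical helper: report which PAN backend this kernel uses. */
	static void demo_report_pan_backend(void)
	{
		if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
			pr_info("PAN: TTBR0 page table walk disable (LPAE)\n");
		else if (IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN))
			pr_info("PAN: CPU domains (classic MMU)\n");
		else
			pr_info("PAN: not enabled\n");
	}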
...
@@ -21,6 +21,7 @@
 #include <asm/opcodes-virt.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess-asm.h>
...
...
@@ -118,6 +118,10 @@
 # define MULTI_CACHE 1
 #endif

+#ifdef CONFIG_CPU_CACHE_NOP
+# define MULTI_CACHE 1
+#endif
+
 #if defined(CONFIG_CPU_V7M)
 # define MULTI_CACHE 1
 #endif
@@ -126,29 +130,15 @@
 #error Unknown cache maintenance model
 #endif

-#ifndef __ASSEMBLER__
-static inline void nop_flush_icache_all(void) { }
-static inline void nop_flush_kern_cache_all(void) { }
-static inline void nop_flush_kern_cache_louis(void) { }
-static inline void nop_flush_user_cache_all(void) { }
-static inline void nop_flush_user_cache_range(unsigned long a,
-		unsigned long b, unsigned int c) { }
-static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
-static inline int nop_coherent_user_range(unsigned long a,
-		unsigned long b) { return 0; }
-static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
-static inline void nop_dma_flush_range(const void *a, const void *b) { }
-static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
-static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
-#endif
-
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
+/* This function only has a dedicated assembly callback on the v7 cache */
+#ifdef CONFIG_CPU_CACHE_V7
 #define __cpuc_flush_kern_louis		__glue(_CACHE,_flush_kern_cache_louis)
+#else
+#define __cpuc_flush_kern_louis		__glue(_CACHE,_flush_kern_cache_all)
+#endif
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
...
...
@@ -84,6 +84,7 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DSCR_MOE(x)			((x >> 2) & 0xf)
 #define ARM_ENTRY_BREAKPOINT		0x1
 #define ARM_ENTRY_ASYNC_WATCHPOINT	0x2
+#define ARM_ENTRY_CFI_BREAKPOINT	0x3
 #define ARM_ENTRY_SYNC_WATCHPOINT	0xa

 /* DSCR monitor/halting bits. */
...
...
@@ -74,6 +74,7 @@
 #define PHYS_MASK_SHIFT		(40)
 #define PHYS_MASK		((1ULL << PHYS_MASK_SHIFT) - 1)

+#ifndef CONFIG_CPU_TTBR0_PAN
 /*
  * TTBR0/TTBR1 split (PAGE_OFFSET):
  *   0x40000000: T0SZ = 2, T1SZ = 0 (not used)
@@ -93,5 +94,30 @@
 #endif

 #define TTBR1_SIZE	(((PAGE_OFFSET >> 30) - 1) << 16)
+#else
+/*
+ * With CONFIG_CPU_TTBR0_PAN enabled, TTBR1 is only used during uaccess
+ * disabled regions when TTBR0 is disabled.
+ */
+#define TTBR1_OFFSET	0		/* pointing to swapper_pg_dir */
+#define TTBR1_SIZE	0		/* TTBR1 size controlled via TTBCR.T0SZ */
+#endif
+
+/*
+ * TTBCR register bits.
+ */
+#define TTBCR_EAE		(1 << 31)
+#define TTBCR_IMP		(1 << 30)
+#define TTBCR_SH1_MASK		(3 << 28)
+#define TTBCR_ORGN1_MASK	(3 << 26)
+#define TTBCR_IRGN1_MASK	(3 << 24)
+#define TTBCR_EPD1		(1 << 23)
+#define TTBCR_A1		(1 << 22)
+#define TTBCR_T1SZ_MASK		(7 << 16)
+#define TTBCR_SH0_MASK		(3 << 12)
+#define TTBCR_ORGN0_MASK	(3 << 10)
+#define TTBCR_IRGN0_MASK	(3 << 8)
+#define TTBCR_EPD0		(1 << 7)
+#define TTBCR_T0SZ_MASK		(7 << 0)
+
 #endif
...
@@ -178,6 +178,18 @@ extern void cpu_resume(void);
 })
 #endif

+static inline unsigned int cpu_get_ttbcr(void)
+{
+	unsigned int ttbcr;
+	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+	return ttbcr;
+}
+
+static inline void cpu_set_ttbcr(unsigned int ttbcr)
+{
+	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
+}
+
 #else	/*!CONFIG_MMU */

 #define cpu_switch_mm(pgd,mm)	{ }
...
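For context (illustrative, not part of the commit): cp15 register c2/c0/2 is TTBCR, so these accessors give C code the same read-modify-write ability the assembly macros use. A minimal sketch of the pattern, assuming the TTBCR_* masks defined above (the helper name is hypothetical):

	/* Hypothetical sketch: stop TTBR0 page table walks from C. */
	static inline void demo_disable_ttbr0_walks(void)
	{
		cpu_set_ttbcr(cpu_get_ttbcr() | TTBCR_EPD0);
		isb();	/* make the TTBCR write take effect */
	}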
...
@@ -20,6 +20,7 @@ struct pt_regs {
 struct svc_pt_regs {
 	struct pt_regs regs;
 	u32 dacr;
+	u32 ttbcr;
 };

 #define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
...
...
@@ -39,8 +39,9 @@
 #endif
 	.endm

+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
 	.macro	uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/*
 	 * Whenever we re-enter userspace, the domains should always be
 	 * set appropriately.
@@ -50,11 +51,9 @@
 	.if	\isb
 	instr_sync
 	.endif
-#endif
 	.endm

 	.macro	uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/*
 	 * Whenever we re-enter userspace, the domains should always be
 	 * set appropriately.
@@ -64,13 +63,59 @@
 	.if	\isb
 	instr_sync
 	.endif
-#endif
 	.endm

+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+	.macro	uaccess_disable, tmp, isb=1
+	/*
+	 * Disable TTBR0 page table walks (EDP0 = 1), use the reserved ASID
+	 * from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel
+	 * addresses by reducing TTBR0 range to 32MB (T0SZ = 7).
+	 */
+	mrc	p15, 0, \tmp, c2, c0, 2		@ read TTBCR
+	orr	\tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+	orr	\tmp, \tmp, #TTBCR_A1
+	mcr	p15, 0, \tmp, c2, c0, 2		@ write TTBCR
+	.if	\isb
+	instr_sync
+	.endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+	/*
+	 * Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
+	 * TTBR0 (A1 = 0).
+	 */
+	mrc	p15, 0, \tmp, c2, c0, 2		@ read TTBCR
+	bic	\tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+	bic	\tmp, \tmp, #TTBCR_A1
+	mcr	p15, 0, \tmp, c2, c0, 2		@ write TTBCR
+	.if	\isb
+	instr_sync
+	.endif
+	.endm
+
+#else
+
+	.macro	uaccess_disable, tmp, isb=1
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+	.endm
+
+#endif
+
 #if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
 #define DACR(x...)	x
 #else
 #define DACR(x...)
+#endif
+
+#ifdef CONFIG_CPU_TTBR0_PAN
+#define PAN(x...)	x
+#else
+#define PAN(x...)
 #endif

 /*
@@ -86,6 +131,8 @@
 	.macro	uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
 DACR(	mrc	p15, 0, \tmp0, c3, c0, 0)
 DACR(	str	\tmp0, [sp, #SVC_DACR])
+PAN(	mrc	p15, 0, \tmp0, c2, c0, 2)
+PAN(	str	\tmp0, [sp, #SVC_TTBCR])
 	.if	\disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
 	/* kernel=client, user=no access */
 	mov	\tmp2, #DACR_UACCESS_DISABLE
@@ -104,8 +151,11 @@
 	.macro	uaccess_exit, tsk, tmp0, tmp1
 DACR(	ldr	\tmp0, [sp, #SVC_DACR])
 DACR(	mcr	p15, 0, \tmp0, c3, c0, 0)
+PAN(	ldr	\tmp0, [sp, #SVC_TTBCR])
+PAN(	mcr	p15, 0, \tmp0, c2, c0, 2)
 	.endm

 #undef DACR
+#undef PAN

 #endif	/* __ASM_UACCESS_ASM_H__ */
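For context (not part of the commit): with LPAE, the region translated via TTBR0 spans 2^(32 - T0SZ) bytes, so the T0SZ = 7 written by uaccess_disable works out to

	2^(32 - 7) = 2^25 bytes = 32 MB

and together with EPD0 = 1 this stops TTBR0 (user-side) walks entirely while TTBR1 still covers kernel addresses.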
...
@@ -14,6 +14,8 @@
 #include <asm/domain.h>
 #include <asm/unaligned.h>
 #include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
 #include <asm/compiler.h>

 #include <asm/extable.h>
@@ -24,9 +26,10 @@
  * perform such accesses (eg, via list poison values) which could then
  * be exploited for priviledge escalation.
  */
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
 static __always_inline unsigned int uaccess_save_and_enable(void)
 {
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	unsigned int old_domain = get_domain();

 	/* Set the current domain access to permit user accesses */
@@ -34,19 +37,49 @@ static __always_inline unsigned int uaccess_save_and_enable(void)
 		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

 	return old_domain;
-#else
-	return 0;
-#endif
 }

 static __always_inline void uaccess_restore(unsigned int flags)
 {
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/* Restore the user access mask */
 	set_domain(flags);
-#endif
 }

+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+static __always_inline unsigned int uaccess_save_and_enable(void)
+{
+	unsigned int old_ttbcr = cpu_get_ttbcr();
+
+	/*
+	 * Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
+	 * TTBR0 (A1 = 0).
+	 */
+	cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
+	isb();
+
+	return old_ttbcr;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+	cpu_set_ttbcr(flags);
+	isb();
+}
+
+#else
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+	return 0;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+}
+
+#endif
+
 /*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
...
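For context (illustrative, not part of the commit): every flavour of uaccess_save_and_enable()/uaccess_restore() is used as a bracket around the actual user-space access, so PAN is lifted only for the shortest possible window. A hypothetical sketch of the calling pattern (real primitives perform the access in exception-table-annotated assembly):

	/* Hypothetical: the canonical PAN bracket around a user access. */
	static inline void demo_with_uaccess_allowed(void (*user_access)(void))
	{
		unsigned int flags = uaccess_save_and_enable();

		user_access();		/* user-space access is permitted here */
		uaccess_restore(flags);	/* PAN is re-engaged afterwards */
	}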
...
@@ -85,6 +85,7 @@ int main(void)
   DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
   DEFINE(PT_REGS_SIZE,		sizeof(struct pt_regs));
   DEFINE(SVC_DACR,		offsetof(struct svc_pt_regs, dacr));
+  DEFINE(SVC_TTBCR,		offsetof(struct svc_pt_regs, ttbcr));
   DEFINE(SVC_REGS_SIZE,	sizeof(struct svc_pt_regs));
   BLANK();
   DEFINE(SIGFRAME_RC3_OFFSET,	offsetof(struct sigframe, retcode[3]));
...
...
@@ -271,6 +271,10 @@ ENTRY(ftrace_stub)
 	ret	lr
 ENDPROC(ftrace_stub)

+ENTRY(ftrace_stub_graph)
+	ret	lr
+ENDPROC(ftrace_stub_graph)
+
 #ifdef CONFIG_DYNAMIC_FTRACE

 	__INIT
...
...
@@ -17,6 +17,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/smp.h>
+#include <linux/cfi.h>
 #include <linux/cpu_pm.h>
 #include <linux/coresight.h>
@@ -903,6 +904,37 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 		watchpoint_single_step_handler(addr);
 }

+#ifdef CONFIG_CFI_CLANG
+static void hw_breakpoint_cfi_handler(struct pt_regs *regs)
+{
+	/*
+	 * TODO: implementing target and type to pass to CFI using the more
+	 * elaborate report_cfi_failure() requires compiler work. To be able
+	 * to properly extract target information the compiler needs to
+	 * emit a stable instructions sequence for the CFI checks so we can
+	 * decode the instructions preceding the trap and figure out which
+	 * registers were used.
+	 */
+	switch (report_cfi_failure_noaddr(regs, instruction_pointer(regs))) {
+	case BUG_TRAP_TYPE_BUG:
+		die("Oops - CFI", regs, 0);
+		break;
+	case BUG_TRAP_TYPE_WARN:
+		/* Skip the breaking instruction */
+		instruction_pointer(regs) += 4;
+		break;
+	default:
+		die("Unknown CFI error", regs, 0);
+		break;
+	}
+}
+#else
+static void hw_breakpoint_cfi_handler(struct pt_regs *regs)
+{
+}
+#endif
+
 /*
  * Called from either the Data Abort Handler [watchpoint] or the
  * Prefetch Abort Handler [breakpoint] with interrupts disabled.
@@ -932,6 +964,9 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 	case ARM_ENTRY_SYNC_WATCHPOINT:
 		watchpoint_handler(addr, fsr, regs);
 		break;
+	case ARM_ENTRY_CFI_BREAKPOINT:
+		hw_breakpoint_cfi_handler(regs);
+		break;
 	default:
 		ret = 1; /* Unhandled fault. */
 	}
...
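For context (illustrative, not part of the commit): with KCFI, the compiler records a type hash for each address-taken function and checks it before every indirect call; a mismatch raises a trap that arrives here as debug entry 0x3 (ARM_ENTRY_CFI_BREAKPOINT), and the WARN path skips over the 4-byte ARM-mode trap instruction. Conceptually an instrumented call site looks like the hypothetical sketch below; the real check sequence is compiler-generated, never hand-written:

	typedef void (*cache_op_t)(void);

	void demo_indirect_call(cache_op_t fn)
	{
		/*
		 * Compiler-inserted (conceptual): load the hash stored beside
		 * fn's entry point, compare it against the hash expected for
		 * 'void (*)(void)', and execute a breakpoint on mismatch,
		 * which lands in hw_breakpoint_cfi_handler() above.
		 */
		fn();
	}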
...
@@ -12,6 +12,7 @@
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
+#include <asm/uaccess.h>

 extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
 extern void cpu_resume_mmu(void);
@@ -26,6 +27,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	if (!idmap_pgd)
 		return -EINVAL;

+	/*
+	 * Needed for the MMU disabling/enabing code to be able to run from
+	 * TTBR0 addresses.
+	 */
+	if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
+		uaccess_save_and_enable();
+
 	/*
 	 * Function graph tracer state gets incosistent when the kernel
 	 * calls functions that never return (aka suspend finishers) hence
...
...
@@ -13,7 +13,8 @@

 		.text

-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
 		.macro	save_regs
 		mrc	p15, 0, ip, c3, c0, 0
 		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
@@ -25,7 +26,23 @@
 		mcr	p15, 0, ip, c3, c0, 0
 		ret	lr
 		.endm

+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+		.macro	save_regs
+		mrc	p15, 0, ip, c2, c0, 2	@ read TTBCR
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c2, c0, 2	@ restore TTBCR
+		ret	lr
+		.endm
+
 #else

 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -33,6 +50,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+
 #endif

 		.macro	load1b,	reg1
...
...
@@ -5,6 +5,7 @@
 * Copyright (C) 1995, 1996 Russell King
 */
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/delay.h>
@@ -24,21 +25,26 @@
 * HZ <= 1000
 */

-ENTRY(__loop_udelay)
+SYM_TYPED_FUNC_START(__loop_udelay)
 		ldr	r2, .LC1
 		mul	r0, r2, r0	@ r0 = delay_us * UDELAY_MULT
-ENTRY(__loop_const_udelay)		@ 0 <= r0 <= 0xfffffaf0
+		b	__loop_const_udelay
+SYM_FUNC_END(__loop_udelay)
+
+SYM_TYPED_FUNC_START(__loop_const_udelay)	@ 0 <= r0 <= 0xfffffaf0
 		ldr	r2, .LC0
 		ldr	r2, [r2]
 		umull	r1, r0, r2, r0	@ r0-r1 = r0 * loops_per_jiffy
 		adds	r1, r1, #0xffffffff	@ rounding up ...
 		adcs	r0, r0, r0		@ and right shift by 31
 		reteq	lr
+		b	__loop_delay
+SYM_FUNC_END(__loop_const_udelay)

 		.align 3

 @ Delay routine
-ENTRY(__loop_delay)
+SYM_TYPED_FUNC_START(__loop_delay)
 		subs	r0, r0, #1
 #if 0
 		retls	lr
@@ -58,6 +64,4 @@ ENTRY(__loop_delay)
 #endif
 		bhi	__loop_delay
 		ret	lr
-ENDPROC(__loop_udelay)
-ENDPROC(__loop_const_udelay)
-ENDPROC(__loop_delay)
+SYM_FUNC_END(__loop_delay)
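For context (not part of the commit, and the layout below is a conceptual sketch): under CONFIG_CFI_CLANG each SYM_TYPED_FUNC_START() is preceded by a KCFI type-id word plus alignment, so the old ENTRY()-style fallthrough from one routine into the next would execute that data word. That is why each former fallthrough becomes an explicit branch such as the added "b __loop_const_udelay":

	@ Conceptual layout with CONFIG_CFI_CLANG (the value is illustrative):
	@	.long	0x1234abcd		@ KCFI type id for the next symbol
	@ __loop_const_udelay:
	@	...				@ falling through from the previous
	@					@ routine would hit the .long above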
...
@@ -45,6 +45,7 @@ obj-$(CONFIG_CPU_CACHE_V7)	+= cache-v7.o
 obj-$(CONFIG_CPU_CACHE_FA)	+= cache-fa.o
 obj-$(CONFIG_CPU_CACHE_NOP)	+= cache-nop.o
 obj-$(CONFIG_CPU_CACHE_V7M)	+= cache-v7m.o
+obj-y				+= cache.o

 obj-$(CONFIG_CPU_COPY_V4WT)	+= copypage-v4wt.o
 obj-$(CONFIG_CPU_COPY_V4WB)	+= copypage-v4wb.o
@@ -62,6 +63,7 @@ obj-$(CONFIG_CPU_TLB_FEROCEON)	+= tlb-v4wbi.o	# reuse v4wbi TLB functions
 obj-$(CONFIG_CPU_TLB_V6)	+= tlb-v6.o
 obj-$(CONFIG_CPU_TLB_V7)	+= tlb-v7.o
 obj-$(CONFIG_CPU_TLB_FA)	+= tlb-fa.o
+obj-y				+= tlb.o

 obj-$(CONFIG_CPU_ARM7TDMI)	+= proc-arm7tdmi.o
 obj-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o
@@ -88,6 +90,7 @@ obj-$(CONFIG_CPU_V6)		+= proc-v6.o
 obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o proc-v7-bugs.o
 obj-$(CONFIG_CPU_V7M)		+= proc-v7m.o
+obj-$(CONFIG_CFI_CLANG)		+= proc.o

 obj-$(CONFIG_OUTER_CACHE)	+= l2c-common.o
 obj-$(CONFIG_CACHE_B15_RAC)	+= cache-b15-rac.o
...
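For context (illustrative, not part of the commit): cache.o and tlb.o are new C translation units; moving the cpu_cache_fns vtables out of the define_cache_functions assembler macro (removed from each cache-*.S file below) into C lets every member point at a SYM_TYPED_FUNC_START-annotated routine that KCFI can type-check. A sketch of the shape such a vtable takes, using the member names of struct cpu_cache_fns from <asm/cacheflush.h> (the exact contents of the new file are assumed, not quoted from the commit):

	struct cpu_cache_fns fa_cache_fns __initconst = {
		.flush_icache_all	= fa_flush_icache_all,
		.flush_kern_all		= fa_flush_kern_cache_all,
		.flush_kern_louis	= fa_flush_kern_cache_all, /* no dedicated louis op */
		.flush_user_all		= fa_flush_user_cache_all,
		.flush_user_range	= fa_flush_user_cache_range,
		.coherent_kern_range	= fa_coherent_kern_range,
		.coherent_user_range	= fa_coherent_user_range,
		.flush_kern_dcache_area	= fa_flush_kern_dcache_area,
		.dma_map_area		= fa_dma_map_area,
		.dma_unmap_area		= fa_dma_unmap_area,
		.dma_flush_range	= fa_dma_flush_range,
	};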
...
@@ -5,6 +5,7 @@
 * Copyright (C) 2015-2016 Broadcom
 */

+#include <linux/cfi_types.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
...
...
@@ -12,6 +12,7 @@
 */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
@@ -39,11 +40,11 @@
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
-ENTRY(fa_flush_icache_all)
+SYM_TYPED_FUNC_START(fa_flush_icache_all)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	ret	lr
-ENDPROC(fa_flush_icache_all)
+SYM_FUNC_END(fa_flush_icache_all)

 /*
 *	flush_user_cache_all()
@@ -51,14 +52,14 @@
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
-ENTRY(fa_flush_user_cache_all)
-	/* FALLTHROUGH */
+SYM_FUNC_ALIAS(fa_flush_user_cache_all, fa_flush_kern_cache_all)
+
 /*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
-ENTRY(fa_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(fa_flush_kern_cache_all)
 	mov	ip, #0
 	mov	r2, #VM_EXEC
 __flush_whole_cache:
@@ -69,6 +70,7 @@ __flush_whole_cache:
 	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
 	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
 	ret	lr
+SYM_FUNC_END(fa_flush_kern_cache_all)

 /*
 *	flush_user_cache_range(start, end, flags)
@@ -80,7 +82,7 @@ __flush_whole_cache:
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
-ENTRY(fa_flush_user_cache_range)
+SYM_TYPED_FUNC_START(fa_flush_user_cache_range)
 	mov	ip, #0
 	sub	r3, r1, r0			@ calculate total size
 	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
@@ -97,6 +99,7 @@
 	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
 	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
 	ret	lr
+SYM_FUNC_END(fa_flush_user_cache_range)

 /*
 *	coherent_kern_range(start, end)
@@ -108,8 +111,11 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(fa_coherent_kern_range)
-	/* fall through */
+SYM_TYPED_FUNC_START(fa_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+	b	fa_coherent_user_range
+#endif
+SYM_FUNC_END(fa_coherent_kern_range)

 /*
 *	coherent_user_range(start, end)
@@ -121,7 +127,7 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(fa_coherent_user_range)
+SYM_TYPED_FUNC_START(fa_coherent_user_range)
 	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
 	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
@@ -133,6 +139,7 @@
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
 	ret	lr
+SYM_FUNC_END(fa_coherent_user_range)

 /*
 *	flush_kern_dcache_area(void *addr, size_t size)
@@ -143,7 +150,7 @@
 *	- addr	- kernel address
 *	- size	- size of region
 */
-ENTRY(fa_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(fa_flush_kern_dcache_area)
 	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
@@ -153,6 +160,7 @@
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	ret	lr
+SYM_FUNC_END(fa_flush_kern_dcache_area)

 /*
 *	dma_inv_range(start, end)
@@ -203,7 +211,7 @@ fa_dma_clean_range:
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
-ENTRY(fa_dma_flush_range)
+SYM_TYPED_FUNC_START(fa_dma_flush_range)
 	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
@@ -212,6 +220,7 @@
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	ret	lr
+SYM_FUNC_END(fa_dma_flush_range)

 /*
 *	dma_map_area(start, size, dir)
@@ -219,13 +228,13 @@
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(fa_dma_map_area)
+SYM_TYPED_FUNC_START(fa_dma_map_area)
 	add	r1, r1, r0
 	cmp	r2, #DMA_TO_DEVICE
 	beq	fa_dma_clean_range
 	bcs	fa_dma_inv_range
 	b	fa_dma_flush_range
-ENDPROC(fa_dma_map_area)
+SYM_FUNC_END(fa_dma_map_area)

 /*
 *	dma_unmap_area(start, size, dir)
@@ -233,14 +242,6 @@
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(fa_dma_unmap_area)
+SYM_TYPED_FUNC_START(fa_dma_unmap_area)
 	ret	lr
-ENDPROC(fa_dma_unmap_area)
-
-	.globl	fa_flush_kern_cache_louis
-	.equ	fa_flush_kern_cache_louis, fa_flush_kern_cache_all
-
-	__INITDATA
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions fa
+SYM_FUNC_END(fa_dma_unmap_area)
...
 /* SPDX-License-Identifier: GPL-2.0-only */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include "proc-macros.S"

-ENTRY(nop_flush_icache_all)
+/*
+ * These are all open-coded instead of aliased, to make clear
+ * what is going on here: all functions are stubbed out.
+ */
+SYM_TYPED_FUNC_START(nop_flush_icache_all)
 	ret	lr
-ENDPROC(nop_flush_icache_all)
+SYM_FUNC_END(nop_flush_icache_all)

-	.globl nop_flush_kern_cache_all
-	.equ nop_flush_kern_cache_all, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_kern_cache_all)
+	ret	lr
+SYM_FUNC_END(nop_flush_kern_cache_all)

-	.globl nop_flush_kern_cache_louis
-	.equ nop_flush_kern_cache_louis, nop_flush_icache_all
-
-	.globl nop_flush_user_cache_all
-	.equ nop_flush_user_cache_all, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_user_cache_all)
+	ret	lr
+SYM_FUNC_END(nop_flush_user_cache_all)

-	.globl nop_flush_user_cache_range
-	.equ nop_flush_user_cache_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_user_cache_range)
+	ret	lr
+SYM_FUNC_END(nop_flush_user_cache_range)

-	.globl nop_coherent_kern_range
-	.equ nop_coherent_kern_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_coherent_kern_range)
+	ret	lr
+SYM_FUNC_END(nop_coherent_kern_range)

-ENTRY(nop_coherent_user_range)
+SYM_TYPED_FUNC_START(nop_coherent_user_range)
 	mov	r0, 0
 	ret	lr
-ENDPROC(nop_coherent_user_range)
+SYM_FUNC_END(nop_coherent_user_range)

-	.globl nop_flush_kern_dcache_area
-	.equ nop_flush_kern_dcache_area, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_kern_dcache_area)
+	ret	lr
+SYM_FUNC_END(nop_flush_kern_dcache_area)

-	.globl nop_dma_flush_range
-	.equ nop_dma_flush_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_dma_flush_range)
+	ret	lr
+SYM_FUNC_END(nop_dma_flush_range)

-	.globl nop_dma_map_area
-	.equ nop_dma_map_area, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_dma_map_area)
+	ret	lr
+SYM_FUNC_END(nop_dma_map_area)

-	.globl nop_dma_unmap_area
-	.equ nop_dma_unmap_area, nop_flush_icache_all
-
-	__INITDATA
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions nop
+SYM_TYPED_FUNC_START(nop_dma_unmap_area)
+	ret	lr
+SYM_FUNC_END(nop_dma_unmap_area)
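For context (not part of the commit): the old .equ aliases made every nop_* symbol resolve to the same address, but KCFI hashes each function-pointer type separately. For example, the vtable slots int (*coherent_user_range)(unsigned long, unsigned long) and void (*flush_icache_all)(void) expect different type ids, so a single shared stub cannot satisfy both; open-coding one typed stub per operation gives each slot a target with the matching hash.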
...
@@ -6,6 +6,7 @@
 */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
 #include "proc-macros.S"
@@ -15,9 +16,9 @@
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
-ENTRY(v4_flush_icache_all)
+SYM_TYPED_FUNC_START(v4_flush_icache_all)
 	ret	lr
-ENDPROC(v4_flush_icache_all)
+SYM_FUNC_END(v4_flush_icache_all)

 /*
 *	flush_user_cache_all()
@@ -27,21 +28,22 @@
 *
 *	- mm	- mm_struct describing address space
 */
-ENTRY(v4_flush_user_cache_all)
-	/* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4_flush_user_cache_all, v4_flush_kern_cache_all)
+
 /*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
-ENTRY(v4_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4_flush_kern_cache_all)
 #ifdef CONFIG_CPU_CP15
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
 	ret	lr
 #else
-	/* FALLTHROUGH */
+	ret	lr
 #endif
+SYM_FUNC_END(v4_flush_kern_cache_all)

 /*
 *	flush_user_cache_range(start, end, flags)
@@ -53,14 +55,15 @@
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
-ENTRY(v4_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4_flush_user_cache_range)
 #ifdef CONFIG_CPU_CP15
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
 	ret	lr
 #else
-	/* FALLTHROUGH */
+	ret	lr
 #endif
+SYM_FUNC_END(v4_flush_user_cache_range)

 /*
 *	coherent_kern_range(start, end)
@@ -72,8 +75,9 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(v4_coherent_kern_range)
-	/* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_coherent_kern_range)
+	ret	lr
+SYM_FUNC_END(v4_coherent_kern_range)

 /*
 *	coherent_user_range(start, end)
@@ -85,9 +89,10 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(v4_coherent_user_range)
+SYM_TYPED_FUNC_START(v4_coherent_user_range)
 	mov	r0, #0
 	ret	lr
+SYM_FUNC_END(v4_coherent_user_range)

 /*
 *	flush_kern_dcache_area(void *addr, size_t size)
@@ -98,8 +103,11 @@
 *	- addr	- kernel address
 *	- size	- region size
 */
-ENTRY(v4_flush_kern_dcache_area)
-	/* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_flush_kern_dcache_area)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+	b	v4_dma_flush_range
+#endif
+SYM_FUNC_END(v4_flush_kern_dcache_area)

 /*
 *	dma_flush_range(start, end)
@@ -109,12 +117,13 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(v4_dma_flush_range)
+SYM_TYPED_FUNC_START(v4_dma_flush_range)
 #ifdef CONFIG_CPU_CP15
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
 #endif
 	ret	lr
+SYM_FUNC_END(v4_dma_flush_range)

 /*
 *	dma_unmap_area(start, size, dir)
@@ -122,10 +131,11 @@
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(v4_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4_dma_unmap_area)
 	teq	r2, #DMA_TO_DEVICE
 	bne	v4_dma_flush_range
-	/* FALLTHROUGH */
+	ret	lr
+SYM_FUNC_END(v4_dma_unmap_area)

 /*
 *	dma_map_area(start, size, dir)
@@ -133,15 +143,6 @@
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(v4_dma_map_area)
+SYM_TYPED_FUNC_START(v4_dma_map_area)
 	ret	lr
-ENDPROC(v4_dma_unmap_area)
-ENDPROC(v4_dma_map_area)
-
-	.globl	v4_flush_kern_cache_louis
-	.equ	v4_flush_kern_cache_louis, v4_flush_kern_cache_all
-
-	__INITDATA
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions v4
+SYM_FUNC_END(v4_dma_map_area)
...
@@ -6,6 +6,7 @@
 */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
 #include "proc-macros.S"
@@ -53,11 +54,11 @@ flush_base:
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
-ENTRY(v4wb_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_icache_all)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	ret	lr
-ENDPROC(v4wb_flush_icache_all)
+SYM_FUNC_END(v4wb_flush_icache_all)

 /*
 *	flush_user_cache_all()
@@ -65,14 +66,14 @@
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
-ENTRY(v4wb_flush_user_cache_all)
-	/* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4wb_flush_user_cache_all, v4wb_flush_kern_cache_all)
+
 /*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
-ENTRY(v4wb_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_cache_all)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
 __flush_whole_cache:
@@ -93,6 +94,7 @@ __flush_whole_cache:
 #endif
 	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
 	ret	lr
+SYM_FUNC_END(v4wb_flush_kern_cache_all)

 /*
 *	flush_user_cache_range(start, end, flags)
@@ -104,7 +106,7 @@ __flush_whole_cache:
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
-ENTRY(v4wb_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_cache_range)
 	mov	ip, #0
 	sub	r3, r1, r0			@ calculate total size
 	tst	r2, #VM_EXEC			@ executable region?
@@ -121,6 +123,7 @@
 	tst	r2, #VM_EXEC
 	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
 	ret	lr
+SYM_FUNC_END(v4wb_flush_user_cache_range)

 /*
 *	flush_kern_dcache_area(void *addr, size_t size)
@@ -131,9 +134,12 @@
 *	- addr	- kernel address
 *	- size	- region size
 */
-ENTRY(v4wb_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_dcache_area)
 	add	r1, r0, r1
-	/* fall through */
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+	b	v4wb_coherent_user_range
+#endif
+SYM_FUNC_END(v4wb_flush_kern_dcache_area)

 /*
 *	coherent_kern_range(start, end)
@@ -145,8 +151,11 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(v4wb_coherent_kern_range)
-	/* fall through */
+SYM_TYPED_FUNC_START(v4wb_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+	b	v4wb_coherent_user_range
+#endif
+SYM_FUNC_END(v4wb_coherent_kern_range)

 /*
 *	coherent_user_range(start, end)
@@ -158,7 +167,7 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(v4wb_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wb_coherent_user_range)
 	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
@@ -169,7 +178,7 @@
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	ret	lr
+SYM_FUNC_END(v4wb_coherent_user_range)

 /*
 *	dma_inv_range(start, end)
@@ -231,13 +240,13 @@ v4wb_dma_clean_range:
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(v4wb_dma_map_area)
+SYM_TYPED_FUNC_START(v4wb_dma_map_area)
 	add	r1, r1, r0
 	cmp	r2, #DMA_TO_DEVICE
 	beq	v4wb_dma_clean_range
 	bcs	v4wb_dma_inv_range
 	b	v4wb_dma_flush_range
-ENDPROC(v4wb_dma_map_area)
+SYM_FUNC_END(v4wb_dma_map_area)

 /*
 *	dma_unmap_area(start, size, dir)
@@ -245,14 +254,6 @@
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(v4wb_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
 	ret	lr
-ENDPROC(v4wb_dma_unmap_area)
-
-	.globl	v4wb_flush_kern_cache_louis
-	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
-
-	__INITDATA
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions v4wb
+SYM_FUNC_END(v4wb_dma_unmap_area)
...
@@ -10,6 +10,7 @@
 */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
 #include "proc-macros.S"
@@ -43,11 +44,11 @@
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
-ENTRY(v4wt_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	ret	lr
-ENDPROC(v4wt_flush_icache_all)
+SYM_FUNC_END(v4wt_flush_icache_all)

 /*
 *	flush_user_cache_all()
@@ -55,14 +56,14 @@
 *	Invalidate all cache entries in a particular address
 *	space.
 */
-ENTRY(v4wt_flush_user_cache_all)
-	/* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all)
+
 /*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
-ENTRY(v4wt_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
 	mov	r2, #VM_EXEC
 	mov	ip, #0
 __flush_whole_cache:
@@ -70,6 +71,7 @@ __flush_whole_cache:
 	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
 	ret	lr
+SYM_FUNC_END(v4wt_flush_kern_cache_all)

 /*
 *	flush_user_cache_range(start, end, flags)
@@ -81,7 +83,7 @@ __flush_whole_cache:
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
-ENTRY(v4wt_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
 	sub	r3, r1, r0			@ calculate total size
 	cmp	r3, #CACHE_DLIMIT
 	bhs	__flush_whole_cache
@@ -93,6 +95,7 @@
 	cmp	r0, r1
 	blo	1b
 	ret	lr
+SYM_FUNC_END(v4wt_flush_user_cache_range)

 /*
 *	coherent_kern_range(start, end)
@@ -104,8 +107,11 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(v4wt_coherent_kern_range)
-	/* FALLTRHOUGH */
+SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+	b	v4wt_coherent_user_range
+#endif
+SYM_FUNC_END(v4wt_coherent_kern_range)

 /*
 *	coherent_user_range(start, end)
@@ -117,7 +123,7 @@
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(v4wt_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
 	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
 	add	r0, r0, #CACHE_DLINESIZE
@@ -125,6 +131,7 @@
 	blo	1b
 	mov	r0, #0
 	ret	lr
+SYM_FUNC_END(v4wt_coherent_user_range)

 /*
 *	flush_kern_dcache_area(void *addr, size_t size)
@@ -135,11 +142,12 @@
 *	- addr	- kernel address
 *	- size	- region size
 */
-ENTRY(v4wt_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
 	mov	r2, #0
 	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
 	add	r1, r0, r1
-	/* fallthrough */
+	b	v4wt_dma_inv_range
+SYM_FUNC_END(v4wt_flush_kern_dcache_area)

 /*
 *	dma_inv_range(start, end)
@@ -167,9 +175,10 @@ v4wt_dma_inv_range:
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-	.globl	v4wt_dma_flush_range
-	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
+SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
+	b	v4wt_dma_inv_range
+SYM_FUNC_END(v4wt_dma_flush_range)

 /*
 *	dma_unmap_area(start, size, dir)
@@ -177,11 +186,12 @@ v4wt_dma_inv_range:
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(v4wt_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_TO_DEVICE
 	bne	v4wt_dma_inv_range
-	/* FALLTHROUGH */
+	ret	lr
+SYM_FUNC_END(v4wt_dma_unmap_area)

 /*
 *	dma_map_area(start, size, dir)
@@ -189,15 +199,6 @@
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(v4wt_dma_map_area)
+SYM_TYPED_FUNC_START(v4wt_dma_map_area)
 	ret	lr
-ENDPROC(v4wt_dma_unmap_area)
-ENDPROC(v4wt_dma_map_area)
-
-	.globl	v4wt_flush_kern_cache_louis
-	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
-
-	__INITDATA
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions v4wt
+SYM_FUNC_END(v4wt_dma_map_area)
...
@@ -8,6 +8,7 @@
 */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/unwind.h>
@@ -34,7 +35,7 @@
 *	r0 - set to 0
 *	r1 - corrupted
 */
-ENTRY(v6_flush_icache_all)
+SYM_TYPED_FUNC_START(v6_flush_icache_all)
 	mov	r0, #0
 #ifdef CONFIG_ARM_ERRATA_411920
 	mrs	r1, cpsr
@@ -51,7 +52,7 @@
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
 #endif
 	ret	lr
-ENDPROC(v6_flush_icache_all)
+SYM_FUNC_END(v6_flush_icache_all)

 /*
 *	v6_flush_cache_all()
@@ -60,7 +61,7 @@
 *
 *	It is assumed that:
 */
-ENTRY(v6_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v6_flush_kern_cache_all)
 	mov	r0, #0
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
@@ -73,6 +74,7 @@
 	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
 #endif
 	ret	lr
+SYM_FUNC_END(v6_flush_kern_cache_all)

 /*
 *	v6_flush_cache_all()
@@ -81,8 +83,9 @@
 *
 *	- mm	- mm_struct describing address space
 */
-ENTRY(v6_flush_user_cache_all)
-	/*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v6_flush_user_cache_all)
+	ret	lr
+SYM_FUNC_END(v6_flush_user_cache_all)

 /*
 *	v6_flush_cache_range(start, end, flags)
@@ -96,8 +99,9 @@
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
-ENTRY(v6_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v6_flush_user_cache_range)
 	ret	lr
+SYM_FUNC_END(v6_flush_user_cache_range)

 /*
 *	v6_coherent_kern_range(start,end)
@@ -112,8 +116,11 @@
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
-ENTRY(v6_coherent_kern_range)
-	/* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v6_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+	b	v6_coherent_user_range
+#endif
+SYM_FUNC_END(v6_coherent_kern_range)

 /*
 *	v6_coherent_user_range(start,end)
@@ -128,7 +135,7 @@
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
-ENTRY(v6_coherent_user_range)
+SYM_TYPED_FUNC_START(v6_coherent_user_range)
 UNWIND(.fnstart		)
 #ifdef HARVARD_CACHE
 	bic	r0, r0, #CACHE_LINE_SIZE - 1
@@ -159,8 +166,7 @@
 	mov	r0, #-EFAULT
 	ret	lr
 UNWIND(.fnend		)
-ENDPROC(v6_coherent_user_range)
-ENDPROC(v6_coherent_kern_range)
+SYM_FUNC_END(v6_coherent_user_range)

 /*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
@@ -171,7 +177,7 @@
 *	- addr	- kernel address
 *	- size	- region size
 */
-ENTRY(v6_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v6_flush_kern_dcache_area)
 	add	r1, r0, r1
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
@@ -188,7 +194,7 @@
 	mcr	p15, 0, r0, c7, c10, 4
 #endif
 	ret	lr
+SYM_FUNC_END(v6_flush_kern_dcache_area)

 /*
 *	v6_dma_inv_range(start,end)
@@ -253,7 +259,7 @@ v6_dma_clean_range:
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
-ENTRY(v6_dma_flush_range)
+SYM_TYPED_FUNC_START(v6_dma_flush_range)
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
 #ifdef HARVARD_CACHE
@@ -267,6 +273,7 @@
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	ret	lr
+SYM_FUNC_END(v6_dma_flush_range)

 /*
 *	dma_map_area(start, size, dir)
@@ -274,12 +281,12 @@
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(v6_dma_map_area)
+SYM_TYPED_FUNC_START(v6_dma_map_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_FROM_DEVICE
 	beq	v6_dma_inv_range
 	b	v6_dma_clean_range
-ENDPROC(v6_dma_map_area)
+SYM_FUNC_END(v6_dma_map_area)

 /*
 *	dma_unmap_area(start, size, dir)
@@ -287,17 +294,9 @@
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(v6_dma_unmap_area)
+SYM_TYPED_FUNC_START(v6_dma_unmap_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_TO_DEVICE
 	bne	v6_dma_inv_range
 	ret	lr
-ENDPROC(v6_dma_unmap_area)
-
-	.globl	v6_flush_kern_cache_louis
-	.equ	v6_flush_kern_cache_louis, v6_flush_kern_cache_all
-
-	__INITDATA
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions v6
+SYM_FUNC_END(v6_dma_unmap_area)
...
@@ -9,6 +9,7 @@
 */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/unwind.h>
@@ -80,12 +81,12 @@ ENDPROC(v7_invalidate_l1)
 * Registers:
 *	r0 - set to 0
 */
-ENTRY(v7_flush_icache_all)
+SYM_TYPED_FUNC_START(v7_flush_icache_all)
 	mov	r0, #0
 	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 	ret	lr
-ENDPROC(v7_flush_icache_all)
+SYM_FUNC_END(v7_flush_icache_all)

 /*
 *	v7_flush_dcache_louis()
@@ -193,7 +194,7 @@ ENDPROC(v7_flush_dcache_all)
 *	unification in a single instruction.
 *
 */
-ENTRY(v7_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v7_flush_kern_cache_all)
 	stmfd	sp!, {r4-r6, r9-r10, lr}
 	bl	v7_flush_dcache_all
 	mov	r0, #0
@@ -201,7 +202,7 @@
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 	ldmfd	sp!, {r4-r6, r9-r10, lr}
 	ret	lr
-ENDPROC(v7_flush_kern_cache_all)
+SYM_FUNC_END(v7_flush_kern_cache_all)

 /*
 *	v7_flush_kern_cache_louis(void)
@@ -209,7 +210,7 @@
 *	Flush the data cache up to Level of Unification Inner Shareable.
 *	Invalidate the I-cache to the point of unification.
 */
-ENTRY(v7_flush_kern_cache_louis)
+SYM_TYPED_FUNC_START(v7_flush_kern_cache_louis)
 	stmfd	sp!, {r4-r6, r9-r10, lr}
 	bl	v7_flush_dcache_louis
 	mov	r0, #0
@@ -217,7 +218,7 @@
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 	ldmfd	sp!, {r4-r6, r9-r10, lr}
 	ret	lr
-ENDPROC(v7_flush_kern_cache_louis)
+SYM_FUNC_END(v7_flush_kern_cache_louis)

 /*
 *	v7_flush_cache_all()
@@ -226,8 +227,9 @@
 *
 *	- mm	- mm_struct describing address space
 */
-ENTRY(v7_flush_user_cache_all)
-	/*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v7_flush_user_cache_all)
+	ret	lr
+SYM_FUNC_END(v7_flush_user_cache_all)

 /*
 *	v7_flush_cache_range(start, end, flags)
@@ -241,10 +243,9 @@
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
-ENTRY(v7_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v7_flush_user_cache_range)
 	ret	lr
-ENDPROC(v7_flush_user_cache_all)
-ENDPROC(v7_flush_user_cache_range)
+SYM_FUNC_END(v7_flush_user_cache_range)

 /*
 *	v7_coherent_kern_range(start,end)
...@@ -259,8 +260,11 @@ ENDPROC(v7_flush_user_cache_range) ...@@ -259,8 +260,11 @@ ENDPROC(v7_flush_user_cache_range)
* It is assumed that: * It is assumed that:
* - the Icache does not read data from the write buffer * - the Icache does not read data from the write buffer
*/ */
ENTRY(v7_coherent_kern_range) SYM_TYPED_FUNC_START(v7_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b v7_coherent_user_range
#endif
SYM_FUNC_END(v7_coherent_kern_range)
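
The explicit branch replaces the old fallthrough because, with CONFIG_CFI_CLANG, SYM_TYPED_FUNC_START emits a type-hash word immediately in front of each symbol, so execution can no longer be allowed to run off the end of one function into the next. Conceptually (a simplified sketch, not the exact macro expansion):

	.word	__kcfi_typeid_v7_coherent_user_range	@ hash checked by indirect callers
v7_coherent_user_range:
	@ falling into this label from the code above would execute the hash word

When CFI is disabled, no hash is emitted and the two symbols remain adjacent, so the branch is compiled out and the original fallthrough behaviour is preserved.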
/* /*
* v7_coherent_user_range(start,end) * v7_coherent_user_range(start,end)
...@@ -275,7 +279,7 @@ ENTRY(v7_coherent_kern_range) ...@@ -275,7 +279,7 @@ ENTRY(v7_coherent_kern_range)
* It is assumed that: * It is assumed that:
* - the Icache does not read data from the write buffer * - the Icache does not read data from the write buffer
*/ */
ENTRY(v7_coherent_user_range) SYM_TYPED_FUNC_START(v7_coherent_user_range)
UNWIND(.fnstart ) UNWIND(.fnstart )
dcache_line_size r2, r3 dcache_line_size r2, r3
sub r3, r2, #1 sub r3, r2, #1
...@@ -321,8 +325,7 @@ ENTRY(v7_coherent_user_range) ...@@ -321,8 +325,7 @@ ENTRY(v7_coherent_user_range)
mov r0, #-EFAULT mov r0, #-EFAULT
ret lr ret lr
UNWIND(.fnend ) UNWIND(.fnend )
ENDPROC(v7_coherent_kern_range) SYM_FUNC_END(v7_coherent_user_range)
ENDPROC(v7_coherent_user_range)
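
These coherency routines are what make freshly written instructions visible to the I-cache. A typical use, via the generic wrapper (a sketch; publish_code is a hypothetical helper, and on ARM flush_icache_range() resolves to the coherent_kern_range entry):

	/* hypothetical helper: publish code patched at [addr, addr + len) */
	static void publish_code(void *addr, size_t len)
	{
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + len);
	}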
/* /*
* v7_flush_kern_dcache_area(void *addr, size_t size) * v7_flush_kern_dcache_area(void *addr, size_t size)
...@@ -333,7 +336,7 @@ ENDPROC(v7_coherent_user_range) ...@@ -333,7 +336,7 @@ ENDPROC(v7_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(v7_flush_kern_dcache_area) SYM_TYPED_FUNC_START(v7_flush_kern_dcache_area)
dcache_line_size r2, r3 dcache_line_size r2, r3
add r1, r0, r1 add r1, r0, r1
sub r3, r2, #1 sub r3, r2, #1
...@@ -349,7 +352,7 @@ ENTRY(v7_flush_kern_dcache_area) ...@@ -349,7 +352,7 @@ ENTRY(v7_flush_kern_dcache_area)
blo 1b blo 1b
dsb st dsb st
ret lr ret lr
ENDPROC(v7_flush_kern_dcache_area) SYM_FUNC_END(v7_flush_kern_dcache_area)
/* /*
* v7_dma_inv_range(start,end) * v7_dma_inv_range(start,end)
...@@ -413,7 +416,7 @@ ENDPROC(v7_dma_clean_range) ...@@ -413,7 +416,7 @@ ENDPROC(v7_dma_clean_range)
* - start - virtual start address of region * - start - virtual start address of region
* - end - virtual end address of region * - end - virtual end address of region
*/ */
ENTRY(v7_dma_flush_range) SYM_TYPED_FUNC_START(v7_dma_flush_range)
dcache_line_size r2, r3 dcache_line_size r2, r3
sub r3, r2, #1 sub r3, r2, #1
bic r0, r0, r3 bic r0, r0, r3
...@@ -428,7 +431,7 @@ ENTRY(v7_dma_flush_range) ...@@ -428,7 +431,7 @@ ENTRY(v7_dma_flush_range)
blo 1b blo 1b
dsb st dsb st
ret lr ret lr
ENDPROC(v7_dma_flush_range) SYM_FUNC_END(v7_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -436,12 +439,12 @@ ENDPROC(v7_dma_flush_range) ...@@ -436,12 +439,12 @@ ENDPROC(v7_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(v7_dma_map_area) SYM_TYPED_FUNC_START(v7_dma_map_area)
add r1, r1, r0 add r1, r1, r0
teq r2, #DMA_FROM_DEVICE teq r2, #DMA_FROM_DEVICE
beq v7_dma_inv_range beq v7_dma_inv_range
b v7_dma_clean_range b v7_dma_clean_range
ENDPROC(v7_dma_map_area) SYM_FUNC_END(v7_dma_map_area)
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -449,34 +452,9 @@ ENDPROC(v7_dma_map_area) ...@@ -449,34 +452,9 @@ ENDPROC(v7_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(v7_dma_unmap_area) SYM_TYPED_FUNC_START(v7_dma_unmap_area)
add r1, r1, r0 add r1, r1, r0
teq r2, #DMA_TO_DEVICE teq r2, #DMA_TO_DEVICE
bne v7_dma_inv_range bne v7_dma_inv_range
ret lr ret lr
ENDPROC(v7_dma_unmap_area) SYM_FUNC_END(v7_dma_unmap_area)
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v7
/* The Broadcom Brahma-B15 read-ahead cache requires some modifications
* to the v7_cache_fns, we only override the ones we need
*/
#ifndef CONFIG_CACHE_B15_RAC
globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all
#endif
globl_equ b15_flush_icache_all, v7_flush_icache_all
globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis
globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all
globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range
globl_equ b15_coherent_kern_range, v7_coherent_kern_range
globl_equ b15_coherent_user_range, v7_coherent_user_range
globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area
globl_equ b15_dma_map_area, v7_dma_map_area
globl_equ b15_dma_unmap_area, v7_dma_unmap_area
globl_equ b15_dma_flush_range, v7_dma_flush_range
define_cache_functions b15
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/unwind.h> #include <asm/unwind.h>
...@@ -159,10 +160,10 @@ ENDPROC(v7m_invalidate_l1) ...@@ -159,10 +160,10 @@ ENDPROC(v7m_invalidate_l1)
* Registers: * Registers:
* r0 - set to 0 * r0 - set to 0
*/ */
ENTRY(v7m_flush_icache_all) SYM_TYPED_FUNC_START(v7m_flush_icache_all)
invalidate_icache r0 invalidate_icache r0
ret lr ret lr
ENDPROC(v7m_flush_icache_all) SYM_FUNC_END(v7m_flush_icache_all)
/* /*
* v7m_flush_dcache_all() * v7m_flush_dcache_all()
...@@ -236,13 +237,13 @@ ENDPROC(v7m_flush_dcache_all) ...@@ -236,13 +237,13 @@ ENDPROC(v7m_flush_dcache_all)
* unification in a single instruction. * unification in a single instruction.
* *
*/ */
ENTRY(v7m_flush_kern_cache_all) SYM_TYPED_FUNC_START(v7m_flush_kern_cache_all)
stmfd sp!, {r4-r7, r9-r11, lr} stmfd sp!, {r4-r7, r9-r11, lr}
bl v7m_flush_dcache_all bl v7m_flush_dcache_all
invalidate_icache r0 invalidate_icache r0
ldmfd sp!, {r4-r7, r9-r11, lr} ldmfd sp!, {r4-r7, r9-r11, lr}
ret lr ret lr
ENDPROC(v7m_flush_kern_cache_all) SYM_FUNC_END(v7m_flush_kern_cache_all)
/* /*
* v7m_flush_cache_all() * v7m_flush_cache_all()
...@@ -251,8 +252,9 @@ ENDPROC(v7m_flush_kern_cache_all) ...@@ -251,8 +252,9 @@ ENDPROC(v7m_flush_kern_cache_all)
* *
* - mm - mm_struct describing address space * - mm - mm_struct describing address space
*/ */
ENTRY(v7m_flush_user_cache_all) SYM_TYPED_FUNC_START(v7m_flush_user_cache_all)
/*FALLTHROUGH*/ ret lr
SYM_FUNC_END(v7m_flush_user_cache_all)
/* /*
* v7m_flush_cache_range(start, end, flags) * v7m_flush_cache_range(start, end, flags)
...@@ -266,10 +268,9 @@ ENTRY(v7m_flush_user_cache_all) ...@@ -266,10 +268,9 @@ ENTRY(v7m_flush_user_cache_all)
* It is assumed that: * It is assumed that:
* - we have a VIPT cache. * - we have a VIPT cache.
*/ */
ENTRY(v7m_flush_user_cache_range) SYM_TYPED_FUNC_START(v7m_flush_user_cache_range)
ret lr ret lr
ENDPROC(v7m_flush_user_cache_all) SYM_FUNC_END(v7m_flush_user_cache_range)
ENDPROC(v7m_flush_user_cache_range)
/* /*
* v7m_coherent_kern_range(start,end) * v7m_coherent_kern_range(start,end)
...@@ -284,8 +285,11 @@ ENDPROC(v7m_flush_user_cache_range) ...@@ -284,8 +285,11 @@ ENDPROC(v7m_flush_user_cache_range)
* It is assumed that: * It is assumed that:
* - the Icache does not read data from the write buffer * - the Icache does not read data from the write buffer
*/ */
ENTRY(v7m_coherent_kern_range) SYM_TYPED_FUNC_START(v7m_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */

b v7m_coherent_user_range
#endif
SYM_FUNC_END(v7m_coherent_kern_range)
/* /*
* v7m_coherent_user_range(start,end) * v7m_coherent_user_range(start,end)
...@@ -300,7 +304,7 @@ ENTRY(v7m_coherent_kern_range) ...@@ -300,7 +304,7 @@ ENTRY(v7m_coherent_kern_range)
* It is assumed that: * It is assumed that:
* - the Icache does not read data from the write buffer * - the Icache does not read data from the write buffer
*/ */
ENTRY(v7m_coherent_user_range) SYM_TYPED_FUNC_START(v7m_coherent_user_range)
UNWIND(.fnstart ) UNWIND(.fnstart )
dcache_line_size r2, r3 dcache_line_size r2, r3
sub r3, r2, #1 sub r3, r2, #1
...@@ -328,8 +332,7 @@ ENTRY(v7m_coherent_user_range) ...@@ -328,8 +332,7 @@ ENTRY(v7m_coherent_user_range)
isb isb
ret lr ret lr
UNWIND(.fnend ) UNWIND(.fnend )
ENDPROC(v7m_coherent_kern_range) SYM_FUNC_END(v7m_coherent_user_range)
ENDPROC(v7m_coherent_user_range)
/* /*
* v7m_flush_kern_dcache_area(void *addr, size_t size) * v7m_flush_kern_dcache_area(void *addr, size_t size)
...@@ -340,7 +343,7 @@ ENDPROC(v7m_coherent_user_range) ...@@ -340,7 +343,7 @@ ENDPROC(v7m_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(v7m_flush_kern_dcache_area) SYM_TYPED_FUNC_START(v7m_flush_kern_dcache_area)
dcache_line_size r2, r3 dcache_line_size r2, r3
add r1, r0, r1 add r1, r0, r1
sub r3, r2, #1 sub r3, r2, #1
...@@ -352,7 +355,7 @@ ENTRY(v7m_flush_kern_dcache_area) ...@@ -352,7 +355,7 @@ ENTRY(v7m_flush_kern_dcache_area)
blo 1b blo 1b
dsb st dsb st
ret lr ret lr
ENDPROC(v7m_flush_kern_dcache_area) SYM_FUNC_END(v7m_flush_kern_dcache_area)
/* /*
* v7m_dma_inv_range(start,end) * v7m_dma_inv_range(start,end)
...@@ -408,7 +411,7 @@ ENDPROC(v7m_dma_clean_range) ...@@ -408,7 +411,7 @@ ENDPROC(v7m_dma_clean_range)
* - start - virtual start address of region * - start - virtual start address of region
* - end - virtual end address of region * - end - virtual end address of region
*/ */
ENTRY(v7m_dma_flush_range) SYM_TYPED_FUNC_START(v7m_dma_flush_range)
dcache_line_size r2, r3 dcache_line_size r2, r3
sub r3, r2, #1 sub r3, r2, #1
bic r0, r0, r3 bic r0, r0, r3
...@@ -419,7 +422,7 @@ ENTRY(v7m_dma_flush_range) ...@@ -419,7 +422,7 @@ ENTRY(v7m_dma_flush_range)
blo 1b blo 1b
dsb st dsb st
ret lr ret lr
ENDPROC(v7m_dma_flush_range) SYM_FUNC_END(v7m_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -427,12 +430,12 @@ ENDPROC(v7m_dma_flush_range) ...@@ -427,12 +430,12 @@ ENDPROC(v7m_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(v7m_dma_map_area) SYM_TYPED_FUNC_START(v7m_dma_map_area)
add r1, r1, r0 add r1, r1, r0
teq r2, #DMA_FROM_DEVICE teq r2, #DMA_FROM_DEVICE
beq v7m_dma_inv_range beq v7m_dma_inv_range
b v7m_dma_clean_range b v7m_dma_clean_range
ENDPROC(v7m_dma_map_area) SYM_FUNC_END(v7m_dma_map_area)
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -440,17 +443,9 @@ ENDPROC(v7m_dma_map_area) ...@@ -440,17 +443,9 @@ ENDPROC(v7m_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(v7m_dma_unmap_area) SYM_TYPED_FUNC_START(v7m_dma_unmap_area)
add r1, r1, r0 add r1, r1, r0
teq r2, #DMA_TO_DEVICE teq r2, #DMA_TO_DEVICE
bne v7m_dma_inv_range bne v7m_dma_inv_range
ret lr ret lr
ENDPROC(v7m_dma_unmap_area) SYM_FUNC_END(v7m_dma_unmap_area)
.globl v7m_flush_kern_cache_louis
.equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v7m
...@@ -242,6 +242,27 @@ static inline bool is_permission_fault(unsigned int fsr) ...@@ -242,6 +242,27 @@ static inline bool is_permission_fault(unsigned int fsr)
return false; return false;
} }
#ifdef CONFIG_CPU_TTBR0_PAN
static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
{
struct svc_pt_regs *svcregs;
/* If we are in user mode: permission granted */
if (user_mode(regs))
return true;
/* uaccess state saved above pt_regs on SVC exception entry */
svcregs = to_svc_pt_regs(regs);
return !(svcregs->ttbcr & TTBCR_EPD0);
}
#else
static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
{
return true;
}
#endif
static int __kprobes static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{ {
...@@ -285,6 +306,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) ...@@ -285,6 +306,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
/*
* Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are
* routed via the translation fault mechanism. Check whether uaccess
* is disabled while in kernel mode.
*/
if (!ttbr0_usermode_access_allowed(regs))
goto no_context;
if (!(flags & FAULT_FLAG_USER)) if (!(flags & FAULT_FLAG_USER))
goto lock_mmap; goto lock_mmap;
......
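
For reference, the uaccess state tested above is toggled by setting and clearing TTBCR.EPD0 around the user accessors. A minimal C sketch of the idea (assumptions: hypothetical helper name, interrupts already disabled; the real implementation is assembly in asm/uaccess-asm.h):

	static inline void uaccess_disable_ttbr0(void)	/* hypothetical name */
	{
		unsigned int ttbcr;

		asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
		/* set EPD0: TTBR0 walks now fault, kernel cannot touch user memory */
		asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr | TTBCR_EPD0));
		isb();
	}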
...@@ -1687,9 +1687,8 @@ static void __init early_paging_init(const struct machine_desc *mdesc) ...@@ -1687,9 +1687,8 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
*/ */
cr = get_cr(); cr = get_cr();
set_cr(cr & ~(CR_I | CR_C)); set_cr(cr & ~(CR_I | CR_C));
asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr)); ttbcr = cpu_get_ttbcr();
asm volatile("mcr p15, 0, %0, c2, c0, 2" cpu_set_ttbcr(ttbcr & ~(3 << 8 | 3 << 10));
: : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
flush_cache_all(); flush_cache_all();
/* /*
...@@ -1701,7 +1700,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc) ...@@ -1701,7 +1700,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
lpae_pgtables_remap(offset, pa_pgd); lpae_pgtables_remap(offset, pa_pgd);
/* Re-enable the caches and cacheable TLB walks */ /* Re-enable the caches and cacheable TLB walks */
asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr)); cpu_set_ttbcr(ttbcr);
set_cr(cr); set_cr(cr);
} }
......
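
cpu_get_ttbcr() and cpu_set_ttbcr() replace the open-coded CP15 accesses. Plausible definitions, inferred directly from the inline asm they replace (a sketch):

	static inline unsigned int cpu_get_ttbcr(void)
	{
		unsigned int ttbcr;

		asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
		return ttbcr;
	}

	static inline void cpu_set_ttbcr(unsigned int ttbcr)
	{
		asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
	}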
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -56,18 +57,20 @@ ...@@ -56,18 +57,20 @@
/* /*
* cpu_arm1020_proc_init() * cpu_arm1020_proc_init()
*/ */
ENTRY(cpu_arm1020_proc_init) SYM_TYPED_FUNC_START(cpu_arm1020_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm1020_proc_init)
/* /*
* cpu_arm1020_proc_fin() * cpu_arm1020_proc_fin()
*/ */
ENTRY(cpu_arm1020_proc_fin) SYM_TYPED_FUNC_START(cpu_arm1020_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm1020_proc_fin)
/* /*
* cpu_arm1020_reset(loc) * cpu_arm1020_reset(loc)
...@@ -80,7 +83,7 @@ ENTRY(cpu_arm1020_proc_fin) ...@@ -80,7 +83,7 @@ ENTRY(cpu_arm1020_proc_fin)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020_reset) SYM_TYPED_FUNC_START(cpu_arm1020_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
...@@ -92,16 +95,17 @@ ENTRY(cpu_arm1020_reset) ...@@ -92,16 +95,17 @@ ENTRY(cpu_arm1020_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm1020_reset) SYM_FUNC_END(cpu_arm1020_reset)
.popsection .popsection
/* /*
* cpu_arm1020_do_idle() * cpu_arm1020_do_idle()
*/ */
.align 5 .align 5
ENTRY(cpu_arm1020_do_idle) SYM_TYPED_FUNC_START(cpu_arm1020_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr ret lr
SYM_FUNC_END(cpu_arm1020_do_idle)
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
...@@ -112,13 +116,13 @@ ENTRY(cpu_arm1020_do_idle) ...@@ -112,13 +116,13 @@ ENTRY(cpu_arm1020_do_idle)
* *
* Unconditionally clean and invalidate the entire icache. * Unconditionally clean and invalidate the entire icache.
*/ */
ENTRY(arm1020_flush_icache_all) SYM_TYPED_FUNC_START(arm1020_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE #ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif #endif
ret lr ret lr
ENDPROC(arm1020_flush_icache_all) SYM_FUNC_END(arm1020_flush_icache_all)
/* /*
* flush_user_cache_all() * flush_user_cache_all()
...@@ -126,14 +130,14 @@ ENDPROC(arm1020_flush_icache_all) ...@@ -126,14 +130,14 @@ ENDPROC(arm1020_flush_icache_all)
* Invalidate all cache entries in a particular address * Invalidate all cache entries in a particular address
* space. * space.
*/ */
ENTRY(arm1020_flush_user_cache_all) SYM_FUNC_ALIAS(arm1020_flush_user_cache_all, arm1020_flush_kern_cache_all)
/* FALLTHROUGH */
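
Where two entry points shared one body by fallthrough and take identical argument lists, the conversion uses SYM_FUNC_ALIAS rather than a CFI-guarded branch: both names resolve to a single typed entry with one type hash. In C terms (an analogy only, not the macro's expansion):

	/* arm1020_flush_kern_cache_all is defined (in assembly) below */
	void arm1020_flush_user_cache_all(void)
		__attribute__((alias("arm1020_flush_kern_cache_all")));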
/* /*
* flush_kern_cache_all() * flush_kern_cache_all()
* *
* Clean and invalidate the entire cache. * Clean and invalidate the entire cache.
*/ */
ENTRY(arm1020_flush_kern_cache_all) SYM_TYPED_FUNC_START(arm1020_flush_kern_cache_all)
mov r2, #VM_EXEC mov r2, #VM_EXEC
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
...@@ -154,6 +158,7 @@ __flush_whole_cache: ...@@ -154,6 +158,7 @@ __flush_whole_cache:
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1020_flush_kern_cache_all)
/* /*
* flush_user_cache_range(start, end, flags) * flush_user_cache_range(start, end, flags)
...@@ -165,7 +170,7 @@ __flush_whole_cache: ...@@ -165,7 +170,7 @@ __flush_whole_cache:
* - end - end address (exclusive) * - end - end address (exclusive)
* - flags - vm_flags for this space * - flags - vm_flags for this space
*/ */
ENTRY(arm1020_flush_user_cache_range) SYM_TYPED_FUNC_START(arm1020_flush_user_cache_range)
mov ip, #0 mov ip, #0
sub r3, r1, r0 @ calculate total size sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT cmp r3, #CACHE_DLIMIT
...@@ -185,6 +190,7 @@ ENTRY(arm1020_flush_user_cache_range) ...@@ -185,6 +190,7 @@ ENTRY(arm1020_flush_user_cache_range)
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1020_flush_user_cache_range)
/* /*
* coherent_kern_range(start, end) * coherent_kern_range(start, end)
...@@ -196,8 +202,11 @@ ENTRY(arm1020_flush_user_cache_range) ...@@ -196,8 +202,11 @@ ENTRY(arm1020_flush_user_cache_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1020_coherent_kern_range) SYM_TYPED_FUNC_START(arm1020_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm1020_coherent_user_range
#endif
SYM_FUNC_END(arm1020_coherent_kern_range)
/* /*
* coherent_user_range(start, end) * coherent_user_range(start, end)
...@@ -209,7 +218,7 @@ ENTRY(arm1020_coherent_kern_range) ...@@ -209,7 +218,7 @@ ENTRY(arm1020_coherent_kern_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1020_coherent_user_range) SYM_TYPED_FUNC_START(arm1020_coherent_user_range)
mov ip, #0 mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
mcr p15, 0, ip, c7, c10, 4 mcr p15, 0, ip, c7, c10, 4
...@@ -227,6 +236,7 @@ ENTRY(arm1020_coherent_user_range) ...@@ -227,6 +236,7 @@ ENTRY(arm1020_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0 mov r0, #0
ret lr ret lr
SYM_FUNC_END(arm1020_coherent_user_range)
/* /*
* flush_kern_dcache_area(void *addr, size_t size) * flush_kern_dcache_area(void *addr, size_t size)
...@@ -237,7 +247,7 @@ ENTRY(arm1020_coherent_user_range) ...@@ -237,7 +247,7 @@ ENTRY(arm1020_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(arm1020_flush_kern_dcache_area) SYM_TYPED_FUNC_START(arm1020_flush_kern_dcache_area)
mov ip, #0 mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1 add r1, r0, r1
...@@ -249,6 +259,7 @@ ENTRY(arm1020_flush_kern_dcache_area) ...@@ -249,6 +259,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1020_flush_kern_dcache_area)
/* /*
* dma_inv_range(start, end) * dma_inv_range(start, end)
...@@ -314,7 +325,7 @@ arm1020_dma_clean_range: ...@@ -314,7 +325,7 @@ arm1020_dma_clean_range:
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1020_dma_flush_range) SYM_TYPED_FUNC_START(arm1020_dma_flush_range)
mov ip, #0 mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
...@@ -327,6 +338,7 @@ ENTRY(arm1020_dma_flush_range) ...@@ -327,6 +338,7 @@ ENTRY(arm1020_dma_flush_range)
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1020_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -334,13 +346,13 @@ ENTRY(arm1020_dma_flush_range) ...@@ -334,13 +346,13 @@ ENTRY(arm1020_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm1020_dma_map_area) SYM_TYPED_FUNC_START(arm1020_dma_map_area)
add r1, r1, r0 add r1, r1, r0
cmp r2, #DMA_TO_DEVICE cmp r2, #DMA_TO_DEVICE
beq arm1020_dma_clean_range beq arm1020_dma_clean_range
bcs arm1020_dma_inv_range bcs arm1020_dma_inv_range
b arm1020_dma_flush_range b arm1020_dma_flush_range
ENDPROC(arm1020_dma_map_area) SYM_FUNC_END(arm1020_dma_map_area)
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -348,18 +360,12 @@ ENDPROC(arm1020_dma_map_area) ...@@ -348,18 +360,12 @@ ENDPROC(arm1020_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm1020_dma_unmap_area) SYM_TYPED_FUNC_START(arm1020_dma_unmap_area)
ret lr ret lr
ENDPROC(arm1020_dma_unmap_area) SYM_FUNC_END(arm1020_dma_unmap_area)
.globl arm1020_flush_kern_cache_louis
.equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1020
.align 5 .align 5
ENTRY(cpu_arm1020_dcache_clean_area) SYM_TYPED_FUNC_START(cpu_arm1020_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0 mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
...@@ -369,6 +375,7 @@ ENTRY(cpu_arm1020_dcache_clean_area) ...@@ -369,6 +375,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
bhi 1b bhi 1b
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm1020_dcache_clean_area)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -380,7 +387,7 @@ ENTRY(cpu_arm1020_dcache_clean_area) ...@@ -380,7 +387,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_arm1020_switch_mm) SYM_TYPED_FUNC_START(cpu_arm1020_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4 mcr p15, 0, r3, c7, c10, 4
...@@ -408,14 +415,15 @@ ENTRY(cpu_arm1020_switch_mm) ...@@ -408,14 +415,15 @@ ENTRY(cpu_arm1020_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
ret lr ret lr
SYM_FUNC_END(cpu_arm1020_switch_mm)
/* /*
* cpu_arm1020_set_pte(ptep, pte) * cpu_arm1020_set_pte(ptep, pte)
* *
* Set a PTE and flush it out * Set a PTE and flush it out
*/ */
.align 5 .align 5
ENTRY(cpu_arm1020_set_pte_ext) SYM_TYPED_FUNC_START(cpu_arm1020_set_pte_ext)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
armv3_set_pte_ext armv3_set_pte_ext
mov r0, r0 mov r0, r0
...@@ -426,6 +434,7 @@ ENTRY(cpu_arm1020_set_pte_ext) ...@@ -426,6 +434,7 @@ ENTRY(cpu_arm1020_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
ret lr ret lr
SYM_FUNC_END(cpu_arm1020_set_pte_ext)
.type __arm1020_setup, #function .type __arm1020_setup, #function
__arm1020_setup: __arm1020_setup:
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -56,18 +57,20 @@ ...@@ -56,18 +57,20 @@
/* /*
* cpu_arm1020e_proc_init() * cpu_arm1020e_proc_init()
*/ */
ENTRY(cpu_arm1020e_proc_init) SYM_TYPED_FUNC_START(cpu_arm1020e_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm1020e_proc_init)
/* /*
* cpu_arm1020e_proc_fin() * cpu_arm1020e_proc_fin()
*/ */
ENTRY(cpu_arm1020e_proc_fin) SYM_TYPED_FUNC_START(cpu_arm1020e_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm1020e_proc_fin)
/* /*
* cpu_arm1020e_reset(loc) * cpu_arm1020e_reset(loc)
...@@ -80,7 +83,7 @@ ENTRY(cpu_arm1020e_proc_fin) ...@@ -80,7 +83,7 @@ ENTRY(cpu_arm1020e_proc_fin)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020e_reset) SYM_TYPED_FUNC_START(cpu_arm1020e_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
...@@ -92,16 +95,17 @@ ENTRY(cpu_arm1020e_reset) ...@@ -92,16 +95,17 @@ ENTRY(cpu_arm1020e_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm1020e_reset) SYM_FUNC_END(cpu_arm1020e_reset)
.popsection .popsection
/* /*
* cpu_arm1020e_do_idle() * cpu_arm1020e_do_idle()
*/ */
.align 5 .align 5
ENTRY(cpu_arm1020e_do_idle) SYM_TYPED_FUNC_START(cpu_arm1020e_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr ret lr
SYM_FUNC_END(cpu_arm1020e_do_idle)
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
...@@ -112,13 +116,13 @@ ENTRY(cpu_arm1020e_do_idle) ...@@ -112,13 +116,13 @@ ENTRY(cpu_arm1020e_do_idle)
* *
* Unconditionally clean and invalidate the entire icache. * Unconditionally clean and invalidate the entire icache.
*/ */
ENTRY(arm1020e_flush_icache_all) SYM_TYPED_FUNC_START(arm1020e_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE #ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif #endif
ret lr ret lr
ENDPROC(arm1020e_flush_icache_all) SYM_FUNC_END(arm1020e_flush_icache_all)
/* /*
* flush_user_cache_all() * flush_user_cache_all()
...@@ -126,14 +130,14 @@ ENDPROC(arm1020e_flush_icache_all) ...@@ -126,14 +130,14 @@ ENDPROC(arm1020e_flush_icache_all)
* Invalidate all cache entries in a particular address * Invalidate all cache entries in a particular address
* space. * space.
*/ */
ENTRY(arm1020e_flush_user_cache_all) SYM_FUNC_ALIAS(arm1020e_flush_user_cache_all, arm1020e_flush_kern_cache_all)
/* FALLTHROUGH */
/* /*
* flush_kern_cache_all() * flush_kern_cache_all()
* *
* Clean and invalidate the entire cache. * Clean and invalidate the entire cache.
*/ */
ENTRY(arm1020e_flush_kern_cache_all) SYM_TYPED_FUNC_START(arm1020e_flush_kern_cache_all)
mov r2, #VM_EXEC mov r2, #VM_EXEC
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
...@@ -153,6 +157,7 @@ __flush_whole_cache: ...@@ -153,6 +157,7 @@ __flush_whole_cache:
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1020e_flush_kern_cache_all)
/* /*
* flush_user_cache_range(start, end, flags) * flush_user_cache_range(start, end, flags)
...@@ -164,7 +169,7 @@ __flush_whole_cache: ...@@ -164,7 +169,7 @@ __flush_whole_cache:
* - end - end address (exclusive) * - end - end address (exclusive)
* - flags - vm_flags for this space * - flags - vm_flags for this space
*/ */
ENTRY(arm1020e_flush_user_cache_range) SYM_TYPED_FUNC_START(arm1020e_flush_user_cache_range)
mov ip, #0 mov ip, #0
sub r3, r1, r0 @ calculate total size sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT cmp r3, #CACHE_DLIMIT
...@@ -182,6 +187,7 @@ ENTRY(arm1020e_flush_user_cache_range) ...@@ -182,6 +187,7 @@ ENTRY(arm1020e_flush_user_cache_range)
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1020e_flush_user_cache_range)
/* /*
* coherent_kern_range(start, end) * coherent_kern_range(start, end)
...@@ -193,8 +199,12 @@ ENTRY(arm1020e_flush_user_cache_range) ...@@ -193,8 +199,12 @@ ENTRY(arm1020e_flush_user_cache_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1020e_coherent_kern_range) SYM_TYPED_FUNC_START(arm1020e_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm1020e_coherent_user_range
#endif
SYM_FUNC_END(arm1020e_coherent_kern_range)
/* /*
* coherent_user_range(start, end) * coherent_user_range(start, end)
* *
...@@ -205,7 +215,7 @@ ENTRY(arm1020e_coherent_kern_range) ...@@ -205,7 +215,7 @@ ENTRY(arm1020e_coherent_kern_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1020e_coherent_user_range) SYM_TYPED_FUNC_START(arm1020e_coherent_user_range)
mov ip, #0 mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: 1:
...@@ -221,6 +231,7 @@ ENTRY(arm1020e_coherent_user_range) ...@@ -221,6 +231,7 @@ ENTRY(arm1020e_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0 mov r0, #0
ret lr ret lr
SYM_FUNC_END(arm1020e_coherent_user_range)
/* /*
* flush_kern_dcache_area(void *addr, size_t size) * flush_kern_dcache_area(void *addr, size_t size)
...@@ -231,7 +242,7 @@ ENTRY(arm1020e_coherent_user_range) ...@@ -231,7 +242,7 @@ ENTRY(arm1020e_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(arm1020e_flush_kern_dcache_area) SYM_TYPED_FUNC_START(arm1020e_flush_kern_dcache_area)
mov ip, #0 mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1 add r1, r0, r1
...@@ -242,6 +253,7 @@ ENTRY(arm1020e_flush_kern_dcache_area) ...@@ -242,6 +253,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1020e_flush_kern_dcache_area)
/* /*
* dma_inv_range(start, end) * dma_inv_range(start, end)
...@@ -302,7 +314,7 @@ arm1020e_dma_clean_range: ...@@ -302,7 +314,7 @@ arm1020e_dma_clean_range:
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1020e_dma_flush_range) SYM_TYPED_FUNC_START(arm1020e_dma_flush_range)
mov ip, #0 mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
...@@ -313,6 +325,7 @@ ENTRY(arm1020e_dma_flush_range) ...@@ -313,6 +325,7 @@ ENTRY(arm1020e_dma_flush_range)
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1020e_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -320,13 +333,13 @@ ENTRY(arm1020e_dma_flush_range) ...@@ -320,13 +333,13 @@ ENTRY(arm1020e_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm1020e_dma_map_area) SYM_TYPED_FUNC_START(arm1020e_dma_map_area)
add r1, r1, r0 add r1, r1, r0
cmp r2, #DMA_TO_DEVICE cmp r2, #DMA_TO_DEVICE
beq arm1020e_dma_clean_range beq arm1020e_dma_clean_range
bcs arm1020e_dma_inv_range bcs arm1020e_dma_inv_range
b arm1020e_dma_flush_range b arm1020e_dma_flush_range
ENDPROC(arm1020e_dma_map_area) SYM_FUNC_END(arm1020e_dma_map_area)
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -334,18 +347,12 @@ ENDPROC(arm1020e_dma_map_area) ...@@ -334,18 +347,12 @@ ENDPROC(arm1020e_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm1020e_dma_unmap_area) SYM_TYPED_FUNC_START(arm1020e_dma_unmap_area)
ret lr ret lr
ENDPROC(arm1020e_dma_unmap_area) SYM_FUNC_END(arm1020e_dma_unmap_area)
.globl arm1020e_flush_kern_cache_louis
.equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1020e
.align 5 .align 5
ENTRY(cpu_arm1020e_dcache_clean_area) SYM_TYPED_FUNC_START(cpu_arm1020e_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0 mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
...@@ -354,6 +361,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area) ...@@ -354,6 +361,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
bhi 1b bhi 1b
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm1020e_dcache_clean_area)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -365,7 +373,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area) ...@@ -365,7 +373,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_arm1020e_switch_mm) SYM_TYPED_FUNC_START(cpu_arm1020e_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4 mcr p15, 0, r3, c7, c10, 4
...@@ -392,14 +400,15 @@ ENTRY(cpu_arm1020e_switch_mm) ...@@ -392,14 +400,15 @@ ENTRY(cpu_arm1020e_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm1020e_switch_mm)
/* /*
* cpu_arm1020e_set_pte(ptep, pte) * cpu_arm1020e_set_pte(ptep, pte)
* *
* Set a PTE and flush it out * Set a PTE and flush it out
*/ */
.align 5 .align 5
ENTRY(cpu_arm1020e_set_pte_ext) SYM_TYPED_FUNC_START(cpu_arm1020e_set_pte_ext)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
armv3_set_pte_ext armv3_set_pte_ext
mov r0, r0 mov r0, r0
...@@ -408,6 +417,7 @@ ENTRY(cpu_arm1020e_set_pte_ext) ...@@ -408,6 +417,7 @@ ENTRY(cpu_arm1020e_set_pte_ext)
#endif #endif
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
ret lr ret lr
SYM_FUNC_END(cpu_arm1020e_set_pte_ext)
.type __arm1020e_setup, #function .type __arm1020e_setup, #function
__arm1020e_setup: __arm1020e_setup:
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -56,18 +57,20 @@ ...@@ -56,18 +57,20 @@
/* /*
* cpu_arm1022_proc_init() * cpu_arm1022_proc_init()
*/ */
ENTRY(cpu_arm1022_proc_init) SYM_TYPED_FUNC_START(cpu_arm1022_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm1022_proc_init)
/* /*
* cpu_arm1022_proc_fin() * cpu_arm1022_proc_fin()
*/ */
ENTRY(cpu_arm1022_proc_fin) SYM_TYPED_FUNC_START(cpu_arm1022_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm1022_proc_fin)
/* /*
* cpu_arm1022_reset(loc) * cpu_arm1022_reset(loc)
...@@ -80,7 +83,7 @@ ENTRY(cpu_arm1022_proc_fin) ...@@ -80,7 +83,7 @@ ENTRY(cpu_arm1022_proc_fin)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm1022_reset) SYM_TYPED_FUNC_START(cpu_arm1022_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
...@@ -92,16 +95,17 @@ ENTRY(cpu_arm1022_reset) ...@@ -92,16 +95,17 @@ ENTRY(cpu_arm1022_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm1022_reset) SYM_FUNC_END(cpu_arm1022_reset)
.popsection .popsection
/* /*
* cpu_arm1022_do_idle() * cpu_arm1022_do_idle()
*/ */
.align 5 .align 5
ENTRY(cpu_arm1022_do_idle) SYM_TYPED_FUNC_START(cpu_arm1022_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr ret lr
SYM_FUNC_END(cpu_arm1022_do_idle)
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
...@@ -112,13 +116,13 @@ ENTRY(cpu_arm1022_do_idle) ...@@ -112,13 +116,13 @@ ENTRY(cpu_arm1022_do_idle)
* *
* Unconditionally clean and invalidate the entire icache. * Unconditionally clean and invalidate the entire icache.
*/ */
ENTRY(arm1022_flush_icache_all) SYM_TYPED_FUNC_START(arm1022_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE #ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif #endif
ret lr ret lr
ENDPROC(arm1022_flush_icache_all) SYM_FUNC_END(arm1022_flush_icache_all)
/* /*
* flush_user_cache_all() * flush_user_cache_all()
...@@ -126,14 +130,14 @@ ENDPROC(arm1022_flush_icache_all) ...@@ -126,14 +130,14 @@ ENDPROC(arm1022_flush_icache_all)
* Invalidate all cache entries in a particular address * Invalidate all cache entries in a particular address
* space. * space.
*/ */
ENTRY(arm1022_flush_user_cache_all) SYM_FUNC_ALIAS(arm1022_flush_user_cache_all, arm1022_flush_kern_cache_all)
/* FALLTHROUGH */
/* /*
* flush_kern_cache_all() * flush_kern_cache_all()
* *
* Clean and invalidate the entire cache. * Clean and invalidate the entire cache.
*/ */
ENTRY(arm1022_flush_kern_cache_all) SYM_TYPED_FUNC_START(arm1022_flush_kern_cache_all)
mov r2, #VM_EXEC mov r2, #VM_EXEC
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
...@@ -152,6 +156,7 @@ __flush_whole_cache: ...@@ -152,6 +156,7 @@ __flush_whole_cache:
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1022_flush_kern_cache_all)
/* /*
* flush_user_cache_range(start, end, flags) * flush_user_cache_range(start, end, flags)
...@@ -163,7 +168,7 @@ __flush_whole_cache: ...@@ -163,7 +168,7 @@ __flush_whole_cache:
* - end - end address (exclusive) * - end - end address (exclusive)
* - flags - vm_flags for this space * - flags - vm_flags for this space
*/ */
ENTRY(arm1022_flush_user_cache_range) SYM_TYPED_FUNC_START(arm1022_flush_user_cache_range)
mov ip, #0 mov ip, #0
sub r3, r1, r0 @ calculate total size sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT cmp r3, #CACHE_DLIMIT
...@@ -181,6 +186,7 @@ ENTRY(arm1022_flush_user_cache_range) ...@@ -181,6 +186,7 @@ ENTRY(arm1022_flush_user_cache_range)
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1022_flush_user_cache_range)
/* /*
* coherent_kern_range(start, end) * coherent_kern_range(start, end)
...@@ -192,8 +198,11 @@ ENTRY(arm1022_flush_user_cache_range) ...@@ -192,8 +198,11 @@ ENTRY(arm1022_flush_user_cache_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1022_coherent_kern_range) SYM_TYPED_FUNC_START(arm1022_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm1022_coherent_user_range
#endif
SYM_FUNC_END(arm1022_coherent_kern_range)
/* /*
* coherent_user_range(start, end) * coherent_user_range(start, end)
...@@ -205,7 +214,7 @@ ENTRY(arm1022_coherent_kern_range) ...@@ -205,7 +214,7 @@ ENTRY(arm1022_coherent_kern_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1022_coherent_user_range) SYM_TYPED_FUNC_START(arm1022_coherent_user_range)
mov ip, #0 mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: 1:
...@@ -221,6 +230,7 @@ ENTRY(arm1022_coherent_user_range) ...@@ -221,6 +230,7 @@ ENTRY(arm1022_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0 mov r0, #0
ret lr ret lr
SYM_FUNC_END(arm1022_coherent_user_range)
/* /*
* flush_kern_dcache_area(void *addr, size_t size) * flush_kern_dcache_area(void *addr, size_t size)
...@@ -231,7 +241,7 @@ ENTRY(arm1022_coherent_user_range) ...@@ -231,7 +241,7 @@ ENTRY(arm1022_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(arm1022_flush_kern_dcache_area) SYM_TYPED_FUNC_START(arm1022_flush_kern_dcache_area)
mov ip, #0 mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1 add r1, r0, r1
...@@ -242,6 +252,7 @@ ENTRY(arm1022_flush_kern_dcache_area) ...@@ -242,6 +252,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1022_flush_kern_dcache_area)
/* /*
* dma_inv_range(start, end) * dma_inv_range(start, end)
...@@ -302,7 +313,7 @@ arm1022_dma_clean_range: ...@@ -302,7 +313,7 @@ arm1022_dma_clean_range:
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1022_dma_flush_range) SYM_TYPED_FUNC_START(arm1022_dma_flush_range)
mov ip, #0 mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
...@@ -313,6 +324,7 @@ ENTRY(arm1022_dma_flush_range) ...@@ -313,6 +324,7 @@ ENTRY(arm1022_dma_flush_range)
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1022_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -320,13 +332,13 @@ ENTRY(arm1022_dma_flush_range) ...@@ -320,13 +332,13 @@ ENTRY(arm1022_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm1022_dma_map_area) SYM_TYPED_FUNC_START(arm1022_dma_map_area)
add r1, r1, r0 add r1, r1, r0
cmp r2, #DMA_TO_DEVICE cmp r2, #DMA_TO_DEVICE
beq arm1022_dma_clean_range beq arm1022_dma_clean_range
bcs arm1022_dma_inv_range bcs arm1022_dma_inv_range
b arm1022_dma_flush_range b arm1022_dma_flush_range
ENDPROC(arm1022_dma_map_area) SYM_FUNC_END(arm1022_dma_map_area)
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -334,18 +346,12 @@ ENDPROC(arm1022_dma_map_area) ...@@ -334,18 +346,12 @@ ENDPROC(arm1022_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm1022_dma_unmap_area) SYM_TYPED_FUNC_START(arm1022_dma_unmap_area)
ret lr ret lr
ENDPROC(arm1022_dma_unmap_area) SYM_FUNC_END(arm1022_dma_unmap_area)
.globl arm1022_flush_kern_cache_louis
.equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1022
.align 5 .align 5
ENTRY(cpu_arm1022_dcache_clean_area) SYM_TYPED_FUNC_START(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0 mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
...@@ -354,6 +360,7 @@ ENTRY(cpu_arm1022_dcache_clean_area) ...@@ -354,6 +360,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
bhi 1b bhi 1b
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm1022_dcache_clean_area)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -365,7 +372,7 @@ ENTRY(cpu_arm1022_dcache_clean_area) ...@@ -365,7 +372,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_arm1022_switch_mm) SYM_TYPED_FUNC_START(cpu_arm1022_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
...@@ -385,14 +392,15 @@ ENTRY(cpu_arm1022_switch_mm) ...@@ -385,14 +392,15 @@ ENTRY(cpu_arm1022_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm1022_switch_mm)
/* /*
* cpu_arm1022_set_pte_ext(ptep, pte, ext) * cpu_arm1022_set_pte_ext(ptep, pte, ext)
* *
* Set a PTE and flush it out * Set a PTE and flush it out
*/ */
.align 5 .align 5
ENTRY(cpu_arm1022_set_pte_ext) SYM_TYPED_FUNC_START(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
armv3_set_pte_ext armv3_set_pte_ext
mov r0, r0 mov r0, r0
...@@ -401,6 +409,7 @@ ENTRY(cpu_arm1022_set_pte_ext) ...@@ -401,6 +409,7 @@ ENTRY(cpu_arm1022_set_pte_ext)
#endif #endif
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
ret lr ret lr
SYM_FUNC_END(cpu_arm1022_set_pte_ext)
.type __arm1022_setup, #function .type __arm1022_setup, #function
__arm1022_setup: __arm1022_setup:
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -56,18 +57,20 @@ ...@@ -56,18 +57,20 @@
/* /*
* cpu_arm1026_proc_init() * cpu_arm1026_proc_init()
*/ */
ENTRY(cpu_arm1026_proc_init) SYM_TYPED_FUNC_START(cpu_arm1026_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm1026_proc_init)
/* /*
* cpu_arm1026_proc_fin() * cpu_arm1026_proc_fin()
*/ */
ENTRY(cpu_arm1026_proc_fin) SYM_TYPED_FUNC_START(cpu_arm1026_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm1026_proc_fin)
/* /*
* cpu_arm1026_reset(loc) * cpu_arm1026_reset(loc)
...@@ -80,7 +83,7 @@ ENTRY(cpu_arm1026_proc_fin) ...@@ -80,7 +83,7 @@ ENTRY(cpu_arm1026_proc_fin)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm1026_reset) SYM_TYPED_FUNC_START(cpu_arm1026_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
...@@ -92,16 +95,17 @@ ENTRY(cpu_arm1026_reset) ...@@ -92,16 +95,17 @@ ENTRY(cpu_arm1026_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm1026_reset) SYM_FUNC_END(cpu_arm1026_reset)
.popsection .popsection
/* /*
* cpu_arm1026_do_idle() * cpu_arm1026_do_idle()
*/ */
.align 5 .align 5
ENTRY(cpu_arm1026_do_idle) SYM_TYPED_FUNC_START(cpu_arm1026_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr ret lr
SYM_FUNC_END(cpu_arm1026_do_idle)
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
...@@ -112,13 +116,13 @@ ENTRY(cpu_arm1026_do_idle) ...@@ -112,13 +116,13 @@ ENTRY(cpu_arm1026_do_idle)
* *
* Unconditionally clean and invalidate the entire icache. * Unconditionally clean and invalidate the entire icache.
*/ */
ENTRY(arm1026_flush_icache_all) SYM_TYPED_FUNC_START(arm1026_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE #ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif #endif
ret lr ret lr
ENDPROC(arm1026_flush_icache_all) SYM_FUNC_END(arm1026_flush_icache_all)
/* /*
* flush_user_cache_all() * flush_user_cache_all()
...@@ -126,14 +130,14 @@ ENDPROC(arm1026_flush_icache_all) ...@@ -126,14 +130,14 @@ ENDPROC(arm1026_flush_icache_all)
* Invalidate all cache entries in a particular address * Invalidate all cache entries in a particular address
* space. * space.
*/ */
ENTRY(arm1026_flush_user_cache_all) SYM_FUNC_ALIAS(arm1026_flush_user_cache_all, arm1026_flush_kern_cache_all)
/* FALLTHROUGH */
/* /*
* flush_kern_cache_all() * flush_kern_cache_all()
* *
* Clean and invalidate the entire cache. * Clean and invalidate the entire cache.
*/ */
ENTRY(arm1026_flush_kern_cache_all) SYM_TYPED_FUNC_START(arm1026_flush_kern_cache_all)
mov r2, #VM_EXEC mov r2, #VM_EXEC
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
...@@ -147,6 +151,7 @@ __flush_whole_cache: ...@@ -147,6 +151,7 @@ __flush_whole_cache:
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1026_flush_kern_cache_all)
/* /*
* flush_user_cache_range(start, end, flags) * flush_user_cache_range(start, end, flags)
...@@ -158,7 +163,7 @@ __flush_whole_cache: ...@@ -158,7 +163,7 @@ __flush_whole_cache:
* - end - end address (exclusive) * - end - end address (exclusive)
* - flags - vm_flags for this space * - flags - vm_flags for this space
*/ */
ENTRY(arm1026_flush_user_cache_range) SYM_TYPED_FUNC_START(arm1026_flush_user_cache_range)
mov ip, #0 mov ip, #0
sub r3, r1, r0 @ calculate total size sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT cmp r3, #CACHE_DLIMIT
...@@ -176,6 +181,7 @@ ENTRY(arm1026_flush_user_cache_range) ...@@ -176,6 +181,7 @@ ENTRY(arm1026_flush_user_cache_range)
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1026_flush_user_cache_range)
/* /*
* coherent_kern_range(start, end) * coherent_kern_range(start, end)
...@@ -187,8 +193,12 @@ ENTRY(arm1026_flush_user_cache_range) ...@@ -187,8 +193,12 @@ ENTRY(arm1026_flush_user_cache_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1026_coherent_kern_range) SYM_TYPED_FUNC_START(arm1026_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm1026_coherent_user_range
#endif
SYM_FUNC_END(arm1026_coherent_kern_range)
/* /*
* coherent_user_range(start, end) * coherent_user_range(start, end)
* *
...@@ -199,7 +209,7 @@ ENTRY(arm1026_coherent_kern_range) ...@@ -199,7 +209,7 @@ ENTRY(arm1026_coherent_kern_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1026_coherent_user_range) SYM_TYPED_FUNC_START(arm1026_coherent_user_range)
mov ip, #0 mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: 1:
...@@ -215,6 +225,7 @@ ENTRY(arm1026_coherent_user_range) ...@@ -215,6 +225,7 @@ ENTRY(arm1026_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0 mov r0, #0
ret lr ret lr
SYM_FUNC_END(arm1026_coherent_user_range)
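Seen from C, the loop above walks the range one D-cache line at a time, cleaning the D side and invalidating the I side before a final write-buffer drain. A stubbed model, assuming the 32-byte CACHE_DLINESIZE this file uses:

#define CACHE_DLINESIZE_MODEL 32UL

static void clean_dcache_line(unsigned long addr) { (void)addr; } /* c7, c10, 1 */
static void inval_icache_line(unsigned long addr) { (void)addr; } /* c7, c5, 1  */
static void drain_write_buffer(void)              { }             /* c7, c10, 4 */

int coherent_user_range_loop(unsigned long start, unsigned long end)
{
        unsigned long addr = start & ~(CACHE_DLINESIZE_MODEL - 1); /* the bic */

        while (addr < end) {                    /* the cmp/blo loop */
                clean_dcache_line(addr);        /* push dirty data to memory */
                inval_icache_line(addr);        /* force an I-side refetch */
                addr += CACHE_DLINESIZE_MODEL;
        }
        drain_write_buffer();
        return 0;                               /* the final "mov r0, #0" */
}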
/* /*
* flush_kern_dcache_area(void *addr, size_t size) * flush_kern_dcache_area(void *addr, size_t size)
...@@ -225,7 +236,7 @@ ENTRY(arm1026_coherent_user_range) ...@@ -225,7 +236,7 @@ ENTRY(arm1026_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(arm1026_flush_kern_dcache_area) SYM_TYPED_FUNC_START(arm1026_flush_kern_dcache_area)
mov ip, #0 mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1 add r1, r0, r1
...@@ -236,6 +247,7 @@ ENTRY(arm1026_flush_kern_dcache_area) ...@@ -236,6 +247,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1026_flush_kern_dcache_area)
/* /*
* dma_inv_range(start, end) * dma_inv_range(start, end)
...@@ -296,7 +308,7 @@ arm1026_dma_clean_range: ...@@ -296,7 +308,7 @@ arm1026_dma_clean_range:
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm1026_dma_flush_range) SYM_TYPED_FUNC_START(arm1026_dma_flush_range)
mov ip, #0 mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
...@@ -307,6 +319,7 @@ ENTRY(arm1026_dma_flush_range) ...@@ -307,6 +319,7 @@ ENTRY(arm1026_dma_flush_range)
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm1026_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -314,13 +327,13 @@ ENTRY(arm1026_dma_flush_range) ...@@ -314,13 +327,13 @@ ENTRY(arm1026_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm1026_dma_map_area) SYM_TYPED_FUNC_START(arm1026_dma_map_area)
add r1, r1, r0 add r1, r1, r0
cmp r2, #DMA_TO_DEVICE cmp r2, #DMA_TO_DEVICE
beq arm1026_dma_clean_range beq arm1026_dma_clean_range
bcs arm1026_dma_inv_range bcs arm1026_dma_inv_range
b arm1026_dma_flush_range b arm1026_dma_flush_range
ENDPROC(arm1026_dma_map_area) SYM_FUNC_END(arm1026_dma_map_area)
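The beq/bcs pair decodes the DMA direction: DMA_TO_DEVICE takes the clean-only path, DMA_FROM_DEVICE (the only larger value mapped here) the invalidate-only path, and DMA_BIDIRECTIONAL drops to the full clean+invalidate. The same dispatch in C, with the range routines stubbed out:

/* Direction constants as in <linux/dma-direction.h>: BIDIRECTIONAL = 0,
 * TO_DEVICE = 1, FROM_DEVICE = 2. */
enum dma_dir_model { DMA_BIDIR_M, DMA_TO_DEV_M, DMA_FROM_DEV_M };

static void dma_clean_range_m(unsigned long s, unsigned long e) { (void)s; (void)e; }
static void dma_inv_range_m(unsigned long s, unsigned long e)   { (void)s; (void)e; }
static void dma_flush_range_m(unsigned long s, unsigned long e) { (void)s; (void)e; }

void dma_map_area_model(unsigned long start, unsigned long size,
                        enum dma_dir_model dir)
{
        unsigned long end = start + size;       /* the "add r1, r1, r0" */

        if (dir == DMA_TO_DEV_M)                /* beq: clean only */
                dma_clean_range_m(start, end);
        else if (dir > DMA_TO_DEV_M)            /* bcs: invalidate only */
                dma_inv_range_m(start, end);
        else                                    /* bidirectional: clean+inval */
                dma_flush_range_m(start, end);
}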
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -328,18 +341,12 @@ ENDPROC(arm1026_dma_map_area) ...@@ -328,18 +341,12 @@ ENDPROC(arm1026_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm1026_dma_unmap_area) SYM_TYPED_FUNC_START(arm1026_dma_unmap_area)
ret lr ret lr
ENDPROC(arm1026_dma_unmap_area) SYM_FUNC_END(arm1026_dma_unmap_area)
.globl arm1026_flush_kern_cache_louis
.equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1026
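The deleted .equ alias and define_cache_functions invocation used to assemble the arm1026 cpu_cache_fns vtable from proc-macros.S; in this series the table is written in C instead (in a file outside this hunk), where the compiler can attach kCFI types to every slot. A sketch of such an initializer, with slot names following struct cpu_cache_fns in <asm/cacheflush.h>; treat the exact layout as an assumption and verify it against the tree:

struct cpu_cache_fns arm1026_cache_fns = {
        .flush_icache_all       = arm1026_flush_icache_all,
        .flush_kern_all         = arm1026_flush_kern_cache_all,
        .flush_kern_louis       = arm1026_flush_kern_cache_all, /* the old .equ */
        .flush_user_all         = arm1026_flush_user_cache_all,
        .flush_user_range       = arm1026_flush_user_cache_range,
        .coherent_kern_range    = arm1026_coherent_kern_range,
        .coherent_user_range    = arm1026_coherent_user_range,
        .flush_kern_dcache_area = arm1026_flush_kern_dcache_area,
        .dma_map_area           = arm1026_dma_map_area,
        .dma_unmap_area         = arm1026_dma_unmap_area,
        .dma_flush_range        = arm1026_dma_flush_range,
};

Note how the louis slot reuses flush_kern_cache_all, which is exactly what the removed .equ expressed in assembly.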
.align 5 .align 5
ENTRY(cpu_arm1026_dcache_clean_area) SYM_TYPED_FUNC_START(cpu_arm1026_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0 mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
...@@ -348,6 +355,7 @@ ENTRY(cpu_arm1026_dcache_clean_area) ...@@ -348,6 +355,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
bhi 1b bhi 1b
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm1026_dcache_clean_area)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -359,7 +367,7 @@ ENTRY(cpu_arm1026_dcache_clean_area) ...@@ -359,7 +367,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_arm1026_switch_mm) SYM_TYPED_FUNC_START(cpu_arm1026_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
mov r1, #0 mov r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
...@@ -374,14 +382,15 @@ ENTRY(cpu_arm1026_switch_mm) ...@@ -374,14 +382,15 @@ ENTRY(cpu_arm1026_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm1026_switch_mm)
/* /*
* cpu_arm1026_set_pte_ext(ptep, pte, ext) * cpu_arm1026_set_pte_ext(ptep, pte, ext)
* *
* Set a PTE and flush it out * Set a PTE and flush it out
*/ */
.align 5 .align 5
ENTRY(cpu_arm1026_set_pte_ext) SYM_TYPED_FUNC_START(cpu_arm1026_set_pte_ext)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
armv3_set_pte_ext armv3_set_pte_ext
mov r0, r0 mov r0, r0
...@@ -390,6 +399,7 @@ ENTRY(cpu_arm1026_set_pte_ext) ...@@ -390,6 +399,7 @@ ENTRY(cpu_arm1026_set_pte_ext)
#endif #endif
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
ret lr ret lr
SYM_FUNC_END(cpu_arm1026_set_pte_ext)
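For reference, set_pte_ext stores the hardware PTE (via the armv3_set_pte_ext macro) and then cleans the D-cache line holding it, since the table walker on these cores does not snoop the D-cache. A stubbed C shape of that sequence; types and helpers are illustrative:

typedef unsigned long hw_pte_t;

static void clean_dcache_line_at(void *addr) { (void)addr; }    /* c7, c10, 1 */

void set_pte_ext_model(hw_pte_t *ptep, hw_pte_t pte)
{
        *ptep = pte;                    /* the armv3_set_pte_ext expansion */
        clean_dcache_line_at(ptep);     /* make the table walker see it */
}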
.type __arm1026_setup, #function .type __arm1026_setup, #function
__arm1026_setup: __arm1026_setup:
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
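The new <linux/cfi_types.h> include is where SYM_TYPED_FUNC_START() comes from; with CONFIG_CFI_CLANG disabled it degrades to a plain function start. The hash it emits is computed from the routine's C prototype, so each typed asm entry must line up with a declaration along these lines (a sketch; the authoritative signatures live in the processor-function headers):

/* Declarations the type hashes are derived from -- sketch only. */
void cpu_arm720_proc_init(void);
void cpu_arm720_proc_fin(void);
int  cpu_arm720_do_idle(void);
void cpu_arm720_dcache_clean_area(void *addr, int size);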
...@@ -35,24 +36,30 @@ ...@@ -35,24 +36,30 @@
* *
* Notes : This processor does not require these * Notes : This processor does not require these
*/ */
ENTRY(cpu_arm720_dcache_clean_area) SYM_TYPED_FUNC_START(cpu_arm720_dcache_clean_area)
ENTRY(cpu_arm720_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm720_dcache_clean_area)
ENTRY(cpu_arm720_proc_fin) SYM_TYPED_FUNC_START(cpu_arm720_proc_init)
ret lr
SYM_FUNC_END(cpu_arm720_proc_init)
SYM_TYPED_FUNC_START(cpu_arm720_proc_fin)
mrc p15, 0, r0, c1, c0, 0 mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm720_proc_fin)
/* /*
* Function: arm720_proc_do_idle(void) * Function: arm720_proc_do_idle(void)
* Params : r0 = unused * Params : r0 = unused
* Purpose : put the processor in proper idle mode * Purpose : put the processor in proper idle mode
*/ */
ENTRY(cpu_arm720_do_idle) SYM_TYPED_FUNC_START(cpu_arm720_do_idle)
ret lr ret lr
SYM_FUNC_END(cpu_arm720_do_idle)
/* /*
* Function: arm720_switch_mm(unsigned long pgd_phys) * Function: arm720_switch_mm(unsigned long pgd_phys)
...@@ -60,7 +67,7 @@ ENTRY(cpu_arm720_do_idle) ...@@ -60,7 +67,7 @@ ENTRY(cpu_arm720_do_idle)
* Purpose : Perform a task switch, saving the old process' state and restoring * Purpose : Perform a task switch, saving the old process' state and restoring
* the new. * the new.
*/ */
ENTRY(cpu_arm720_switch_mm) SYM_TYPED_FUNC_START(cpu_arm720_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
mov r1, #0 mov r1, #0
mcr p15, 0, r1, c7, c7, 0 @ invalidate cache mcr p15, 0, r1, c7, c7, 0 @ invalidate cache
...@@ -68,6 +75,7 @@ ENTRY(cpu_arm720_switch_mm) ...@@ -68,6 +75,7 @@ ENTRY(cpu_arm720_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4) mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm720_switch_mm)
/* /*
* Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
...@@ -76,11 +84,12 @@ ENTRY(cpu_arm720_switch_mm) ...@@ -76,11 +84,12 @@ ENTRY(cpu_arm720_switch_mm)
* Purpose : Set a PTE and flush it out of any WB cache * Purpose : Set a PTE and flush it out of any WB cache
*/ */
.align 5 .align 5
ENTRY(cpu_arm720_set_pte_ext) SYM_TYPED_FUNC_START(cpu_arm720_set_pte_ext)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0 armv3_set_pte_ext wc_disable=0
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm720_set_pte_ext)
/* /*
* Function: arm720_reset * Function: arm720_reset
...@@ -88,7 +97,7 @@ ENTRY(cpu_arm720_set_pte_ext) ...@@ -88,7 +97,7 @@ ENTRY(cpu_arm720_set_pte_ext)
* Notes : This sets up everything for a reset * Notes : This sets up everything for a reset
*/ */
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm720_reset) SYM_TYPED_FUNC_START(cpu_arm720_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate cache mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
...@@ -99,7 +108,7 @@ ENTRY(cpu_arm720_reset) ...@@ -99,7 +108,7 @@ ENTRY(cpu_arm720_reset)
bic ip, ip, #0x2100 @ ..v....s........ bic ip, ip, #0x2100 @ ..v....s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm720_reset) SYM_FUNC_END(cpu_arm720_reset)
.popsection .popsection
.type __arm710_setup, #function .type __arm710_setup, #function
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -24,21 +25,32 @@ ...@@ -24,21 +25,32 @@
* *
* These are not required. * These are not required.
*/ */
ENTRY(cpu_arm740_proc_init) SYM_TYPED_FUNC_START(cpu_arm740_proc_init)
ENTRY(cpu_arm740_do_idle)
ENTRY(cpu_arm740_dcache_clean_area)
ENTRY(cpu_arm740_switch_mm)
ret lr ret lr
SYM_FUNC_END(cpu_arm740_proc_init)
SYM_TYPED_FUNC_START(cpu_arm740_do_idle)
ret lr
SYM_FUNC_END(cpu_arm740_do_idle)
SYM_TYPED_FUNC_START(cpu_arm740_dcache_clean_area)
ret lr
SYM_FUNC_END(cpu_arm740_dcache_clean_area)
SYM_TYPED_FUNC_START(cpu_arm740_switch_mm)
ret lr
SYM_FUNC_END(cpu_arm740_switch_mm)
/* /*
* cpu_arm740_proc_fin() * cpu_arm740_proc_fin()
*/ */
ENTRY(cpu_arm740_proc_fin) SYM_TYPED_FUNC_START(cpu_arm740_proc_fin)
mrc p15, 0, r0, c1, c0, 0 mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x3f000000 @ bank/f/lock/s bic r0, r0, #0x3f000000 @ bank/f/lock/s
bic r0, r0, #0x0000000c @ w-buffer/cache bic r0, r0, #0x0000000c @ w-buffer/cache
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm740_proc_fin)
/* /*
* cpu_arm740_reset(loc) * cpu_arm740_reset(loc)
...@@ -46,14 +58,14 @@ ENTRY(cpu_arm740_proc_fin) ...@@ -46,14 +58,14 @@ ENTRY(cpu_arm740_proc_fin)
* Notes : This sets up everything for a reset * Notes : This sets up everything for a reset
*/ */
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm740_reset) SYM_TYPED_FUNC_START(cpu_arm740_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c0, 0 @ invalidate cache mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
mrc p15, 0, ip, c1, c0, 0 @ get ctrl register mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
bic ip, ip, #0x0000000c @ ............wc.. bic ip, ip, #0x0000000c @ ............wc..
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm740_reset) SYM_FUNC_END(cpu_arm740_reset)
.popsection .popsection
.type __arm740_setup, #function .type __arm740_setup, #function
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -23,18 +24,29 @@ ...@@ -23,18 +24,29 @@
* cpu_arm7tdmi_switch_mm() * cpu_arm7tdmi_switch_mm()
* *
* These are not required. * These are not required.
*/ */
ENTRY(cpu_arm7tdmi_proc_init) SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_init)
ENTRY(cpu_arm7tdmi_do_idle) ret lr
ENTRY(cpu_arm7tdmi_dcache_clean_area) SYM_FUNC_END(cpu_arm7tdmi_proc_init)
ENTRY(cpu_arm7tdmi_switch_mm)
ret lr SYM_TYPED_FUNC_START(cpu_arm7tdmi_do_idle)
ret lr
SYM_FUNC_END(cpu_arm7tdmi_do_idle)
SYM_TYPED_FUNC_START(cpu_arm7tdmi_dcache_clean_area)
ret lr
SYM_FUNC_END(cpu_arm7tdmi_dcache_clean_area)
SYM_TYPED_FUNC_START(cpu_arm7tdmi_switch_mm)
ret lr
SYM_FUNC_END(cpu_arm7tdmi_switch_mm)
/* /*
* cpu_arm7tdmi_proc_fin() * cpu_arm7tdmi_proc_fin()
*/ */
ENTRY(cpu_arm7tdmi_proc_fin) SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_fin)
ret lr ret lr
SYM_FUNC_END(cpu_arm7tdmi_proc_fin)
/* /*
* Function: cpu_arm7tdmi_reset(loc) * Function: cpu_arm7tdmi_reset(loc)
...@@ -42,9 +54,9 @@ ENTRY(cpu_arm7tdmi_proc_fin) ...@@ -42,9 +54,9 @@ ENTRY(cpu_arm7tdmi_proc_fin)
* Purpose : Sets up everything for a reset and jump to the location for soft reset. * Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/ */
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm7tdmi_reset) SYM_TYPED_FUNC_START(cpu_arm7tdmi_reset)
ret r0 ret r0
ENDPROC(cpu_arm7tdmi_reset) SYM_FUNC_END(cpu_arm7tdmi_reset)
.popsection .popsection
.type __arm7tdmi_setup, #function .type __arm7tdmi_setup, #function
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/hwcap.h> #include <asm/hwcap.h>
...@@ -48,18 +49,20 @@ ...@@ -48,18 +49,20 @@
/* /*
* cpu_arm920_proc_init() * cpu_arm920_proc_init()
*/ */
ENTRY(cpu_arm920_proc_init) SYM_TYPED_FUNC_START(cpu_arm920_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm920_proc_init)
/* /*
* cpu_arm920_proc_fin() * cpu_arm920_proc_fin()
*/ */
ENTRY(cpu_arm920_proc_fin) SYM_TYPED_FUNC_START(cpu_arm920_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm920_proc_fin)
/* /*
* cpu_arm920_reset(loc) * cpu_arm920_reset(loc)
...@@ -72,7 +75,7 @@ ENTRY(cpu_arm920_proc_fin) ...@@ -72,7 +75,7 @@ ENTRY(cpu_arm920_proc_fin)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm920_reset) SYM_TYPED_FUNC_START(cpu_arm920_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
...@@ -84,17 +87,17 @@ ENTRY(cpu_arm920_reset) ...@@ -84,17 +87,17 @@ ENTRY(cpu_arm920_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm920_reset) SYM_FUNC_END(cpu_arm920_reset)
.popsection .popsection
/* /*
* cpu_arm920_do_idle() * cpu_arm920_do_idle()
*/ */
.align 5 .align 5
ENTRY(cpu_arm920_do_idle) SYM_TYPED_FUNC_START(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr ret lr
SYM_FUNC_END(cpu_arm920_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -103,11 +106,11 @@ ENTRY(cpu_arm920_do_idle) ...@@ -103,11 +106,11 @@ ENTRY(cpu_arm920_do_idle)
* *
* Unconditionally clean and invalidate the entire icache. * Unconditionally clean and invalidate the entire icache.
*/ */
ENTRY(arm920_flush_icache_all) SYM_TYPED_FUNC_START(arm920_flush_icache_all)
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr ret lr
ENDPROC(arm920_flush_icache_all) SYM_FUNC_END(arm920_flush_icache_all)
/* /*
* flush_user_cache_all() * flush_user_cache_all()
...@@ -115,15 +118,14 @@ ENDPROC(arm920_flush_icache_all) ...@@ -115,15 +118,14 @@ ENDPROC(arm920_flush_icache_all)
* Invalidate all cache entries in a particular address * Invalidate all cache entries in a particular address
* space. * space.
*/ */
ENTRY(arm920_flush_user_cache_all) SYM_FUNC_ALIAS(arm920_flush_user_cache_all, arm920_flush_kern_cache_all)
/* FALLTHROUGH */
/* /*
* flush_kern_cache_all() * flush_kern_cache_all()
* *
* Clean and invalidate the entire cache. * Clean and invalidate the entire cache.
*/ */
ENTRY(arm920_flush_kern_cache_all) SYM_TYPED_FUNC_START(arm920_flush_kern_cache_all)
mov r2, #VM_EXEC mov r2, #VM_EXEC
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
...@@ -138,6 +140,7 @@ __flush_whole_cache: ...@@ -138,6 +140,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm920_flush_kern_cache_all)
/* /*
* flush_user_cache_range(start, end, flags) * flush_user_cache_range(start, end, flags)
...@@ -149,7 +152,7 @@ __flush_whole_cache: ...@@ -149,7 +152,7 @@ __flush_whole_cache:
* - end - end address (exclusive) * - end - end address (exclusive)
* - flags - vm_flags for address space * - flags - vm_flags for address space
*/ */
ENTRY(arm920_flush_user_cache_range) SYM_TYPED_FUNC_START(arm920_flush_user_cache_range)
mov ip, #0 mov ip, #0
sub r3, r1, r0 @ calculate total size sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT cmp r3, #CACHE_DLIMIT
...@@ -164,6 +167,7 @@ ENTRY(arm920_flush_user_cache_range) ...@@ -164,6 +167,7 @@ ENTRY(arm920_flush_user_cache_range)
tst r2, #VM_EXEC tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm920_flush_user_cache_range)
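The size check at the top of flush_user_cache_range implements a cut-off: past CACHE_DLIMIT it is cheaper to clean and invalidate the whole cache than to walk the range line by line. The heuristic in C, with an illustrative limit (each proc-*.S defines its own):

#define CACHE_DLIMIT_MODEL      (64 * 1024)

static void flush_whole_cache(void)                                { }
static void flush_range_by_lines(unsigned long s, unsigned long e) { (void)s; (void)e; }

void flush_user_cache_range_model(unsigned long start, unsigned long end)
{
        if (end - start >= CACHE_DLIMIT_MODEL)  /* "cmp r3, #CACHE_DLIMIT" */
                flush_whole_cache();            /* take __flush_whole_cache */
        else
                flush_range_by_lines(start, end);
}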
/* /*
* coherent_kern_range(start, end) * coherent_kern_range(start, end)
...@@ -175,8 +179,11 @@ ENTRY(arm920_flush_user_cache_range) ...@@ -175,8 +179,11 @@ ENTRY(arm920_flush_user_cache_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm920_coherent_kern_range) SYM_TYPED_FUNC_START(arm920_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm920_coherent_user_range
#endif
SYM_FUNC_END(arm920_coherent_kern_range)
/* /*
* coherent_user_range(start, end) * coherent_user_range(start, end)
...@@ -188,7 +195,7 @@ ENTRY(arm920_coherent_kern_range) ...@@ -188,7 +195,7 @@ ENTRY(arm920_coherent_kern_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm920_coherent_user_range) SYM_TYPED_FUNC_START(arm920_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
...@@ -198,6 +205,7 @@ ENTRY(arm920_coherent_user_range) ...@@ -198,6 +205,7 @@ ENTRY(arm920_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0 mov r0, #0
ret lr ret lr
SYM_FUNC_END(arm920_coherent_user_range)
/* /*
* flush_kern_dcache_area(void *addr, size_t size) * flush_kern_dcache_area(void *addr, size_t size)
...@@ -208,7 +216,7 @@ ENTRY(arm920_coherent_user_range) ...@@ -208,7 +216,7 @@ ENTRY(arm920_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(arm920_flush_kern_dcache_area) SYM_TYPED_FUNC_START(arm920_flush_kern_dcache_area)
add r1, r0, r1 add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
...@@ -218,6 +226,7 @@ ENTRY(arm920_flush_kern_dcache_area) ...@@ -218,6 +226,7 @@ ENTRY(arm920_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm920_flush_kern_dcache_area)
/* /*
* dma_inv_range(start, end) * dma_inv_range(start, end)
...@@ -272,7 +281,7 @@ arm920_dma_clean_range: ...@@ -272,7 +281,7 @@ arm920_dma_clean_range:
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm920_dma_flush_range) SYM_TYPED_FUNC_START(arm920_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
...@@ -280,6 +289,7 @@ ENTRY(arm920_dma_flush_range) ...@@ -280,6 +289,7 @@ ENTRY(arm920_dma_flush_range)
blo 1b blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm920_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -287,13 +297,13 @@ ENTRY(arm920_dma_flush_range) ...@@ -287,13 +297,13 @@ ENTRY(arm920_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm920_dma_map_area) SYM_TYPED_FUNC_START(arm920_dma_map_area)
add r1, r1, r0 add r1, r1, r0
cmp r2, #DMA_TO_DEVICE cmp r2, #DMA_TO_DEVICE
beq arm920_dma_clean_range beq arm920_dma_clean_range
bcs arm920_dma_inv_range bcs arm920_dma_inv_range
b arm920_dma_flush_range b arm920_dma_flush_range
ENDPROC(arm920_dma_map_area) SYM_FUNC_END(arm920_dma_map_area)
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -301,24 +311,20 @@ ENDPROC(arm920_dma_map_area) ...@@ -301,24 +311,20 @@ ENDPROC(arm920_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm920_dma_unmap_area) SYM_TYPED_FUNC_START(arm920_dma_unmap_area)
ret lr ret lr
ENDPROC(arm920_dma_unmap_area) SYM_FUNC_END(arm920_dma_unmap_area)
.globl arm920_flush_kern_cache_louis #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
.equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm920
#endif
ENTRY(cpu_arm920_dcache_clean_area) SYM_TYPED_FUNC_START(cpu_arm920_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE subs r1, r1, #CACHE_DLINESIZE
bhi 1b bhi 1b
ret lr ret lr
SYM_FUNC_END(cpu_arm920_dcache_clean_area)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -330,7 +336,7 @@ ENTRY(cpu_arm920_dcache_clean_area) ...@@ -330,7 +336,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_arm920_switch_mm) SYM_TYPED_FUNC_START(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
mov ip, #0 mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -354,6 +360,7 @@ ENTRY(cpu_arm920_switch_mm) ...@@ -354,6 +360,7 @@ ENTRY(cpu_arm920_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm920_switch_mm)
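The switch_mm sequence reflects a VIVT core: cached user data is tagged by virtual address, so the old mm's cache and TLB contents must be purged around the TTB update. The ordering, step by step in C with the cp15 writes stubbed:

static void clean_inval_dcache(void) { }        /* segment/index loop or c7, c7 */
static void inval_icache(void)       { }        /* c7, c5, 0  */
static void drain_wb(void)           { }        /* c7, c10, 4 */
static void set_ttb(unsigned long pgd_phys) { (void)pgd_phys; } /* c2, c0, 0 */
static void inval_tlbs(void)         { }        /* c8, c7, 0  */

void switch_mm_model(unsigned long pgd_phys)
{
        clean_inval_dcache();   /* stale user data must not survive */
        inval_icache();
        drain_wb();
        set_ttb(pgd_phys);      /* point walks at the new page tables */
        inval_tlbs();           /* drop translations of the old mm */
}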
/* /*
* cpu_arm920_set_pte(ptep, pte, ext) * cpu_arm920_set_pte(ptep, pte, ext)
...@@ -361,7 +368,7 @@ ENTRY(cpu_arm920_switch_mm) ...@@ -361,7 +368,7 @@ ENTRY(cpu_arm920_switch_mm)
* Set a PTE and flush it out * Set a PTE and flush it out
*/ */
.align 5 .align 5
ENTRY(cpu_arm920_set_pte_ext) SYM_TYPED_FUNC_START(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
armv3_set_pte_ext armv3_set_pte_ext
mov r0, r0 mov r0, r0
...@@ -369,21 +376,22 @@ ENTRY(cpu_arm920_set_pte_ext) ...@@ -369,21 +376,22 @@ ENTRY(cpu_arm920_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size .globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3 .equ cpu_arm920_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND #ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend) SYM_TYPED_FUNC_START(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr} stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6} stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc} ldmfd sp!, {r4 - r6, pc}
ENDPROC(cpu_arm920_do_suspend) SYM_FUNC_END(cpu_arm920_do_suspend)
ENTRY(cpu_arm920_do_resume) SYM_TYPED_FUNC_START(cpu_arm920_do_resume)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
...@@ -393,7 +401,7 @@ ENTRY(cpu_arm920_do_resume) ...@@ -393,7 +401,7 @@ ENTRY(cpu_arm920_do_resume)
mcr p15, 0, r1, c2, c0, 0 @ TTB address mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register mov r0, r6 @ control register
b cpu_resume_mmu b cpu_resume_mmu
ENDPROC(cpu_arm920_do_resume) SYM_FUNC_END(cpu_arm920_do_resume)
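cpu_arm920_suspend_size pins the save blob at three words, matching the stmia/stmia pairs above; do_resume additionally rebuilds the TTB from the pgd passed in r1 and hands the saved control register to cpu_resume_mmu. The save side as a C model, with the cp15 reads stubbed:

struct arm920_suspend_regs {
        unsigned long pid;      /* c13, c0, 0 */
        unsigned long domain;   /* c3, c0, 0  */
        unsigned long control;  /* c1, c0, 0  */
};

static unsigned long read_cp15_model(int reg)
{
        (void)reg;              /* stands in for the mrc instructions */
        return 0;
}

void do_suspend_model(struct arm920_suspend_regs *save)
{
        save->pid     = read_cp15_model(13);
        save->domain  = read_cp15_model(3);
        save->control = read_cp15_model(1);  /* "stmia r0, {r4 - r6}" */
}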
#endif #endif
.type __arm920_setup, #function .type __arm920_setup, #function
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/hwcap.h> #include <asm/hwcap.h>
...@@ -50,18 +51,20 @@ ...@@ -50,18 +51,20 @@
/* /*
* cpu_arm922_proc_init() * cpu_arm922_proc_init()
*/ */
ENTRY(cpu_arm922_proc_init) SYM_TYPED_FUNC_START(cpu_arm922_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm922_proc_init)
/* /*
* cpu_arm922_proc_fin() * cpu_arm922_proc_fin()
*/ */
ENTRY(cpu_arm922_proc_fin) SYM_TYPED_FUNC_START(cpu_arm922_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm922_proc_fin)
/* /*
* cpu_arm922_reset(loc) * cpu_arm922_reset(loc)
...@@ -74,7 +77,7 @@ ENTRY(cpu_arm922_proc_fin) ...@@ -74,7 +77,7 @@ ENTRY(cpu_arm922_proc_fin)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm922_reset) SYM_TYPED_FUNC_START(cpu_arm922_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
...@@ -86,17 +89,17 @@ ENTRY(cpu_arm922_reset) ...@@ -86,17 +89,17 @@ ENTRY(cpu_arm922_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm922_reset) SYM_FUNC_END(cpu_arm922_reset)
.popsection .popsection
/* /*
* cpu_arm922_do_idle() * cpu_arm922_do_idle()
*/ */
.align 5 .align 5
ENTRY(cpu_arm922_do_idle) SYM_TYPED_FUNC_START(cpu_arm922_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr ret lr
SYM_FUNC_END(cpu_arm922_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -105,11 +108,11 @@ ENTRY(cpu_arm922_do_idle) ...@@ -105,11 +108,11 @@ ENTRY(cpu_arm922_do_idle)
* *
* Unconditionally clean and invalidate the entire icache. * Unconditionally clean and invalidate the entire icache.
*/ */
ENTRY(arm922_flush_icache_all) SYM_TYPED_FUNC_START(arm922_flush_icache_all)
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr ret lr
ENDPROC(arm922_flush_icache_all) SYM_FUNC_END(arm922_flush_icache_all)
/* /*
* flush_user_cache_all() * flush_user_cache_all()
...@@ -117,15 +120,14 @@ ENDPROC(arm922_flush_icache_all) ...@@ -117,15 +120,14 @@ ENDPROC(arm922_flush_icache_all)
* Clean and invalidate all cache entries in a particular * Clean and invalidate all cache entries in a particular
* address space. * address space.
*/ */
ENTRY(arm922_flush_user_cache_all) SYM_FUNC_ALIAS(arm922_flush_user_cache_all, arm922_flush_kern_cache_all)
/* FALLTHROUGH */
/* /*
* flush_kern_cache_all() * flush_kern_cache_all()
* *
* Clean and invalidate the entire cache. * Clean and invalidate the entire cache.
*/ */
ENTRY(arm922_flush_kern_cache_all) SYM_TYPED_FUNC_START(arm922_flush_kern_cache_all)
mov r2, #VM_EXEC mov r2, #VM_EXEC
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
...@@ -140,6 +142,7 @@ __flush_whole_cache: ...@@ -140,6 +142,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm922_flush_kern_cache_all)
/* /*
* flush_user_cache_range(start, end, flags) * flush_user_cache_range(start, end, flags)
...@@ -151,7 +154,7 @@ __flush_whole_cache: ...@@ -151,7 +154,7 @@ __flush_whole_cache:
* - end - end address (exclusive) * - end - end address (exclusive)
* - flags - vm_flags describing address space * - flags - vm_flags describing address space
*/ */
ENTRY(arm922_flush_user_cache_range) SYM_TYPED_FUNC_START(arm922_flush_user_cache_range)
mov ip, #0 mov ip, #0
sub r3, r1, r0 @ calculate total size sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT cmp r3, #CACHE_DLIMIT
...@@ -166,6 +169,7 @@ ENTRY(arm922_flush_user_cache_range) ...@@ -166,6 +169,7 @@ ENTRY(arm922_flush_user_cache_range)
tst r2, #VM_EXEC tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm922_flush_user_cache_range)
/* /*
* coherent_kern_range(start, end) * coherent_kern_range(start, end)
...@@ -177,8 +181,11 @@ ENTRY(arm922_flush_user_cache_range) ...@@ -177,8 +181,11 @@ ENTRY(arm922_flush_user_cache_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm922_coherent_kern_range) SYM_TYPED_FUNC_START(arm922_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm922_coherent_user_range
#endif
SYM_FUNC_END(arm922_coherent_kern_range)
/* /*
* coherent_user_range(start, end) * coherent_user_range(start, end)
...@@ -190,7 +197,7 @@ ENTRY(arm922_coherent_kern_range) ...@@ -190,7 +197,7 @@ ENTRY(arm922_coherent_kern_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm922_coherent_user_range) SYM_TYPED_FUNC_START(arm922_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
...@@ -200,6 +207,7 @@ ENTRY(arm922_coherent_user_range) ...@@ -200,6 +207,7 @@ ENTRY(arm922_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0 mov r0, #0
ret lr ret lr
SYM_FUNC_END(arm922_coherent_user_range)
/* /*
* flush_kern_dcache_area(void *addr, size_t size) * flush_kern_dcache_area(void *addr, size_t size)
...@@ -210,7 +218,7 @@ ENTRY(arm922_coherent_user_range) ...@@ -210,7 +218,7 @@ ENTRY(arm922_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(arm922_flush_kern_dcache_area) SYM_TYPED_FUNC_START(arm922_flush_kern_dcache_area)
add r1, r0, r1 add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
...@@ -220,6 +228,7 @@ ENTRY(arm922_flush_kern_dcache_area) ...@@ -220,6 +228,7 @@ ENTRY(arm922_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm922_flush_kern_dcache_area)
/* /*
* dma_inv_range(start, end) * dma_inv_range(start, end)
...@@ -274,7 +283,7 @@ arm922_dma_clean_range: ...@@ -274,7 +283,7 @@ arm922_dma_clean_range:
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm922_dma_flush_range) SYM_TYPED_FUNC_START(arm922_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
...@@ -282,6 +291,7 @@ ENTRY(arm922_dma_flush_range) ...@@ -282,6 +291,7 @@ ENTRY(arm922_dma_flush_range)
blo 1b blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm922_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -289,13 +299,13 @@ ENTRY(arm922_dma_flush_range) ...@@ -289,13 +299,13 @@ ENTRY(arm922_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm922_dma_map_area) SYM_TYPED_FUNC_START(arm922_dma_map_area)
add r1, r1, r0 add r1, r1, r0
cmp r2, #DMA_TO_DEVICE cmp r2, #DMA_TO_DEVICE
beq arm922_dma_clean_range beq arm922_dma_clean_range
bcs arm922_dma_inv_range bcs arm922_dma_inv_range
b arm922_dma_flush_range b arm922_dma_flush_range
ENDPROC(arm922_dma_map_area) SYM_FUNC_END(arm922_dma_map_area)
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -303,19 +313,13 @@ ENDPROC(arm922_dma_map_area) ...@@ -303,19 +313,13 @@ ENDPROC(arm922_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm922_dma_unmap_area) SYM_TYPED_FUNC_START(arm922_dma_unmap_area)
ret lr ret lr
ENDPROC(arm922_dma_unmap_area) SYM_FUNC_END(arm922_dma_unmap_area)
.globl arm922_flush_kern_cache_louis
.equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm922
#endif
#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
ENTRY(cpu_arm922_dcache_clean_area) SYM_TYPED_FUNC_START(cpu_arm922_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
...@@ -323,6 +327,7 @@ ENTRY(cpu_arm922_dcache_clean_area) ...@@ -323,6 +327,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
bhi 1b bhi 1b
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm922_dcache_clean_area)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -334,7 +339,7 @@ ENTRY(cpu_arm922_dcache_clean_area) ...@@ -334,7 +339,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_arm922_switch_mm) SYM_TYPED_FUNC_START(cpu_arm922_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
mov ip, #0 mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -358,6 +363,7 @@ ENTRY(cpu_arm922_switch_mm) ...@@ -358,6 +363,7 @@ ENTRY(cpu_arm922_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm922_switch_mm)
/* /*
* cpu_arm922_set_pte_ext(ptep, pte, ext) * cpu_arm922_set_pte_ext(ptep, pte, ext)
...@@ -365,7 +371,7 @@ ENTRY(cpu_arm922_switch_mm) ...@@ -365,7 +371,7 @@ ENTRY(cpu_arm922_switch_mm)
* Set a PTE and flush it out * Set a PTE and flush it out
*/ */
.align 5 .align 5
ENTRY(cpu_arm922_set_pte_ext) SYM_TYPED_FUNC_START(cpu_arm922_set_pte_ext)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
armv3_set_pte_ext armv3_set_pte_ext
mov r0, r0 mov r0, r0
...@@ -373,6 +379,7 @@ ENTRY(cpu_arm922_set_pte_ext) ...@@ -373,6 +379,7 @@ ENTRY(cpu_arm922_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
ret lr ret lr
SYM_FUNC_END(cpu_arm922_set_pte_ext)
.type __arm922_setup, #function .type __arm922_setup, #function
__arm922_setup: __arm922_setup:
......
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/hwcap.h> #include <asm/hwcap.h>
...@@ -71,18 +72,20 @@ ...@@ -71,18 +72,20 @@
/* /*
* cpu_arm925_proc_init() * cpu_arm925_proc_init()
*/ */
ENTRY(cpu_arm925_proc_init) SYM_TYPED_FUNC_START(cpu_arm925_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm925_proc_init)
/* /*
* cpu_arm925_proc_fin() * cpu_arm925_proc_fin()
*/ */
ENTRY(cpu_arm925_proc_fin) SYM_TYPED_FUNC_START(cpu_arm925_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm925_proc_fin)
/* /*
* cpu_arm925_reset(loc) * cpu_arm925_reset(loc)
...@@ -95,14 +98,14 @@ ENTRY(cpu_arm925_proc_fin) ...@@ -95,14 +98,14 @@ ENTRY(cpu_arm925_proc_fin)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm925_reset) SYM_TYPED_FUNC_START(cpu_arm925_reset)
/* Send software reset to MPU and DSP */ /* Send software reset to MPU and DSP */
mov ip, #0xff000000 mov ip, #0xff000000
orr ip, ip, #0x00fe0000 orr ip, ip, #0x00fe0000
orr ip, ip, #0x0000ce00 orr ip, ip, #0x0000ce00
mov r4, #1 mov r4, #1
strh r4, [ip, #0x10] strh r4, [ip, #0x10]
ENDPROC(cpu_arm925_reset) SYM_FUNC_END(cpu_arm925_reset)
.popsection .popsection
mov ip, #0 mov ip, #0
...@@ -123,7 +126,7 @@ ENDPROC(cpu_arm925_reset) ...@@ -123,7 +126,7 @@ ENDPROC(cpu_arm925_reset)
* Called with IRQs disabled * Called with IRQs disabled
*/ */
.align 10 .align 10
ENTRY(cpu_arm925_do_idle) SYM_TYPED_FUNC_START(cpu_arm925_do_idle)
mov r0, #0 mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
...@@ -132,17 +135,18 @@ ENTRY(cpu_arm925_do_idle) ...@@ -132,17 +135,18 @@ ENTRY(cpu_arm925_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
ret lr ret lr
SYM_FUNC_END(cpu_arm925_do_idle)
/* /*
* flush_icache_all() * flush_icache_all()
* *
* Unconditionally clean and invalidate the entire icache. * Unconditionally clean and invalidate the entire icache.
*/ */
ENTRY(arm925_flush_icache_all) SYM_TYPED_FUNC_START(arm925_flush_icache_all)
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr ret lr
ENDPROC(arm925_flush_icache_all) SYM_FUNC_END(arm925_flush_icache_all)
/* /*
* flush_user_cache_all() * flush_user_cache_all()
...@@ -150,15 +154,14 @@ ENDPROC(arm925_flush_icache_all) ...@@ -150,15 +154,14 @@ ENDPROC(arm925_flush_icache_all)
* Clean and invalidate all cache entries in a particular * Clean and invalidate all cache entries in a particular
* address space. * address space.
*/ */
ENTRY(arm925_flush_user_cache_all) SYM_FUNC_ALIAS(arm925_flush_user_cache_all, arm925_flush_kern_cache_all)
/* FALLTHROUGH */
/* /*
* flush_kern_cache_all() * flush_kern_cache_all()
* *
* Clean and invalidate the entire cache. * Clean and invalidate the entire cache.
*/ */
ENTRY(arm925_flush_kern_cache_all) SYM_TYPED_FUNC_START(arm925_flush_kern_cache_all)
mov r2, #VM_EXEC mov r2, #VM_EXEC
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
...@@ -175,6 +178,7 @@ __flush_whole_cache: ...@@ -175,6 +178,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm925_flush_kern_cache_all)
/* /*
* flush_user_cache_range(start, end, flags) * flush_user_cache_range(start, end, flags)
...@@ -186,7 +190,7 @@ __flush_whole_cache: ...@@ -186,7 +190,7 @@ __flush_whole_cache:
* - end - end address (exclusive) * - end - end address (exclusive)
* - flags - vm_flags describing address space * - flags - vm_flags describing address space
*/ */
ENTRY(arm925_flush_user_cache_range) SYM_TYPED_FUNC_START(arm925_flush_user_cache_range)
mov ip, #0 mov ip, #0
sub r3, r1, r0 @ calculate total size sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT cmp r3, #CACHE_DLIMIT
...@@ -212,6 +216,7 @@ ENTRY(arm925_flush_user_cache_range) ...@@ -212,6 +216,7 @@ ENTRY(arm925_flush_user_cache_range)
tst r2, #VM_EXEC tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm925_flush_user_cache_range)
/* /*
* coherent_kern_range(start, end) * coherent_kern_range(start, end)
...@@ -223,8 +228,11 @@ ENTRY(arm925_flush_user_cache_range) ...@@ -223,8 +228,11 @@ ENTRY(arm925_flush_user_cache_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm925_coherent_kern_range) SYM_TYPED_FUNC_START(arm925_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm925_coherent_user_range
#endif
SYM_FUNC_END(arm925_coherent_kern_range)
/* /*
* coherent_user_range(start, end) * coherent_user_range(start, end)
...@@ -236,7 +244,7 @@ ENTRY(arm925_coherent_kern_range) ...@@ -236,7 +244,7 @@ ENTRY(arm925_coherent_kern_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm925_coherent_user_range) SYM_TYPED_FUNC_START(arm925_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
...@@ -246,6 +254,7 @@ ENTRY(arm925_coherent_user_range) ...@@ -246,6 +254,7 @@ ENTRY(arm925_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0 mov r0, #0
ret lr ret lr
SYM_FUNC_END(arm925_coherent_user_range)
/* /*
* flush_kern_dcache_area(void *addr, size_t size) * flush_kern_dcache_area(void *addr, size_t size)
...@@ -256,7 +265,7 @@ ENTRY(arm925_coherent_user_range) ...@@ -256,7 +265,7 @@ ENTRY(arm925_coherent_user_range)
* - addr - kernel address * - addr - kernel address
* - size - region size * - size - region size
*/ */
ENTRY(arm925_flush_kern_dcache_area) SYM_TYPED_FUNC_START(arm925_flush_kern_dcache_area)
add r1, r0, r1 add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
...@@ -266,6 +275,7 @@ ENTRY(arm925_flush_kern_dcache_area) ...@@ -266,6 +275,7 @@ ENTRY(arm925_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm925_flush_kern_dcache_area)
/* /*
* dma_inv_range(start, end) * dma_inv_range(start, end)
...@@ -324,7 +334,7 @@ arm925_dma_clean_range: ...@@ -324,7 +334,7 @@ arm925_dma_clean_range:
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm925_dma_flush_range) SYM_TYPED_FUNC_START(arm925_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1
1: 1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -337,6 +347,7 @@ ENTRY(arm925_dma_flush_range) ...@@ -337,6 +347,7 @@ ENTRY(arm925_dma_flush_range)
blo 1b blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm925_dma_flush_range)
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
...@@ -344,13 +355,13 @@ ENTRY(arm925_dma_flush_range) ...@@ -344,13 +355,13 @@ ENTRY(arm925_dma_flush_range)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm925_dma_map_area) SYM_TYPED_FUNC_START(arm925_dma_map_area)
add r1, r1, r0 add r1, r1, r0
cmp r2, #DMA_TO_DEVICE cmp r2, #DMA_TO_DEVICE
beq arm925_dma_clean_range beq arm925_dma_clean_range
bcs arm925_dma_inv_range bcs arm925_dma_inv_range
b arm925_dma_flush_range b arm925_dma_flush_range
ENDPROC(arm925_dma_map_area) SYM_FUNC_END(arm925_dma_map_area)
/* /*
* dma_unmap_area(start, size, dir) * dma_unmap_area(start, size, dir)
...@@ -358,17 +369,11 @@ ENDPROC(arm925_dma_map_area) ...@@ -358,17 +369,11 @@ ENDPROC(arm925_dma_map_area)
* - size - size of region * - size - size of region
* - dir - DMA direction * - dir - DMA direction
*/ */
ENTRY(arm925_dma_unmap_area) SYM_TYPED_FUNC_START(arm925_dma_unmap_area)
ret lr ret lr
ENDPROC(arm925_dma_unmap_area) SYM_FUNC_END(arm925_dma_unmap_area)
.globl arm925_flush_kern_cache_louis
.equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm925
ENTRY(cpu_arm925_dcache_clean_area) SYM_TYPED_FUNC_START(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
...@@ -377,6 +382,7 @@ ENTRY(cpu_arm925_dcache_clean_area) ...@@ -377,6 +382,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
#endif #endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(cpu_arm925_dcache_clean_area)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -388,7 +394,7 @@ ENTRY(cpu_arm925_dcache_clean_area) ...@@ -388,7 +394,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_arm925_switch_mm) SYM_TYPED_FUNC_START(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
mov ip, #0 mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -406,6 +412,7 @@ ENTRY(cpu_arm925_switch_mm) ...@@ -406,6 +412,7 @@ ENTRY(cpu_arm925_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif #endif
ret lr ret lr
SYM_FUNC_END(cpu_arm925_switch_mm)
/* /*
* cpu_arm925_set_pte_ext(ptep, pte, ext) * cpu_arm925_set_pte_ext(ptep, pte, ext)
...@@ -413,7 +420,7 @@ ENTRY(cpu_arm925_switch_mm) ...@@ -413,7 +420,7 @@ ENTRY(cpu_arm925_switch_mm)
* Set a PTE and flush it out * Set a PTE and flush it out
*/ */
.align 5 .align 5
ENTRY(cpu_arm925_set_pte_ext) SYM_TYPED_FUNC_START(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
armv3_set_pte_ext armv3_set_pte_ext
mov r0, r0 mov r0, r0
...@@ -423,6 +430,7 @@ ENTRY(cpu_arm925_set_pte_ext) ...@@ -423,6 +430,7 @@ ENTRY(cpu_arm925_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
ret lr ret lr
SYM_FUNC_END(cpu_arm925_set_pte_ext)
.type __arm925_setup, #function .type __arm925_setup, #function
__arm925_setup: __arm925_setup:
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/hwcap.h> #include <asm/hwcap.h>
...@@ -40,18 +41,20 @@ ...@@ -40,18 +41,20 @@
/* /*
* cpu_arm926_proc_init() * cpu_arm926_proc_init()
*/ */
ENTRY(cpu_arm926_proc_init) SYM_TYPED_FUNC_START(cpu_arm926_proc_init)
ret lr ret lr
SYM_FUNC_END(cpu_arm926_proc_init)
/* /*
* cpu_arm926_proc_fin() * cpu_arm926_proc_fin()
*/ */
ENTRY(cpu_arm926_proc_fin) SYM_TYPED_FUNC_START(cpu_arm926_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr ret lr
SYM_FUNC_END(cpu_arm926_proc_fin)
/* /*
* cpu_arm926_reset(loc) * cpu_arm926_reset(loc)
...@@ -64,7 +67,7 @@ ENTRY(cpu_arm926_proc_fin) ...@@ -64,7 +67,7 @@ ENTRY(cpu_arm926_proc_fin)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax" .pushsection .idmap.text, "ax"
ENTRY(cpu_arm926_reset) SYM_TYPED_FUNC_START(cpu_arm926_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
...@@ -76,7 +79,7 @@ ENTRY(cpu_arm926_reset) ...@@ -76,7 +79,7 @@ ENTRY(cpu_arm926_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0 ret r0
ENDPROC(cpu_arm926_reset) SYM_FUNC_END(cpu_arm926_reset)
.popsection .popsection
/* /*
...@@ -85,7 +88,7 @@ ENDPROC(cpu_arm926_reset) ...@@ -85,7 +88,7 @@ ENDPROC(cpu_arm926_reset)
* Called with IRQs disabled * Called with IRQs disabled
*/ */
.align 10 .align 10
ENTRY(cpu_arm926_do_idle) SYM_TYPED_FUNC_START(cpu_arm926_do_idle)
mov r0, #0 mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
...@@ -98,17 +101,18 @@ ENTRY(cpu_arm926_do_idle) ...@@ -98,17 +101,18 @@ ENTRY(cpu_arm926_do_idle)
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
msr cpsr_c, r3 @ Restore FIQ state msr cpsr_c, r3 @ Restore FIQ state
ret lr ret lr
SYM_FUNC_END(cpu_arm926_do_idle)
/* /*
* flush_icache_all() * flush_icache_all()
* *
* Unconditionally clean and invalidate the entire icache. * Unconditionally clean and invalidate the entire icache.
*/ */
ENTRY(arm926_flush_icache_all) SYM_TYPED_FUNC_START(arm926_flush_icache_all)
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr ret lr
ENDPROC(arm926_flush_icache_all) SYM_FUNC_END(arm926_flush_icache_all)
/* /*
* flush_user_cache_all() * flush_user_cache_all()
...@@ -116,15 +120,14 @@ ENDPROC(arm926_flush_icache_all) ...@@ -116,15 +120,14 @@ ENDPROC(arm926_flush_icache_all)
* Clean and invalidate all cache entries in a particular * Clean and invalidate all cache entries in a particular
* address space. * address space.
*/ */
ENTRY(arm926_flush_user_cache_all) SYM_FUNC_ALIAS(arm926_flush_user_cache_all, arm926_flush_kern_cache_all)
/* FALLTHROUGH */
/* /*
* flush_kern_cache_all() * flush_kern_cache_all()
* *
* Clean and invalidate the entire cache. * Clean and invalidate the entire cache.
*/ */
ENTRY(arm926_flush_kern_cache_all) SYM_TYPED_FUNC_START(arm926_flush_kern_cache_all)
mov r2, #VM_EXEC mov r2, #VM_EXEC
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
...@@ -138,6 +141,7 @@ __flush_whole_cache: ...@@ -138,6 +141,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm926_flush_kern_cache_all)
/* /*
* flush_user_cache_range(start, end, flags) * flush_user_cache_range(start, end, flags)
...@@ -149,7 +153,7 @@ __flush_whole_cache: ...@@ -149,7 +153,7 @@ __flush_whole_cache:
* - end - end address (exclusive) * - end - end address (exclusive)
* - flags - vm_flags describing address space * - flags - vm_flags describing address space
*/ */
ENTRY(arm926_flush_user_cache_range) SYM_TYPED_FUNC_START(arm926_flush_user_cache_range)
mov ip, #0 mov ip, #0
sub r3, r1, r0 @ calculate total size sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT cmp r3, #CACHE_DLIMIT
...@@ -175,6 +179,7 @@ ENTRY(arm926_flush_user_cache_range) ...@@ -175,6 +179,7 @@ ENTRY(arm926_flush_user_cache_range)
tst r2, #VM_EXEC tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr ret lr
SYM_FUNC_END(arm926_flush_user_cache_range)
/* /*
* coherent_kern_range(start, end) * coherent_kern_range(start, end)
...@@ -186,8 +191,11 @@ ENTRY(arm926_flush_user_cache_range) ...@@ -186,8 +191,11 @@ ENTRY(arm926_flush_user_cache_range)
* - start - virtual start address * - start - virtual start address
* - end - virtual end address * - end - virtual end address
*/ */
ENTRY(arm926_coherent_kern_range) SYM_TYPED_FUNC_START(arm926_coherent_kern_range)
/* FALLTHROUGH */ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
b arm926_coherent_user_range
#endif
SYM_FUNC_END(arm926_coherent_kern_range)
/*
 * coherent_user_range(start, end)
...@@ -199,7 +207,7 @@ ENTRY(arm926_coherent_kern_range)
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(arm926_coherent_user_range)
+SYM_TYPED_FUNC_START(arm926_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
...@@ -209,6 +217,7 @@ ENTRY(arm926_coherent_user_range)
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
+SYM_FUNC_END(arm926_coherent_user_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
...@@ -219,7 +228,7 @@ ENTRY(arm926_coherent_user_range)
 *	- addr	- kernel address
 *	- size	- region size
 */
-ENTRY(arm926_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm926_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
...@@ -229,6 +238,7 @@ ENTRY(arm926_flush_kern_dcache_area)
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
+SYM_FUNC_END(arm926_flush_kern_dcache_area)

/*
 * dma_inv_range(start, end)
...@@ -287,7 +297,7 @@ arm926_dma_clean_range:
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(arm926_dma_flush_range)
+SYM_TYPED_FUNC_START(arm926_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -300,6 +310,7 @@ ENTRY(arm926_dma_flush_range)
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
+SYM_FUNC_END(arm926_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
...@@ -307,13 +318,13 @@ ENTRY(arm926_dma_flush_range)
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(arm926_dma_map_area)
+SYM_TYPED_FUNC_START(arm926_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm926_dma_clean_range
	bcs	arm926_dma_inv_range
	b	arm926_dma_flush_range
-ENDPROC(arm926_dma_map_area)
+SYM_FUNC_END(arm926_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
...@@ -321,17 +332,11 @@ ENDPROC(arm926_dma_map_area)
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(arm926_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm926_dma_unmap_area)
	ret	lr
-ENDPROC(arm926_dma_unmap_area)
+SYM_FUNC_END(arm926_dma_unmap_area)

-	.globl	arm926_flush_kern_cache_louis
-	.equ	arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions arm926
-
-ENTRY(cpu_arm926_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm926_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
...@@ -340,6 +345,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
+SYM_FUNC_END(cpu_arm926_dcache_clean_area)

/* =============================== PageTable ============================== */
...@@ -351,7 +357,8 @@ ENTRY(cpu_arm926_dcache_clean_area)
 * pgd: new page tables
 */
	.align	5
-ENTRY(cpu_arm926_switch_mm)
+
+SYM_TYPED_FUNC_START(cpu_arm926_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -367,6 +374,7 @@ ENTRY(cpu_arm926_switch_mm)
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr
+SYM_FUNC_END(cpu_arm926_switch_mm)

/*
 * cpu_arm926_set_pte_ext(ptep, pte, ext)
...@@ -374,7 +382,7 @@ ENTRY(cpu_arm926_switch_mm)
 * Set a PTE and flush it out
 */
	.align	5
-ENTRY(cpu_arm926_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm926_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
...@@ -384,21 +392,22 @@ ENTRY(cpu_arm926_set_pte_ext)
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr
+SYM_FUNC_END(cpu_arm926_set_pte_ext)

/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
	.globl	cpu_arm926_suspend_size
	.equ	cpu_arm926_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_arm926_do_suspend)
+SYM_TYPED_FUNC_START(cpu_arm926_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
-ENDPROC(cpu_arm926_do_suspend)
+SYM_FUNC_END(cpu_arm926_do_suspend)

-ENTRY(cpu_arm926_do_resume)
+SYM_TYPED_FUNC_START(cpu_arm926_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
...@@ -408,7 +417,7 @@ ENTRY(cpu_arm926_do_resume)
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
-ENDPROC(cpu_arm926_do_resume)
+SYM_FUNC_END(cpu_arm926_do_resume)
#endif

	.type	__arm926_setup, #function
...
...@@ -6,6 +6,7 @@
 */
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
...@@ -25,19 +26,24 @@
 *
 * These are not required.
 */
-ENTRY(cpu_arm940_proc_init)
-ENTRY(cpu_arm940_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm940_proc_init)
	ret	lr
+SYM_FUNC_END(cpu_arm940_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm940_switch_mm)
+	ret	lr
+SYM_FUNC_END(cpu_arm940_switch_mm)

/*
 * cpu_arm940_proc_fin()
 */
-ENTRY(cpu_arm940_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
+SYM_FUNC_END(cpu_arm940_proc_fin)

/*
 * cpu_arm940_reset(loc)
...@@ -45,7 +51,7 @@ ENTRY(cpu_arm940_proc_fin)
 * Notes : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
-ENTRY(cpu_arm940_reset)
+SYM_TYPED_FUNC_START(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
...@@ -55,42 +61,43 @@ ENTRY(cpu_arm940_reset)
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
-ENDPROC(cpu_arm940_reset)
+SYM_FUNC_END(cpu_arm940_reset)
	.popsection

/*
 * cpu_arm940_do_idle()
 */
	.align	5
-ENTRY(cpu_arm940_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
+SYM_FUNC_END(cpu_arm940_do_idle)

/*
 * flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
-ENTRY(arm940_flush_icache_all)
+SYM_TYPED_FUNC_START(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
-ENDPROC(arm940_flush_icache_all)
+SYM_FUNC_END(arm940_flush_icache_all)

/*
 * flush_user_cache_all()
 */
-ENTRY(arm940_flush_user_cache_all)
-	/* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm940_flush_user_cache_all, arm940_flush_kern_cache_all)

/*
 * flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
-ENTRY(arm940_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
-	/* FALLTHROUGH */
+	b	arm940_flush_user_cache_range
+SYM_FUNC_END(arm940_flush_kern_cache_all)
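
Note on SYM_FUNC_ALIAS: arm940_flush_user_cache_all and arm940_flush_kern_cache_all share the prototype void (void), so one typed body and its single kCFI hash can serve both names when called through either cache_fns slot, while the old fallthrough into flush_user_cache_range becomes an explicit branch. A minimal userspace sketch of the same idea, assuming the GCC/Clang alias attribute; the kernel symbol names are reused purely for illustration:

#include <stdio.h>

/* Sketch: one definition, two names of identical type, analogous to
 * SYM_FUNC_ALIAS(arm940_flush_user_cache_all, arm940_flush_kern_cache_all). */
void arm940_flush_kern_cache_all(void)
{
	puts("clean+invalidate entire cache");
}

void arm940_flush_user_cache_all(void)
	__attribute__((alias("arm940_flush_kern_cache_all")));

int main(void)
{
	arm940_flush_user_cache_all();	/* runs the aliased body */
	return 0;
}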
/*
 * flush_user_cache_range(start, end, flags)
...@@ -102,7 +109,7 @@ ENTRY(arm940_flush_kern_cache_all)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
-ENTRY(arm940_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
...@@ -119,6 +126,7 @@ ENTRY(arm940_flush_user_cache_range)
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
+SYM_FUNC_END(arm940_flush_user_cache_range)

/*
 * coherent_kern_range(start, end)
...@@ -130,8 +138,9 @@ ENTRY(arm940_flush_user_cache_range)
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(arm940_coherent_kern_range)
-	/* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm940_coherent_kern_range)
+	b	arm940_flush_kern_dcache_area
+SYM_FUNC_END(arm940_coherent_kern_range)

/*
 * coherent_user_range(start, end)
...@@ -143,8 +152,11 @@ ENTRY(arm940_coherent_kern_range)
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(arm940_coherent_user_range)
-	/* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm940_coherent_user_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+	b	arm940_flush_kern_dcache_area
+#endif
+SYM_FUNC_END(arm940_coherent_user_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
...@@ -155,7 +167,7 @@ ENTRY(arm940_coherent_user_range)
 *	- addr	- kernel address
 *	- size	- region size
 */
-ENTRY(arm940_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
...@@ -167,6 +179,7 @@ ENTRY(arm940_flush_kern_dcache_area)
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
+SYM_FUNC_END(arm940_flush_kern_dcache_area)

/*
 * dma_inv_range(start, end)
...@@ -199,7 +212,7 @@ arm940_dma_inv_range:
 *	- end	- virtual end address
 */
arm940_dma_clean_range:
-ENTRY(cpu_arm940_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
...@@ -212,6 +225,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
+SYM_FUNC_END(cpu_arm940_dcache_clean_area)

/*
 * dma_flush_range(start, end)
...@@ -222,7 +236,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
-ENTRY(arm940_dma_flush_range)
+SYM_TYPED_FUNC_START(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
...@@ -238,6 +252,7 @@ ENTRY(arm940_dma_flush_range)
	bcs	1b				@ segments 7 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
+SYM_FUNC_END(arm940_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
...@@ -245,13 +260,13 @@ ENTRY(arm940_dma_flush_range)
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(arm940_dma_map_area)
+SYM_TYPED_FUNC_START(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
-ENDPROC(arm940_dma_map_area)
+SYM_FUNC_END(arm940_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
...@@ -259,15 +274,9 @@ ENDPROC(arm940_dma_map_area)
 *	- size	- size of region
 *	- dir	- DMA direction
 */
-ENTRY(arm940_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm940_dma_unmap_area)
	ret	lr
-ENDPROC(arm940_dma_unmap_area)
+SYM_FUNC_END(arm940_dma_unmap_area)

-	.globl	arm940_flush_kern_cache_louis
-	.equ	arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions arm940

	.type	__arm940_setup, #function
__arm940_setup:
...
...@@ -6,6 +6,7 @@
 */
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
...@@ -24,17 +25,28 @@
 *
 * These are not required.
 */
-ENTRY(cpu_arm9tdmi_proc_init)
-ENTRY(cpu_arm9tdmi_do_idle)
-ENTRY(cpu_arm9tdmi_dcache_clean_area)
-ENTRY(cpu_arm9tdmi_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_init)
	ret	lr
+SYM_FUNC_END(cpu_arm9tdmi_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_do_idle)
+	ret	lr
+SYM_FUNC_END(cpu_arm9tdmi_do_idle)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_dcache_clean_area)
+	ret	lr
+SYM_FUNC_END(cpu_arm9tdmi_dcache_clean_area)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_switch_mm)
+	ret	lr
+SYM_FUNC_END(cpu_arm9tdmi_switch_mm)
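
The stacked ENTRY labels above could not survive CFI: each indirect-call target needs a type hash matching its own prototype, and these four processor callbacks do not share one, so each gets its own typed stub even though all four bodies are just "ret lr". A compile-only sketch of the type mismatch; prototypes are paraphrased, with phys_addr_t simplified to unsigned long:

/* Four distinct prototypes mean four distinct kCFI type hashes, so one
 * shared label can no longer serve every slot in the processor vtable. */
struct mm_struct;			/* opaque for this sketch */

void cpu_arm9tdmi_proc_init(void);
void cpu_arm9tdmi_do_idle(void);
void cpu_arm9tdmi_dcache_clean_area(void *addr, int size);
void cpu_arm9tdmi_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);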
/*
 * cpu_arm9tdmi_proc_fin()
 */
-ENTRY(cpu_arm9tdmi_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_fin)
	ret	lr
+SYM_FUNC_END(cpu_arm9tdmi_proc_fin)

/*
 * Function: cpu_arm9tdmi_reset(loc)
...@@ -42,9 +54,9 @@ ENTRY(cpu_arm9tdmi_proc_fin)
 * Purpose : Sets up everything for a reset and jump to the location for soft reset.
 */
	.pushsection	.idmap.text, "ax"
-ENTRY(cpu_arm9tdmi_reset)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_reset)
	ret	r0
-ENDPROC(cpu_arm9tdmi_reset)
+SYM_FUNC_END(cpu_arm9tdmi_reset)
	.popsection

	.type	__arm9tdmi_setup, #function
...
...@@ -11,6 +11,7 @@
 */
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
...@@ -26,13 +27,14 @@
/*
 * cpu_fa526_proc_init()
 */
-ENTRY(cpu_fa526_proc_init)
+SYM_TYPED_FUNC_START(cpu_fa526_proc_init)
	ret	lr
+SYM_FUNC_END(cpu_fa526_proc_init)

/*
 * cpu_fa526_proc_fin()
 */
-ENTRY(cpu_fa526_proc_fin)
+SYM_TYPED_FUNC_START(cpu_fa526_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
...@@ -40,6 +42,7 @@ ENTRY(cpu_fa526_proc_fin)
	nop
	nop
	ret	lr
+SYM_FUNC_END(cpu_fa526_proc_fin)

/*
 * cpu_fa526_reset(loc)
...@@ -52,7 +55,7 @@ ENTRY(cpu_fa526_proc_fin)
 */
	.align	4
	.pushsection	.idmap.text, "ax"
-ENTRY(cpu_fa526_reset)
+SYM_TYPED_FUNC_START(cpu_fa526_reset)
/* TODO: Use CP8 if possible... */
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
...@@ -68,24 +71,25 @@ ENTRY(cpu_fa526_reset)
	nop
	nop
	ret	r0
-ENDPROC(cpu_fa526_reset)
+SYM_FUNC_END(cpu_fa526_reset)
	.popsection

/*
 * cpu_fa526_do_idle()
 */
	.align	4
-ENTRY(cpu_fa526_do_idle)
+SYM_TYPED_FUNC_START(cpu_fa526_do_idle)
	ret	lr
+SYM_FUNC_END(cpu_fa526_do_idle)

-ENTRY(cpu_fa526_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_fa526_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
+SYM_FUNC_END(cpu_fa526_dcache_clean_area)

/* =============================== PageTable ============================== */
...@@ -97,7 +101,7 @@ ENTRY(cpu_fa526_dcache_clean_area)
 * pgd: new page tables
 */
	.align	4
-ENTRY(cpu_fa526_switch_mm)
+SYM_TYPED_FUNC_START(cpu_fa526_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
...@@ -113,6 +117,7 @@ ENTRY(cpu_fa526_switch_mm)
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate UTLB
#endif
	ret	lr
+SYM_FUNC_END(cpu_fa526_switch_mm)

/*
 * cpu_fa526_set_pte_ext(ptep, pte, ext)
...@@ -120,7 +125,7 @@ ENTRY(cpu_fa526_switch_mm)
 * Set a PTE and flush it out
 */
	.align	4
-ENTRY(cpu_fa526_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_fa526_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
...@@ -129,6 +134,7 @@ ENTRY(cpu_fa526_set_pte_ext)
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr
+SYM_FUNC_END(cpu_fa526_set_pte_ext)

	.type	__fa526_setup, #function
__fa526_setup:
...
...@@ -320,39 +320,6 @@ ENTRY(\name\()_processor_functions)
#endif
.endm

-.macro define_cache_functions name:req
-	.align 2
-	.type	\name\()_cache_fns, #object
-ENTRY(\name\()_cache_fns)
-	.long	\name\()_flush_icache_all
-	.long	\name\()_flush_kern_cache_all
-	.long	\name\()_flush_kern_cache_louis
-	.long	\name\()_flush_user_cache_all
-	.long	\name\()_flush_user_cache_range
-	.long	\name\()_coherent_kern_range
-	.long	\name\()_coherent_user_range
-	.long	\name\()_flush_kern_dcache_area
-	.long	\name\()_dma_map_area
-	.long	\name\()_dma_unmap_area
-	.long	\name\()_dma_flush_range
-	.size	\name\()_cache_fns, . - \name\()_cache_fns
-.endm
-
-.macro define_tlb_functions name:req, flags_up:req, flags_smp
-	.type	\name\()_tlb_fns, #object
-	.align 2
-ENTRY(\name\()_tlb_fns)
-	.long	\name\()_flush_user_tlb_range
-	.long	\name\()_flush_kern_tlb_range
-	.ifnb \flags_smp
-		ALT_SMP(.long	\flags_smp )
-		ALT_UP(.long	\flags_up )
-	.else
-		.long	\flags_up
-	.endif
-	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
-.endm

.macro globl_equ x, y
	.globl	\x
	.equ	\x, \y
...
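
For context: the .long tables deleted above are rebuilt as C initializers elsewhere in this series, so the compiler can attach kCFI type information to every slot instead of trusting positionally assembled pointers. A sketch of the cache-functions structure the removed macro used to fill, abridged from <asm/cacheflush.h> and reproduced here for orientation only:

#include <stddef.h>

/* Abridged sketch of struct cpu_cache_fns. The old macro emitted one
 * .long per slot in this order; a C initializer names each member and
 * is type-checked, e.g. (illustrative):
 *
 *   struct cpu_cache_fns arm926_cache_fns __ro_after_init = {
 *           .flush_icache_all = arm926_flush_icache_all,
 *           ...
 *   };
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);
	void (*dma_flush_range)(const void *, const void *);
};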