Commit 82fa407d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - Correct ARM's dma-mapping to use the proper printk format strings.

 - Avoid defining OBJCOPYFLAGS globally, which upsets lkdtm rodata
   testing.

 - Cleanups to ARM's asm/memory.h include.

 - L2 cache cleanups.

 - Allow flat nommu binaries to be executed on ARM MMU systems.

 - Kernel hardening - add more read-only after init annotations,
   including making some kernel vdso variables const.

 - Ensure AMBA PrimeCell clocks are appropriately defaulted.

 - ARM breakpoint cleanup.

 - Various StrongARM 11x0 and companion chip (SA1111) updates to bring
   this legacy platform to use more modern APIs for (eg) GPIOs and
   interrupts, which will allow us in the future to reduce some of the
   board-level driver clutter and eliminate function callbacks into board
   code via platform data. There still appears to be interest in these
   platforms!

 - Remove the now redundant secure_flush_area() API.

 - Module PLT relocation optimisations. Ard says: This series of 4
   patches optimizes the ARM PLT generation code that is invoked at
   module load time, to get rid of the O(n^2) algorithm that results in
   pathological load times of 10 seconds or more for large modules on
   certain STB platforms. (A simplified sketch of the idea follows this
   list.)

 - ARMv7M cache maintenance support.

 - L2 cache PMU support.

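As a rough, out-of-tree sketch of the idea Ard describes (not the kernel's actual module-plts.c, which works on Elf32_Rel records and additionally restricts itself to branch relocations against undefined symbols with zero addends): once the relocations are sorted by type and symbol index, duplicates become adjacent, so the number of PLT entries needed can be counted in one linear pass after an O(n log n) sort instead of comparing every pair.

#include <stdlib.h>

/* Toy relocation record standing in for the fields of Elf32_Rel::r_info. */
struct rel {
	unsigned int type;
	unsigned int sym;
};

static int cmp_rel(const void *a, const void *b)
{
	const struct rel *x = a, *y = b;

	if (x->type != y->type)
		return x->type < y->type ? -1 : 1;
	if (x->sym != y->sym)
		return x->sym < y->sym ? -1 : 1;
	return 0;
}

/* After sorting, a relocation needs a new PLT slot only if it differs
 * from its immediate predecessor. */
static unsigned int count_plts(struct rel *rels, unsigned int num)
{
	unsigned int i, plts = 0;

	qsort(rels, num, sizeof(*rels), cmp_rel);
	for (i = 0; i < num; i++)
		if (i == 0 || cmp_rel(&rels[i], &rels[i - 1]) != 0)
			plts++;
	return plts;
}
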
* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (35 commits)
  ARM: sa1111: provide to_sa1111_device() macro
  ARM: sa1111: add sa1111_get_irq()
  ARM: sa1111: clean up duplication in IRQ chip implementation
  ARM: sa1111: implement a gpio_chip for SA1111 GPIOs
  ARM: sa1111: move irq cleanup to separate function
  ARM: sa1111: use devm_clk_get()
  ARM: sa1111: use devm_kzalloc()
  ARM: sa1111: ensure we only touch RAB bus type devices when removing
  ARM: 8611/1: l2x0: add PMU support
  ARM: 8610/1: V7M: Add dsb before jumping in handler mode
  ARM: 8609/1: V7M: Add support for the Cortex-M7 processor
  ARM: 8608/1: V7M: Indirect proc_info construction for V7M CPUs
  ARM: 8607/1: V7M: Wire up caches for V7M processors with cache support.
  ARM: 8606/1: V7M: introduce cache operations
  ARM: 8605/1: V7M: fix notrace variant of save_and_disable_irqs
  ARM: 8604/1: V7M: Add support for reading the CTR with read_cpuid_cachetype()
  ARM: 8603/1: V7M: Add addresses for mem-mapped V7M cache operations
  ARM: 8602/1: factor out CSSELR/CCSIDR operations that use cp15 directly
  ARM: kernel: avoid brute force search on PLT generation
  ARM: kernel: sort relocation sections before allocating PLTs
  ...
parents c7f5d36a 81a63001
......@@ -23,7 +23,6 @@ ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
LDFLAGS_MODULE += -T $(srctree)/arch/arm/kernel/module.lds
endif
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
......
......@@ -11,6 +11,8 @@
# Copyright (C) 1995-2002 Russell King
#
OBJCOPYFLAGS :=-O binary -R .comment -S
ifneq ($(MACHINE),)
include $(MACHINE)/Makefile.boot
endif
......
This diff is collapsed.
......@@ -159,7 +159,11 @@
.endm
.macro save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
mrs \oldcpsr, primask
#else
mrs \oldcpsr, cpsr
#endif
disable_irq_notrace
.endm
......
......@@ -501,21 +501,4 @@ static inline void set_kernel_text_ro(void) { }
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
/**
* secure_flush_area - ensure coherency across the secure boundary
* @addr: virtual address
* @size: size of region
*
* Ensure that the specified area of memory is coherent across the secure
* boundary from the non-secure side. This is used when calling secure
* firmware where the secure firmware does not ensure coherency.
*/
static inline void secure_flush_area(const void *addr, size_t size)
{
phys_addr_t phys = __pa(addr);
__cpuc_flush_dcache_area((void *)addr, size);
outer_flush_range(phys, phys + size);
}
#endif
......@@ -56,4 +56,43 @@ static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
(~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
}
#define CSSELR_ICACHE 1
#define CSSELR_DCACHE 0
#define CSSELR_L1 (0 << 1)
#define CSSELR_L2 (1 << 1)
#define CSSELR_L3 (2 << 1)
#define CSSELR_L4 (3 << 1)
#define CSSELR_L5 (4 << 1)
#define CSSELR_L6 (5 << 1)
#define CSSELR_L7 (6 << 1)
#ifndef CONFIG_CPU_V7M
static inline void set_csselr(unsigned int cache_selector)
{
asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (cache_selector));
}
static inline unsigned int read_ccsidr(void)
{
unsigned int val;
asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (val));
return val;
}
#else /* CONFIG_CPU_V7M */
#include <linux/io.h>
#include "asm/v7m.h"
static inline void set_csselr(unsigned int cache_selector)
{
writel(cache_selector, BASEADDR_V7M_SCB + V7M_SCB_CTR);
}
static inline unsigned int read_ccsidr(void)
{
return readl(BASEADDR_V7M_SCB + V7M_SCB_CCSIDR);
}
#endif
#endif
......@@ -60,6 +60,7 @@
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_DEC 0x44
#define ARM_CPU_IMP_INTEL 0x69
/* ARM implemented processors */
......@@ -76,6 +77,17 @@
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
#define ARM_CPU_PART_MASK 0xff00fff0
/* DEC implemented cores */
#define ARM_CPU_PART_SA1100 0x4400a110
/* Intel implemented cores */
#define ARM_CPU_PART_SA1110 0x6900b110
#define ARM_CPU_REV_SA1110_A0 0
#define ARM_CPU_REV_SA1110_B0 4
#define ARM_CPU_REV_SA1110_B1 5
#define ARM_CPU_REV_SA1110_B2 6
#define ARM_CPU_REV_SA1110_B4 8
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
......@@ -152,6 +164,11 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
return read_cpuid(CPUID_CACHETYPE);
}
#elif defined(CONFIG_CPU_V7M)
static inline unsigned int __attribute_const__ read_cpuid_id(void)
......@@ -159,6 +176,11 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
}
static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
}
#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
......@@ -173,6 +195,11 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
return (read_cpuid_id() & 0xFF000000) >> 24;
}
static inline unsigned int __attribute_const__ read_cpuid_revision(void)
{
return read_cpuid_id() & 0x0000000f;
}
/*
* The CPU part number is meaningless without referring to the CPU
* implementer: implementers are free to define their own part numbers
......@@ -193,11 +220,6 @@ static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
}
static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
return read_cpuid(CPUID_CACHETYPE);
}
static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
{
return read_cpuid(CPUID_TCM);
......@@ -208,6 +230,10 @@ static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
return read_cpuid(CPUID_MPIDR);
}
/* StrongARM-11x0 CPUs */
#define cpu_is_sa1100() (read_cpuid_part() == ARM_CPU_PART_SA1100)
#define cpu_is_sa1110() (read_cpuid_part() == ARM_CPU_PART_SA1110)
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
......
......@@ -8,8 +8,9 @@
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
#define flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp))
#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
({ unsigned long __val; __get_user_unaligned(__val, rp); __val; })
#define flat_put_addr_at_rp(rp, val, relval) __put_user_unaligned(val, rp)
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) 0
......
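The flat.h change above swaps get_unaligned()/put_unaligned() for user-space accessors because, on an MMU system, the FLAT binary's text and data live in an ordinary user mapping rather than in directly addressable kernel memory, so relocation fixups must go through uaccess. A minimal sketch of how such a fixup might use the same accessors the new macros use (the helper name and call site are hypothetical, not part of this series):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper: read a stored offset through the user mapping,
 * turn it into an absolute address, and write it back. */
static int fixup_one_flat_reloc(unsigned long __user *rp, unsigned long base)
{
	unsigned long addr;

	if (__get_user_unaligned(addr, rp))
		return -EFAULT;
	return __put_user_unaligned(addr + base, rp);
}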
......@@ -118,11 +118,7 @@
#endif
#if defined(CONFIG_CPU_V7M)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE nop
# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
......
......@@ -87,6 +87,15 @@
#define L310_CACHE_ID_RTL_R3P2 0x08
#define L310_CACHE_ID_RTL_R3P3 0x09
#define L2X0_EVENT_CNT_CTRL_ENABLE BIT(0)
#define L2X0_EVENT_CNT_CFG_SRC_SHIFT 2
#define L2X0_EVENT_CNT_CFG_SRC_MASK 0xf
#define L2X0_EVENT_CNT_CFG_SRC_DISABLED 0
#define L2X0_EVENT_CNT_CFG_INT_DISABLED 0
#define L2X0_EVENT_CNT_CFG_INT_INCR 1
#define L2X0_EVENT_CNT_CFG_INT_OVERFLOW 2
/* L2C auxiliary control register - bits common to L2C-210/220/310 */
#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
......@@ -157,6 +166,16 @@ static inline int l2x0_of_init(u32 aux_val, u32 aux_mask)
}
#endif
#ifdef CONFIG_CACHE_L2X0_PMU
void l2x0_pmu_register(void __iomem *base, u32 part);
void l2x0_pmu_suspend(void);
void l2x0_pmu_resume(void);
#else
static inline void l2x0_pmu_register(void __iomem *base, u32 part) {}
static inline void l2x0_pmu_suspend(void) {}
static inline void l2x0_pmu_resume(void) {}
#endif
struct l2x0_regs {
unsigned long phy_base;
unsigned long aux_ctrl;
......
......@@ -420,7 +420,7 @@ struct sa1111_dev {
u64 dma_mask;
};
#define SA1111_DEV(_d) container_of((_d), struct sa1111_dev, dev)
#define to_sa1111_device(x) container_of(x, struct sa1111_dev, dev)
#define sa1111_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define sa1111_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p)
......@@ -446,6 +446,8 @@ struct sa1111_driver {
int sa1111_enable_device(struct sa1111_dev *);
void sa1111_disable_device(struct sa1111_dev *);
int sa1111_get_irq(struct sa1111_dev *, unsigned num);
unsigned int sa1111_pll_clock(struct sa1111_dev *);
#define SA1111_AUDIO_ACLINK 0
......
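For drivers of SA1111 sub-devices, the new sa1111_get_irq() accessor replaces direct indexing of the device's irq[] array. A hedged sketch of a probe routine using it, under the assumption that the driver, handler and name string below are made up purely for illustration:

#include <linux/interrupt.h>
#include <asm/hardware/sa1111.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct sa1111_dev *sadev)
{
	int irq = sa1111_get_irq(sadev, 0);	/* first IRQ of this sub-device */

	if (irq < 0)
		return irq;
	return request_irq(irq, example_isr, 0, "sa1111-example", sadev);
}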
......@@ -114,7 +114,6 @@ struct notifier_block;
struct perf_event;
struct pmu;
extern struct pmu perf_ops_bp;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
......
......@@ -159,13 +159,8 @@
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*/
#if defined(__virt_to_phys)
#define PHYS_OFFSET PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)
/*
* Constants used to force the right instruction encodings and shifts
......@@ -182,10 +177,6 @@ extern const void *__pv_table_begin, *__pv_table_end;
#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
#define virt_to_pfn(kaddr) \
((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
PHYS_PFN_OFFSET)
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
"1: " instr " %0, %1, %2\n" \
......@@ -257,12 +248,12 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
return x - PHYS_OFFSET + PAGE_OFFSET;
}
#endif
#define virt_to_pfn(kaddr) \
((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
PHYS_PFN_OFFSET)
#endif
/*
* These are *only* valid on the kernel direct mapped RAM memory.
* Note: Drivers should NOT use these. They are the wrong
......
......@@ -23,10 +23,8 @@ struct mod_arch_specific {
struct unwind_table *unwind[ARM_SEC_MAX];
#endif
#ifdef CONFIG_ARM_MODULE_PLTS
struct elf32_shdr *core_plt;
struct elf32_shdr *init_plt;
int core_plt_count;
int init_plt_count;
struct elf32_shdr *plt;
int plt_count;
#endif
};
......
......@@ -24,6 +24,9 @@
#define V7M_SCB_CCR 0x14
#define V7M_SCB_CCR_STKALIGN (1 << 9)
#define V7M_SCB_CCR_DC (1 << 16)
#define V7M_SCB_CCR_IC (1 << 17)
#define V7M_SCB_CCR_BP (1 << 18)
#define V7M_SCB_SHPR2 0x1c
#define V7M_SCB_SHPR3 0x20
......@@ -47,6 +50,25 @@
#define EXC_RET_STACK_MASK 0x00000004
#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
/* Cache related definitions */
#define V7M_SCB_CLIDR 0x78 /* Cache Level ID register */
#define V7M_SCB_CTR 0x7c /* Cache Type register */
#define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */
#define V7M_SCB_CSSELR 0x84 /* Cache size selection register */
/* Cache operations */
#define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */
#define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */
#define V7M_SCB_DCIMVAC 0x25c /* D-cache invalidate by MVA to PoC */
#define V7M_SCB_DCISW 0x260 /* D-cache invalidate by set-way */
#define V7M_SCB_DCCMVAU 0x264 /* D-cache clean by MVA to PoU */
#define V7M_SCB_DCCMVAC 0x268 /* D-cache clean by MVA to PoC */
#define V7M_SCB_DCCSW 0x26c /* D-cache clean by set-way */
#define V7M_SCB_DCCIMVAC 0x270 /* D-cache clean and invalidate by MVA to PoC */
#define V7M_SCB_DCCISW 0x274 /* D-cache clean and invalidate by set-way */
#define V7M_SCB_BPIALL 0x278 /* Branch predictor invalidate all */
#ifndef __ASSEMBLY__
enum reboot_mode;
......
......@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
__used __section(__cpuidle_method_of_table_end);
static struct cpuidle_ops cpuidle_ops[NR_CPUS];
static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
/**
* arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
......
......@@ -158,7 +158,21 @@ __after_proc_init:
bic r0, r0, #CR_V
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
#endif /* CONFIG_CPU_CP15 */
#elif defined (CONFIG_CPU_V7M)
/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_DC
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #V7M_SCB_CCR_BP
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_IC
#endif
movw r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
movt r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
str r0, [r3]
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
.ltorg
......
......@@ -9,6 +9,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/cache.h>
#include <asm/opcodes.h>
......@@ -30,101 +31,142 @@ struct plt_entries {
u32 lit[PLT_ENT_COUNT];
};
static bool in_init(const struct module *mod, u32 addr)
{
return addr - (u32)mod->init_layout.base < mod->init_layout.size;
}
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
struct plt_entries *plt, *plt_end;
int c, *count;
if (in_init(mod, loc)) {
plt = (void *)mod->arch.init_plt->sh_addr;
plt_end = (void *)plt + mod->arch.init_plt->sh_size;
count = &mod->arch.init_plt_count;
} else {
plt = (void *)mod->arch.core_plt->sh_addr;
plt_end = (void *)plt + mod->arch.core_plt->sh_size;
count = &mod->arch.core_plt_count;
struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
int idx = 0;
/*
* Look for an existing entry pointing to 'val'. Given that the
* relocations are sorted, this will be the last entry we allocated.
* (if one exists).
*/
if (mod->arch.plt_count > 0) {
plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
if (plt->lit[idx] == val)
return (u32)&plt->ldr[idx];
idx = (idx + 1) % PLT_ENT_COUNT;
if (!idx)
plt++;
}
/* Look for an existing entry pointing to 'val' */
for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
int i;
mod->arch.plt_count++;
BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
if (!c) {
if (!idx)
/* Populate a new set of entries */
*plt = (struct plt_entries){
{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
{ val, }
};
++*count;
return (u32)plt->ldr;
}
for (i = 0; i < PLT_ENT_COUNT; i++) {
if (!plt->lit[i]) {
plt->lit[i] = val;
++*count;
}
if (plt->lit[i] == val)
return (u32)&plt->ldr[i];
}
}
BUG();
else
plt->lit[idx] = val;
return (u32)&plt->ldr[idx];
}
static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
u32 mask)
#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
static int cmp_rel(const void *a, const void *b)
{
u32 *loc1, *loc2;
const Elf32_Rel *x = a, *y = b;
int i;
for (i = 0; i < num; i++) {
if (rel[i].r_info != rel[num].r_info)
continue;
/* sort by type and symbol index */
i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
if (i == 0)
i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
return i;
}
static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
u32 *tval = (u32 *)(base + rel->r_offset);
/*
* Identical relocation types against identical symbols can
* still result in different PLT entries if the addend in the
* place is different. So resolve the target of the relocation
* to compare the values.
* Do a bitwise compare on the raw addend rather than fully decoding
* the offset and doing an arithmetic comparison.
* Note that a zero-addend jump/call relocation is encoded taking the
* PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
*/
loc1 = (u32 *)(base + rel[i].r_offset);
loc2 = (u32 *)(base + rel[num].r_offset);
if (((*loc1 ^ *loc2) & mask) == 0)
return 1;
switch (ELF32_R_TYPE(rel->r_info)) {
u16 upper, lower;
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);
return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;
case R_ARM_CALL:
case R_ARM_PC24:
case R_ARM_JUMP24:
return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
}
return 0;
BUG();
}
static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
const Elf32_Rel *prev;
/*
* Entries are sorted by type and symbol index. That means that,
* if a duplicate entry exists, it must be in the preceding
* slot.
*/
if (!num)
return false;
prev = rel + num - 1;
return cmp_rel(rel + num, prev) == 0 &&
is_zero_addend_relocation(base, prev);
}
/* Count how many PLT entries we may need */
static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
const Elf32_Rel *rel, int num)
{
unsigned int ret = 0;
const Elf32_Sym *s;
int i;
/*
* Sure, this is order(n^2), but it's usually short, and not
* time critical
*/
for (i = 0; i < num; i++)
for (i = 0; i < num; i++) {
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_ARM_CALL:
case R_ARM_PC24:
case R_ARM_JUMP24:
if (!duplicate_rel(base, rel, i,
__opcode_to_mem_arm(0x00ffffff)))
ret++;
break;
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
if (!duplicate_rel(base, rel, i,
__opcode_to_mem_thumb32(0x07ff2fff)))
/*
* We only have to consider branch targets that resolve
* to undefined symbols. This is not simply a heuristic,
* it is a fundamental limitation, since the PLT itself
* is part of the module, and needs to be within range
* as well, so modules can never grow beyond that limit.
*/
s = syms + ELF32_R_SYM(rel[i].r_info);
if (s->st_shndx != SHN_UNDEF)
break;
/*
* Jump relocations with non-zero addends against
* undefined symbols are supported by the ELF spec, but
* do not occur in practice (e.g., 'jump n bytes past
* the entry point of undefined function symbol f').
* So we need to support them, but there is no need to
* take them into consideration when trying to optimize
* this code. So let's only check for duplicates when
* the addend is zero.
*/
if (!is_zero_addend_relocation(base, rel + i) ||
!duplicate_rel(base, rel, i))
ret++;
#endif
}
}
return ret;
}
......@@ -132,52 +174,55 @@ static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
unsigned long core_plts = 0, init_plts = 0;
unsigned long plts = 0;
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
Elf32_Sym *syms = NULL;
/*
* To store the PLTs, we expand the .text section for core module code
* and the .init.text section for initialization code.
* and for initialization code.
*/
for (s = sechdrs; s < sechdrs_end; ++s)
if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
mod->arch.core_plt = s;
else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
mod->arch.init_plt = s;
if (!mod->arch.core_plt || !mod->arch.init_plt) {
pr_err("%s: sections missing\n", mod->name);
for (s = sechdrs; s < sechdrs_end; ++s) {
if (strcmp(".plt", secstrings + s->sh_name) == 0)
mod->arch.plt = s;
else if (s->sh_type == SHT_SYMTAB)
syms = (Elf32_Sym *)s->sh_addr;
}
if (!mod->arch.plt) {
pr_err("%s: module PLT section missing\n", mod->name);
return -ENOEXEC;
}
if (!syms) {
pr_err("%s: module symtab section missing\n", mod->name);
return -ENOEXEC;
}
for (s = sechdrs + 1; s < sechdrs_end; ++s) {
const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
int numrels = s->sh_size / sizeof(Elf32_Rel);
Elf32_Shdr *dstsec = sechdrs + s->sh_info;
if (s->sh_type != SHT_REL)
continue;
if (strstr(secstrings + s->sh_name, ".init"))
init_plts += count_plts(dstsec->sh_addr, rels, numrels);
else
core_plts += count_plts(dstsec->sh_addr, rels, numrels);
/* ignore relocations that operate on non-exec sections */
if (!(dstsec->sh_flags & SHF_EXECINSTR))
continue;
/* sort by type and symbol index */
sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
}
mod->arch.core_plt->sh_type = SHT_NOBITS;
mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
mod->arch.plt->sh_type = SHT_NOBITS;
mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.core_plt_count = 0;
mod->arch.plt_count = 0;
mod->arch.init_plt->sh_type = SHT_NOBITS;
mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.init_plt_count = 0;
pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
return 0;
}
SECTIONS {
.core.plt : { BYTE(0) }
.init.plt : { BYTE(0) }
.plt : { BYTE(0) }
}
......@@ -114,19 +114,19 @@ EXPORT_SYMBOL(elf_hwcap2);
#ifdef MULTI_CPU
struct processor processor __read_mostly;
struct processor processor __ro_after_init;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif
......@@ -290,12 +290,9 @@ static int cpu_has_aliasing_icache(unsigned int arch)
/* arch specifies the register format */
switch (arch) {
case CPU_ARCH_ARMv7:
asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
: /* No output operands */
: "r" (1));
set_csselr(CSSELR_ICACHE | CSSELR_L1);
isb();
asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
: "=r" (id_reg));
id_reg = read_ccsidr();
line_size = 4 << ((id_reg & 0x7) + 2);
num_sets = ((id_reg >> 13) & 0x7fff) + 1;
aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
......@@ -315,11 +312,12 @@ static void __init cacheid_init(void)
{
unsigned int arch = cpu_architecture();
if (arch == CPU_ARCH_ARMv7M) {
cacheid = 0;
} else if (arch >= CPU_ARCH_ARMv6) {
if (arch >= CPU_ARCH_ARMv6) {
unsigned int cachetype = read_cpuid_cachetype();
if ((cachetype & (7 << 29)) == 4 << 29) {
if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
cacheid = 0;
} else if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
arch = CPU_ARCH_ARMv7;
cacheid = CACHEID_VIPT_NONALIASING;
......
......@@ -82,7 +82,7 @@ enum ipi_msg_type {
static DECLARE_COMPLETION(cpu_running);
static struct smp_operations smp_ops;
static struct smp_operations smp_ops __ro_after_init;
void __init smp_set_ops(const struct smp_operations *ops)
{
......
......@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/cache.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/kernel.h>
......@@ -39,7 +40,7 @@
static struct page **vdso_text_pagelist;
/* Total number of pages needed for the data and text portions of the VDSO. */
unsigned int vdso_total_pages __read_mostly;
unsigned int vdso_total_pages __ro_after_init;
/*
* The VDSO data page.
......@@ -47,13 +48,13 @@ unsigned int vdso_total_pages __read_mostly;
static union vdso_data_store vdso_data_store __page_aligned_data;
static struct vdso_data *vdso_data = &vdso_data_store.data;
static struct page *vdso_data_page;
static struct vm_special_mapping vdso_data_mapping = {
static struct page *vdso_data_page __ro_after_init;
static const struct vm_special_mapping vdso_data_mapping = {
.name = "[vvar]",
.pages = &vdso_data_page,
};
static struct vm_special_mapping vdso_text_mapping = {
static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
.name = "[vdso]",
};
......@@ -67,7 +68,7 @@ struct elfinfo {
/* Cached result of boot-time check for whether the arch timer exists,
* and if so, whether the virtual counter is useable.
*/
static bool cntvct_ok __read_mostly;
static bool cntvct_ok __ro_after_init;
static bool __init cntvct_functional(void)
{
......
......@@ -29,7 +29,7 @@
/*
* Default to the loop-based delay implementation.
*/
struct arm_delay_ops arm_delay_ops = {
struct arm_delay_ops arm_delay_ops __ro_after_init = {
.delay = __loop_delay,
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
......
......@@ -59,7 +59,7 @@
#define XBUS_SWITCH_J17_11 ((*XBUS_SWITCH) & (1 << 5))
#define XBUS_SWITCH_J17_9 ((*XBUS_SWITCH) & (1 << 6))
#define UNCACHEABLE_ADDR (ARMCSR_BASE + 0x108)
#define UNCACHEABLE_ADDR (ARMCSR_BASE + 0x108) /* CSR_ROMBASEMASK */
/* PIC irq control */
......
......@@ -40,7 +40,7 @@
#define SCREEN_END 0xdfc00000
#define SCREEN_BASE 0xdf800000
#define UNCACHEABLE_ADDR 0xdf010000
#define UNCACHEABLE_ADDR (FLUSH_BASE + 0x10000)
/*
* IO Addresses
......
......@@ -13,7 +13,7 @@
#define __ASM_ARCH_HARDWARE_H
#define UNCACHEABLE_ADDR 0xfa050000
#define UNCACHEABLE_ADDR 0xfa050000 /* ICIP */
/*
......@@ -36,28 +36,10 @@
#define io_v2p( x ) \
( (((x)&0x00ffffff) | (((x)&(0x30000000>>VIO_SHIFT))<<VIO_SHIFT)) + PIO_START )
#define CPU_SA1110_A0 (0)
#define CPU_SA1110_B0 (4)
#define CPU_SA1110_B1 (5)
#define CPU_SA1110_B2 (6)
#define CPU_SA1110_B4 (8)
#define CPU_SA1100_ID (0x4401a110)
#define CPU_SA1100_MASK (0xfffffff0)
#define CPU_SA1110_ID (0x6901b110)
#define CPU_SA1110_MASK (0xfffffff0)
#define __MREG(x) IOMEM(io_p2v(x))
#ifndef __ASSEMBLY__
#include <asm/cputype.h>
#define CPU_REVISION (read_cpuid_id() & 15)
#define cpu_is_sa1100() ((read_cpuid_id() & CPU_SA1100_MASK) == CPU_SA1100_ID)
#define cpu_is_sa1110() ((read_cpuid_id() & CPU_SA1110_MASK) == CPU_SA1110_ID)
# define __REG(x) (*((volatile unsigned long __iomem *)io_p2v(x)))
# define __PREG(x) (io_v2p((unsigned long)&(x)))
......
......@@ -403,6 +403,7 @@ config CPU_V7M
bool
select CPU_32v7M
select CPU_ABRT_NOMMU
select CPU_CACHE_V7M
select CPU_CACHE_NOP
select CPU_PABRT_LEGACY
select CPU_THUMBONLY
......@@ -518,6 +519,9 @@ config CPU_CACHE_VIPT
config CPU_CACHE_FA
bool
config CPU_CACHE_V7M
bool
if MMU
# The copy-page model
config CPU_COPY_V4WT
......@@ -750,14 +754,14 @@ config CPU_HIGH_VECTOR
config CPU_ICACHE_DISABLE
bool "Disable I-Cache (I-bit)"
depends on CPU_CP15 && !(CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
depends on (CPU_CP15 && !(CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)) || CPU_V7M
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_DISABLE
bool "Disable D-Cache (C-bit)"
depends on CPU_CP15 && !SMP
depends on (CPU_CP15 && !SMP) || CPU_V7M
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
......@@ -792,7 +796,7 @@ config CPU_CACHE_ROUND_ROBIN
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 || CPU_V7M
help
Say Y here to disable branch prediction. If unsure, say N.
......@@ -916,6 +920,13 @@ config CACHE_L2X0
help
This option enables the L2x0 PrimeCell.
config CACHE_L2X0_PMU
bool "L2x0 performance monitor support" if CACHE_L2X0
depends on PERF_EVENTS
help
This option enables support for the performance monitoring features
of the L220 and PL310 outer cache controllers.
if CACHE_L2X0
config PL310_ERRATA_588369
......
......@@ -43,9 +43,11 @@ obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o
obj-$(CONFIG_CPU_CACHE_V7M) += cache-v7m.o
AFLAGS_cache-v6.o :=-Wa,-march=armv6
AFLAGS_cache-v7.o :=-Wa,-march=armv7-a
AFLAGS_cache-v7m.o :=-Wa,-march=armv7-m
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
......@@ -101,6 +103,7 @@ AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
obj-$(CONFIG_CACHE_L2X0_PMU) += cache-l2x0-pmu.o
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
obj-$(CONFIG_CACHE_UNIPHIER) += cache-uniphier.o
This diff is collapsed.
......@@ -142,6 +142,8 @@ static void l2c_disable(void)
{
void __iomem *base = l2x0_base;
l2x0_pmu_suspend();
outer_cache.flush_all();
l2c_write_sec(0, base, L2X0_CTRL);
dsb(st);
......@@ -159,6 +161,8 @@ static void l2c_resume(void)
/* Do not touch the controller if already enabled. */
if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
l2c_enable(base, l2x0_data->num_lock);
l2x0_pmu_resume();
}
/*
......@@ -709,9 +713,8 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
if (revision >= L310_CACHE_ID_RTL_R3P0 &&
revision < L310_CACHE_ID_RTL_R3P2) {
u32 val = l2x0_saved_regs.prefetch_ctrl;
/* I don't think bit23 is required here... but iMX6 does so */
if (val & (BIT(30) | BIT(23))) {
val &= ~(BIT(30) | BIT(23));
if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
l2x0_saved_regs.prefetch_ctrl = val;
errata[n++] = "752271";
}
......@@ -892,6 +895,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
data->type, cache_id, aux);
l2x0_pmu_register(l2x0_base, cache_id);
return 0;
}
......
This diff is collapsed.
......@@ -436,7 +436,7 @@ static int __init atomic_pool_init(void)
gen_pool_set_algo(atomic_pool,
gen_pool_first_fit_order_align,
(void *)PAGE_SHIFT);
pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
atomic_pool_size / 1024);
return 0;
}
......@@ -445,7 +445,7 @@ static int __init atomic_pool_init(void)
gen_pool_destroy(atomic_pool);
atomic_pool = NULL;
out:
pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
atomic_pool_size / 1024);
return -ENOMEM;
}
......
......@@ -243,7 +243,7 @@ __setup("noalign", noalign_setup);
#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
static struct mem_type mem_types[] __ro_after_init = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
......
......@@ -7,6 +7,10 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#ifdef CONFIG_CPU_V7M
#include <asm/v7m.h>
#endif
/*
* vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
*/
......@@ -70,7 +74,13 @@
* on ARMv7.
*/
.macro dcache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
ldr \tmp, [\tmp]
#else
mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
#endif
lsr \tmp, \tmp, #16
and \tmp, \tmp, #0xf @ cache line size encoding
mov \reg, #4 @ bytes per word
......@@ -82,7 +92,13 @@
* on ARMv7.
*/
.macro icache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
ldr \tmp, [\tmp]
#else
mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
#endif
and \tmp, \tmp, #0xf @ cache line size encoding
mov \reg, #4 @ bytes per word
mov \reg, \reg, lsl \tmp @ actual cache line size
......
......@@ -74,14 +74,42 @@ ENTRY(cpu_v7m_do_resume)
ENDPROC(cpu_v7m_do_resume)
#endif
ENTRY(cpu_cm7_dcache_clean_area)
dcache_line_size r2, r3
movw r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
movt r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
1: str r0, [r3] @ clean D entry
add r0, r0, r2
subs r1, r1, r2
bhi 1b
dsb
ret lr
ENDPROC(cpu_cm7_dcache_clean_area)
ENTRY(cpu_cm7_proc_fin)
movw r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
movt r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
ldr r0, [r2]
bic r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC)
str r0, [r2]
ret lr
ENDPROC(cpu_cm7_proc_fin)
.section ".text.init", #alloc, #execinstr
__v7m_cm7_setup:
mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
b __v7m_setup_cont
/*
* __v7m_setup
*
* This should be able to cover all ARMv7-M cores.
*/
__v7m_setup:
mov r8, 0
__v7m_setup_cont:
@ Configure the vector table base address
ldr r0, =BASEADDR_V7M_SCB
ldr r12, =vector_table
......@@ -104,6 +132,7 @@ __v7m_setup:
badr r1, 1f
ldr r5, [r12, #11 * 4] @ read the SVC vector entry
str r1, [r12, #11 * 4] @ write the temporary SVC vector entry
dsb
mov r6, lr @ save LR
ldr sp, =init_thread_union + THREAD_START_SP
cpsie i
......@@ -116,15 +145,32 @@ __v7m_setup:
mov r1, #1
msr control, r1 @ Thread mode has unprivileged access
@ Configure caches (if implemented)
teq r8, #0
stmneia r12, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
blne v7m_invalidate_l1
teq r8, #0 @ re-evaluate condition
ldmneia r12, {r0-r6, lr}
@ Configure the System Control Register to ensure 8-byte stack alignment
@ Note the STKALIGN bit is either RW or RAO.
ldr r12, [r0, V7M_SCB_CCR] @ system control register
orr r12, #V7M_SCB_CCR_STKALIGN
str r12, [r0, V7M_SCB_CCR]
ldr r0, [r0, V7M_SCB_CCR] @ system control register
orr r0, #V7M_SCB_CCR_STKALIGN
orr r0, r0, r8
ret lr
ENDPROC(__v7m_setup)
/*
* Cortex-M7 processor functions
*/
globl_equ cpu_cm7_proc_init, cpu_v7m_proc_init
globl_equ cpu_cm7_reset, cpu_v7m_reset
globl_equ cpu_cm7_do_idle, cpu_v7m_do_idle
globl_equ cpu_cm7_switch_mm, cpu_v7m_switch_mm
define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
define_processor_functions cm7, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
string cpu_arch_name, "armv7m"
......@@ -133,6 +179,50 @@ ENDPROC(__v7m_setup)
.section ".proc.info.init", #alloc
.macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0, proc_fns = v7m_processor_functions
.long 0 /* proc_info_list.__cpu_mm_mmu_flags */
.long 0 /* proc_info_list.__cpu_io_mmu_flags */
initfn \initfunc, \name
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \hwcaps
.long cpu_v7m_name
.long \proc_fns
.long 0 /* proc_info_list.tlb */
.long 0 /* proc_info_list.user */
.long \cache_fns
.endm
/*
* Match ARM Cortex-M7 processor.
*/
.type __v7m_cm7_proc_info, #object
__v7m_cm7_proc_info:
.long 0x410fc270 /* ARM Cortex-M7 0xC27 */
.long 0xff0ffff0 /* Mask off revision, patch release */
__v7m_proc __v7m_cm7_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
.size __v7m_cm7_proc_info, . - __v7m_cm7_proc_info
/*
* Match ARM Cortex-M4 processor.
*/
.type __v7m_cm4_proc_info, #object
__v7m_cm4_proc_info:
.long 0x410fc240 /* ARM Cortex-M4 0xC24 */
.long 0xff0ffff0 /* Mask off revision, patch release */
__v7m_proc __v7m_cm4_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
.size __v7m_cm4_proc_info, . - __v7m_cm4_proc_info
/*
* Match ARM Cortex-M3 processor.
*/
.type __v7m_cm3_proc_info, #object
__v7m_cm3_proc_info:
.long 0x410fc230 /* ARM Cortex-M3 0xC23 */
.long 0xff0ffff0 /* Mask off revision, patch release */
__v7m_proc __v7m_cm3_proc_info, __v7m_setup
.size __v7m_cm3_proc_info, . - __v7m_cm3_proc_info
/*
* Match any ARMv7-M processor core.
*/
......@@ -140,16 +230,6 @@ ENDPROC(__v7m_setup)
__v7m_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
.long 0 @ proc_info_list.__cpu_mm_mmu_flags
.long 0 @ proc_info_list.__cpu_io_mmu_flags
initfn __v7m_setup, __v7m_proc_info @ proc_info_list.__cpu_flush
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT
.long cpu_v7m_name
.long v7m_processor_functions @ proc_info_list.proc
.long 0 @ proc_info_list.tlb
.long 0 @ proc_info_list.user
.long nop_cache_fns @ proc_info_list.cache
__v7m_proc __v7m_proc_info, __v7m_setup
.size __v7m_proc_info, . - __v7m_proc_info
......@@ -19,6 +19,7 @@
#include <linux/amba/bus.h>
#include <linux/sizes.h>
#include <linux/limits.h>
#include <linux/clk/clk-conf.h>
#include <asm/irq.h>
......@@ -237,6 +238,10 @@ static int amba_probe(struct device *dev)
int ret;
do {
ret = of_clk_set_defaults(dev->of_node, false);
if (ret < 0)
break;
ret = dev_pm_domain_attach(dev, true);
if (ret == -EPROBE_DEFER)
break;
......
......@@ -159,7 +159,7 @@ sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
* half speed or use delayed read latching (errata 13).
*/
if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
(CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000))
(read_cpuid_revision() < ARM_CPU_REV_SA1110_B2 && sd_khz < 62000))
sd_khz /= 2;
sd->mdcnfg = MDCNFG & 0x007f007f;
......
......@@ -89,7 +89,7 @@ config BINFMT_SCRIPT
config BINFMT_FLAT
bool "Kernel support for flat binaries"
depends on !MMU || M68K
depends on !MMU || ARM || M68K
depends on !FRV || BROKEN
help
Support uClinux FLAT format binaries.
......
......@@ -114,6 +114,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_NOTIFY_ONLINE,
......