Commit e3d8238d authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:
 "Mostly refactoring/clean-up:

   - CPU ops and PSCI (Power State Coordination Interface) refactoring
     following the merging of the arm64 ACPI support, together with
     handling of Trusted (secure) OS instances

   - Using fixmap for permanent FDT mapping, removing the initial dtb
     placement requirements (within 512MB from the start of the kernel
     image).  This required moving the FDT self reservation out of the
     memreserve processing

   - Idmap (1:1 mapping used for MMU on/off) handling clean-up

   - Removing flush_cache_all() - not safe on ARM unless the MMU is off.
     Last stages of CPU power down/up are handled by firmware already

   - "Alternatives" (run-time code patching) refactoring and support for
     immediate branch patching, GICv3 CPU interface access

   - User faults handling clean-up

  And some fixes:

   - Fix for VDSO building with broken ELF toolchains

   - Fix another case of init_mm.pgd usage for user mappings (during
     ASID roll-over broadcasting)

   - Fix for FPSIMD reloading after CPU hotplug

   - Fix for missing syscall trace exit

   - Workaround for .inst asm bug

   - Compat fix for switching the user tls tpidr_el0 register"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (42 commits)
  arm64: use private ratelimit state along with show_unhandled_signals
  arm64: show unhandled SP/PC alignment faults
  arm64: vdso: work-around broken ELF toolchains in Makefile
  arm64: kernel: rename __cpu_suspend to keep it aligned with arm
  arm64: compat: print compat_sp instead of sp
  arm64: mm: Fix freeing of the wrong memmap entries with !SPARSEMEM_VMEMMAP
  arm64: entry: fix context tracking for el0_sp_pc
  arm64: defconfig: enable memtest
  arm64: mm: remove reference to tlb.S from comment block
  arm64: Do not attempt to use init_mm in reset_context()
  arm64: KVM: Switch vgic save/restore to alternative_insn
  arm64: alternative: Introduce feature for GICv3 CPU interface
  arm64: psci: fix !CONFIG_HOTPLUG_CPU build warning
  arm64: fix bug for reloading FPSIMD state after CPU hotplug.
  arm64: kernel thread don't need to save fpsimd context.
  arm64: fix missing syscall trace exit
  arm64: alternative: Work around .inst assembler bugs
  arm64: alternative: Merge alternative-asm.h into alternative.h
  arm64: alternative: Allow immediate branch as alternative instruction
  arm64: Rework alternate sequence for ARM erratum 845719
  ...
parents 4e241557 86dca36e
@@ -45,11 +45,13 @@ sees fit.)
 
 Requirement: MANDATORY
 
-The device tree blob (dtb) must be placed on an 8-byte boundary within
-the first 512 megabytes from the start of the kernel image and must not
-cross a 2-megabyte boundary. This is to allow the kernel to map the
-blob using a single section mapping in the initial page tables.
+The device tree blob (dtb) must be placed on an 8-byte boundary and must
+not exceed 2 megabytes in size. Since the dtb will be mapped cacheable
+using blocks of up to 2 megabytes in size, it must not be placed within
+any 2M region which must be mapped with any specific attributes.
+
+NOTE: versions prior to v4.2 also require that the DTB be placed within
+the 512 MB region starting at text_offset bytes below the kernel Image.
 
 3. Decompress the kernel image
 ------------------------------
...
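To make the placement rules quoted above concrete, here is a rough, hypothetical bootloader-side check; it is a sketch only, not kernel code, and the helper name and parameters (dtb_base, dtb_size, image_base, text_offset) are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M	(2ULL << 20)

/* Hypothetical check of the DTB placement rules quoted above: 8-byte
 * alignment and a maximum size of 2 MB. Per the NOTE, kernels prior to
 * v4.2 additionally require the DTB to sit inside the 512 MB window
 * starting at text_offset bytes below the kernel Image. */
static bool dtb_placement_ok(uint64_t dtb_base, uint64_t dtb_size,
			     uint64_t image_base, uint64_t text_offset,
			     bool pre_v4_2)
{
	if (dtb_base & 7)
		return false;			/* not on an 8-byte boundary */
	if (dtb_size > SZ_2M)
		return false;			/* larger than 2 MB */
	if (pre_v4_2) {
		uint64_t window = image_base - text_offset;

		if (dtb_base < window ||
		    dtb_base + dtb_size > window + (512ULL << 20))
			return false;		/* outside legacy 512 MB window */
	}
	return true;
}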
@@ -218,11 +218,6 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 	return 0;
 }
 
-static inline void vgic_arch_setup(const struct vgic_params *vgic)
-{
-	BUG_ON(vgic->type != VGIC_V2);
-}
-
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
...
@@ -24,6 +24,8 @@
 #include <asm/kvm_psci.h>
 #include <asm/kvm_host.h>
 
+#include <uapi/linux/psci.h>
+
 /*
  * This is an implementation of the Power State Coordination Interface
  * as described in ARM document number ARM DEN 0022A.
...
@@ -268,6 +268,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
 	if (mdesc->reserve)
 		mdesc->reserve();
 
+	early_init_fdt_reserve_self();
 	early_init_fdt_scan_reserved_mem();
 
 	/* reserve memory for DMA contiguous allocations */
...
@@ -72,6 +72,7 @@ config ARM64
 	select HAVE_RCU_TABLE_FREE
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
+	select IRQ_FORCED_THREADING
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
 	select OF
...
@@ -180,6 +180,7 @@ CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
 CONFIG_SECURITY=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
...
@@ -16,6 +16,7 @@
 #include <linux/irqchip/arm-gic-acpi.h>
 
 #include <asm/cputype.h>
+#include <asm/psci.h>
 #include <asm/smp_plat.h>
 
 /* Basic configuration for ACPI */
@@ -39,18 +40,6 @@ extern int acpi_disabled;
 extern int acpi_noirq;
 extern int acpi_pci_disabled;
 
-/* 1 to indicate PSCI 0.2+ is implemented */
-static inline bool acpi_psci_present(void)
-{
-	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
-}
-
-/* 1 to indicate HVC must be used instead of SMC as the PSCI conduit */
-static inline bool acpi_psci_use_hvc(void)
-{
-	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
-}
-
 static inline void disable_acpi(void)
 {
 	acpi_disabled = 1;
@@ -88,9 +77,11 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
 void __init acpi_init_cpus(void);
 #else
-static inline bool acpi_psci_present(void) { return false; }
-static inline bool acpi_psci_use_hvc(void) { return false; }
 static inline void acpi_init_cpus(void) { }
 #endif /* CONFIG_ACPI */
 
+static inline const char *acpi_get_enable_method(int cpu)
+{
+	return acpi_psci_present() ? "psci" : NULL;
+}
+
 #endif /*_ASM_ACPI_H*/
#ifndef __ASM_ALTERNATIVE_ASM_H
#define __ASM_ALTERNATIVE_ASM_H
#ifdef __ASSEMBLY__
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
.word \orig_offset - .
.word \alt_offset - .
.hword \feature
.byte \orig_len
.byte \alt_len
.endm
.macro alternative_insn insn1 insn2 cap
661: \insn1
662: .pushsection .altinstructions, "a"
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
.popsection
.pushsection .altinstr_replacement, "ax"
663: \insn2
664: .popsection
.if ((664b-663b) != (662b-661b))
.error "Alternatives instruction length mismatch"
.endif
.endm
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ALTERNATIVE_ASM_H */
 #ifndef __ASM_ALTERNATIVE_H
 #define __ASM_ALTERNATIVE_H
 
+#ifndef __ASSEMBLY__
+
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
@@ -24,7 +26,20 @@ void free_alternatives_memory(void);
 "	.byte 662b-661b\n"			/* source len      */	\
 "	.byte 664f-663f\n"			/* replacement len */
 
-/* alternative assembly primitive: */
+/*
+ * alternative assembly primitive:
+ *
+ * If any of these .org directive fail, it means that insn1 and insn2
+ * don't have the same length. This used to be written as
+ *
+ * .if ((664b-663b) != (662b-661b))
+ *	.error "Alternatives instruction length mismatch"
+ * .endif
+ *
+ * but most assemblers die if insn1 or insn2 have a .inst. This should
+ * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
+ * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
 "661:\n\t"								\
 	oldinstr "\n"							\
@@ -37,8 +52,31 @@ void free_alternatives_memory(void);
 	newinstr "\n"							\
 	"664:\n\t"							\
 	".popsection\n\t"						\
-	".if ((664b-663b) != (662b-661b))\n\t"				\
-	"	.error \"Alternatives instruction length mismatch\"\n\t"\
-	".endif\n"
+	".org . - (664b-663b) + (662b-661b)\n\t"			\
+	".org . - (662b-661b) + (664b-663b)\n"
+
+#else
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+	.word \orig_offset - .
+	.word \alt_offset - .
+	.hword \feature
+	.byte \orig_len
+	.byte \alt_len
+.endm
+
+.macro alternative_insn insn1 insn2 cap
+661:	\insn1
+662:	.pushsection .altinstructions, "a"
+	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+	.popsection
+	.pushsection .altinstr_replacement, "ax"
+663:	\insn2
+664:	.popsection
+	.org . - (664b-663b) + (662b-661b)
+	.org . - (662b-661b) + (664b-663b)
+.endm
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_ALTERNATIVE_H */
#ifndef __ASM_BOOT_H
#define __ASM_BOOT_H
#include <asm/sizes.h>
/*
* arm64 requires the DTB to be 8 byte aligned and
* not exceed 2MB in size.
*/
#define MIN_FDT_ALIGN 8
#define MAX_FDT_SIZE SZ_2M
#endif
...@@ -40,10 +40,6 @@ ...@@ -40,10 +40,6 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing) * the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT or ASID-tagged VIVT I-cache. * VIPT or ASID-tagged VIVT I-cache.
* *
* flush_cache_all()
*
* Unconditionally clean and invalidate the entire cache.
*
* flush_cache_mm(mm) * flush_cache_mm(mm)
* *
* Clean and invalidate all user space cache entries * Clean and invalidate all user space cache entries
...@@ -69,7 +65,6 @@ ...@@ -69,7 +65,6 @@
* - kaddr - page address * - kaddr - page address
* - size - region size * - size - region size
*/ */
extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end); extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len); extern void __flush_dcache_area(void *addr, size_t len);
......
@@ -19,15 +19,15 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 
-struct device_node;
-
 /**
  * struct cpu_operations - Callback operations for hotplugging CPUs.
  *
 * @name:	Name of the property as appears in a devicetree cpu node's
- *		enable-method property.
- * @cpu_init:	Reads any data necessary for a specific enable-method from the
- *		devicetree, for a given cpu node and proposed logical id.
+ *		enable-method property. On systems booting with ACPI, @name
+ *		identifies the struct cpu_operations entry corresponding to
+ *		the boot protocol specified in the ACPI MADT table.
+ * @cpu_init:	Reads any data necessary for a specific enable-method for a
+ *		proposed logical id.
 * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
 *		mechanism for doing so, tests whether it is possible to boot
 *		the given CPU.
@@ -40,15 +40,15 @@ struct device_node;
 * @cpu_die:	Makes a cpu leave the kernel. Must not fail. Called from the
 *		cpu being killed.
 * @cpu_kill:	Ensures a cpu has left the kernel. Called from another cpu.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
- *		devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for
+ *		a proposed logical id.
 * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
 *		to wrong parameters or error conditions. Called from the
 *		CPU being suspended. Must be called with IRQs disabled.
 */
 struct cpu_operations {
 	const char	*name;
-	int		(*cpu_init)(struct device_node *, unsigned int);
+	int		(*cpu_init)(unsigned int);
 	int		(*cpu_prepare)(unsigned int);
 	int		(*cpu_boot)(unsigned int);
 	void		(*cpu_postboot)(void);
@@ -58,14 +58,17 @@ struct cpu_operations {
 	int		(*cpu_kill)(unsigned int cpu);
 #endif
 #ifdef CONFIG_CPU_IDLE
-	int		(*cpu_init_idle)(struct device_node *, unsigned int);
+	int		(*cpu_init_idle)(unsigned int);
 	int		(*cpu_suspend)(unsigned long);
 #endif
 };
 
 extern const struct cpu_operations *cpu_ops[NR_CPUS];
-int __init cpu_read_ops(struct device_node *dn, int cpu);
-void __init cpu_read_bootcpu_ops(void);
-const struct cpu_operations *cpu_get_ops(const char *name);
+int __init cpu_read_ops(int cpu);
+
+static inline void __init cpu_read_bootcpu_ops(void)
+{
+	cpu_read_ops(0);
+}
 
 #endif /* ifndef __ASM_CPU_OPS_H */
@@ -24,8 +24,9 @@
 #define ARM64_WORKAROUND_CLEAN_CACHE		0
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
 #define ARM64_WORKAROUND_845719			2
+#define ARM64_HAS_SYSREG_GIC_CPUIF		3
 
-#define ARM64_NCAPS				3
+#define ARM64_NCAPS				4
 
 #ifndef __ASSEMBLY__
 
@@ -38,6 +39,11 @@ struct arm64_cpu_capabilities {
 			u32 midr_model;
 			u32 midr_range_min, midr_range_max;
 		};
+
+		struct {	/* Feature register checking */
+			u64 register_mask;
+			u64 register_value;
+		};
 	};
 };
...
@@ -5,20 +5,16 @@
 #ifdef CONFIG_CPU_IDLE
 extern int arm_cpuidle_init(unsigned int cpu);
-extern int cpu_suspend(unsigned long arg);
+extern int arm_cpuidle_suspend(int index);
 #else
 static inline int arm_cpuidle_init(unsigned int cpu)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline int cpu_suspend(unsigned long arg)
+static inline int arm_cpuidle_suspend(int index)
 {
 	return -EOPNOTSUPP;
 }
 #endif
-
-static inline int arm_cpuidle_suspend(int index)
-{
-	return cpu_suspend(index);
-}
-
 #endif
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/kernel.h> #include <linux/kernel.h>
#include <asm/boot.h>
#include <asm/page.h> #include <asm/page.h>
/* /*
...@@ -32,6 +33,20 @@ ...@@ -32,6 +33,20 @@
*/ */
enum fixed_addresses { enum fixed_addresses {
FIX_HOLE, FIX_HOLE,
/*
* Reserve a virtual window for the FDT that is 2 MB larger than the
* maximum supported size, and put it at the top of the fixmap region.
* The additional space ensures that any FDT that does not exceed
* MAX_FDT_SIZE can be mapped regardless of whether it crosses any
* 2 MB alignment boundaries.
*
* Keep this at the top so it remains 2 MB aligned.
*/
#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
FIX_FDT_END,
FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
FIX_EARLYCON_MEM_BASE, FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0, FIX_TEXT_POKE0,
__end_of_permanent_fixed_addresses, __end_of_permanent_fixed_addresses,
......
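The FIX_FDT_SIZE comment above argues that reserving MAX_FDT_SIZE plus one extra 2 MB block is enough for any FDT that may straddle a 2 MB boundary. A small stand-alone sketch of that arithmetic (not kernel code; the constants simply mirror the header's assumed values of a 2 MB maximum FDT and 2 MB block mappings):

#include <assert.h>

#define SZ_2M		0x200000UL
#define MAX_FDT_SIZE	SZ_2M
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)

/* Worst case: a maximum-size FDT that starts just below a 2 MB boundary.
 * The 2 MB blocks needed to cover it span exactly FIX_FDT_SIZE bytes. */
int main(void)
{
	unsigned long fdt_base  = SZ_2M - 8;		/* 8-byte aligned */
	unsigned long fdt_end   = fdt_base + MAX_FDT_SIZE;
	unsigned long map_start = fdt_base & ~(SZ_2M - 1);
	unsigned long map_end   = (fdt_end + SZ_2M - 1) & ~(SZ_2M - 1);

	assert(map_end - map_start == FIX_FDT_SIZE);	/* 4 MB window */
	return 0;
}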
...@@ -281,6 +281,7 @@ __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000) ...@@ -281,6 +281,7 @@ __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
#undef __AARCH64_INSN_FUNCS #undef __AARCH64_INSN_FUNCS
bool aarch64_insn_is_nop(u32 insn); bool aarch64_insn_is_nop(u32 insn);
bool aarch64_insn_is_branch_imm(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp); int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn); int aarch64_insn_write(void *addr, u32 insn);
...@@ -351,6 +352,8 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, ...@@ -351,6 +352,8 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
int shift, int shift,
enum aarch64_insn_variant variant, enum aarch64_insn_variant variant,
enum aarch64_insn_logic_type type); enum aarch64_insn_logic_type type);
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
......
@@ -117,10 +117,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.
 */
-#define readb_relaxed(c)	({ u8  __v = __raw_readb(c); __v; })
-#define readw_relaxed(c)	({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
-#define readl_relaxed(c)	({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
-#define readq_relaxed(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; })
+#define readb_relaxed(c)	({ u8  __r = __raw_readb(c); __r; })
+#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
 
 #define writeb_relaxed(v,c)	((void)__raw_writeb((v),(c)))
 #define writew_relaxed(v,c)	((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
...
...@@ -132,11 +132,6 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); ...@@ -132,11 +132,6 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void); extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern char __save_vgic_v2_state[];
extern char __restore_vgic_v2_state[];
extern char __save_vgic_v3_state[];
extern char __restore_vgic_v3_state[];
#endif #endif
#endif /* __ARM_KVM_ASM_H__ */ #endif /* __ARM_KVM_ASM_H__ */
...@@ -221,29 +221,6 @@ struct vgic_sr_vectors { ...@@ -221,29 +221,6 @@ struct vgic_sr_vectors {
void *restore_vgic; void *restore_vgic;
}; };
static inline void vgic_arch_setup(const struct vgic_params *vgic)
{
extern struct vgic_sr_vectors __vgic_sr_vectors;
switch(vgic->type)
{
case VGIC_V2:
__vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
__vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
break;
#ifdef CONFIG_ARM_GIC_V3
case VGIC_V3:
__vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
__vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
break;
#endif
default:
BUG();
}
}
static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {}
......
...@@ -34,5 +34,6 @@ extern void init_mem_pgprot(void); ...@@ -34,5 +34,6 @@ extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size, unsigned long virt, phys_addr_t size,
pgprot_t prot); pgprot_t prot);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
#endif #endif
...@@ -24,4 +24,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); ...@@ -24,4 +24,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs) #define perf_misc_flags(regs) perf_misc_flags(regs)
#endif #endif
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->pc = (__ip); \
(regs)->regs[29] = (unsigned long) __builtin_frame_address(0); \
(regs)->sp = current_stack_pointer; \
(regs)->pstate = PSR_MODE_EL1h; \
}
#endif #endif
...@@ -28,12 +28,8 @@ ...@@ -28,12 +28,8 @@
struct mm_struct; struct mm_struct;
struct cpu_suspend_ctx; struct cpu_suspend_ctx;
extern void cpu_cache_off(void);
extern void cpu_do_idle(void); extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
void cpu_soft_restart(phys_addr_t cpu_reset,
unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
......
@@ -78,13 +78,30 @@ struct cpu_context {
 
 struct thread_struct {
 	struct cpu_context	cpu_context;	/* cpu context */
-	unsigned long		tp_value;
+	unsigned long		tp_value;	/* TLS register */
+#ifdef CONFIG_COMPAT
+	unsigned long		tp2_value;
+#endif
 	struct fpsimd_state	fpsimd_state;
 	unsigned long		fault_address;	/* fault info */
 	unsigned long		fault_code;	/* ESR_EL1 value */
 	struct debug_info	debug;		/* debugging */
 };
 
+#ifdef CONFIG_COMPAT
+#define task_user_tls(t)						\
+({									\
+	unsigned long *__tls;						\
+	if (is_compat_thread(task_thread_info(t)))			\
+		__tls = &(t)->thread.tp2_value;				\
+	else								\
+		__tls = &(t)->thread.tp_value;				\
+	__tls;								\
+})
+#else
+#define task_user_tls(t)	(&(t)->thread.tp_value)
+#endif
+
 #define INIT_THREAD  { }
 
 static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
...
@@ -14,7 +14,15 @@
 #ifndef __ASM_PSCI_H
 #define __ASM_PSCI_H
 
-int psci_dt_init(void);
-int psci_acpi_init(void);
+int __init psci_dt_init(void);
+
+#ifdef CONFIG_ACPI
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
 
 #endif /* __ASM_PSCI_H */
@@ -42,7 +42,7 @@ extern void handle_IPI(int ipinr, struct pt_regs *regs);
 * Discover the set of possible CPUs and determine their
 * SMP operations.
 */
-extern void of_smp_init_cpus(void);
+extern void smp_init_cpus(void);
 
 /*
 * Provide a function to raise an IPI cross call on CPUs in callmap.
...
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
#ifndef __ASM_SMP_PLAT_H #ifndef __ASM_SMP_PLAT_H
#define __ASM_SMP_PLAT_H #define __ASM_SMP_PLAT_H
#include <linux/cpumask.h>
#include <asm/types.h> #include <asm/types.h>
struct mpidr_hash { struct mpidr_hash {
...@@ -39,6 +41,20 @@ static inline u32 mpidr_hash_size(void) ...@@ -39,6 +41,20 @@ static inline u32 mpidr_hash_size(void)
*/ */
extern u64 __cpu_logical_map[NR_CPUS]; extern u64 __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu] #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/*
* Retrieve logical cpu index corresponding to a given MPIDR.Aff*
* - mpidr: MPIDR.Aff* bits to be used for the look-up
*
* Returns the cpu logical index or -EINVAL on look-up error
*/
static inline int get_logical_index(u64 mpidr)
{
int cpu;
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
if (cpu_logical_map(cpu) == mpidr)
return cpu;
return -EINVAL;
}
void __init do_post_cpus_up_work(void); void __init do_post_cpus_up_work(void);
......
@@ -21,6 +21,6 @@ struct sleep_save_sp {
 	phys_addr_t save_ptr_stash_phys;
 };
 
-extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
+extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
 #endif
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/irqflags.h> #include <linux/irqflags.h>
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <linux/reboot.h> #include <linux/reboot.h>
struct pt_regs; struct pt_regs;
...@@ -41,9 +43,19 @@ struct mm_struct; ...@@ -41,9 +43,19 @@ struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr); extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *); extern void __show_regs(struct pt_regs *);
void soft_restart(unsigned long);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
#define show_unhandled_signals_ratelimited() \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
bool __show_ratelimited = false; \
if (show_unhandled_signals && __ratelimit(&_rs)) \
__show_ratelimited = true; \
__show_ratelimited; \
})
#define UDBG_UNDEFINED (1 << 0) #define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1) #define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2) #define UDBG_BADABORT (1 << 2)
......
...@@ -28,8 +28,6 @@ ...@@ -28,8 +28,6 @@
* TLB Management * TLB Management
* ============== * ==============
* *
* The arch/arm64/mm/tlb.S files implement these methods.
*
* The TLB specific code is expected to perform whatever tests it needs * The TLB specific code is expected to perform whatever tests it needs
* to determine if it should invalidate the TLB for each call. Start * to determine if it should invalidate the TLB for each call. Start
* addresses are inclusive and end addresses are exclusive; it is safe to * addresses are inclusive and end addresses are exclusive; it is safe to
......
...@@ -36,12 +36,6 @@ EXPORT_SYMBOL(acpi_disabled); ...@@ -36,12 +36,6 @@ EXPORT_SYMBOL(acpi_disabled);
int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */ int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled); EXPORT_SYMBOL(acpi_pci_disabled);
/* Processors with enabled flag and sane MPIDR */
static int enabled_cpus;
/* Boot CPU is valid or not in MADT */
static bool bootcpu_valid __initdata;
static bool param_acpi_off __initdata; static bool param_acpi_off __initdata;
static bool param_acpi_force __initdata; static bool param_acpi_force __initdata;
...@@ -95,122 +89,15 @@ void __init __acpi_unmap_table(char *map, unsigned long size) ...@@ -95,122 +89,15 @@ void __init __acpi_unmap_table(char *map, unsigned long size)
early_memunmap(map, size); early_memunmap(map, size);
} }
/** bool __init acpi_psci_present(void)
* acpi_map_gic_cpu_interface - generates a logical cpu number
* and map to MPIDR represented by GICC structure
*/
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{ {
int i; return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
u64 mpidr = processor->arm_mpidr & MPIDR_HWID_BITMASK;
bool enabled = !!(processor->flags & ACPI_MADT_ENABLED);
if (mpidr == INVALID_HWID) {
pr_info("Skip MADT cpu entry with invalid MPIDR\n");
return;
}
total_cpus++;
if (!enabled)
return;
if (enabled_cpus >= NR_CPUS) {
pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
NR_CPUS, total_cpus, mpidr);
return;
}
/* Check if GICC structure of boot CPU is available in the MADT */
if (cpu_logical_map(0) == mpidr) {
if (bootcpu_valid) {
pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
mpidr);
return;
}
bootcpu_valid = true;
}
/*
* Duplicate MPIDRs are a recipe for disaster. Scan
* all initialized entries and check for
* duplicates. If any is found just ignore the CPU.
*/
for (i = 1; i < enabled_cpus; i++) {
if (cpu_logical_map(i) == mpidr) {
pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
mpidr);
return;
}
}
if (!acpi_psci_present())
return;
cpu_ops[enabled_cpus] = cpu_get_ops("psci");
/* CPU 0 was already initialized */
if (enabled_cpus) {
if (!cpu_ops[enabled_cpus])
return;
if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
return;
/* map the logical cpu id to cpu MPIDR */
cpu_logical_map(enabled_cpus) = mpidr;
}
enabled_cpus++;
}
static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *processor;
processor = (struct acpi_madt_generic_interrupt *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
acpi_map_gic_cpu_interface(processor);
return 0;
} }
/* Parse GIC cpu interface entries in MADT for SMP init */ /* Whether HVC must be used instead of SMC as the PSCI conduit */
void __init acpi_init_cpus(void) bool __init acpi_psci_use_hvc(void)
{ {
int count, i; return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
/*
* do a partial walk of MADT to determine how many CPUs
* we have including disabled CPUs, and get information
* we need for SMP init
*/
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
acpi_parse_gic_cpu_interface, 0);
if (!count) {
pr_err("No GIC CPU interface entries present\n");
return;
} else if (count < 0) {
pr_err("Error parsing GIC CPU interface entry\n");
return;
}
if (!bootcpu_valid) {
pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
return;
}
for (i = 0; i < enabled_cpus; i++)
set_cpu_possible(i, true);
/* Make boot-up look pretty */
pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
} }
/* /*
......
...@@ -24,8 +24,13 @@ ...@@ -24,8 +24,13 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/insn.h>
#include <linux/stop_machine.h> #include <linux/stop_machine.h>
#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
struct alt_region { struct alt_region {
...@@ -33,13 +38,63 @@ struct alt_region { ...@@ -33,13 +38,63 @@ struct alt_region {
struct alt_instr *end; struct alt_instr *end;
}; };
/*
* Check if the target PC is within an alternative block.
*/
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
unsigned long replptr;
if (kernel_text_address(pc))
return 1;
replptr = (unsigned long)ALT_REPL_PTR(alt);
if (pc >= replptr && pc <= (replptr + alt->alt_len))
return 0;
/*
* Branching into *another* alternate sequence is doomed, and
* we're not even trying to fix it up.
*/
BUG();
}
static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
{
u32 insn;
insn = le32_to_cpu(*altinsnptr);
if (aarch64_insn_is_branch_imm(insn)) {
s32 offset = aarch64_get_branch_offset(insn);
unsigned long target;
target = (unsigned long)altinsnptr + offset;
/*
* If we're branching inside the alternate sequence,
* do not rewrite the instruction, as it is already
* correct. Otherwise, generate the new instruction.
*/
if (branch_insn_requires_update(alt, target)) {
offset = target - (unsigned long)insnptr;
insn = aarch64_set_branch_offset(insn, offset);
}
}
return insn;
}
static int __apply_alternatives(void *alt_region) static int __apply_alternatives(void *alt_region)
{ {
struct alt_instr *alt; struct alt_instr *alt;
struct alt_region *region = alt_region; struct alt_region *region = alt_region;
u8 *origptr, *replptr; u32 *origptr, *replptr;
for (alt = region->begin; alt < region->end; alt++) { for (alt = region->begin; alt < region->end; alt++) {
u32 insn;
int i, nr_inst;
if (!cpus_have_cap(alt->cpufeature)) if (!cpus_have_cap(alt->cpufeature))
continue; continue;
...@@ -47,11 +102,17 @@ static int __apply_alternatives(void *alt_region) ...@@ -47,11 +102,17 @@ static int __apply_alternatives(void *alt_region)
pr_info_once("patching kernel code\n"); pr_info_once("patching kernel code\n");
origptr = (u8 *)&alt->orig_offset + alt->orig_offset; origptr = ALT_ORIG_PTR(alt);
replptr = (u8 *)&alt->alt_offset + alt->alt_offset; replptr = ALT_REPL_PTR(alt);
memcpy(origptr, replptr, alt->alt_len); nr_inst = alt->alt_len / sizeof(insn);
for (i = 0; i < nr_inst; i++) {
insn = get_alt_insn(alt, origptr + i, replptr + i);
*(origptr + i) = cpu_to_le32(insn);
}
flush_icache_range((uintptr_t)origptr, flush_icache_range((uintptr_t)origptr,
(uintptr_t)(origptr + alt->alt_len)); (uintptr_t)(origptr + nr_inst));
} }
return 0; return 0;
......
...@@ -127,7 +127,6 @@ int main(void) ...@@ -127,7 +127,6 @@ int main(void)
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic)); DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic)); DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
......
...@@ -16,11 +16,13 @@ ...@@ -16,11 +16,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <asm/cpu_ops.h> #include <linux/acpi.h>
#include <asm/smp_plat.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/acpi.h>
#include <asm/cpu_ops.h>
#include <asm/smp_plat.h>
extern const struct cpu_operations smp_spin_table_ops; extern const struct cpu_operations smp_spin_table_ops;
extern const struct cpu_operations cpu_psci_ops; extern const struct cpu_operations cpu_psci_ops;
...@@ -35,7 +37,7 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = { ...@@ -35,7 +37,7 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = {
NULL, NULL,
}; };
const struct cpu_operations * __init cpu_get_ops(const char *name) static const struct cpu_operations * __init cpu_get_ops(const char *name)
{ {
const struct cpu_operations **ops = supported_cpu_ops; const struct cpu_operations **ops = supported_cpu_ops;
...@@ -49,39 +51,53 @@ const struct cpu_operations * __init cpu_get_ops(const char *name) ...@@ -49,39 +51,53 @@ const struct cpu_operations * __init cpu_get_ops(const char *name)
return NULL; return NULL;
} }
/* static const char *__init cpu_read_enable_method(int cpu)
* Read a cpu's enable method from the device tree and record it in cpu_ops.
*/
int __init cpu_read_ops(struct device_node *dn, int cpu)
{ {
const char *enable_method = of_get_property(dn, "enable-method", NULL); const char *enable_method;
if (acpi_disabled) {
struct device_node *dn = of_get_cpu_node(cpu, NULL);
if (!dn) {
if (!cpu)
pr_err("Failed to find device node for boot cpu\n");
return NULL;
}
enable_method = of_get_property(dn, "enable-method", NULL);
if (!enable_method) { if (!enable_method) {
/* /*
* The boot CPU may not have an enable method (e.g. when * The boot CPU may not have an enable method (e.g.
* spin-table is used for secondaries). Don't warn spuriously. * when spin-table is used for secondaries).
* Don't warn spuriously.
*/ */
if (cpu != 0) if (cpu != 0)
pr_err("%s: missing enable-method property\n", pr_err("%s: missing enable-method property\n",
dn->full_name); dn->full_name);
return -ENOENT;
} }
} else {
enable_method = acpi_get_enable_method(cpu);
if (!enable_method)
pr_err("Unsupported ACPI enable-method\n");
}
return enable_method;
}
/*
* Read a cpu's enable method and record it in cpu_ops.
*/
int __init cpu_read_ops(int cpu)
{
const char *enable_method = cpu_read_enable_method(cpu);
if (!enable_method)
return -ENODEV;
cpu_ops[cpu] = cpu_get_ops(enable_method); cpu_ops[cpu] = cpu_get_ops(enable_method);
if (!cpu_ops[cpu]) { if (!cpu_ops[cpu]) {
pr_warn("%s: unsupported enable-method property: %s\n", pr_warn("Unsupported enable-method: %s\n", enable_method);
dn->full_name, enable_method);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
return 0; return 0;
} }
void __init cpu_read_bootcpu_ops(void)
{
struct device_node *dn = of_get_cpu_node(0, NULL);
if (!dn) {
pr_err("Failed to find device node for boot cpu\n");
return;
}
cpu_read_ops(dn, 0);
}
...@@ -22,7 +22,23 @@ ...@@ -22,7 +22,23 @@
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
static bool
has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
{
u64 val;
val = read_cpuid(id_aa64pfr0_el1);
return (val & entry->register_mask) == entry->register_value;
}
static const struct arm64_cpu_capabilities arm64_features[] = { static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
.matches = has_id_aa64pfr0_feature,
.register_mask = (0xf << 24),
.register_value = (1 << 24),
},
{}, {},
}; };
......
@@ -18,15 +18,10 @@
 int arm_cpuidle_init(unsigned int cpu)
 {
 	int ret = -EOPNOTSUPP;
-	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
-
-	if (!cpu_node)
-		return -ENODEV;
 
 	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
-		ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
-
-	of_node_put(cpu_node);
+		ret = cpu_ops[cpu]->cpu_init_idle(cpu);
 
 	return ret;
 }
@@ -37,7 +32,7 @@ int arm_cpuidle_init(unsigned int cpu)
 * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
 * operations back-end error code otherwise.
 */
-int cpu_suspend(unsigned long arg)
+int arm_cpuidle_suspend(int index)
 {
 	int cpu = smp_processor_id();
 
@@ -47,5 +42,5 @@ int cpu_suspend(unsigned long arg)
 	 */
 	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
 		return -EOPNOTSUPP;
 
-	return cpu_ops[cpu]->cpu_suspend(arg);
+	return cpu_ops[cpu]->cpu_suspend(index);
 }
@@ -21,7 +21,7 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/cpufeature.h>
@@ -124,21 +124,24 @@
 	msr	sp_el0, x23
 
 #ifdef CONFIG_ARM64_ERRATUM_845719
-	alternative_insn						\
-	"nop",								\
-	"tbz x22, #4, 1f",						\
-	ARM64_WORKAROUND_845719
+
+#undef SEQUENCE_ORG
+#undef SEQUENCE_ALT
+
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-	alternative_insn						\
-	"nop; nop",							\
-	"mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:",		\
-	ARM64_WORKAROUND_845719
+
+#define SEQUENCE_ORG	"nop ; nop ; nop"
+#define SEQUENCE_ALT	"tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
+
 #else
-	alternative_insn						\
-	"nop",								\
-	"msr contextidr_el1, xzr; 1:",					\
-	ARM64_WORKAROUND_845719
+
+#define SEQUENCE_ORG	"nop ; nop"
+#define SEQUENCE_ALT	"tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
+
 #endif
+
+	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
+
 #endif
 	.endif
 
 	msr	elr_el1, x21			// set up the return data
@@ -517,6 +520,7 @@ el0_sp_pc:
 	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	ct_user_exit
 	mov	x0, x26
 	mov	x1, x25
 	mov	x2, sp
@@ -608,11 +612,16 @@ ENDPROC(cpu_switch_to)
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
-	ldr	x1, [tsk, #TI_FLAGS]
+	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
+	and	x2, x1, #_TIF_SYSCALL_WORK
+	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1
+ret_fast_syscall_trace:
+	enable_irq				// enable interrupts
+	b	__sys_trace_return
 
 /*
 * Ok, we need to do extra processing, enter the slow path.
...
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <linux/cpu.h>
#include <linux/cpu_pm.h> #include <linux/cpu_pm.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -296,6 +297,35 @@ static void fpsimd_pm_init(void) ...@@ -296,6 +297,35 @@ static void fpsimd_pm_init(void)
static inline void fpsimd_pm_init(void) { } static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */ #endif /* CONFIG_CPU_PM */
#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_hotplug_notifier(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action) {
case CPU_DEAD:
case CPU_DEAD_FROZEN:
per_cpu(fpsimd_last_state, cpu) = NULL;
break;
}
return NOTIFY_OK;
}
static struct notifier_block fpsimd_cpu_hotplug_notifier_block = {
.notifier_call = fpsimd_cpu_hotplug_notifier,
};
static inline void fpsimd_hotplug_init(void)
{
register_cpu_notifier(&fpsimd_cpu_hotplug_notifier_block);
}
#else
static inline void fpsimd_hotplug_init(void) { }
#endif
/* /*
* FP/SIMD support code initialisation. * FP/SIMD support code initialisation.
*/ */
...@@ -315,6 +345,7 @@ static int __init fpsimd_init(void) ...@@ -315,6 +345,7 @@ static int __init fpsimd_init(void)
elf_hwcap |= HWCAP_ASIMD; elf_hwcap |= HWCAP_ASIMD;
fpsimd_pm_init(); fpsimd_pm_init();
fpsimd_hotplug_init();
return 0; return 0;
} }
......
...@@ -237,8 +237,6 @@ ENTRY(stext) ...@@ -237,8 +237,6 @@ ENTRY(stext)
bl el2_setup // Drop to EL1, w20=cpu_boot_mode bl el2_setup // Drop to EL1, w20=cpu_boot_mode
adrp x24, __PHYS_OFFSET adrp x24, __PHYS_OFFSET
bl set_cpu_boot_mode_flag bl set_cpu_boot_mode_flag
bl __vet_fdt
bl __create_page_tables // x25=TTBR0, x26=TTBR1 bl __create_page_tables // x25=TTBR0, x26=TTBR1
/* /*
* The following calls CPU setup code, see arch/arm64/mm/proc.S for * The following calls CPU setup code, see arch/arm64/mm/proc.S for
...@@ -269,24 +267,6 @@ preserve_boot_args: ...@@ -269,24 +267,6 @@ preserve_boot_args:
b __inval_cache_range // tail call b __inval_cache_range // tail call
ENDPROC(preserve_boot_args) ENDPROC(preserve_boot_args)
/*
* Determine validity of the x21 FDT pointer.
* The dtb must be 8-byte aligned and live in the first 512M of memory.
*/
__vet_fdt:
tst x21, #0x7
b.ne 1f
cmp x21, x24
b.lt 1f
mov x0, #(1 << 29)
add x0, x0, x24
cmp x21, x0
b.ge 1f
ret
1:
mov x21, #0
ret
ENDPROC(__vet_fdt)
/* /*
* Macro to create a table entry to the next page. * Macro to create a table entry to the next page.
* *
...@@ -348,8 +328,7 @@ ENDPROC(__vet_fdt) ...@@ -348,8 +328,7 @@ ENDPROC(__vet_fdt)
* required to get the kernel running. The following sections are required: * required to get the kernel running. The following sections are required:
* - identity mapping to enable the MMU (low address, TTBR0) * - identity mapping to enable the MMU (low address, TTBR0)
* - first few MB of the kernel linear mapping to jump to once the MMU has * - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled, including the FDT blob (TTBR1) * been enabled
* - pgd entry for fixed mappings (TTBR1)
*/ */
__create_page_tables: __create_page_tables:
adrp x25, idmap_pg_dir adrp x25, idmap_pg_dir
...@@ -382,7 +361,7 @@ __create_page_tables: ...@@ -382,7 +361,7 @@ __create_page_tables:
* Create the identity mapping. * Create the identity mapping.
*/ */
mov x0, x25 // idmap_pg_dir mov x0, x25 // idmap_pg_dir
adrp x3, KERNEL_START // __pa(KERNEL_START) adrp x3, __idmap_text_start // __pa(__idmap_text_start)
#ifndef CONFIG_ARM64_VA_BITS_48 #ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) #define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
...@@ -405,11 +384,11 @@ __create_page_tables: ...@@ -405,11 +384,11 @@ __create_page_tables:
/* /*
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
* entire kernel image can be ID mapped. As T0SZ == (64 - #bits used), * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
* this number conveniently equals the number of leading zeroes in * this number conveniently equals the number of leading zeroes in
* the physical address of KERNEL_END. * the physical address of __idmap_text_end.
*/ */
adrp x5, KERNEL_END adrp x5, __idmap_text_end
clz x5, x5 clz x5, x5
cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
b.ge 1f // .. then skip additional level b.ge 1f // .. then skip additional level
...@@ -424,8 +403,8 @@ __create_page_tables: ...@@ -424,8 +403,8 @@ __create_page_tables:
#endif #endif
create_pgd_entry x0, x3, x5, x6 create_pgd_entry x0, x3, x5, x6
mov x5, x3 // __pa(KERNEL_START) mov x5, x3 // __pa(__idmap_text_start)
adr_l x6, KERNEL_END // __pa(KERNEL_END) adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
create_block_map x0, x7, x3, x5, x6 create_block_map x0, x7, x3, x5, x6
/* /*
...@@ -438,22 +417,6 @@ __create_page_tables: ...@@ -438,22 +417,6 @@ __create_page_tables:
mov x3, x24 // phys offset mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6 create_block_map x0, x7, x3, x5, x6
/*
* Map the FDT blob (maximum 2MB; must be within 512MB of
* PHYS_OFFSET).
*/
mov x3, x21 // FDT phys address
and x3, x3, #~((1 << 21) - 1) // 2MB aligned
mov x6, #PAGE_OFFSET
sub x5, x3, x24 // subtract PHYS_OFFSET
tst x5, #~((1 << 29) - 1) // within 512MB?
csel x21, xzr, x21, ne // zero the FDT pointer
b.ne 1f
add x5, x5, x6 // __va(FDT blob)
add x6, x5, #1 << 21 // 2MB for the FDT blob
sub x6, x6, #1 // inclusive range
create_block_map x0, x7, x3, x5, x6
1:
/* /*
* Since the page tables have been populated with non-cacheable * Since the page tables have been populated with non-cacheable
* accesses (MMU disabled), invalidate the idmap and swapper page * accesses (MMU disabled), invalidate the idmap and swapper page
...@@ -669,6 +632,7 @@ ENDPROC(__secondary_switched) ...@@ -669,6 +632,7 @@ ENDPROC(__secondary_switched)
* *
* other registers depend on the function called upon completion * other registers depend on the function called upon completion
*/ */
.section ".idmap.text", "ax"
__enable_mmu: __enable_mmu:
ldr x5, =vectors ldr x5, =vectors
msr vbar_el1, x5 msr vbar_el1, x5
......
...@@ -77,6 +77,14 @@ bool __kprobes aarch64_insn_is_nop(u32 insn) ...@@ -77,6 +77,14 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
} }
} }
bool aarch64_insn_is_branch_imm(u32 insn)
{
return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
aarch64_insn_is_bcond(insn));
}
static DEFINE_SPINLOCK(patch_lock); static DEFINE_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap) static void __kprobes *patch_map(void *addr, int fixmap)
...@@ -1057,6 +1065,58 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, ...@@ -1057,6 +1065,58 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
} }
/*
* Decode the imm field of a branch, and return the byte offset as a
* signed value (so it can be used when computing a new branch
* target).
*/
s32 aarch64_get_branch_offset(u32 insn)
{
s32 imm;
if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
return (imm << 6) >> 4;
}
if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
aarch64_insn_is_bcond(insn)) {
imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
return (imm << 13) >> 11;
}
if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
return (imm << 18) >> 16;
}
/* Unhandled instruction */
BUG();
}
/*
* Encode the displacement of a branch in the imm field and return the
* updated instruction.
*/
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
offset >> 2);
if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
aarch64_insn_is_bcond(insn))
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
offset >> 2);
if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
offset >> 2);
/* Unhandled instruction */
BUG();
}
bool aarch32_insn_is_wide(u32 insn) bool aarch32_insn_is_wide(u32 insn)
{ {
return insn >= 0xe800; return insn >= 0xe800;
......
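The decode helpers above sign-extend and scale the branch immediates with shift arithmetic, e.g. "(imm << 6) >> 4" for the 26-bit B/BL field. A small stand-alone sketch of that trick (illustrative only, not kernel code; the helper name is made up):

#include <assert.h>
#include <stdint.h>

/* imm26 encodes a word offset. Shifting the 26-bit field up by 6 puts
 * its sign bit at bit 31; an arithmetic shift right by 4 then
 * sign-extends it while multiplying by 4, giving the byte offset. */
static int32_t branch_imm26_to_byte_offset(uint32_t imm26)
{
	return ((int32_t)(imm26 << 6)) >> 4;
}

int main(void)
{
	assert(branch_imm26_to_byte_offset(0x1) == 4);			/* +1 word */
	assert(branch_imm26_to_byte_offset(0x3FFFFFF) == -4);		/* -1 word */
	assert(branch_imm26_to_byte_offset(0x2000000) == -(128 << 20));	/* -128 MB */
	return 0;
}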
@@ -488,7 +488,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		}
 
 		err = request_irq(irq, armpmu->handle_irq,
-				IRQF_NOBALANCING,
+				IRQF_NOBALANCING | IRQF_NO_THREAD,
 				"arm-pmu", armpmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
...
...@@ -58,14 +58,6 @@ unsigned long __stack_chk_guard __read_mostly; ...@@ -58,14 +58,6 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard); EXPORT_SYMBOL(__stack_chk_guard);
#endif #endif
void soft_restart(unsigned long addr)
{
setup_mm_for_reboot();
cpu_soft_restart(virt_to_phys(cpu_reset), addr);
/* Should never get here */
BUG();
}
/* /*
* Function pointers to optional machine specific functions * Function pointers to optional machine specific functions
*/ */
...@@ -136,9 +128,7 @@ void machine_power_off(void) ...@@ -136,9 +128,7 @@ void machine_power_off(void)
/* /*
* Restart requires that the secondary CPUs stop performing any activity * Restart requires that the secondary CPUs stop performing any activity
* while the primary CPU resets the system. Systems with a single CPU can * while the primary CPU resets the system. Systems with multiple CPUs must
* use soft_restart() as their machine descriptor's .restart hook, since that
* will cause the only available CPU to reset. Systems with multiple CPUs must
* provide a HW restart implementation, to ensure that all CPUs reset at once. * provide a HW restart implementation, to ensure that all CPUs reset at once.
* This is required so that any code running after reset on the primary CPU * This is required so that any code running after reset on the primary CPU
* doesn't have to co-ordinate with other CPUs to ensure they aren't still * doesn't have to co-ordinate with other CPUs to ensure they aren't still
...@@ -243,6 +233,7 @@ void release_thread(struct task_struct *dead_task) ...@@ -243,6 +233,7 @@ void release_thread(struct task_struct *dead_task)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{ {
if (current->mm)
fpsimd_preserve_current_state(); fpsimd_preserve_current_state();
*dst = *src; *dst = *src;
return 0; return 0;
...@@ -254,35 +245,35 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ...@@ -254,35 +245,35 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p) unsigned long stk_sz, struct task_struct *p)
{ {
struct pt_regs *childregs = task_pt_regs(p); struct pt_regs *childregs = task_pt_regs(p);
unsigned long tls = p->thread.tp_value;
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
if (likely(!(p->flags & PF_KTHREAD))) { if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs(); *childregs = *current_pt_regs();
childregs->regs[0] = 0; childregs->regs[0] = 0;
if (is_compat_thread(task_thread_info(p))) {
if (stack_start)
childregs->compat_sp = stack_start;
} else {
/* /*
* Read the current TLS pointer from tpidr_el0 as it may be * Read the current TLS pointer from tpidr_el0 as it may be
* out-of-sync with the saved value. * out-of-sync with the saved value.
*/ */
asm("mrs %0, tpidr_el0" : "=r" (tls)); asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));
if (stack_start) { if (stack_start) {
if (is_compat_thread(task_thread_info(p)))
childregs->compat_sp = stack_start;
/* 16-byte aligned stack mandatory on AArch64 */ /* 16-byte aligned stack mandatory on AArch64 */
if (stack_start & 15) else if (stack_start & 15)
return -EINVAL; return -EINVAL;
else
childregs->sp = stack_start; childregs->sp = stack_start;
} }
}
/* /*
* If a TLS pointer was passed to clone (4th argument), use it * If a TLS pointer was passed to clone (4th argument), use it
* for the new thread. * for the new thread.
*/ */
if (clone_flags & CLONE_SETTLS) if (clone_flags & CLONE_SETTLS)
tls = childregs->regs[3]; p->thread.tp_value = childregs->regs[3];
} else { } else {
memset(childregs, 0, sizeof(struct pt_regs)); memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h; childregs->pstate = PSR_MODE_EL1h;
...@@ -291,7 +282,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ...@@ -291,7 +282,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
} }
p->thread.cpu_context.pc = (unsigned long)ret_from_fork; p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs; p->thread.cpu_context.sp = (unsigned long)childregs;
p->thread.tp_value = tls;
ptrace_hw_copy_thread(p); ptrace_hw_copy_thread(p);
...@@ -302,18 +292,12 @@ static void tls_thread_switch(struct task_struct *next) ...@@ -302,18 +292,12 @@ static void tls_thread_switch(struct task_struct *next)
{ {
unsigned long tpidr, tpidrro; unsigned long tpidr, tpidrro;
if (!is_compat_task()) {
asm("mrs %0, tpidr_el0" : "=r" (tpidr)); asm("mrs %0, tpidr_el0" : "=r" (tpidr));
current->thread.tp_value = tpidr; *task_user_tls(current) = tpidr;
}
if (is_compat_thread(task_thread_info(next))) { tpidr = *task_user_tls(next);
tpidr = 0; tpidrro = is_compat_thread(task_thread_info(next)) ?
tpidrro = next->thread.tp_value; next->thread.tp_value : 0;
} else {
tpidr = next->thread.tp_value;
tpidrro = 0;
}
asm( asm(
" msr tpidr_el0, %0\n" " msr tpidr_el0, %0\n"
......
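Note: the reworked copy_thread()/tls_thread_switch() hunks above rely on a task_user_tls() helper that is defined outside this diff. A minimal sketch of what such a helper could look like, assuming the compat (AArch32) TLS value is stashed in a separate thread_struct field; the field names below (tp_value, tp2_value) are assumptions for illustration, not a quote of the real header:

/*
 * Sketch only: return a pointer to the slot where the saved tpidr_el0
 * value for task @t lives. Compat tasks are assumed to keep their
 * tpidr_el0 copy in a separate field, since they use tpidrro_el0 for
 * their actual TLS pointer.
 */
#ifdef CONFIG_COMPAT
#define task_user_tls(t)						\
({									\
	unsigned long *__tls;						\
	if (is_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.tp2_value;	/* assumed field */	\
	else								\
		__tls = &(t)->thread.tp_value;				\
	__tls;								\
})
#else
#define task_user_tls(t)	(&(t)->thread.tp_value)
#endif

With that shape, the `asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)))` in copy_thread() and the read/write pair in tls_thread_switch() preserve tpidr_el0 across fork and context switch for both native and compat tasks.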
This diff is collapsed.
...@@ -105,18 +105,6 @@ static struct resource mem_res[] = { ...@@ -105,18 +105,6 @@ static struct resource mem_res[] = {
#define kernel_code mem_res[0] #define kernel_code mem_res[0]
#define kernel_data mem_res[1] #define kernel_data mem_res[1]
void __init early_print(const char *str, ...)
{
char buf[256];
va_list ap;
va_start(ap, str);
vsnprintf(buf, sizeof(buf), str, ap);
va_end(ap);
printk("%s", buf);
}
/* /*
* The recorded values of x0 .. x3 upon kernel entry. * The recorded values of x0 .. x3 upon kernel entry.
*/ */
...@@ -326,12 +314,14 @@ static void __init setup_processor(void) ...@@ -326,12 +314,14 @@ static void __init setup_processor(void)
static void __init setup_machine_fdt(phys_addr_t dt_phys) static void __init setup_machine_fdt(phys_addr_t dt_phys)
{ {
if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) { void *dt_virt = fixmap_remap_fdt(dt_phys);
early_print("\n"
"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" if (!dt_virt || !early_init_dt_scan(dt_virt)) {
"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" pr_crit("\n"
"\nPlease check your bootloader.\n", "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
dt_phys, phys_to_virt(dt_phys)); "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
"\nPlease check your bootloader.",
&dt_phys, dt_virt);
while (true) while (true)
cpu_relax(); cpu_relax();
...@@ -374,8 +364,6 @@ void __init setup_arch(char **cmdline_p) ...@@ -374,8 +364,6 @@ void __init setup_arch(char **cmdline_p)
{ {
setup_processor(); setup_processor();
setup_machine_fdt(__fdt_pointer);
init_mm.start_code = (unsigned long) _text; init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext; init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata; init_mm.end_data = (unsigned long) _edata;
...@@ -386,6 +374,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -386,6 +374,8 @@ void __init setup_arch(char **cmdline_p)
early_fixmap_init(); early_fixmap_init();
early_ioremap_init(); early_ioremap_init();
setup_machine_fdt(__fdt_pointer);
parse_early_param(); parse_early_param();
/* /*
...@@ -408,16 +398,13 @@ void __init setup_arch(char **cmdline_p) ...@@ -408,16 +398,13 @@ void __init setup_arch(char **cmdline_p)
if (acpi_disabled) { if (acpi_disabled) {
unflatten_device_tree(); unflatten_device_tree();
psci_dt_init(); psci_dt_init();
cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
of_smp_init_cpus();
#endif
} else { } else {
psci_acpi_init(); psci_acpi_init();
acpi_init_cpus();
} }
cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_init_cpus();
smp_build_mpidr_hash(); smp_build_mpidr_hash();
#endif #endif
......
...@@ -370,7 +370,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs) ...@@ -370,7 +370,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
if (show_unhandled_signals) if (show_unhandled_signals)
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__, current->comm, task_pid_nr(current), __func__,
regs->pc, regs->sp); regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current); force_sig(SIGSEGV, current);
return 0; return 0;
} }
...@@ -407,7 +407,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs) ...@@ -407,7 +407,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
if (show_unhandled_signals) if (show_unhandled_signals)
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__, current->comm, task_pid_nr(current), __func__,
regs->pc, regs->sp); regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current); force_sig(SIGSEGV, current);
return 0; return 0;
} }
......
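Note: the pc/sp fix above matters because a compat (AArch32) task keeps its stack pointer in the banked r13 slot of the exception frame, not in the AArch64 sp field. A rough sketch of the aliasing this change assumes (reduced layout, illustrative only):

/* Reduced sketch of the exception frame layout assumed here. */
struct pt_regs_sketch {
	unsigned long long regs[31];	/* x0..x30; AArch32 r13/r14 land in [13]/[14] */
	unsigned long long sp;		/* AArch64 SP_EL0 - stale for a compat task */
	unsigned long long pc;
	unsigned long long pstate;
};

/* The kernel typically exposes the compat registers as aliases like: */
#define compat_sp	regs[13]
#define compat_lr	regs[14]

Printing regs->sp for a 32-bit task would therefore show a stale or unrelated value; regs->compat_sp is the stack the bad signal frame was actually built on.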
...@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter) ...@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter)
/* /*
* x0 must contain the sctlr value retrieved from restored context * x0 must contain the sctlr value retrieved from restored context
*/ */
.pushsection ".idmap.text", "ax"
ENTRY(cpu_resume_mmu) ENTRY(cpu_resume_mmu)
ldr x3, =cpu_resume_after_mmu ldr x3, =cpu_resume_after_mmu
msr sctlr_el1, x0 // restore sctlr_el1 msr sctlr_el1, x0 // restore sctlr_el1
isb isb
br x3 // global jump to virtual address br x3 // global jump to virtual address
ENDPROC(cpu_resume_mmu) ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu: cpu_resume_after_mmu:
mov x0, #0 // return zero on success mov x0, #0 // return zero on success
ldp x19, x20, [sp, #16] ldp x19, x20, [sp, #16]
...@@ -162,15 +164,12 @@ ENTRY(cpu_resume) ...@@ -162,15 +164,12 @@ ENTRY(cpu_resume)
#else #else
mov x7, xzr mov x7, xzr
#endif #endif
adrp x0, sleep_save_sp ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
add x0, x0, #:lo12:sleep_save_sp
ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
ldr x0, [x0, x7, lsl #3] ldr x0, [x0, x7, lsl #3]
/* load sp from context */ /* load sp from context */
ldr x2, [x0, #CPU_CTX_SP] ldr x2, [x0, #CPU_CTX_SP]
adrp x1, sleep_idmap_phys
/* load physical address of identity map page table in x1 */ /* load physical address of identity map page table in x1 */
ldr x1, [x1, #:lo12:sleep_idmap_phys] adrp x1, idmap_pg_dir
mov sp, x2 mov sp, x2
/* /*
* cpu_do_resume expects x0 to contain context physical address * cpu_do_resume expects x0 to contain context physical address
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <linux/acpi.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -248,20 +249,20 @@ static int op_cpu_kill(unsigned int cpu) ...@@ -248,20 +249,20 @@ static int op_cpu_kill(unsigned int cpu)
* time and hope that it's dead, so let's skip the wait and just hope. * time and hope that it's dead, so let's skip the wait and just hope.
*/ */
if (!cpu_ops[cpu]->cpu_kill) if (!cpu_ops[cpu]->cpu_kill)
return 1; return 0;
return cpu_ops[cpu]->cpu_kill(cpu); return cpu_ops[cpu]->cpu_kill(cpu);
} }
static DECLARE_COMPLETION(cpu_died);
/* /*
* called on the thread which is asking for a CPU to be shutdown - * called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out. * waits until shutdown has completed, or it is timed out.
*/ */
void __cpu_die(unsigned int cpu) void __cpu_die(unsigned int cpu)
{ {
if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { int err;
if (!cpu_wait_death(cpu, 5)) {
pr_crit("CPU%u: cpu didn't die\n", cpu); pr_crit("CPU%u: cpu didn't die\n", cpu);
return; return;
} }
...@@ -273,8 +274,10 @@ void __cpu_die(unsigned int cpu) ...@@ -273,8 +274,10 @@ void __cpu_die(unsigned int cpu)
* verify that it has really left the kernel before we consider * verify that it has really left the kernel before we consider
* clobbering anything it might still be using. * clobbering anything it might still be using.
*/ */
if (!op_cpu_kill(cpu)) err = op_cpu_kill(cpu);
pr_warn("CPU%d may not have shut down cleanly\n", cpu); if (err)
pr_warn("CPU%d may not have shut down cleanly: %d\n",
cpu, err);
} }
/* /*
...@@ -294,7 +297,7 @@ void cpu_die(void) ...@@ -294,7 +297,7 @@ void cpu_die(void)
local_irq_disable(); local_irq_disable();
/* Tell __cpu_die() that this CPU is now safe to dispose of */ /* Tell __cpu_die() that this CPU is now safe to dispose of */
complete(&cpu_died); (void)cpu_report_death();
/* /*
* Actually shutdown the CPU. This must never fail. The specific hotplug * Actually shutdown the CPU. This must never fail. The specific hotplug
...@@ -318,19 +321,9 @@ void __init smp_prepare_boot_cpu(void) ...@@ -318,19 +321,9 @@ void __init smp_prepare_boot_cpu(void)
set_my_cpu_offset(per_cpu_offset(smp_processor_id())); set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
} }
/* static u64 __init of_get_cpu_mpidr(struct device_node *dn)
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
* cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
void __init of_smp_init_cpus(void)
{ {
struct device_node *dn = NULL; const __be32 *cell;
unsigned int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
const u32 *cell;
u64 hwid; u64 hwid;
/* /*
...@@ -341,32 +334,143 @@ void __init of_smp_init_cpus(void) ...@@ -341,32 +334,143 @@ void __init of_smp_init_cpus(void)
cell = of_get_property(dn, "reg", NULL); cell = of_get_property(dn, "reg", NULL);
if (!cell) { if (!cell) {
pr_err("%s: missing reg property\n", dn->full_name); pr_err("%s: missing reg property\n", dn->full_name);
goto next; return INVALID_HWID;
} }
hwid = of_read_number(cell, of_n_addr_cells(dn));
hwid = of_read_number(cell, of_n_addr_cells(dn));
/* /*
* Non affinity bits must be set to 0 in the DT * Non affinity bits must be set to 0 in the DT
*/ */
if (hwid & ~MPIDR_HWID_BITMASK) { if (hwid & ~MPIDR_HWID_BITMASK) {
pr_err("%s: invalid reg property\n", dn->full_name); pr_err("%s: invalid reg property\n", dn->full_name);
goto next; return INVALID_HWID;
} }
return hwid;
}
/* /*
* Duplicate MPIDRs are a recipe for disaster. Scan * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
* all initialized entries and check for * entries and check for duplicates. If any is found just ignore the
* duplicates. If any is found just ignore the cpu. * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
* cpu_logical_map was initialized to INVALID_HWID to * matching valid MPIDR values.
* avoid matching valid MPIDR values. */
*/ static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
for (i = 1; (i < cpu) && (i < NR_CPUS); i++) { {
if (cpu_logical_map(i) == hwid) { unsigned int i;
for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
if (cpu_logical_map(i) == hwid)
return true;
return false;
}
/*
* Initialize cpu operations for a logical cpu and
* set it in the possible mask on success
*/
static int __init smp_cpu_setup(int cpu)
{
if (cpu_read_ops(cpu))
return -ENODEV;
if (cpu_ops[cpu]->cpu_init(cpu))
return -ENODEV;
set_cpu_possible(cpu, true);
return 0;
}
static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;
#ifdef CONFIG_ACPI
/*
* acpi_map_gic_cpu_interface - parse processor MADT entry
*
* Carry out sanity checks on MADT processor entry and initialize
* cpu_logical_map on success
*/
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
u64 hwid = processor->arm_mpidr;
if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
return;
}
if (!(processor->flags & ACPI_MADT_ENABLED)) {
pr_err("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
return;
}
if (is_mpidr_duplicate(cpu_count, hwid)) {
pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
return;
}
/* Check if GICC structure of boot CPU is available in the MADT */
if (cpu_logical_map(0) == hwid) {
if (bootcpu_valid) {
pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
hwid);
return;
}
bootcpu_valid = true;
return;
}
if (cpu_count >= NR_CPUS)
return;
/* map the logical cpu id to cpu MPIDR */
cpu_logical_map(cpu_count) = hwid;
cpu_count++;
}
static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *processor;
processor = (struct acpi_madt_generic_interrupt *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
acpi_map_gic_cpu_interface(processor);
return 0;
}
#else
#define acpi_table_parse_madt(...) do { } while (0)
#endif
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
* cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
void __init of_parse_and_init_cpus(void)
{
struct device_node *dn = NULL;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
u64 hwid = of_get_cpu_mpidr(dn);
if (hwid == INVALID_HWID)
goto next;
if (is_mpidr_duplicate(cpu_count, hwid)) {
pr_err("%s: duplicate cpu reg properties in the DT\n", pr_err("%s: duplicate cpu reg properties in the DT\n",
dn->full_name); dn->full_name);
goto next; goto next;
} }
}
/* /*
* The numbering scheme requires that the boot CPU * The numbering scheme requires that the boot CPU
...@@ -392,38 +496,58 @@ void __init of_smp_init_cpus(void) ...@@ -392,38 +496,58 @@ void __init of_smp_init_cpus(void)
continue; continue;
} }
if (cpu >= NR_CPUS) if (cpu_count >= NR_CPUS)
goto next;
if (cpu_read_ops(dn, cpu) != 0)
goto next;
if (cpu_ops[cpu]->cpu_init(dn, cpu))
goto next; goto next;
pr_debug("cpu logical map 0x%llx\n", hwid); pr_debug("cpu logical map 0x%llx\n", hwid);
cpu_logical_map(cpu) = hwid; cpu_logical_map(cpu_count) = hwid;
next: next:
cpu++; cpu_count++;
} }
}
/* sanity check */ /*
if (cpu > NR_CPUS) * Enumerate the possible CPU set from the device tree or ACPI and build the
pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n", * cpu logical map array containing MPIDR values related to logical
cpu, NR_CPUS); * cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
void __init smp_init_cpus(void)
{
int i;
if (acpi_disabled)
of_parse_and_init_cpus();
else
/*
* do a walk of MADT to determine how many CPUs
* we have including disabled CPUs, and get information
* we need for SMP init
*/
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
acpi_parse_gic_cpu_interface, 0);
if (cpu_count > NR_CPUS)
pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
cpu_count, NR_CPUS);
if (!bootcpu_valid) { if (!bootcpu_valid) {
pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n"); pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
return; return;
} }
/* /*
* All the cpus that made it to the cpu_logical_map have been * We need to set the cpu_logical_map entries before enabling
* validated so set them as possible cpus. * the cpus so that cpu processor description entries (DT cpu nodes
*/ * and ACPI MADT entries) can be retrieved by matching the cpu hwid
for (i = 0; i < NR_CPUS; i++) * with entries in cpu_logical_map while initializing the cpus.
if (cpu_logical_map(i) != INVALID_HWID) * If the cpu set-up fails, invalidate the cpu_logical_map entry.
set_cpu_possible(i, true); */
for (i = 1; i < NR_CPUS; i++) {
if (cpu_logical_map(i) != INVALID_HWID) {
if (smp_cpu_setup(i))
cpu_logical_map(i) = INVALID_HWID;
}
}
} }
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
......
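Note: the hotplug path above drops the private completion in favour of the generic cpu_wait_death()/cpu_report_death() helpers. A condensed sketch of how the two sides are expected to pair up, assuming the semantics suggested by the hunk (cpu_wait_death() returns false on timeout, cpu_report_death() is called once by the dying CPU); error handling is trimmed:

/* Sketch: the two halves of the CPU tear-down handshake. */

/* Requesting side, e.g. __cpu_die(): wait up to 5 s for the victim. */
static void example_waiter_side(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {		/* false: timed out */
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	/* only now is it safe to let cpu_ops[cpu]->cpu_kill() reclaim it */
}

/* Dying CPU, e.g. cpu_die() with IRQs off: signal the waiter ... */
static void example_dying_side(void)
{
	(void)cpu_report_death();
	/* ... then never return: cpu_ops/firmware take the CPU down. */
}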
...@@ -49,8 +49,14 @@ static void write_pen_release(u64 val) ...@@ -49,8 +49,14 @@ static void write_pen_release(u64 val)
} }
static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu) static int smp_spin_table_cpu_init(unsigned int cpu)
{ {
struct device_node *dn;
dn = of_get_cpu_node(cpu, NULL);
if (!dn)
return -ENODEV;
/* /*
* Determine the address from which the CPU is polling. * Determine the address from which the CPU is polling.
*/ */
......
...@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) ...@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
} }
/* /*
* __cpu_suspend * cpu_suspend
* *
* arg: argument to pass to the finisher function * arg: argument to pass to the finisher function
* fn: finisher function pointer * fn: finisher function pointer
* *
*/ */
int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{ {
struct mm_struct *mm = current->active_mm; struct mm_struct *mm = current->active_mm;
int ret; int ret;
...@@ -82,7 +82,7 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ...@@ -82,7 +82,7 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* We are resuming from reset with TTBR0_EL1 set to the * We are resuming from reset with TTBR0_EL1 set to the
* idmap to enable the MMU; restore the active_mm mappings in * idmap to enable the MMU; restore the active_mm mappings in
* TTBR0_EL1 unless the active_mm == &init_mm, in which case * TTBR0_EL1 unless the active_mm == &init_mm, in which case
* the thread entered __cpu_suspend with TTBR0_EL1 set to * the thread entered cpu_suspend with TTBR0_EL1 set to
* reserved TTBR0 page tables and should be restored as such. * reserved TTBR0 page tables and should be restored as such.
*/ */
if (mm == &init_mm) if (mm == &init_mm)
...@@ -118,7 +118,6 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ...@@ -118,7 +118,6 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
} }
struct sleep_save_sp sleep_save_sp; struct sleep_save_sp sleep_save_sp;
phys_addr_t sleep_idmap_phys;
static int __init cpu_suspend_init(void) static int __init cpu_suspend_init(void)
{ {
...@@ -132,9 +131,7 @@ static int __init cpu_suspend_init(void) ...@@ -132,9 +131,7 @@ static int __init cpu_suspend_init(void)
sleep_save_sp.save_ptr_stash = ctx_ptr; sleep_save_sp.save_ptr_stash = ctx_ptr;
sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp)); __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
__flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
return 0; return 0;
} }
......
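Note: after the rename, cpu_suspend() is the arm64 entry point that saves CPU context, calls a "finisher" to actually enter the low-power state, and restores context on the resume path. A hedged sketch of a caller; the finisher and the firmware call inside it are placeholders, not the real PSCI implementation:

/* Sketch: a hypothetical idle/suspend driver built on cpu_suspend(). */
static int example_suspend_finisher(unsigned long state_id)
{
	/*
	 * Runs after context has been saved. Would normally call into
	 * firmware (e.g. a PSCI CPU_SUSPEND request) and only returns
	 * if entering the low-power state failed.
	 */
	return example_firmware_cpu_suspend(state_id);	/* placeholder */
}

static int example_enter_low_power(unsigned long state_id)
{
	int ret;

	ret = cpu_suspend(state_id, example_suspend_finisher);
	/*
	 * ret == 0: the CPU lost context and came back via cpu_resume().
	 * ret != 0: the finisher failed and context was never lost.
	 */
	return ret;
}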
...@@ -335,8 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) ...@@ -335,8 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0) if (call_undef_hook(regs) == 0)
return; return;
if (show_unhandled_signals && unhandled_signal(current, SIGILL) && if (show_unhandled_signals_ratelimited() && unhandled_signal(current, SIGILL)) {
printk_ratelimit()) {
pr_info("%s[%d]: undefined instruction: pc=%p\n", pr_info("%s[%d]: undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc); current->comm, task_pid_nr(current), pc);
dump_instr(KERN_INFO, regs); dump_instr(KERN_INFO, regs);
...@@ -363,7 +362,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) ...@@ -363,7 +362,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
} }
#endif #endif
if (show_unhandled_signals && printk_ratelimit()) { if (show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: syscall %d\n", current->comm, pr_info("%s[%d]: syscall %d\n", current->comm,
task_pid_nr(current), (int)regs->syscallno); task_pid_nr(current), (int)regs->syscallno);
dump_instr("", regs); dump_instr("", regs);
......
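Note: show_unhandled_signals_ratelimited() is introduced elsewhere in this series; the point is to stop sharing the global printk_ratelimit() state with unrelated messages. A plausible sketch of such a helper, assuming one private ratelimit state per expansion site (the interval/burst values are the generic defaults, taken here as an assumption):

#include <linux/ratelimit.h>

/*
 * Sketch: report the unhandled signal only if reporting is enabled and
 * this particular call site has not exceeded its own rate limit.
 */
#define show_unhandled_signals_ratelimited()				\
({									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	bool __show = false;						\
	if (show_unhandled_signals && __ratelimit(&_rs))		\
		__show = true;						\
	__show;								\
})

Because the state is static per call site, a flood of undefined-instruction reports no longer suppresses unrelated diagnostics the way the shared printk_ratelimit() state did.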
...@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin ...@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv) $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
# down to collect2, resulting in silent corruption of the vDSO image.
ccflags-y += -Wl,-shared
obj-y += vdso.o obj-y += vdso.o
extra-y += vdso.lds vdso-offsets.h extra-y += vdso.lds vdso-offsets.h
CPPFLAGS_vdso.lds += -P -C -U$(ARCH) CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
......
...@@ -38,6 +38,12 @@ jiffies = jiffies_64; ...@@ -38,6 +38,12 @@ jiffies = jiffies_64;
*(.hyp.text) \ *(.hyp.text) \
VMLINUX_SYMBOL(__hyp_text_end) = .; VMLINUX_SYMBOL(__hyp_text_end) = .;
#define IDMAP_TEXT \
. = ALIGN(SZ_4K); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .;
/* /*
* The size of the PE/COFF section that covers the kernel image, which * The size of the PE/COFF section that covers the kernel image, which
* runs from stext to _edata, must be a round multiple of the PE/COFF * runs from stext to _edata, must be a round multiple of the PE/COFF
...@@ -95,6 +101,7 @@ SECTIONS ...@@ -95,6 +101,7 @@ SECTIONS
SCHED_TEXT SCHED_TEXT
LOCK_TEXT LOCK_TEXT
HYPERVISOR_TEXT HYPERVISOR_TEXT
IDMAP_TEXT
*(.fixup) *(.fixup)
*(.gnu.warning) *(.gnu.warning)
. = ALIGN(16); . = ALIGN(16);
...@@ -167,11 +174,13 @@ SECTIONS ...@@ -167,11 +174,13 @@ SECTIONS
} }
/* /*
* The HYP init code can't be more than a page long, * The HYP init code and ID map text can't be longer than a page each,
* and should not cross a page boundary. * and should not cross a page boundary.
*/ */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"HYP init code too big or misaligned") "HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"ID map text too big or misaligned")
/* /*
* If padding is applied before .head.text, virt<->phys conversions will fail. * If padding is applied before .head.text, virt<->phys conversions will fail.
......
...@@ -17,8 +17,10 @@ ...@@ -17,8 +17,10 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/esr.h> #include <asm/esr.h>
#include <asm/fpsimdmacros.h> #include <asm/fpsimdmacros.h>
...@@ -808,10 +810,7 @@ ...@@ -808,10 +810,7 @@
* Call into the vgic backend for state saving * Call into the vgic backend for state saving
*/ */
.macro save_vgic_state .macro save_vgic_state
adr x24, __vgic_sr_vectors alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
ldr x24, [x24, VGIC_SAVE_FN]
kern_hyp_va x24
blr x24
mrs x24, hcr_el2 mrs x24, hcr_el2
mov x25, #HCR_INT_OVERRIDE mov x25, #HCR_INT_OVERRIDE
neg x25, x25 neg x25, x25
...@@ -828,10 +827,7 @@ ...@@ -828,10 +827,7 @@
orr x24, x24, #HCR_INT_OVERRIDE orr x24, x24, #HCR_INT_OVERRIDE
orr x24, x24, x25 orr x24, x24, x25
msr hcr_el2, x24 msr hcr_el2, x24
adr x24, __vgic_sr_vectors alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
ldr x24, [x24, #VGIC_RESTORE_FN]
kern_hyp_va x24
blr x24
.endm .endm
.macro save_timer_state .macro save_timer_state
...@@ -1062,12 +1058,6 @@ ENTRY(__kvm_flush_vm_context) ...@@ -1062,12 +1058,6 @@ ENTRY(__kvm_flush_vm_context)
ret ret
ENDPROC(__kvm_flush_vm_context) ENDPROC(__kvm_flush_vm_context)
// struct vgic_sr_vectors __vgi_sr_vectors;
.align 3
ENTRY(__vgic_sr_vectors)
.skip VGIC_SR_VECTOR_SZ
ENDPROC(__vgic_sr_vectors)
__kvm_hyp_panic: __kvm_hyp_panic:
// Guess the context by looking at VTTBR: // Guess the context by looking at VTTBR:
// If zero, then we're already a host. // If zero, then we're already a host.
......
...@@ -4,3 +4,5 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ ...@@ -4,3 +4,5 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
context.o proc.o pageattr.o context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM64_PTDUMP) += dump.o obj-$(CONFIG_ARM64_PTDUMP) += dump.o
CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/
...@@ -22,83 +22,10 @@ ...@@ -22,83 +22,10 @@
#include <linux/init.h> #include <linux/init.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative.h>
#include "proc-macros.S" #include "proc-macros.S"
/*
* __flush_dcache_all()
*
* Flush the whole D-cache.
*
* Corrupted registers: x0-x7, x9-x11
*/
__flush_dcache_all:
dmb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
lsr x3, x3, #23 // left align loc bit field
cbz x3, finished // if loc is 0, then no need to clean
mov x10, #0 // start clean at cache level 0
loop1:
add x2, x10, x10, lsr #1 // work out 3x current cache level
lsr x1, x0, x2 // extract cache type bits from clidr
and x1, x1, #7 // mask of the bits for current cache only
cmp x1, #2 // see what cache we have at this level
b.lt skip // skip if no cache, or just i-cache
save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
msr csselr_el1, x10 // select current cache level in csselr
isb // isb to sych the new cssr&csidr
mrs x1, ccsidr_el1 // read the new ccsidr
restore_irqs x9
and x2, x1, #7 // extract the length of the cache lines
add x2, x2, #4 // add 4 (line length offset)
mov x4, #0x3ff
and x4, x4, x1, lsr #3 // find maximum number on the way size
clz w5, w4 // find bit position of way size increment
mov x7, #0x7fff
and x7, x7, x1, lsr #13 // extract max number of the index size
loop2:
mov x9, x4 // create working copy of max way size
loop3:
lsl x6, x9, x5
orr x11, x10, x6 // factor way and cache number into x11
lsl x6, x7, x2
orr x11, x11, x6 // factor index number into x11
dc cisw, x11 // clean & invalidate by set/way
subs x9, x9, #1 // decrement the way
b.ge loop3
subs x7, x7, #1 // decrement the index
b.ge loop2
skip:
add x10, x10, #2 // increment cache number
cmp x3, x10
b.gt loop1
finished:
mov x10, #0 // swith back to cache level 0
msr csselr_el1, x10 // select current cache level in csselr
dsb sy
isb
ret
ENDPROC(__flush_dcache_all)
/*
* flush_cache_all()
*
* Flush the entire cache system. The data cache flush is now achieved
* using atomic clean / invalidates working outwards from L1 cache. This
* is done using Set/Way based cache maintainance instructions. The
* instruction cache can still be invalidated back to the point of
* unification in a single instruction.
*/
ENTRY(flush_cache_all)
mov x12, lr
bl __flush_dcache_all
mov x0, #0
ic ialluis // I+BTB cache invalidate
ret x12
ENDPROC(flush_cache_all)
/* /*
* flush_icache_range(start,end) * flush_icache_range(start,end)
* *
......
...@@ -92,6 +92,14 @@ static void reset_context(void *info) ...@@ -92,6 +92,14 @@ static void reset_context(void *info)
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
struct mm_struct *mm = current->active_mm; struct mm_struct *mm = current->active_mm;
/*
* current->active_mm could be init_mm for the idle thread immediately
* after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
* the reserved value, so no need to reset any context.
*/
if (mm == &init_mm)
return;
smp_rmb(); smp_rmb();
asid = cpu_last_asid + cpu; asid = cpu_last_asid + cpu;
......
...@@ -115,8 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, ...@@ -115,8 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
{ {
struct siginfo si; struct siginfo si;
if (show_unhandled_signals && unhandled_signal(tsk, sig) && if (show_unhandled_signals_ratelimited() && unhandled_signal(tsk, sig)) {
printk_ratelimit()) {
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
addr, esr); addr, esr);
...@@ -478,12 +477,19 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, ...@@ -478,12 +477,19 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct pt_regs *regs) struct pt_regs *regs)
{ {
struct siginfo info; struct siginfo info;
struct task_struct *tsk = current;
if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
tsk->comm, task_pid_nr(tsk),
esr_get_class_string(esr), (void *)regs->pc,
(void *)regs->sp);
info.si_signo = SIGBUS; info.si_signo = SIGBUS;
info.si_errno = 0; info.si_errno = 0;
info.si_code = BUS_ADRALN; info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)addr; info.si_addr = (void __user *)addr;
arm64_notify_die("", regs, &info, esr); arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
} }
static struct fault_info debug_fault_info[] = { static struct fault_info debug_fault_info[] = {
......
...@@ -102,7 +102,6 @@ EXPORT_SYMBOL(flush_dcache_page); ...@@ -102,7 +102,6 @@ EXPORT_SYMBOL(flush_dcache_page);
/* /*
* Additional functions defined in assembly. * Additional functions defined in assembly.
*/ */
EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range); EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
......
...@@ -262,7 +262,7 @@ static void __init free_unused_memmap(void) ...@@ -262,7 +262,7 @@ static void __init free_unused_memmap(void)
* memmap entries are valid from the bank end aligned to * memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES. * MAX_ORDER_NR_PAGES.
*/ */
prev_end = ALIGN(start + __phys_to_pfn(reg->size), prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
MAX_ORDER_NR_PAGES); MAX_ORDER_NR_PAGES);
} }
......
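Note: the one-liner above changes how the end of a memblock region is converted to a page frame number. A small worked example of why the two expressions can diverge once `start` has been pulled downwards by earlier code in this loop (that adjustment is outside the hunk, so every value below is purely illustrative):

/*
 * Illustrative arithmetic only: 4 KiB pages, MAX_ORDER_NR_PAGES == 0x400
 * (i.e. the memmap is trimmed in 4 MiB granules). All values are made up.
 */
#define EX_PAGE_SHIFT		12
#define EX_MAX_ORDER_PAGES	0x400UL
#define EX_ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static void example_end_pfn(void)
{
	unsigned long base = 0x80300000UL;	/* region start (phys) */
	unsigned long size = 0x00200000UL;	/* 2 MiB region */

	/* pfn of the start, after earlier code pulled it down to 0x80000 */
	unsigned long start = (base >> EX_PAGE_SHIFT) & ~(EX_MAX_ORDER_PAGES - 1);

	/* old expression: 0x80000 + 0x200 = 0x80200, aligns up to 0x80400 */
	unsigned long old_end = EX_ALIGN(start + (size >> EX_PAGE_SHIFT),
					 EX_MAX_ORDER_PAGES);

	/* new expression: pfn(0x80500000) = 0x80500, aligns up to 0x80800 */
	unsigned long new_end = EX_ALIGN((base + size) >> EX_PAGE_SHIFT,
					 EX_MAX_ORDER_PAGES);

	(void)old_end;	/* 0x80400: understates the region end */
	(void)new_end;	/* 0x80800: matches the real bank end */
}

With the region end understated, a later free_memmap() pass can free memmap entries that still back real memory, which is the class of bug this fix avoids.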
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h> #include <linux/mman.h>
#include <linux/nodemask.h> #include <linux/nodemask.h>
#include <linux/memblock.h> #include <linux/memblock.h>
...@@ -643,3 +644,68 @@ void __set_fixmap(enum fixed_addresses idx, ...@@ -643,3 +644,68 @@ void __set_fixmap(enum fixed_addresses idx,
flush_tlb_kernel_range(addr, addr+PAGE_SIZE); flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
} }
} }
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
int granularity, size, offset;
void *dt_virt;
/*
* Check whether the physical FDT address is set and meets the minimum
* alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
* at least 8 bytes so that we can always access the size field of the
* FDT header after mapping the first chunk, double check here if that
* is indeed the case.
*/
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
return NULL;
/*
* Make sure that the FDT region can be mapped without the need to
* allocate additional translation table pages, so that it is safe
* to call create_mapping() this early.
*
* On 64k pages, the FDT will be mapped using PTEs, so we need to
* be in the same PMD as the rest of the fixmap.
* On 4k pages, we'll use section mappings for the FDT so we only
* have to be in the same PUD.
*/
BUILD_BUG_ON(dt_virt_base % SZ_2M);
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);
granularity = PAGE_SIZE;
} else {
BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
__fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);
granularity = PMD_SIZE;
}
offset = dt_phys % granularity;
dt_virt = (void *)dt_virt_base + offset;
/* map the first chunk so we can read the size from the header */
create_mapping(round_down(dt_phys, granularity), dt_virt_base,
granularity, prot);
if (fdt_check_header(dt_virt) != 0)
return NULL;
size = fdt_totalsize(dt_virt);
if (size > MAX_FDT_SIZE)
return NULL;
if (offset + size > granularity)
create_mapping(round_down(dt_phys, granularity), dt_virt_base,
round_up(offset + size, granularity), prot);
memblock_reserve(dt_phys, size);
return dt_virt;
}
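Note: the two-step mapping in fixmap_remap_fdt() above is easier to follow with concrete numbers. A standalone arithmetic sketch for the 4 KB-page case (granularity = 2 MB block mappings); the FIX_FDT virtual base, the DTB address and the blob size are all made-up values, and the macros are simplified stand-ins for the kernel's SZ_2M/round_down/round_up helpers:

#include <stdio.h>
#include <stdint.h>

#define GRANULARITY		0x200000UL		/* 2 MB block, 4 KB pages case */
#define ROUND_DOWN(x, a)	((x) & ~((a) - 1))
#define ROUND_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t dt_virt_base = 0xffffffbffea00000UL;	/* made-up, 2 MB aligned FIX_FDT base */

	/* hypothetical 8-byte aligned DTB location and size */
	uint64_t dt_phys = 0x8fe71008UL;
	uint64_t dt_size = 0x12000UL;			/* 72 KB blob */

	uint64_t offset  = dt_phys % GRANULARITY;	/* 0x71008 */
	uint64_t dt_virt = dt_virt_base + offset;	/* where the header is read */

	/* first mapping: one whole block covering the start of the blob */
	uint64_t map_phys = ROUND_DOWN(dt_phys, GRANULARITY);	/* 0x8fe00000 */
	uint64_t map_size = GRANULARITY;

	/* the header fits; if offset + dt_size spilled over, the mapping grows */
	if (offset + dt_size > GRANULARITY)
		map_size = ROUND_UP(offset + dt_size, GRANULARITY);

	printf("map %#lx+%#lx -> read FDT at %#lx\n",
	       (unsigned long)map_phys, (unsigned long)map_size,
	       (unsigned long)dt_virt);
	return 0;
}

The same shape holds for 64 KB pages, just with page-sized granularity and the PMD-level containment check instead of the PUD-level one.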
...@@ -45,52 +45,6 @@ ...@@ -45,52 +45,6 @@
#define MAIR(attr, mt) ((attr) << ((mt) * 8)) #define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
* cpu_cache_off()
*
* Turn the CPU D-cache off.
*/
ENTRY(cpu_cache_off)
mrs x0, sctlr_el1
bic x0, x0, #1 << 2 // clear SCTLR.C
msr sctlr_el1, x0
isb
ret
ENDPROC(cpu_cache_off)
/*
* cpu_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the same state
* as it would be if it had been reset, and branch to what would be the
* reset vector. It must be executed with the flat identity mapping.
*
* - loc - location to jump to for soft reset
*/
.align 5
ENTRY(cpu_reset)
mrs x1, sctlr_el1
bic x1, x1, #1
msr sctlr_el1, x1 // disable the MMU
isb
ret x0
ENDPROC(cpu_reset)
ENTRY(cpu_soft_restart)
/* Save address of cpu_reset() and reset address */
mov x19, x0
mov x20, x1
/* Turn D-cache off */
bl cpu_cache_off
/* Push out all dirty data, and ensure cache is empty */
bl flush_cache_all
mov x0, x20
ret x19
ENDPROC(cpu_soft_restart)
/* /*
* cpu_do_idle() * cpu_do_idle()
* *
......
...@@ -572,6 +572,7 @@ static void __init early_reserve_mem_dt(void) ...@@ -572,6 +572,7 @@ static void __init early_reserve_mem_dt(void)
int len; int len;
const __be32 *prop; const __be32 *prop;
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem(); early_init_fdt_scan_reserved_mem();
dt_root = of_get_flat_dt_root(); dt_root = of_get_flat_dt_root();
......
...@@ -580,11 +580,6 @@ void __init early_init_fdt_scan_reserved_mem(void) ...@@ -580,11 +580,6 @@ void __init early_init_fdt_scan_reserved_mem(void)
if (!initial_boot_params) if (!initial_boot_params)
return; return;
/* Reserve the dtb region */
early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
fdt_totalsize(initial_boot_params),
0);
/* Process header /memreserve/ fields */ /* Process header /memreserve/ fields */
for (n = 0; ; n++) { for (n = 0; ; n++) {
fdt_get_mem_rsv(initial_boot_params, n, &base, &size); fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
...@@ -597,6 +592,20 @@ void __init early_init_fdt_scan_reserved_mem(void) ...@@ -597,6 +592,20 @@ void __init early_init_fdt_scan_reserved_mem(void)
fdt_init_reserved_mem(); fdt_init_reserved_mem();
} }
/**
* early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
*/
void __init early_init_fdt_reserve_self(void)
{
if (!initial_boot_params)
return;
/* Reserve the dtb region */
early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
fdt_totalsize(initial_boot_params),
0);
}
/** /**
* of_scan_flat_dt - scan flattened tree blob and call callback on each. * of_scan_flat_dt - scan flattened tree blob and call callback on each.
* @it: callback function * @it: callback function
......
...@@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, ...@@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
extern int early_init_dt_scan_memory(unsigned long node, const char *uname, extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data); int depth, void *data);
extern void early_init_fdt_scan_reserved_mem(void); extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size); extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map); bool no_map);
...@@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset); ...@@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
extern void of_fdt_limit_memory(int limit); extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */ #else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {} static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {} static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {} static inline void unflatten_and_copy_device_tree(void) {}
......
...@@ -2126,9 +2126,6 @@ int kvm_vgic_hyp_init(void) ...@@ -2126,9 +2126,6 @@ int kvm_vgic_hyp_init(void)
goto out_free_irq; goto out_free_irq;
} }
/* Callback into for arch code for setup */
vgic_arch_setup(vgic);
on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
return 0; return 0;
......