Commit cfaa9f02 authored by Linus Torvalds

Merge branch 'spectre' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM spectre updates from Russell King:
 "These are the currently known final bits that resolve the Spectre
  issues. big.Little systems used to be sufficiently identical in that
  there were no differences between individual CPUs in the system that
  mattered to the kernel. With the advent of the Spectre problem, the
  CPUs now have differences in how the workaround is applied.

  As a result of previous Spectre patches, these systems ended up
  reporting quite a lot of:

     "CPUx: Spectre v2: incorrect context switching function, system vulnerable"

  messages, because the big.LITTLE switcher re-initialises the CPUs
  regularly. This series resolves that issue by making the CPU vtable
  unique to each CPU.

  However, since the vtable is used very early, before the per-cpu
  infrastructure is set up, a per-cpu variable can't be used. We also
  have the problem that two of the methods are called from preemptible
  paths, where smp_processor_id() can't be used either; thankfully those
  two remain identical across all CPUs in the system. To make sure, we
  validate during boot that they really are identical"

* 'spectre' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: spectre-v2: per-CPU vtables to work around big.Little systems
  ARM: add PROC_VTABLE and PROC_TABLE macros
  ARM: clean up per-processor check_bugs method call
  ARM: split out processor lookup
  ARM: make lookup_processor_type() non-__init
parents 1ce80e0f 383fb3ee
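
[Editor's note] In outline: instead of one global 'struct processor', the
series keeps an array of per-CPU copies and dispatches through two macros,
PROC_VTABLE() for callers that run with preemption disabled and PROC_TABLE()
for the preemptible-path methods, which always use slot 0 and are therefore
checked for cross-CPU identity at boot. Below is a minimal stand-alone sketch
of that pattern, not the kernel code (which follows in the diff); the names
cur_cpu, demo_* and this NR_CPUS value are invented for illustration:

/* Stand-alone sketch of the per-CPU vtable pattern (illustrative only;
 * cur_cpu stands in for smp_processor_id(), demo_* ops are invented). */
#include <stdio.h>

#define NR_CPUS 4

struct proc_ops {
	void (*switch_mm)(void);		/* may differ between CPUs */
	void (*dcache_clean_area)(void);	/* preemptible path: must be
						 * identical on all CPUs */
};

static void demo_switch_mm(void)	{ puts("switch_mm"); }
static void demo_dcache_clean(void)	{ puts("dcache_clean_area"); }

static struct proc_ops boot_ops;	/* the boot CPU's static slot 0 */
static struct proc_ops *cpu_vtable[NR_CPUS] = { &boot_ops };

static int cur_cpu;			/* stand-in for smp_processor_id() */

#define PROC_VTABLE(f)	cpu_vtable[cur_cpu]->f	/* preempt-disabled callers */
#define PROC_TABLE(f)	cpu_vtable[0]->f	/* preemptible callers */

static void init_proc_vtable(const struct proc_ops *p)
{
	*cpu_vtable[cur_cpu] = *p;
	/* preemptible-path ops dispatch through slot 0, so every CPU
	 * must install the same pointer there -- warn otherwise */
	if (cpu_vtable[cur_cpu]->dcache_clean_area !=
	    cpu_vtable[0]->dcache_clean_area)
		fprintf(stderr, "CPU%d: mismatched preemptible op\n", cur_cpu);
}

int main(void)
{
	const struct proc_ops ops = { demo_switch_mm, demo_dcache_clean };

	init_proc_vtable(&ops);			/* boot CPU fills slot 0 */
	PROC_VTABLE(switch_mm)();		/* this CPU's own entry */
	PROC_TABLE(dcache_clean_area)();	/* always CPU 0's entry */
	return 0;
}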
@@ -111,6 +111,7 @@
 #include <linux/kernel.h>
 
 extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
 
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg) \
@@ -23,7 +23,7 @@ struct mm_struct;
 /*
  * Don't change this structure - ASM code relies on it.
  */
-extern struct processor {
+struct processor {
 	/* MISC
 	 * get data abort address/flags
 	 */
@@ -79,9 +79,13 @@ extern struct processor {
 	unsigned int suspend_size;
 	void (*do_suspend)(void *);
 	void (*do_resume)(void *);
-} processor;
+};
 
 #ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 extern void cpu_do_suspend(void *);
 extern void cpu_do_resume(void *);
 #else
-#define cpu_proc_init			processor._proc_init
-#define cpu_proc_fin			processor._proc_fin
-#define cpu_reset			processor.reset
-#define cpu_do_idle			processor._do_idle
-#define cpu_dcache_clean_area		processor.dcache_clean_area
-#define cpu_set_pte_ext			processor.set_pte_ext
-#define cpu_do_switch_mm		processor.switch_mm
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend			processor.do_suspend
-#define cpu_do_resume			processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised.  We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)	cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)	cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	unsigned int cpu = smp_processor_id();
+	*cpu_vtable[cpu] = *p;
+	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+		     cpu_vtable[0]->dcache_clean_area);
+	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+		     cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f)	processor.f
+#define PROC_TABLE(f)	processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	processor = *p;
+}
+#endif
+
+#define cpu_proc_init			PROC_VTABLE(_proc_init)
+#define cpu_check_bugs			PROC_VTABLE(check_bugs)
+#define cpu_proc_fin			PROC_VTABLE(_proc_fin)
+#define cpu_reset			PROC_VTABLE(reset)
+#define cpu_do_idle			PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area		PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext			PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm		PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend			PROC_VTABLE(do_suspend)
+#define cpu_do_resume			PROC_VTABLE(do_resume)
 
 #endif
 
 extern void cpu_resume(void);
@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-	if (processor.check_bugs)
-		processor.check_bugs();
+	if (cpu_check_bugs)
+		cpu_check_bugs();
 #endif
 }
 
@@ -145,6 +145,9 @@ __mmap_switched_data:
 #endif
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
+	__FINIT
+	.text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
 	ldmfd	sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)
 
-	__FINIT
-	.text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses
@@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2);
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+	[0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void)
 }
 #endif
 
-static void __init setup_processor(void)
+/*
+ * locate processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
 {
-	struct proc_info_list *list;
-
-	/*
-	 * locate processor in the list of supported processor
-	 * types.  The linker builds this table for us from the
-	 * entries in arch/arm/mm/proc-*.S
-	 */
-	list = lookup_processor_type(read_cpuid_id());
+	struct proc_info_list *list = lookup_processor_type(midr);
+
 	if (!list) {
-		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-		       read_cpuid_id());
-		while (1);
+		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+		       smp_processor_id(), midr);
+		while (1)
+			/* can't use cpu_relax() here as it may require MMU setup */;
 	}
 
+	return list;
+}
+
+static void __init setup_processor(void)
+{
+	unsigned int midr = read_cpuid_id();
+	struct proc_info_list *list = lookup_processor(midr);
+
 	cpu_name = list->cpu_name;
 	__cpu_architecture = __get_cpu_architecture();
 
-#ifdef MULTI_CPU
-	processor = *list->proc;
-#endif
+	init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
 	cpu_tlb = *list->tlb;
 #endif
@@ -699,7 +709,7 @@ static void __init setup_processor(void)
 #endif
 
 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+		list->cpu_name, midr, midr & 15,
 		proc_arch[cpu_architecture()], get_cr());
 
 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	if (!cpu_vtable[cpu])
+		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+	return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	if (!smp_ops.smp_boot_secondary)
 		return -ENOSYS;
 
+	ret = secondary_biglittle_prepare(cpu);
+	if (ret)
+		return ret;
+
 	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
 
+	secondary_biglittle_init();
+
 	/*
 	 * The identity mapping is uncached (strongly ordered), so
 	 * switch away from it before attempting any exclusive accesses.
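
[Editor's note] The smp.c changes above are deliberately two-phase:
secondary_biglittle_prepare() runs in __cpu_up() on an already-running CPU,
where kzalloc() is permitted, while secondary_biglittle_init() runs on the
incoming CPU itself in secondary_start_kernel() and only copies its own
proc_info into the pre-allocated slot. A hedged continuation of the earlier
sketch (demo_lookup() is invented; this fragment builds on the definitions in
the sketch above rather than standing alone):

#include <stdlib.h>

/* stands in for lookup_processor(read_cpuid_id())->proc */
static const struct proc_ops *demo_lookup(void)
{
	static const struct proc_ops ops = { demo_switch_mm, demo_dcache_clean };
	return &ops;
}

/* phase 1: on a running CPU, before the new CPU is started -- may allocate */
static int biglittle_prepare(unsigned int cpu)
{
	if (!cpu_vtable[cpu])
		cpu_vtable[cpu] = calloc(1, sizeof(*cpu_vtable[cpu]));
	return cpu_vtable[cpu] ? 0 : -1;	/* kernel returns -ENOMEM */
}

/* phase 2: on the incoming CPU itself -- copy only, no allocation */
static void biglittle_init(void)
{
	init_proc_vtable(demo_lookup());
}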
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
 	case ARM_CPU_PART_CORTEX_A17:
 	case ARM_CPU_PART_CORTEX_A73:
 	case ARM_CPU_PART_CORTEX_A75:
-		if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_bpiall;
 		spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
 
 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_BRAHMA_B15:
-		if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_iciallu;
 		spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 			if ((int)res.a0 != 0)
 				break;
-			if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-				goto bl_error;
 			per_cpu(harden_branch_predictor_fn, cpu) =
 				call_hvc_arch_workaround_1;
-			processor.switch_mm = cpu_v7_hvc_switch_mm;
+			cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
 			spectre_v2_method = "hypervisor";
 			break;
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 			if ((int)res.a0 != 0)
 				break;
-			if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-				goto bl_error;
 			per_cpu(harden_branch_predictor_fn, cpu) =
 				call_smc_arch_workaround_1;
-			processor.switch_mm = cpu_v7_smc_switch_mm;
+			cpu_do_switch_mm = cpu_v7_smc_switch_mm;
 			spectre_v2_method = "firmware";
 			break;
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
 	if (spectre_v2_method)
 		pr_info("CPU%u: Spectre v2: using %s workaround\n",
 			smp_processor_id(), spectre_v2_method);
-	return;
-
-bl_error:
-	pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-	       cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)
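
[Editor's note] The dropped bl_error path is worth a remark: with a single
global processor struct, a late-booting CPU that needed a different switch_mm
workaround could only be detected and reported as vulnerable, since the first
CPU's choice had already been published system-wide. With per-CPU vtables the
assignment is local, because under the big.LITTLE configuration

	cpu_do_switch_mm = cpu_v7_hvc_switch_mm;

expands through PROC_VTABLE(switch_mm) to

	cpu_vtable[smp_processor_id()]->switch_mm = cpu_v7_hvc_switch_mm;

so each CPU installs its own variant, and the cross-check (and its "system
vulnerable" error message) is no longer needed.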