Commit 0a21ac0d authored by Will Deacon

Merge branch 'for-next/ghostbusters' into for-next/core

Fix and subsequently rewrite Spectre mitigations, including the addition
of support for PR_SPEC_DISABLE_NOEXEC.

(Will Deacon and Marc Zyngier)
* for-next/ghostbusters: (22 commits)
  arm64: Add support for PR_SPEC_DISABLE_NOEXEC prctl() option
  arm64: Pull in task_stack_page() to Spectre-v4 mitigation code
  KVM: arm64: Allow patching EL2 vectors even with KASLR is not enabled
  arm64: Get rid of arm64_ssbd_state
  KVM: arm64: Convert ARCH_WORKAROUND_2 to arm64_get_spectre_v4_state()
  KVM: arm64: Get rid of kvm_arm_have_ssbd()
  KVM: arm64: Simplify handling of ARCH_WORKAROUND_2
  arm64: Rewrite Spectre-v4 mitigation code
  arm64: Move SSBD prctl() handler alongside other spectre mitigation code
  arm64: Rename ARM64_SSBD to ARM64_SPECTRE_V4
  arm64: Treat SSBS as a non-strict system feature
  arm64: Group start_thread() functions together
  KVM: arm64: Set CSV2 for guests on hardware unaffected by Spectre-v2
  arm64: Rewrite Spectre-v2 mitigation code
  arm64: Introduce separate file for spectre mitigations and reporting
  arm64: Rename ARM64_HARDEN_BRANCH_PREDICTOR to ARM64_SPECTRE_V2
  KVM: arm64: Simplify install_bp_hardening_cb()
  KVM: arm64: Replace CONFIG_KVM_INDIRECT_VECTORS with CONFIG_RANDOMIZE_BASE
  arm64: Remove Spectre-related CONFIG_* options
  arm64: Run ARCH_WORKAROUND_2 enabling code on all CPUs
  ...
parents 57b8b1b4 780c083a
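For reference, the new PR_SPEC_DISABLE_NOEXEC control is driven from userspace through the existing PR_SET_SPECULATION_CTRL prctl(). The snippet below is an illustrative sketch only and is not part of this merge; it assumes a kernel and uapi headers that already define PR_SPEC_DISABLE_NOEXEC, and it requests the Speculative Store Bypass mitigation for the calling task while letting the kernel drop it again across execve():

/* Illustrative sketch only -- not part of this merge. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

#ifndef PR_SPEC_DISABLE_NOEXEC
#define PR_SPEC_DISABLE_NOEXEC	(1UL << 4)	/* value used by linux/prctl.h */
#endif

int main(void)
{
	/* Enable the SSB mitigation for this task; cleared again on execve(). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE_NOEXEC, 0, 0)) {
		perror("PR_SET_SPECULATION_CTRL");
		return 1;
	}

	/*
	 * Read the state back; on kernels with this support the result is
	 * typically PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC.
	 */
	printf("ssb state: 0x%lx\n",
	       (unsigned long)prctl(PR_GET_SPECULATION_CTRL,
				    PR_SPEC_STORE_BYPASS, 0, 0, 0));
	return 0;
}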
@@ -1172,32 +1172,6 @@ config UNMAP_KERNEL_AT_EL0
 	  If unsure, say Y.
 
-config HARDEN_BRANCH_PREDICTOR
-	bool "Harden the branch predictor against aliasing attacks" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors rely on
-	  being able to manipulate the branch predictor for a victim context by
-	  executing aliasing branches in the attacker context. Such attacks
-	  can be partially mitigated against by clearing internal branch
-	  predictor state and limiting the prediction logic in some situations.
-
-	  This config option will take CPU-specific actions to harden the
-	  branch predictor against aliasing attacks and may rely on specific
-	  instruction sequences or control bits being set by the system
-	  firmware.
-
-	  If unsure, say Y.
-
-config ARM64_SSBD
-	bool "Speculative Store Bypass Disable" if EXPERT
-	default y
-	help
-	  This enables mitigation of the bypassing of previous stores
-	  by speculative loads.
-
-	  If unsure, say Y.
-
 config RODATA_FULL_DEFAULT_ENABLED
 	bool "Apply r/o permissions of VM areas also to their linear aliases"
 	default y
...
@@ -31,13 +31,13 @@
 #define ARM64_HAS_DCPOP				21
 #define ARM64_SVE				22
 #define ARM64_UNMAP_KERNEL_AT_EL0		23
-#define ARM64_HARDEN_BRANCH_PREDICTOR		24
+#define ARM64_SPECTRE_V2			24
 #define ARM64_HAS_RAS_EXTN			25
 #define ARM64_WORKAROUND_843419			26
 #define ARM64_HAS_CACHE_IDC			27
 #define ARM64_HAS_CACHE_DIC			28
 #define ARM64_HW_DBM				29
-#define ARM64_SSBD				30
+#define ARM64_SPECTRE_V4			30
 #define ARM64_MISMATCHED_CACHE_TYPE		31
 #define ARM64_HAS_STAGE2_FWB			32
 #define ARM64_HAS_CRC32				33
...
@@ -698,30 +698,6 @@ static inline bool system_supports_tlb_range(void)
 		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
-#define ARM64_BP_HARDEN_UNKNOWN		-1
-#define ARM64_BP_HARDEN_WA_NEEDED	0
-#define ARM64_BP_HARDEN_NOT_REQUIRED	1
-
-int get_spectre_v2_workaround_state(void);
-
-#define ARM64_SSBD_UNKNOWN		-1
-#define ARM64_SSBD_FORCE_DISABLE	0
-#define ARM64_SSBD_KERNEL		1
-#define ARM64_SSBD_FORCE_ENABLE		2
-#define ARM64_SSBD_MITIGATED		3
-
-static inline int arm64_get_ssbd_state(void)
-{
-#ifdef CONFIG_ARM64_SSBD
-	extern int ssbd_state;
-	return ssbd_state;
-#else
-	return ARM64_SSBD_UNKNOWN;
-#endif
-}
-
-void arm64_set_ssbd_mitigation(bool state);
-
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
...
@@ -9,9 +9,6 @@
 #include <asm/virt.h>
 
-#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
-#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
 #define ARM_EXIT_WITH_SERROR_BIT	31
 #define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_EXCEPTION_IS_TRAP(x)	(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@ -102,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
 #define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
...
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
-						      bool flag)
-{
-	if (flag)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-	else
-		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
...
@@ -631,46 +631,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-#define KVM_BP_HARDEN_UNKNOWN		-1
-#define KVM_BP_HARDEN_WA_NEEDED		0
-#define KVM_BP_HARDEN_NOT_REQUIRED	1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
-	switch (get_spectre_v2_workaround_state()) {
-	case ARM64_BP_HARDEN_WA_NEEDED:
-		return KVM_BP_HARDEN_WA_NEEDED;
-	case ARM64_BP_HARDEN_NOT_REQUIRED:
-		return KVM_BP_HARDEN_NOT_REQUIRED;
-	case ARM64_BP_HARDEN_UNKNOWN:
-	default:
-		return KVM_BP_HARDEN_UNKNOWN;
-	}
-}
-
-#define KVM_SSBD_UNKNOWN		-1
-#define KVM_SSBD_FORCE_DISABLE		0
-#define KVM_SSBD_KERNEL			1
-#define KVM_SSBD_FORCE_ENABLE		2
-#define KVM_SSBD_MITIGATED		3
-
-static inline int kvm_arm_have_ssbd(void)
-{
-	switch (arm64_get_ssbd_state()) {
-	case ARM64_SSBD_FORCE_DISABLE:
-		return KVM_SSBD_FORCE_DISABLE;
-	case ARM64_SSBD_KERNEL:
-		return KVM_SSBD_KERNEL;
-	case ARM64_SSBD_FORCE_ENABLE:
-		return KVM_SSBD_FORCE_ENABLE;
-	case ARM64_SSBD_MITIGATED:
-		return KVM_SSBD_MITIGATED;
-	case ARM64_SSBD_UNKNOWN:
-	default:
-		return KVM_SSBD_UNKNOWN;
-	}
-}
-
 void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
...
@@ -9,6 +9,7 @@
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/cpufeature.h>
 
 /*
@@ -430,19 +431,17 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
  *
- * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
- *   hardening sequence is placed in one of the vector slots, which is
- *   executed before jumping to the real vectors.
+ * - If the CPU is affected by Spectre-v2, the hardening sequence is
+ *   placed in one of the vector slots, which is executed before jumping
+ *   to the real vectors.
  *
- * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
- *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
- *   hardening sequence is mapped next to the idmap page, and executed
- *   before jumping to the real vectors.
+ * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ *   containing the hardening sequence is mapped next to the idmap page,
+ *   and executed before jumping to the real vectors.
  *
  * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
  *   empty slot is selected, mapped next to the idmap page, and
@@ -452,19 +451,16 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
-#include <asm/mmu.h>
-
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
-/* This is called on both VHE and !VHE systems */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
 	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 	int slot = -1;
 
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
 		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 		slot = data->hyp_vectors_slot;
 	}
@@ -481,76 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
 	return vect;
 }
 
-/* This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
-	/*
-	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
-	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
-	 *
-	 * !HBP + !HEL2 -> use direct vectors
-	 *  HBP + !HEL2 -> use hardened vectors in place
-	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
-	 *  HBP +  HEL2 -> use hardened vertors and use exec mapping
-	 */
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-	}
-
-	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
-		/*
-		 * Always allocate a spare vector slot, as we don't
-		 * know yet which CPUs have a BP hardening slot that
-		 * we can reuse.
-		 */
-		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-		return create_hyp_exec_mappings(vect_pa, size,
-						&__kvm_bp_vect_base);
-	}
-
-	return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
-	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_ARM64_SSBD
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-static inline int hyp_map_aux_data(void)
-{
-	int cpu, err;
-
-	for_each_possible_cpu(cpu) {
-		u64 *ptr;
-
-		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
-		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-#else
-static inline int hyp_map_aux_data(void)
-{
-	return 0;
-}
-#endif
-
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 /*
...
@@ -48,7 +48,6 @@ struct bp_hardening_data {
 	bp_hardening_cb_t	fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -60,21 +59,13 @@ static inline void arm64_apply_bp_hardening(void)
 {
 	struct bp_hardening_data *d;
 
-	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
 		return;
 
 	d = arm64_get_bp_hardening_data();
 	if (d->fn)
 		d->fn();
 }
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-	return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)	{ }
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 extern void arm64_memblock_init(void);
 extern void paging_init(void);
...
@@ -38,6 +38,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pointer_auth.h>
 #include <asm/ptrace.h>
+#include <asm/spectre.h>
 #include <asm/types.h>
 
 /*
@@ -197,40 +198,15 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }
 
-static inline void set_ssbs_bit(struct pt_regs *regs)
-{
-	regs->pstate |= PSR_SSBS_BIT;
-}
-
-static inline void set_compat_ssbs_bit(struct pt_regs *regs)
-{
-	regs->pstate |= PSR_AA32_SSBS_BIT;
-}
-
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
 	start_thread_common(regs, pc);
 	regs->pstate = PSR_MODE_EL0t;
+	spectre_v4_enable_task_mitigation(current);
-
-	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		set_ssbs_bit(regs);
-
 	regs->sp = sp;
 }
 
-static inline bool is_ttbr0_addr(unsigned long addr)
-{
-	/* entry assembly clears tags for TTBR0 addrs */
-	return addr < TASK_SIZE;
-}
-
-static inline bool is_ttbr1_addr(unsigned long addr)
-{
-	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
-	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
-}
-
 #ifdef CONFIG_COMPAT
 static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 				       unsigned long sp)
@@ -244,13 +220,23 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate |= PSR_AA32_E_BIT;
 #endif
 
-	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		set_compat_ssbs_bit(regs);
-
+	spectre_v4_enable_task_mitigation(current);
 	regs->compat_sp = sp;
 }
 #endif
 
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+	/* entry assembly clears tags for TTBR0 addrs */
+	return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+}
+
 /* Forward declaration, a strange C thing */
 struct task_struct;
...
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Interface for managing mitigations for Spectre vulnerabilities.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

#include <asm/cpufeature.h>

/* Watch out, ordering is important here. */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

struct task_struct;

enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

#endif	/* __ASM_SPECTRE_H */
@@ -242,6 +242,15 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL		0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL		1
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED	2
+
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2	KVM_REG_ARM_FW_REG(2)
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL		0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN		1
...
@@ -17,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o	\
 			   return_address.o cpuinfo.o cpu_errata.o	\
 			   cpufeature.o alternative.o cacheinfo.o	\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
-			   syscall.o
+			   syscall.o proton-pack.o
 
 targets += efi-entry.o
 
@@ -57,7 +57,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)		+= sdei.o
-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)		+= scs.o
...
@@ -227,7 +227,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
 	ARM64_FTR_END,
@@ -487,7 +487,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_pfr2[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -1585,48 +1585,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
 	WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-#ifdef CONFIG_ARM64_SSBD
-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
-{
-	if (user_mode(regs))
-		return 1;
-
-	if (instr & BIT(PSTATE_Imm_shift))
-		regs->pstate |= PSR_SSBS_BIT;
-	else
-		regs->pstate &= ~PSR_SSBS_BIT;
-
-	arm64_skip_faulting_instruction(regs, 4);
-	return 0;
-}
-
-static struct undef_hook ssbs_emulation_hook = {
-	.instr_mask	= ~(1U << PSTATE_Imm_shift),
-	.instr_val	= 0xd500401f | PSTATE_SSBS,
-	.fn		= ssbs_emulation_handler,
-};
-
-static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
-{
-	static bool undef_hook_registered = false;
-	static DEFINE_RAW_SPINLOCK(hook_lock);
-
-	raw_spin_lock(&hook_lock);
-	if (!undef_hook_registered) {
-		register_undef_hook(&ssbs_emulation_hook);
-		undef_hook_registered = true;
-	}
-	raw_spin_unlock(&hook_lock);
-
-	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
-		arm64_set_ssbd_mitigation(false);
-	} else {
-		arm64_set_ssbd_mitigation(true);
-	}
-}
-#endif /* CONFIG_ARM64_SSBD */
-
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
@@ -2004,19 +1962,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
 		.min_field_value = 1,
 	},
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypassing Safe (SSBS)",
 		.capability = ARM64_SSBS,
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR1_EL1,
 		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
-		.cpu_enable = cpu_enable_ssbs,
 	},
-#endif
 #ifdef CONFIG_ARM64_CNP
 	{
 		.desc = "Common not Private translations",
...
@@ -132,9 +132,8 @@ alternative_else_nop_endif
 	 * them if required.
 	 */
 	.macro	apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb	arm64_enable_wa2_handling
-	b	.L__asm_ssbd_skip\@
+alternative_cb	spectre_v4_patch_fw_mitigation_enable
+	b	.L__asm_ssbd_skip\@		// Patched to NOP
 alternative_cb_end
 	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
 	cbz	\tmp2,	.L__asm_ssbd_skip\@
@@ -142,11 +141,10 @@ alternative_cb_end
 	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 	mov	w1, #\state
-alternative_cb	arm64_update_smccc_conduit
+alternative_cb	spectre_v4_patch_fw_mitigation_conduit
 	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
-#endif
 	.endm
 
 	.macro	kernel_entry, el, regsize = 64
@@ -697,11 +695,9 @@ el0_irq_naked:
 	bl	trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	tbz	x22, #55, 1f
 	bl	do_el0_irq_bp_hardening
 1:
-#endif
 	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
...
@@ -331,11 +331,7 @@ int swsusp_arch_suspend(void)
 		 * mitigation off behind our back, let's set the state
 		 * to what we expect it to be.
 		 */
-		switch (arm64_get_ssbd_state()) {
-		case ARM64_SSBD_FORCE_ENABLE:
-		case ARM64_SSBD_KERNEL:
-			arm64_set_ssbd_mitigation(true);
-		}
+		spectre_v4_enable_mitigation(NULL);
 	}
 
 	local_daif_restore(flags);
...
@@ -64,12 +64,10 @@ __efistub__ctype		= _ctype;
 #define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
 
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 
 /* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
 KVM_NVHE_ALIAS(kvm_host_data);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
...
@@ -21,6 +21,7 @@
 #include <linux/lockdep.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -421,8 +422,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		    cpus_have_const_cap(ARM64_HAS_UAO))
 			childregs->pstate |= PSR_UAO_BIT;
 
-		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			set_ssbs_bit(childregs);
+		spectre_v4_enable_task_mitigation(p);
 
 		if (system_uses_irq_prio_masking())
 			childregs->pmr_save = GIC_PRIO_IRQON;
@@ -472,8 +472,6 @@ void uao_thread_switch(struct task_struct *next)
  */
 static void ssbs_thread_switch(struct task_struct *next)
 {
-	struct pt_regs *regs = task_pt_regs(next);
-
 	/*
 	 * Nothing to do for kernel threads, but 'regs' may be junk
 	 * (e.g. idle task) so check the flags and bail early.
@@ -485,18 +483,10 @@ static void ssbs_thread_switch(struct task_struct *next)
 	 * If all CPUs implement the SSBS extension, then we just need to
 	 * context-switch the PSTATE field.
 	 */
-	if (cpu_have_feature(cpu_feature(SSBS)))
-		return;
-
-	/* If the mitigation is enabled, then we leave SSBS clear. */
-	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
-	    test_tsk_thread_flag(next, TIF_SSBD))
+	if (cpus_have_const_cap(ARM64_SSBS))
 		return;
 
-	if (compat_user_mode(regs))
-		set_compat_ssbs_bit(regs);
-	else if (user_mode(regs))
-		set_ssbs_bit(regs);
+	spectre_v4_enable_task_mitigation(next);
 }
 
 /*
@@ -620,6 +610,11 @@ void arch_setup_new_exec(void)
 	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
 
 	ptrauth_thread_init_user(current);
+
+	if (task_spec_ssb_noexec(current)) {
+		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+					 PR_SPEC_ENABLE);
+	}
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
...
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 */

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>

static void ssbd_ssbs_enable(struct task_struct *task)
{
	u64 val = is_compat_thread(task_thread_info(task)) ?
			PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	task_pt_regs(task)->pstate |= val;
}

static void ssbd_ssbs_disable(struct task_struct *task)
{
	u64 val = is_compat_thread(task_thread_info(task)) ?
			PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	task_pt_regs(task)->pstate &= ~val;
}

/*
 * prctl interface for SSBD
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	int state = arm64_get_ssbd_state();

	/* Unsupported */
	if (state == ARM64_SSBD_UNKNOWN)
		return -ENODEV;

	/* Treat the unaffected/mitigated state separately */
	if (state == ARM64_SSBD_MITIGATED) {
		switch (ctrl) {
		case PR_SPEC_ENABLE:
			return -EPERM;
		case PR_SPEC_DISABLE:
		case PR_SPEC_FORCE_DISABLE:
			return 0;
		}
	}

	/*
	 * Things are a bit backward here: the arm64 internal API
	 * *enables the mitigation* when the userspace API *disables
	 * speculation*. So much fun.
	 */
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (state == ARM64_SSBD_FORCE_ENABLE ||
		    task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		clear_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_enable(task);
		break;
	case PR_SPEC_DISABLE:
		if (state == ARM64_SSBD_FORCE_DISABLE)
			return -EPERM;
		task_set_spec_ssb_disable(task);
		set_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_disable(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		if (state == ARM64_SSBD_FORCE_DISABLE)
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		set_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_disable(task);
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_UNKNOWN:
		return -ENODEV;
	case ARM64_SSBD_FORCE_ENABLE:
		return PR_SPEC_DISABLE;
	case ARM64_SSBD_KERNEL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case ARM64_SSBD_FORCE_DISABLE:
		return PR_SPEC_ENABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}
@@ -72,8 +72,7 @@ void notrace __cpu_suspend_exit(void)
 	 * have turned the mitigation on. If the user has forcefully
 	 * disabled it, make sure their wishes are obeyed.
 	 */
-	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-		arm64_set_ssbd_mitigation(false);
+	spectre_v4_enable_mitigation(NULL);
 }
 
 /*
...
@@ -57,9 +57,6 @@ config KVM_ARM_PMU
 	  Adds support for a virtual Performance Monitoring Unit (PMU) in
 	  virtual machines.
 
-config KVM_INDIRECT_VECTORS
-	def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
-
 endif # KVM
 
 endif # VIRTUALIZATION
@@ -1256,6 +1256,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
+static int kvm_map_vectors(void)
+{
+	/*
+	 * SV2  = ARM64_SPECTRE_V2
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !SV2 + !HEL2 -> use direct vectors
+	 *  SV2 + !HEL2 -> use hardened vectors in place
+	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+
+	return 0;
+}
+
 static void cpu_init_hyp_mode(void)
 {
 	phys_addr_t pgd_ptr;
@@ -1292,7 +1326,7 @@ static void cpu_init_hyp_mode(void)
 	 * at EL2.
 	 */
 	if (this_cpu_has_cap(ARM64_SSBS) &&
-	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
 		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
 	}
 }
@@ -1549,10 +1583,6 @@ static int init_hyp_mode(void)
 		}
 	}
 
-	err = hyp_map_aux_data();
-	if (err)
-		kvm_err("Cannot map host auxiliary data: %d\n", err);
-
 	return 0;
 
 out_err:
...
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir)				\
 		    -DDISABLE_BRANCH_PROFILING		\
 		    $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-$(CONFIG_KVM) += vhe/ nvhe/
-obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
@@ -116,35 +116,6 @@ el1_hvc_guest:
 			  ARM_SMCCC_ARCH_WORKAROUND_2)
 	cbnz	w1, el1_trap
 
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb	arm64_enable_wa2_handling
-	b	wa2_end
-alternative_cb_end
-	get_vcpu_ptr	x2, x0
-	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	// Sanitize the argument and update the guest flags
-	ldr	x1, [sp, #8]			// Guest's x1
-	clz	w1, w1				// Murphy's device:
-	lsr	w1, w1, #5			// w1 = !!w1 without using
-	eor	w1, w1, #1			// the flags...
-	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
-	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	/* Check that we actually need to perform the call */
-	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
-	cbz	x0, wa2_end
-
-	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
-	smc	#0
-
-	/* Don't leak data from the SMC call */
-	mov	x3, xzr
-wa2_end:
-	mov	x2, xzr
-	mov	x1, xzr
-#endif
-
 wa_epilogue:
 	mov	x0, xzr
 	add	sp, sp, #16
@@ -288,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector)
 	valid_vect	el1_error		// Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 .macro hyp_ventry
 	.align 7
 1:	esb
@@ -338,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
 1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
 	.org 1b
 SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif
@@ -479,39 +479,6 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
-{
-	if (!cpus_have_final_cap(ARM64_SSBD))
-		return false;
-
-	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
-
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
-	/*
-	 * The host runs with the workaround always present. If the
-	 * guest wants it disabled, so be it...
-	 */
-	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
-		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
-}
-
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
-	/*
-	 * If the guest has disabled the workaround, bring it back on.
-	 */
-	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
-		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
-}
-
 static inline void __kvm_unexpected_el2_exception(void)
 {
 	unsigned long addr, fixup;
...
@@ -202,8 +202,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__debug_switch_to_guest(vcpu);
 
-	__set_guest_arch_workaround_state(vcpu);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu, host_ctxt);
@@ -211,8 +209,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
 
-	__set_host_arch_workaround_state(vcpu);
-
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
 	__timer_disable_traps(vcpu);
...
@@ -131,8 +131,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	sysreg_restore_guest_state_vhe(guest_ctxt);
 	__debug_switch_to_guest(vcpu);
 
-	__set_guest_arch_workaround_state(vcpu);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu, host_ctxt);
@@ -140,8 +138,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
 
-	__set_host_arch_workaround_state(vcpu);
-
 	sysreg_save_guest_state_vhe(guest_ctxt);
 
 	__deactivate_traps(vcpu);
...
@@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 		feature = smccc_get_arg1(vcpu);
 		switch (feature) {
 		case ARM_SMCCC_ARCH_WORKAROUND_1:
-			switch (kvm_arm_harden_branch_predictor()) {
-			case KVM_BP_HARDEN_UNKNOWN:
+			switch (arm64_get_spectre_v2_state()) {
+			case SPECTRE_VULNERABLE:
 				break;
-			case KVM_BP_HARDEN_WA_NEEDED:
+			case SPECTRE_MITIGATED:
 				val = SMCCC_RET_SUCCESS;
 				break;
-			case KVM_BP_HARDEN_NOT_REQUIRED:
+			case SPECTRE_UNAFFECTED:
 				val = SMCCC_RET_NOT_REQUIRED;
 				break;
 			}
 			break;
 		case ARM_SMCCC_ARCH_WORKAROUND_2:
-			switch (kvm_arm_have_ssbd()) {
-			case KVM_SSBD_FORCE_DISABLE:
-			case KVM_SSBD_UNKNOWN:
+			switch (arm64_get_spectre_v4_state()) {
+			case SPECTRE_VULNERABLE:
 				break;
-			case KVM_SSBD_KERNEL:
-				val = SMCCC_RET_SUCCESS;
-				break;
-			case KVM_SSBD_FORCE_ENABLE:
-			case KVM_SSBD_MITIGATED:
+			case SPECTRE_MITIGATED:
+				/*
+				 * SSBS everywhere: Indicate no firmware
+				 * support, as the SSBS support will be
+				 * indicated to the guest and the default is
+				 * safe.
+				 *
+				 * Otherwise, expose a permanent mitigation
+				 * to the guest, and hide SSBS so that the
+				 * guest stays protected.
+				 */
+				if (cpus_have_final_cap(ARM64_SSBS))
+					break;
+				fallthrough;
+			case SPECTRE_UNAFFECTED:
 				val = SMCCC_RET_NOT_REQUIRED;
 				break;
 			}
...
@@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid)
 {
 	switch (regid) {
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-		switch (kvm_arm_harden_branch_predictor()) {
-		case KVM_BP_HARDEN_UNKNOWN:
+		switch (arm64_get_spectre_v2_state()) {
+		case SPECTRE_VULNERABLE:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
-		case KVM_BP_HARDEN_WA_NEEDED:
+		case SPECTRE_MITIGATED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
-		case KVM_BP_HARDEN_NOT_REQUIRED:
+		case SPECTRE_UNAFFECTED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
 		}
 		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
-		switch (kvm_arm_have_ssbd()) {
-		case KVM_SSBD_FORCE_DISABLE:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
-		case KVM_SSBD_KERNEL:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
-		case KVM_SSBD_FORCE_ENABLE:
-		case KVM_SSBD_MITIGATED:
+		switch (arm64_get_spectre_v4_state()) {
+		case SPECTRE_MITIGATED:
+			/*
+			 * As for the hypercall discovery, we pretend we
+			 * don't have any FW mitigation if SSBS is there at
+			 * all times.
+			 */
+			if (cpus_have_final_cap(ARM64_SSBS))
+				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+			fallthrough;
+		case SPECTRE_UNAFFECTED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
-		case KVM_SSBD_UNKNOWN:
-		default:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+		case SPECTRE_VULNERABLE:
+			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 		}
 	}
 
@@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		val = kvm_psci_version(vcpu, vcpu->kvm);
 		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
 		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
-		if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-		    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
-			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
 		break;
 	default:
 		return -ENOENT;
@@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
 			return -EINVAL;
 
-		wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
-		if (get_kernel_wa_level(reg->id) < wa_level)
-			return -EINVAL;
-
 		/* The enabled bit must not be set unless the level is AVAIL. */
-		if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-		    wa_level != val)
+		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
 			return -EINVAL;
 
-		/* Are we finished or do we need to check the enable bit ? */
-		if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
-			return 0;
-
 		/*
-		 * If this kernel supports the workaround to be switched on
-		 * or off, make sure it matches the requested setting.
+		 * Map all the possible incoming states to the only two we
+		 * really want to deal with.
 		 */
-		switch (wa_level) {
-		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
-			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
-			    val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 			break;
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
 		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
-			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
 			break;
+		default:
+			return -EINVAL;
 		}
 
+		/*
+		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+		 * other way around.
+		 */
+		if (get_kernel_wa_level(reg->id) < wa_level)
+			return -EINVAL;
+
 		return 0;
 	default:
 		return -ENOENT;
...
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		vcpu->arch.reset_state.reset = false;
 	}
 
-	/* Default workaround setup is enabled (if supported) */
-	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
 	/* Reset timer */
 	ret = kvm_timer_vcpu_reset(vcpu);
 out:
...
@@ -1131,6 +1131,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		if (!vcpu_has_sve(vcpu))
 			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
 		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
+		if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
+		    arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+			val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
 		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
...