Commit 14ef9d04 authored Sep 30, 2020 by Marc Zyngier

    Merge branch 'kvm-arm64/hyp-pcpu' into kvmarm-master/next

    Signed-off-by: Marc Zyngier <maz@kernel.org>

Parents: 2e02cbb2 816c347f

Showing 43 changed files with 1262 additions and 1197 deletions (+1262, -1197):
  arch/arm64/Kconfig                                 +0    -26
  arch/arm64/include/asm/assembler.h                 +19   -10
  arch/arm64/include/asm/cpucaps.h                   +2    -2
  arch/arm64/include/asm/cpufeature.h                +0    -24
  arch/arm64/include/asm/hyp_image.h                 +36   -0
  arch/arm64/include/asm/kvm_asm.h                   +53   -53
  arch/arm64/include/asm/kvm_emulate.h               +0    -14
  arch/arm64/include/asm/kvm_host.h                  +1    -41
  arch/arm64/include/asm/kvm_mmu.h                   +8    -82
  arch/arm64/include/asm/mmu.h                       +1    -10
  arch/arm64/include/asm/percpu.h                    +26   -2
  arch/arm64/include/asm/processor.h                 +15   -29
  arch/arm64/include/asm/spectre.h                   +32   -0
  arch/arm64/include/uapi/asm/kvm.h                  +9    -0
  arch/arm64/kernel/Makefile                         +1    -2
  arch/arm64/kernel/cpu_errata.c                     +8    -479
  arch/arm64/kernel/cpufeature.c                     +3    -48
  arch/arm64/kernel/entry.S                          +3    -7
  arch/arm64/kernel/hibernate.c                      +1    -5
  arch/arm64/kernel/image-vars.h                     +0    -7
  arch/arm64/kernel/process.c                        +9    -14
  arch/arm64/kernel/proton-pack.c                    +792  -0
  arch/arm64/kernel/ssbd.c                           +0    -129
  arch/arm64/kernel/suspend.c                        +1    -2
  arch/arm64/kernel/vmlinux.lds.S                    +13   -0
  arch/arm64/kvm/Kconfig                             +0    -3
  arch/arm64/kvm/arm.c                               +83   -35
  arch/arm64/kvm/hyp/Makefile                        +1    -2
  arch/arm64/kvm/hyp/entry.S                         +3    -3
  arch/arm64/kvm/hyp/hyp-entry.S                     +0    -31
  arch/arm64/kvm/hyp/include/hyp/debug-sr.h          +2    -2
  arch/arm64/kvm/hyp/include/hyp/switch.h            +1    -34
  arch/arm64/kvm/hyp/nvhe/.gitignore                 +2    -0
  arch/arm64/kvm/hyp/nvhe/Makefile                   +33   -27
  arch/arm64/kvm/hyp/nvhe/hyp.lds.S                  +19   -0
  arch/arm64/kvm/hyp/nvhe/switch.c                   +8    -7
  arch/arm64/kvm/hyp/vhe/switch.c                    +7    -6
  arch/arm64/kvm/hyp/vhe/sysreg-sr.c                 +2    -2
  arch/arm64/kvm/hypercalls.c                        +21   -12
  arch/arm64/kvm/pmu.c                               +8    -5
  arch/arm64/kvm/psci.c                              +36   -38
  arch/arm64/kvm/reset.c                             +0    -4
  arch/arm64/kvm/sys_regs.c                          +3    -0
arch/arm64/Kconfig
@@ -1165,32 +1165,6 @@ config UNMAP_KERNEL_AT_EL0
 
 	  If unsure, say Y.
 
-config HARDEN_BRANCH_PREDICTOR
-	bool "Harden the branch predictor against aliasing attacks" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors rely on
-	  being able to manipulate the branch predictor for a victim context by
-	  executing aliasing branches in the attacker context. Such attacks
-	  can be partially mitigated against by clearing internal branch
-	  predictor state and limiting the prediction logic in some situations.
-
-	  This config option will take CPU-specific actions to harden the
-	  branch predictor against aliasing attacks and may rely on specific
-	  instruction sequences or control bits being set by the system
-	  firmware.
-
-	  If unsure, say Y.
-
-config ARM64_SSBD
-	bool "Speculative Store Bypass Disable" if EXPERT
-	default y
-	help
-	  This enables mitigation of the bypassing of previous stores
-	  by speculative loads.
-
-	  If unsure, say Y.
-
 config RODATA_FULL_DEFAULT_ENABLED
 	bool "Apply r/o permissions of VM areas also to their linear aliases"
 	default y
arch/arm64/include/asm/assembler.h
@@ -218,6 +218,23 @@ lr	.req	x30		// link register
 	str	\src, [\tmp, :lo12:\sym]
 	.endm
 
+	/*
+	 * @dst: destination register
+	 */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+	.macro	this_cpu_offset, dst
+	mrs	\dst, tpidr_el2
+	.endm
+#else
+	.macro	this_cpu_offset, dst
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+	mrs	\dst, tpidr_el1
+alternative_else
+	mrs	\dst, tpidr_el2
+alternative_endif
+	.endm
+#endif
+
 	/*
 	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
 	 * @sym: The name of the per-cpu variable
@@ -226,11 +243,7 @@ lr	.req	x30		// link register
 	.macro adr_this_cpu, dst, sym, tmp
 	adrp	\tmp, \sym
 	add	\dst, \tmp, #:lo12:\sym
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	mrs	\tmp, tpidr_el1
-alternative_else
-	mrs	\tmp, tpidr_el2
-alternative_endif
+	this_cpu_offset \tmp
 	add	\dst, \dst, \tmp
 	.endm
 
@@ -241,11 +254,7 @@ alternative_endif
 	 */
 	.macro ldr_this_cpu dst, sym, tmp
 	adr_l	\dst, \sym
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	mrs	\tmp, tpidr_el1
-alternative_else
-	mrs	\tmp, tpidr_el2
-alternative_endif
+	this_cpu_offset \tmp
 	ldr	\dst, [\dst, \tmp]
 	.endm
arch/arm64/include/asm/cpucaps.h
@@ -31,13 +31,13 @@
 #define ARM64_HAS_DCPOP				21
 #define ARM64_SVE				22
 #define ARM64_UNMAP_KERNEL_AT_EL0		23
-#define ARM64_HARDEN_BRANCH_PREDICTOR		24
+#define ARM64_SPECTRE_V2			24
 #define ARM64_HAS_RAS_EXTN			25
 #define ARM64_WORKAROUND_843419			26
 #define ARM64_HAS_CACHE_IDC			27
 #define ARM64_HAS_CACHE_DIC			28
 #define ARM64_HW_DBM				29
-#define ARM64_SSBD				30
+#define ARM64_SPECTRE_V4			30
 #define ARM64_MISMATCHED_CACHE_TYPE		31
 #define ARM64_HAS_STAGE2_FWB			32
 #define ARM64_HAS_CRC32				33
arch/arm64/include/asm/cpufeature.h
@@ -698,30 +698,6 @@ static inline bool system_supports_tlb_range(void)
 		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
-#define ARM64_BP_HARDEN_UNKNOWN		-1
-#define ARM64_BP_HARDEN_WA_NEEDED	0
-#define ARM64_BP_HARDEN_NOT_REQUIRED	1
-
-int get_spectre_v2_workaround_state(void);
-
-#define ARM64_SSBD_UNKNOWN		-1
-#define ARM64_SSBD_FORCE_DISABLE	0
-#define ARM64_SSBD_KERNEL		1
-#define ARM64_SSBD_FORCE_ENABLE		2
-#define ARM64_SSBD_MITIGATED		3
-
-static inline int arm64_get_ssbd_state(void)
-{
-#ifdef CONFIG_ARM64_SSBD
-	extern int ssbd_state;
-	return ssbd_state;
-#else
-	return ARM64_SSBD_UNKNOWN;
-#endif
-}
-
-void arm64_set_ssbd_mitigation(bool state);
-
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
arch/arm64/include/asm/hyp_image.h (new file, mode 100644)

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Google LLC.
 * Written by David Brazdil <dbrazdil@google.com>
 */

#ifndef __ARM64_HYP_IMAGE_H__
#define __ARM64_HYP_IMAGE_H__

/*
 * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
 * to separate it from the kernel proper.
 */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

#ifdef LINKER_SCRIPT

/*
 * KVM nVHE ELF section names are prefixed with .hyp, to separate them
 * from the kernel proper.
 */
#define HYP_SECTION_NAME(NAME)	.hyp##NAME

/* Defines an ELF hyp section from input section @NAME and its subsections. */
#define HYP_SECTION(NAME) \
	HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) }

/*
 * Defines a linker script alias of a kernel-proper symbol referenced by
 * KVM nVHE hyp code.
 */
#define KVM_NVHE_ALIAS(sym)	kvm_nvhe_sym(sym) = sym;

#endif /* LINKER_SCRIPT */

#endif /* __ARM64_HYP_IMAGE_H__ */
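As a rough illustration of the __kvm_nvhe_ namespacing described above (the
symbol name hyp_counter is made up for this sketch and is not part of the
commit), kernel-proper C code refers to an object built into the nVHE hyp
image through kvm_nvhe_sym():

	#include <asm/hyp_image.h>

	/* Hypothetical counter defined somewhere in the nVHE hyp objects. */
	extern unsigned long kvm_nvhe_sym(hyp_counter);	/* i.e. __kvm_nvhe_hyp_counter */

	static unsigned long read_hyp_counter(void)
	{
		/* Kernel proper must always spell the prefixed name. */
		return kvm_nvhe_sym(hyp_counter);
	}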
arch/arm64/include/asm/kvm_asm.h
@@ -7,11 +7,9 @@
 #ifndef __ARM_KVM_ASM_H__
 #define __ARM_KVM_ASM_H__
 
+#include <asm/hyp_image.h>
 #include <asm/virt.h>
 
-#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
-#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
 #define ARM_EXIT_WITH_SERROR_BIT  31
 #define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@ -66,13 +64,6 @@
 #include <linux/mm.h>
 
-/*
- * Translate name of a symbol defined in nVHE hyp to the name seen
- * by kernel proper. All nVHE symbols are prefixed by the build system
- * to avoid clashes with the VHE variants.
- */
-#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
-
 #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
 #define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
@@ -84,21 +75,50 @@
 	DECLARE_KVM_VHE_SYM(sym);		\
 	DECLARE_KVM_NVHE_SYM(sym)
 
+#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
+	DECLARE_PER_CPU(type, sym)
+#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
+	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
+
+#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
+	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
+	DECLARE_KVM_NVHE_PER_CPU(type, sym)
+
+/*
+ * Compute pointer to a symbol defined in nVHE percpu region.
+ * Returns NULL if percpu memory has not been allocated yet.
+ */
+#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
+#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
+	({									\
+		unsigned long base, off;					\
+		base = kvm_arm_hyp_percpu_base[cpu];				\
+		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
+		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
+		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
+	})
+
 #if defined(__KVM_NVHE_HYPERVISOR__)
-#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)
 #define CHOOSE_NVHE_SYM(sym)	sym
+#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)
 /* The nVHE hypervisor shouldn't even try to access VHE symbols */
 extern void *__nvhe_undefined_symbol;
 #define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
+#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
+#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)
-#elif defined(__KVM_VHE_HYPERVISOR)
+#elif defined(__KVM_VHE_HYPERVISOR__)
-#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)
 #define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)
 /* The VHE hypervisor shouldn't even try to access nVHE symbols */
 extern void *__vhe_undefined_symbol;
 #define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
+#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
+#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)
 #else
@@ -113,8 +133,18 @@ extern void *__vhe_undefined_symbol;
  * - Don't let the nVHE hypervisor have access to this, as it will
  *   pick the *wrong* symbol (yes, it runs at EL2...).
  */
-#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
+					   ? CHOOSE_VHE_SYM(sym)	\
 					   : CHOOSE_NVHE_SYM(sym))
+
+#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
+					   ? this_cpu_ptr(&sym)		\
+					   : this_cpu_ptr_nvhe_sym(sym))
+
+#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
+					   ? per_cpu_ptr(&sym, cpu)	\
+					   : per_cpu_ptr_nvhe_sym(sym, cpu))
+
 #define CHOOSE_VHE_SYM(sym)	sym
 #define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
@@ -141,11 +171,13 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_host_vector	CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
 #define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
+extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+DECLARE_KVM_NVHE_SYM(__per_cpu_start);
+DECLARE_KVM_NVHE_SYM(__per_cpu_end);
+
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
@@ -188,26 +220,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 		addr;							\
 	})
 
-/*
- * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
- * provided that sym is really a *symbol* and not a pointer obtained from
- * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
- * sparse quiet.
- */
-#define __hyp_this_cpu_ptr(sym)						\
-	({								\
-		void *__ptr;						\
-		__verify_pcpu_ptr(&sym);				\
-		__ptr = hyp_symbol_addr(sym);				\
-		__ptr += read_sysreg(tpidr_el2);			\
-		(typeof(sym) __kernel __force *)__ptr;			\
-	 })
-
-#define __hyp_this_cpu_read(sym)					\
-	({								\
-		*__hyp_this_cpu_ptr(sym);				\
-	 })
-
 #define __KVM_EXTABLE(from, to)						\
 	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
 	"	.align		3\n"					\
@@ -238,20 +250,8 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 #else /* __ASSEMBLY__ */
 
-.macro hyp_adr_this_cpu reg, sym, tmp
-	adr_l	\reg, \sym
-	mrs	\tmp, tpidr_el2
-	add	\reg, \reg, \tmp
-.endm
-
-.macro hyp_ldr_this_cpu reg, sym, tmp
-	adr_l	\reg, \sym
-	mrs	\tmp, tpidr_el2
-	ldr	\reg, [\reg, \tmp]
-.endm
-
 .macro get_host_ctxt reg, tmp
-	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+	adr_this_cpu \reg, kvm_host_data, \tmp
 	add	\reg, \reg, #HOST_DATA_CONTEXT
 .endm
@@ -261,12 +261,12 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 .endm
 
 .macro get_loaded_vcpu vcpu, ctxt
-	hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
 .macro set_loaded_vcpu vcpu, ctxt, tmp
-	hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
 	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
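The per-CPU helpers added above resolve a hyp per-CPU symbol either through the
normal kernel per-CPU machinery (VHE) or through kvm_arm_hyp_percpu_base (nVHE),
returning NULL before the nVHE per-CPU pages have been allocated. A hedged usage
sketch, with a made-up per-CPU variable hyp_stats (illustrative only, not part
of this commit):

	/* Hypothetical per-CPU variable shared with the hypervisor. */
	DECLARE_KVM_HYP_PER_CPU(unsigned long, hyp_stats);

	static void dump_hyp_stats(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			/* NULL on nVHE until the hyp per-CPU pages exist. */
			unsigned long *p = per_cpu_ptr_hyp_sym(hyp_stats, cpu);

			if (p)
				pr_info("cpu%d: %lu\n", cpu, *p);
		}
	}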
arch/arm64/include/asm/kvm_emulate.h
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
-						      bool flag)
-{
-	if (flag)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-	else
-		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
arch/arm64/include/asm/kvm_host.h
@@ -568,7 +568,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
 
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
@@ -634,46 +634,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-#define KVM_BP_HARDEN_UNKNOWN		-1
-#define KVM_BP_HARDEN_WA_NEEDED		0
-#define KVM_BP_HARDEN_NOT_REQUIRED	1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
-	switch (get_spectre_v2_workaround_state()) {
-	case ARM64_BP_HARDEN_WA_NEEDED:
-		return KVM_BP_HARDEN_WA_NEEDED;
-	case ARM64_BP_HARDEN_NOT_REQUIRED:
-		return KVM_BP_HARDEN_NOT_REQUIRED;
-	case ARM64_BP_HARDEN_UNKNOWN:
-	default:
-		return KVM_BP_HARDEN_UNKNOWN;
-	}
-}
-
-#define KVM_SSBD_UNKNOWN		-1
-#define KVM_SSBD_FORCE_DISABLE		0
-#define KVM_SSBD_KERNEL			1
-#define KVM_SSBD_FORCE_ENABLE		2
-#define KVM_SSBD_MITIGATED		3
-
-static inline int kvm_arm_have_ssbd(void)
-{
-	switch (arm64_get_ssbd_state()) {
-	case ARM64_SSBD_FORCE_DISABLE:
-		return KVM_SSBD_FORCE_DISABLE;
-	case ARM64_SSBD_KERNEL:
-		return KVM_SSBD_KERNEL;
-	case ARM64_SSBD_FORCE_ENABLE:
-		return KVM_SSBD_FORCE_ENABLE;
-	case ARM64_SSBD_MITIGATED:
-		return KVM_SSBD_MITIGATED;
-	case ARM64_SSBD_UNKNOWN:
-	default:
-		return KVM_SSBD_UNKNOWN;
-	}
-}
-
 void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
arch/arm64/include/asm/kvm_mmu.h
@@ -9,6 +9,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/cpufeature.h>
 
 /*
@@ -207,19 +208,17 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
  *
- * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
- *   hardening sequence is placed in one of the vector slots, which is
- *   executed before jumping to the real vectors.
+ * - If the CPU is affected by Spectre-v2, the hardening sequence is
+ *   placed in one of the vector slots, which is executed before jumping
+ *   to the real vectors.
  *
- * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
- *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
- *   hardening sequence is mapped next to the idmap page, and executed
- *   before jumping to the real vectors.
+ * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ *   containing the hardening sequence is mapped next to the idmap page,
+ *   and executed before jumping to the real vectors.
  *
  * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
  *   empty slot is selected, mapped next to the idmap page, and
@@ -229,19 +228,16 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
-#include <asm/mmu.h>
-
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
-/*  This is called on both VHE and !VHE systems */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
 	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 	int slot = -1;
 
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
 		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 		slot = data->hyp_vectors_slot;
 	}
@@ -258,76 +254,6 @@ static inline void *kvm_get_hyp_vector(void)
 	return vect;
 }
 
-/*  This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
-	/*
-	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
-	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
-	 *
-	 * !HBP + !HEL2 -> use direct vectors
-	 *  HBP + !HEL2 -> use hardened vectors in place
-	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
-	 *  HBP +  HEL2 -> use hardened vertors and use exec mapping
-	 */
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-	}
-
-	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
-		/*
-		 * Always allocate a spare vector slot, as we don't
-		 * know yet which CPUs have a BP hardening slot that
-		 * we can reuse.
-		 */
-		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-		return create_hyp_exec_mappings(vect_pa, size,
-						&__kvm_bp_vect_base);
-	}
-
-	return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
-	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_ARM64_SSBD
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-static inline int hyp_map_aux_data(void)
-{
-	int cpu, err;
-
-	for_each_possible_cpu(cpu) {
-		u64 *ptr;
-
-		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
-		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-#else
-static inline int hyp_map_aux_data(void)
-{
-	return 0;
-}
-#endif
-
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
arch/arm64/include/asm/mmu.h
@@ -45,7 +45,6 @@ struct bp_hardening_data {
 	bp_hardening_cb_t	fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -57,21 +56,13 @@ static inline void arm64_apply_bp_hardening(void)
 {
 	struct bp_hardening_data *d;
 
-	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
 		return;
 
 	d = arm64_get_bp_hardening_data();
 	if (d->fn)
 		d->fn();
 }
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-	return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)	{ }
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 extern void arm64_memblock_init(void);
 extern void paging_init(void);
arch/arm64/include/asm/percpu.h
@@ -19,7 +19,16 @@ static inline void set_my_cpu_offset(unsigned long off)
 	:: "r" (off) : "memory");
 }
 
-static inline unsigned long __my_cpu_offset(void)
+static inline unsigned long __hyp_my_cpu_offset(void)
+{
+	/*
+	 * Non-VHE hyp code runs with preemption disabled. No need to hazard
+	 * the register access against barrier() as in __kern_my_cpu_offset.
+	 */
+	return read_sysreg(tpidr_el2);
+}
+
+static inline unsigned long __kern_my_cpu_offset(void)
 {
 	unsigned long off;
@@ -35,7 +44,12 @@ static inline unsigned long __my_cpu_offset(void)
 	return off;
 }
 
-#define __my_cpu_offset __my_cpu_offset()
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define __my_cpu_offset __hyp_my_cpu_offset()
+#else
+#define __my_cpu_offset __kern_my_cpu_offset()
+#endif
 
 #define PERCPU_RW_OPS(sz)						\
 static inline unsigned long __percpu_read_##sz(void *ptr)		\
@@ -227,4 +241,14 @@ PERCPU_RET_OP(add, add, ldadd)
 
 #include <asm-generic/percpu.h>
 
+/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
+#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
+#undef	this_cpu_ptr
+#define	this_cpu_ptr		raw_cpu_ptr
+#undef	__this_cpu_read
+#define	__this_cpu_read		raw_cpu_read
+#undef	__this_cpu_write
+#define	__this_cpu_write	raw_cpu_write
+#endif
+
 #endif /* __ASM_PERCPU_H */
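With the split above, the same per-CPU accessor code can be built for both the
kernel and the nVHE hypervisor; __KVM_NVHE_HYPERVISOR__ decides which function,
and hence which system register, supplies the offset. A hedged sketch (the
variable demo_events is made up and not part of the commit):

	#include <linux/percpu.h>

	/* Hypothetical per-CPU counter, compiled into kernel and nVHE objects. */
	DEFINE_PER_CPU(unsigned long, demo_events);

	static inline void demo_count_event(void)
	{
		/*
		 * Resolved via __my_cpu_offset: __kern_my_cpu_offset() in
		 * kernel builds, __hyp_my_cpu_offset() (tpidr_el2) in nVHE builds.
		 */
		__this_cpu_inc(demo_events);
	}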
arch/arm64/include/asm/processor.h
@@ -38,6 +38,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pointer_auth.h>
 #include <asm/ptrace.h>
+#include <asm/spectre.h>
 #include <asm/types.h>
 
 /*
@@ -197,40 +198,15 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }
 
-static inline void set_ssbs_bit(struct pt_regs *regs)
-{
-	regs->pstate |= PSR_SSBS_BIT;
-}
-
-static inline void set_compat_ssbs_bit(struct pt_regs *regs)
-{
-	regs->pstate |= PSR_AA32_SSBS_BIT;
-}
-
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
 	start_thread_common(regs, pc);
 	regs->pstate = PSR_MODE_EL0t;
-
-	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		set_ssbs_bit(regs);
-
+	spectre_v4_enable_task_mitigation(current);
 	regs->sp = sp;
 }
 
-static inline bool is_ttbr0_addr(unsigned long addr)
-{
-	/* entry assembly clears tags for TTBR0 addrs */
-	return addr < TASK_SIZE;
-}
-
-static inline bool is_ttbr1_addr(unsigned long addr)
-{
-	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
-	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
-}
-
 #ifdef CONFIG_COMPAT
 static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 				       unsigned long sp)
@@ -244,13 +220,23 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate |= PSR_AA32_E_BIT;
 #endif
 
-	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		set_compat_ssbs_bit(regs);
-
+	spectre_v4_enable_task_mitigation(current);
 	regs->compat_sp = sp;
 }
 #endif
 
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+	/* entry assembly clears tags for TTBR0 addrs */
+	return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+}
+
 /* Forward declaration, a strange C thing */
 struct task_struct;
arch/arm64/include/asm/spectre.h (new file, mode 100644)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Interface for managing mitigations for Spectre vulnerabilities.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

#include <asm/cpufeature.h>

/* Watch out, ordering is important here. */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

struct task_struct;

enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

#endif	/* __ASM_SPECTRE_H */
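One reason the enum ordering above matters: per-CPU detection results can be
folded into a system-wide state by keeping the numerically largest (worst)
value. The snippet below is only an illustrative sketch of that idea, not the
kernel's own update logic:

	#include <asm/spectre.h>

	/*
	 * Fold a newly detected CPU state into a system-wide state, keeping
	 * the worst case; relies on UNAFFECTED < MITIGATED < VULNERABLE.
	 */
	static enum mitigation_state fold_state(enum mitigation_state system,
						enum mitigation_state cpu)
	{
		return cpu > system ? cpu : system;
	}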
arch/arm64/include/uapi/asm/kvm.h
@@ -257,6 +257,15 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL		0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL		1
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED	2
 
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2	KVM_REG_ARM_FW_REG(2)
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL		0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN		1
arch/arm64/kernel/Makefile
@@ -19,7 +19,7 @@ obj-y			:= debug-monitors.o entry.o irq.o fpsimd.o	\
 			   return_address.o cpuinfo.o cpu_errata.o		\
 			   cpufeature.o alternative.o cacheinfo.o		\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
-			   syscall.o
+			   syscall.o proton-pack.o
 
 targets			+= efi-entry.o
@@ -59,7 +59,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)		+= sdei.o
-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)		+= scs.o
arch/arm64/kernel/cpu_errata.c
@@ -106,365 +106,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
-				const char *hyp_vecs_end)
-{
-	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
-	int i;
-
-	for (i = 0; i < SZ_2K; i += 0x80)
-		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
-	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
-				    const char *hyp_vecs_start,
-				    const char *hyp_vecs_end)
-{
-	static DEFINE_RAW_SPINLOCK(bp_lock);
-	int cpu, slot = -1;
-
-	/*
-	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
-	 * we're a guest. Skip the hyp-vectors work.
-	 */
-	if (!hyp_vecs_start) {
-		__this_cpu_write(bp_hardening_data.fn, fn);
-		return;
-	}
-
-	raw_spin_lock(&bp_lock);
-	for_each_possible_cpu(cpu) {
-		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
-			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
-			break;
-		}
-	}
-
-	if (slot == -1) {
-		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
-		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
-	}
-
-	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
-	__this_cpu_write(bp_hardening_data.fn, fn);
-	raw_spin_unlock(&bp_lock);
-}
-#else
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
-				      const char *hyp_vecs_start,
-				      const char *hyp_vecs_end)
-{
-	__this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
-
-#include <linux/arm-smccc.h>
-
-static void __maybe_unused call_smc_arch_workaround_1(void)
-{
-	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void call_hvc_arch_workaround_1(void)
-{
-	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void qcom_link_stack_sanitization(void)
-{
-	u64 tmp;
-
-	asm volatile("mov	%0, x30		\n"
-		     ".rept	16		\n"
-		     "bl	. + 4		\n"
-		     ".endr			\n"
-		     "mov	x30, %0		\n"
-		     : "=&r" (tmp));
-}
-
-static bool __nospectre_v2;
-static int __init parse_nospectre_v2(char *str)
-{
-	__nospectre_v2 = true;
-	return 0;
-}
-early_param("nospectre_v2", parse_nospectre_v2);
-
-/*
- * -1: No workaround
- *  0: No workaround required
- *  1: Workaround installed
- */
-static int detect_harden_bp_fw(void)
-{
-	bp_hardening_cb_t cb;
-	void *smccc_start, *smccc_end;
-	struct arm_smccc_res res;
-	u32 midr = read_cpuid_id();
-
-	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-
-	switch ((int)res.a0) {
-	case 1:
-		/* Firmware says we're just fine */
-		return 0;
-	case 0:
-		break;
-	default:
-		return -1;
-	}
-
-	switch (arm_smccc_1_1_get_conduit()) {
-	case SMCCC_CONDUIT_HVC:
-		cb = call_hvc_arch_workaround_1;
-		/* This is a guest, no need to patch KVM vectors */
-		smccc_start = NULL;
-		smccc_end = NULL;
-		break;
-
-#if IS_ENABLED(CONFIG_KVM)
-	case SMCCC_CONDUIT_SMC:
-		cb = call_smc_arch_workaround_1;
-		smccc_start = __smccc_workaround_1_smc;
-		smccc_end = __smccc_workaround_1_smc +
-			__SMCCC_WORKAROUND_1_SMC_SZ;
-		break;
-#endif
-
-	default:
-		return -1;
-	}
-
-	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
-	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
-		cb = qcom_link_stack_sanitization;
-
-	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
-		install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
-	return 1;
-}
-
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
-
-static const struct ssbd_options {
-	const char	*str;
-	int		state;
-} ssbd_options[] = {
-	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
-	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
-	{ "kernel",	ARM64_SSBD_KERNEL, },
-};
-
-static int __init ssbd_cfg(char *buf)
-{
-	int i;
-
-	if (!buf || !buf[0])
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
-		int len = strlen(ssbd_options[i].str);
-
-		if (strncmp(buf, ssbd_options[i].str, len))
-			continue;
-
-		ssbd_state = ssbd_options[i].state;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-early_param("ssbd", ssbd_cfg);
-
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
-				       __le32 *origptr, __le32 *updptr,
-				       int nr_inst)
-{
-	u32 insn;
-
-	BUG_ON(nr_inst != 1);
-
-	switch (arm_smccc_1_1_get_conduit()) {
-	case SMCCC_CONDUIT_HVC:
-		insn = aarch64_insn_get_hvc_value();
-		break;
-	case SMCCC_CONDUIT_SMC:
-		insn = aarch64_insn_get_smc_value();
-		break;
-	default:
-		return;
-	}
-
-	*updptr = cpu_to_le32(insn);
-}
-
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
-				      __le32 *origptr, __le32 *updptr,
-				      int nr_inst)
-{
-	BUG_ON(nr_inst != 1);
-	/*
-	 * Only allow mitigation on EL1 entry/exit and guest
-	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
-	 * be flipped.
-	 */
-	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
-		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
-}
-
-void arm64_set_ssbd_mitigation(bool state)
-{
-	int conduit;
-
-	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
-		pr_info_once("SSBD disabled by kernel configuration\n");
-		return;
-	}
-
-	if (this_cpu_has_cap(ARM64_SSBS)) {
-		if (state)
-			asm volatile(SET_PSTATE_SSBS(0));
-		else
-			asm volatile(SET_PSTATE_SSBS(1));
-		return;
-	}
-
-	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
-				       NULL);
-
-	WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
-}
-
-static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
-				    int scope)
-{
-	struct arm_smccc_res res;
-	bool required = true;
-	s32 val;
-	bool this_cpu_safe = false;
-	int conduit;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-	if (cpu_mitigations_off())
-		ssbd_state = ARM64_SSBD_FORCE_DISABLE;
-
-	/* delay setting __ssb_safe until we get a firmware response */
-	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
-		this_cpu_safe = true;
-
-	if (this_cpu_has_cap(ARM64_SSBS)) {
-		if (!this_cpu_safe)
-			__ssb_safe = false;
-		required = false;
-		goto out_printmsg;
-	}
-
-	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-				       ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-
-	if (conduit == SMCCC_CONDUIT_NONE) {
-		ssbd_state = ARM64_SSBD_UNKNOWN;
-		if (!this_cpu_safe)
-			__ssb_safe = false;
-		return false;
-	}
-
-	val = (s32)res.a0;
-
-	switch (val) {
-	case SMCCC_RET_NOT_SUPPORTED:
-		ssbd_state = ARM64_SSBD_UNKNOWN;
-		if (!this_cpu_safe)
-			__ssb_safe = false;
-		return false;
-
-	/* machines with mixed mitigation requirements must not return this */
-	case SMCCC_RET_NOT_REQUIRED:
-		pr_info_once("%s mitigation not required\n", entry->desc);
-		ssbd_state = ARM64_SSBD_MITIGATED;
-		return false;
-
-	case SMCCC_RET_SUCCESS:
-		__ssb_safe = false;
-		required = true;
-		break;
-
-	case 1:	/* Mitigation not required on this CPU */
-		required = false;
-		break;
-
-	default:
-		WARN_ON(1);
-		if (!this_cpu_safe)
-			__ssb_safe = false;
-		return false;
-	}
-
-	switch (ssbd_state) {
-	case ARM64_SSBD_FORCE_DISABLE:
-		arm64_set_ssbd_mitigation(false);
-		required = false;
-		break;
-
-	case ARM64_SSBD_KERNEL:
-		if (required) {
-			__this_cpu_write(arm64_ssbd_callback_required, 1);
-			arm64_set_ssbd_mitigation(true);
-		}
-		break;
-
-	case ARM64_SSBD_FORCE_ENABLE:
-		arm64_set_ssbd_mitigation(true);
-		required = true;
-		break;
-
-	default:
-		WARN_ON(1);
-		break;
-	}
-
-out_printmsg:
-	switch (ssbd_state) {
-	case ARM64_SSBD_FORCE_DISABLE:
-		pr_info_once("%s disabled from command-line\n", entry->desc);
-		break;
-
-	case ARM64_SSBD_FORCE_ENABLE:
-		pr_info_once("%s forced from command-line\n", entry->desc);
-		break;
-	}
-
-	return required;
-}
-
-/* known invulnerable cores */
-static const struct midr_range arm64_ssb_cpus[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-	{},
-};
-
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -519,83 +160,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 	CAP_MIDR_RANGE_LIST(midr_list)
 
-/* Track overall mitigation state. We are only mitigated if all cores are ok */
-static bool __hardenbp_enab = true;
-static bool __spectrev2_safe = true;
-
-int get_spectre_v2_workaround_state(void)
-{
-	if (__spectrev2_safe)
-		return ARM64_BP_HARDEN_NOT_REQUIRED;
-
-	if (!__hardenbp_enab)
-		return ARM64_BP_HARDEN_UNKNOWN;
-
-	return ARM64_BP_HARDEN_WA_NEEDED;
-}
-
-/*
- * List of CPUs that do not need any Spectre-v2 mitigation at all.
- */
-static const struct midr_range spectre_v2_safe_list[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-	{ /* sentinel */ }
-};
-
-/*
- * Track overall bp hardening for all heterogeneous cores in the machine.
- * We are only considered "safe" if all booted cores are known safe.
- */
-static bool __maybe_unused
-check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
-{
-	int need_wa;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-	/* If the CPU has CSV2 set, we're safe */
-	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
-						 ID_AA64PFR0_CSV2_SHIFT))
-		return false;
-
-	/* Alternatively, we have a list of unaffected CPUs */
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
-		return false;
-
-	/* Fallback to firmware detection */
-	need_wa = detect_harden_bp_fw();
-	if (!need_wa)
-		return false;
-
-	__spectrev2_safe = false;
-
-	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
-		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
-		__hardenbp_enab = false;
-		return false;
-	}
-
-	/* forced off */
-	if (__nospectre_v2 || cpu_mitigations_off()) {
-		pr_info_once("spectrev2 mitigation disabled by command line option\n");
-		__hardenbp_enab = false;
-		return false;
-	}
-
-	if (need_wa < 0) {
-		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
-		__hardenbp_enab = false;
-	}
-
-	return (need_wa > 0);
-}
-
 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
@@ -887,9 +451,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	},
 #endif
 	{
-		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		.desc = "Spectre-v2",
+		.capability = ARM64_SPECTRE_V2,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-		.matches = check_branch_predictor,
+		.matches = has_spectre_v2,
+		.cpu_enable = spectre_v2_enable_mitigation,
 	},
 #ifdef CONFIG_RANDOMIZE_BASE
 	{
@@ -899,11 +465,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	},
 #endif
 	{
-		.desc = "Speculative Store Bypass Disable",
-		.capability = ARM64_SSBD,
+		.desc = "Spectre-v4",
+		.capability = ARM64_SPECTRE_V4,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-		.matches = has_ssbd_mitigation,
-		.midr_range_list = arm64_ssb_cpus,
+		.matches = has_spectre_v4,
+		.cpu_enable = spectre_v4_enable_mitigation,
 	},
 #ifdef CONFIG_ARM64_ERRATUM_1418040
 	{
@@ -956,40 +522,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 	}
 };
-
-ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-}
-
-ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	switch (get_spectre_v2_workaround_state()) {
-	case ARM64_BP_HARDEN_NOT_REQUIRED:
-		return sprintf(buf, "Not affected\n");
-	case ARM64_BP_HARDEN_WA_NEEDED:
-		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-	case ARM64_BP_HARDEN_UNKNOWN:
-	default:
-		return sprintf(buf, "Vulnerable\n");
-	}
-}
-
-ssize_t cpu_show_spec_store_bypass(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	if (__ssb_safe)
-		return sprintf(buf, "Not affected\n");
-
-	switch (ssbd_state) {
-	case ARM64_SSBD_KERNEL:
-	case ARM64_SSBD_FORCE_ENABLE:
-		if (IS_ENABLED(CONFIG_ARM64_SSBD))
-			return sprintf(buf,
-			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
-	}
-
-	return sprintf(buf, "Vulnerable\n");
-}
arch/arm64/kernel/cpufeature.c
@@ -227,7 +227,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
 				    FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
 	ARM64_FTR_END,
@@ -487,7 +487,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_pfr2[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -1583,48 +1583,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
 	WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-#ifdef CONFIG_ARM64_SSBD
-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
-{
-	if (user_mode(regs))
-		return 1;
-
-	if (instr & BIT(PSTATE_Imm_shift))
-		regs->pstate |= PSR_SSBS_BIT;
-	else
-		regs->pstate &= ~PSR_SSBS_BIT;
-
-	arm64_skip_faulting_instruction(regs, 4);
-	return 0;
-}
-
-static struct undef_hook ssbs_emulation_hook = {
-	.instr_mask	= ~(1U << PSTATE_Imm_shift),
-	.instr_val	= 0xd500401f | PSTATE_SSBS,
-	.fn		= ssbs_emulation_handler,
-};
-
-static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
-{
-	static bool undef_hook_registered = false;
-	static DEFINE_RAW_SPINLOCK(hook_lock);
-
-	raw_spin_lock(&hook_lock);
-	if (!undef_hook_registered) {
-		register_undef_hook(&ssbs_emulation_hook);
-		undef_hook_registered = true;
-	}
-	raw_spin_unlock(&hook_lock);
-
-	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
-		arm64_set_ssbd_mitigation(false);
-	} else {
-		arm64_set_ssbd_mitigation(true);
-	}
-}
-#endif /* CONFIG_ARM64_SSBD */
-
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1976,19 +1934,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
 		.min_field_value = 1,
 	},
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypassing Safe (SSBS)",
 		.capability = ARM64_SSBS,
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR1_EL1,
 		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
-		.cpu_enable = cpu_enable_ssbs,
 	},
-#endif
 #ifdef CONFIG_ARM64_CNP
 	{
 		.desc = "Common not Private translations",
arch/arm64/kernel/entry.S
@@ -132,9 +132,8 @@ alternative_else_nop_endif
 	 * them if required.
 	 */
 	.macro	apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb	arm64_enable_wa2_handling
-	b	.L__asm_ssbd_skip\@
+alternative_cb	spectre_v4_patch_fw_mitigation_enable
+	b	.L__asm_ssbd_skip\@		// Patched to NOP
 alternative_cb_end
 	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
 	cbz	\tmp2,	.L__asm_ssbd_skip\@
@@ -142,11 +141,10 @@ alternative_cb_end
 	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 	mov	w1, #\state
-alternative_cb	arm64_update_smccc_conduit
+alternative_cb	spectre_v4_patch_fw_mitigation_conduit
 	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
-#endif
 	.endm
 
 	.macro	kernel_entry, el, regsize = 64
@@ -697,11 +695,9 @@ el0_irq_naked:
 	bl	trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	tbz	x22, #55, 1f
 	bl	do_el0_irq_bp_hardening
 1:
-#endif
 	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
arch/arm64/kernel/hibernate.c
@@ -332,11 +332,7 @@ int swsusp_arch_suspend(void)
 		 * mitigation off behind our back, let's set the state
 		 * to what we expect it to be.
 		 */
-		switch (arm64_get_ssbd_state()) {
-		case ARM64_SSBD_FORCE_ENABLE:
-		case ARM64_SSBD_KERNEL:
-			arm64_set_ssbd_mitigation(true);
-		}
+		spectre_v4_enable_mitigation(NULL);
 	}
 
 	local_daif_restore(flags);
arch/arm64/kernel/image-vars.h
@@ -61,18 +61,11 @@ __efistub__ctype		= _ctype;
  * memory mappings.
  */
 
-#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
-
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 
 /* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
-KVM_NVHE_ALIAS(kvm_host_data);
-KVM_NVHE_ALIAS(kvm_hyp_ctxt);
-KVM_NVHE_ALIAS(kvm_hyp_vector);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
 
 /* Kernel constant needed to compute idmap addresses. */
arch/arm64/kernel/process.c
...
@@ -21,6 +21,7 @@
 #include <linux/lockdep.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
...
@@ -421,8 +422,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		    cpus_have_const_cap(ARM64_HAS_UAO))
 			childregs->pstate |= PSR_UAO_BIT;

-		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			set_ssbs_bit(childregs);
+		spectre_v4_enable_task_mitigation(p);

 		if (system_uses_irq_prio_masking())
 			childregs->pmr_save = GIC_PRIO_IRQON;
...
@@ -472,8 +472,6 @@ void uao_thread_switch(struct task_struct *next)
  */
 static void ssbs_thread_switch(struct task_struct *next)
 {
-	struct pt_regs *regs = task_pt_regs(next);
-
 	/*
 	 * Nothing to do for kernel threads, but 'regs' may be junk
 	 * (e.g. idle task) so check the flags and bail early.
...
@@ -485,18 +483,10 @@ static void ssbs_thread_switch(struct task_struct *next)
 	 * If all CPUs implement the SSBS extension, then we just need to
 	 * context-switch the PSTATE field.
 	 */
-	if (cpu_have_feature(cpu_feature(SSBS)))
+	if (cpus_have_const_cap(ARM64_SSBS))
 		return;

-	/* If the mitigation is enabled, then we leave SSBS clear. */
-	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
-	    test_tsk_thread_flag(next, TIF_SSBD))
-		return;
-
-	if (compat_user_mode(regs))
-		set_compat_ssbs_bit(regs);
-	else if (user_mode(regs))
-		set_ssbs_bit(regs);
+	spectre_v4_enable_task_mitigation(next);
 }

 /*
...
@@ -620,6 +610,11 @@ void arch_setup_new_exec(void)
 	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

 	ptrauth_thread_init_user(current);
+
+	if (task_spec_ssb_noexec(current)) {
+		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+					 PR_SPEC_ENABLE);
+	}
 }

 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
...
arch/arm64/kernel/proton-pack.c
0 → 100644

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/spectre.h>
#include <asm/traps.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
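update_mitigation_state() only ever ratchets the recorded state towards a more severe value, and refuses to move at all once the system capabilities have been finalized. A minimal userspace analogue of that compare-and-swap ratchet, using C11 atomics (the enum mirrors asm/spectre.h; the function name here is illustrative only, not kernel code):

```c
#include <stdatomic.h>

enum mitigation_state { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };

/* Ratchet *oldp towards the numerically larger ("worse") state, never back. */
static void ratchet_state(_Atomic int *oldp, enum mitigation_state new)
{
	int state = atomic_load_explicit(oldp, memory_order_relaxed);

	do {
		if ((int)new <= state)
			return;	/* already at least this severe */
	} while (!atomic_compare_exchange_weak_explicit(oldp, &state, new,
							memory_order_relaxed,
							memory_order_relaxed));
}
```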
/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
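These cpu_show_*() handlers (together with cpu_show_spec_store_bypass() further down) are what userspace reads under /sys/devices/system/cpu/vulnerabilities/. A small hedged reader, assuming only that those files exist on the running kernel:

```c
#include <stdio.h>

/* Print the strings produced by the cpu_show_*() handlers above. */
int main(void)
{
	const char *files[] = {
		"/sys/devices/system/cpu/vulnerabilities/spectre_v1",
		"/sys/devices/system/cpu/vulnerabilities/spectre_v2",
		"/sys/devices/system/cpu/vulnerabilities/spec_store_bypass",
	};
	char line[256];

	for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f = fopen(files[i], "r");

		if (!f)
			continue;	/* file absent on older kernels */
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", files[i], line);
		fclose(f);
	}
	return 0;
}
```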
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	(1)

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

#ifdef CONFIG_KVM
#include <asm/cacheflush.h>
#include <asm/kvm_asm.h>

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

static enum mitigation_state spectre_v2_enable_sw_mitigation(void)
{
	u32 midr;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return SPECTRE_VULNERABLE;

	install_bp_hardening_cb(qcom_link_stack_sanitisation);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_sw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}
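ssbd_prctl_set() and ssbd_prctl_get() above are reached through the generic PR_SET_SPECULATION_CTRL/PR_GET_SPECULATION_CTRL prctls. A hedged userspace sketch of driving the Spectre-v4 mitigation for the current task (standard prctl API only; nothing beyond what the code above implements is assumed):

```c
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Ask for the mitigation: routes to ssbd_prctl_set(task, PR_SPEC_DISABLE). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* Read back the state reported by ssbd_prctl_get(). */
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	printf("spec_store_bypass prctl state: 0x%lx\n", state);
	return 0;
}
```

Note that PR_SPEC_DISABLE_NOEXEC is rejected here whenever the mitigation is forced on or off on the command line, since the kernel could not honour the promised toggle at execve() time.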
arch/arm64/kernel/ssbd.c
deleted 100644 → 0

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 */

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>

static void ssbd_ssbs_enable(struct task_struct *task)
{
	u64 val = is_compat_thread(task_thread_info(task)) ?
		  PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	task_pt_regs(task)->pstate |= val;
}

static void ssbd_ssbs_disable(struct task_struct *task)
{
	u64 val = is_compat_thread(task_thread_info(task)) ?
		  PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	task_pt_regs(task)->pstate &= ~val;
}

/*
 * prctl interface for SSBD
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	int state = arm64_get_ssbd_state();

	/* Unsupported */
	if (state == ARM64_SSBD_UNKNOWN)
		return -ENODEV;

	/* Treat the unaffected/mitigated state separately */
	if (state == ARM64_SSBD_MITIGATED) {
		switch (ctrl) {
		case PR_SPEC_ENABLE:
			return -EPERM;
		case PR_SPEC_DISABLE:
		case PR_SPEC_FORCE_DISABLE:
			return 0;
		}
	}

	/*
	 * Things are a bit backward here: the arm64 internal API
	 * *enables the mitigation* when the userspace API *disables
	 * speculation*. So much fun.
	 */
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (state == ARM64_SSBD_FORCE_ENABLE ||
		    task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		clear_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_enable(task);
		break;
	case PR_SPEC_DISABLE:
		if (state == ARM64_SSBD_FORCE_DISABLE)
			return -EPERM;
		task_set_spec_ssb_disable(task);
		set_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_disable(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		if (state == ARM64_SSBD_FORCE_DISABLE)
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		set_tsk_thread_flag(task, TIF_SSBD);
		ssbd_ssbs_disable(task);
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_UNKNOWN:
		return -ENODEV;
	case ARM64_SSBD_FORCE_ENABLE:
		return PR_SPEC_DISABLE;
	case ARM64_SSBD_KERNEL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case ARM64_SSBD_FORCE_DISABLE:
		return PR_SPEC_ENABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}
arch/arm64/kernel/suspend.c
...
@@ -72,8 +72,7 @@ void notrace __cpu_suspend_exit(void)
 	 * have turned the mitigation on. If the user has forcefully
 	 * disabled it, make sure their wishes are obeyed.
 	 */
-	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-		arm64_set_ssbd_mitigation(false);
+	spectre_v4_enable_mitigation(NULL);
 }

 /*
...
arch/arm64/kernel/vmlinux.lds.S
...
@@ -9,6 +9,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
+#include <asm/hyp_image.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/page.h>
...
@@ -21,12 +22,23 @@ ENTRY(_text)
 jiffies = jiffies_64;

+#ifdef CONFIG_KVM
 #define HYPERVISOR_EXTABLE					\
 	. = ALIGN(SZ_8);					\
 	__start___kvm_ex_table = .;				\
 	*(__kvm_ex_table)					\
 	__stop___kvm_ex_table = .;

+#define HYPERVISOR_PERCPU_SECTION				\
+	. = ALIGN(PAGE_SIZE);					\
+	HYP_SECTION_NAME(.data..percpu) : {			\
+		*(HYP_SECTION_NAME(.data..percpu))		\
+	}
+#else /* CONFIG_KVM */
+#define HYPERVISOR_EXTABLE
+#define HYPERVISOR_PERCPU_SECTION
+#endif
+
 #define HYPERVISOR_TEXT					\
 	/*						\
 	 * Align to 4 KB so that			\
...
@@ -190,6 +202,7 @@ SECTIONS
 	}

 	PERCPU_SECTION(L1_CACHE_BYTES)
+	HYPERVISOR_PERCPU_SECTION

 	.rela.dyn : ALIGN(8) {
 		*(.rela .rela*)
...
arch/arm64/kvm/Kconfig
...
@@ -57,9 +57,6 @@ config KVM_ARM_PMU
 	  Adds support for a virtual Performance Monitoring Unit (PMU) in
 	  virtual machines.

-config KVM_INDIRECT_VECTORS
-	def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
-
 endif # KVM

 endif # VIRTUALIZATION
arch/arm64/kvm/arm.c
...
@@ -46,10 +46,10 @@
 __asm__(".arch_extension	virt");
 #endif

-DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
-DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
-DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
+
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];

 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
...
@@ -1263,6 +1263,53 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }

+static unsigned long nvhe_percpu_size(void)
+{
+	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
+		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
+}
+
+static unsigned long nvhe_percpu_order(void)
+{
+	unsigned long size = nvhe_percpu_size();
+
+	return size ? get_order(size) : 0;
+}
+
+static int kvm_map_vectors(void)
+{
+	/*
+	 * SV2  = ARM64_SPECTRE_V2
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !SV2 + !HEL2 -> use direct vectors
+	 *  SV2 + !HEL2 -> use hardened vectors in place
+	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+
+	return 0;
+}
+
 static void cpu_init_hyp_mode(void)
 {
 	phys_addr_t pgd_ptr;
...
@@ -1279,8 +1326,8 @@ static void cpu_init_hyp_mode(void)
 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
 	 * so that we can use adr_l to access per-cpu variables in EL2.
 	 */
-	tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
-		     (unsigned long)kvm_ksym_ref(&kvm_host_data));
+	tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+		    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

 	pgd_ptr = kvm_mmu_get_httbr();
 	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
...
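For nVHE, each CPU gets its own copy of the hyp .data..percpu image (allocated further down in init_hyp_mode()), and tpidr_el2 above is set to the offset between that copy and the original section so EL2 can reach its per-CPU variables with adr_l. A simplified sketch of the same address calculation, using the kvm_arm_hyp_percpu_base[] array introduced here (hyp_percpu_addr() itself is a hypothetical helper for illustration, not kernel code):

```c
/*
 * Illustration only: where does the hyp copy of an nVHE per-CPU symbol live
 * for a given CPU?  The kernel never needs this helper; EL2 resolves the
 * same address implicitly via tpidr_el2 + adr_l.
 */
static unsigned long hyp_percpu_addr(unsigned long nvhe_sym, int cpu)
{
	unsigned long off = nvhe_sym -
			    (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);

	return kvm_arm_hyp_percpu_base[cpu] + off;
}
```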
@@ -1303,7 +1350,7 @@ static void cpu_init_hyp_mode(void)
 	 * at EL2.
 	 */
 	if (this_cpu_has_cap(ARM64_SSBS) &&
-	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
 		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
 	}
 }
...
@@ -1316,11 +1363,11 @@ static void cpu_hyp_reset(void)
 static void cpu_hyp_reinit(void)
 {
-	kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
+	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);

 	cpu_hyp_reset();

-	__this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector());
+	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();

 	if (is_kernel_in_hyp_mode())
 		kvm_timer_init_vhe();
...
@@ -1472,8 +1519,10 @@ static void teardown_hyp_mode(void)
 	int cpu;

 	free_hyp_pgds();
-	for_each_possible_cpu(cpu)
+	for_each_possible_cpu(cpu) {
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
+	}
 }

 /**
...
@@ -1506,6 +1555,24 @@ static int init_hyp_mode(void)
 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
 	}

+	/*
+	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
+	 */
+	for_each_possible_cpu(cpu) {
+		struct page *page;
+		void *page_addr;
+
+		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
+		if (!page) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		page_addr = page_address(page);
+		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
+		kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
+	}
+
 	/*
 	 * Map the Hyp-code called directly from the host
 	 */
...
@@ -1550,39 +1617,20 @@ static int init_hyp_mode(void)
 		}
 	}

+	/*
+	 * Map Hyp percpu pages
+	 */
 	for_each_possible_cpu(cpu) {
-		struct kvm_host_data *cpu_data;
-		struct kvm_cpu_context *hyp_ctxt;
-		unsigned long *vector;
+		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+		char *percpu_end = percpu_begin + nvhe_percpu_size();

-		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
-		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
+		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);

 		if (err) {
-			kvm_err("Cannot map host CPU state: %d\n", err);
+			kvm_err("Cannot map hyp percpu region\n");
 			goto out_err;
 		}
-
-		hyp_ctxt = per_cpu_ptr(&kvm_hyp_ctxt, cpu);
-		err = create_hyp_mappings(hyp_ctxt, hyp_ctxt + 1, PAGE_HYP);
-		if (err) {
-			kvm_err("Cannot map hyp context: %d\n", err);
-			goto out_err;
-		}
-
-		vector = per_cpu_ptr(&kvm_hyp_vector, cpu);
-		err = create_hyp_mappings(vector, vector + 1, PAGE_HYP);
-		if (err) {
-			kvm_err("Cannot map hyp guest vector address\n");
-			goto out_err;
-		}
 	}

-	err = hyp_map_aux_data();
-	if (err)
-		kvm_err("Cannot map host auxiliary data: %d\n", err);
-
 	return 0;
...
arch/arm64/kvm/hyp/Makefile
...
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
 		    -DDISABLE_BRANCH_PROFILING	\
 		    $(DISABLE_STACKLEAK_PLUGIN)

-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
-obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o
arch/arm64/kvm/hyp/entry.S
...
@@ -25,7 +25,7 @@ SYM_FUNC_START(__guest_enter)
 	// x1-x17: clobbered by macros
 	// x29: guest context

-	hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2
+	adr_this_cpu x1, kvm_hyp_ctxt, x2

 	// Store the hyp regs
 	save_callee_saved_regs x1
...
@@ -93,7 +93,7 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
 	// return address to tail call into hyp_panic. As a side effect, the
 	// current state is saved to the guest context but it will only be
 	// accurate if the guest had been completely restored.
-	hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1
+	adr_this_cpu x0, kvm_hyp_ctxt, x1
 	adr	x1, hyp_panic
 	str	x1, [x0, #CPU_XREG_OFFSET(30)]
...
@@ -131,7 +131,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// Store the guest's sp_el0
 	save_sp_el0	x1, x2

-	hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3
+	adr_this_cpu x2, kvm_hyp_ctxt, x3

 	// Macro ptrauth_switch_to_hyp format:
 	// 	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
...
arch/arm64/kvm/hyp/hyp-entry.S
...
@@ -63,35 +63,6 @@ el1_sync:				// Guest trapped into EL2
 			  ARM_SMCCC_ARCH_WORKAROUND_2)
 	cbnz	w1, el1_trap

-#ifdef CONFIG_ARM64_SSBD
-alternative_cb	arm64_enable_wa2_handling
-	b	wa2_end
-alternative_cb_end
-	get_vcpu_ptr	x2, x0
-	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	// Sanitize the argument and update the guest flags
-	ldr	x1, [sp, #8]			// Guest's x1
-	clz	w1, w1				// Murphy's device:
-	lsr	w1, w1, #5			// w1 = !!w1 without using
-	eor	w1, w1, #1			// the flags...
-	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
-	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	/* Check that we actually need to perform the call */
-	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
-	cbz	x0, wa2_end
-
-	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
-	smc	#0
-
-	/* Don't leak data from the SMC call */
-	mov	x3, xzr
-wa2_end:
-	mov	x2, xzr
-	mov	x1, xzr
-
-#endif
-
 wa_epilogue:
 	mov	x0, xzr
 	add	sp, sp, #16
...
@@ -216,7 +187,6 @@ SYM_CODE_START(__kvm_hyp_vector)
 	valid_vect	el1_error		// Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)

-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 .macro hyp_ventry
 	.align 7
 1:	esb
...
@@ -266,4 +236,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
 1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
 	.org 1b
 SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif
arch/arm64/kvm/hyp/include/hyp/debug-sr.h
...
@@ -135,7 +135,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;

-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
...
@@ -154,7 +154,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;

-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
...
arch/arm64/kvm/hyp/include/hyp/switch.h
...
@@ -383,7 +383,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;

-	ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt);
+	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
 	__ptrauth_save_key(ctxt, APIA);
 	__ptrauth_save_key(ctxt, APIB);
 	__ptrauth_save_key(ctxt, APDA);
...
@@ -476,39 +476,6 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }

-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
-{
-	if (!cpus_have_final_cap(ARM64_SSBD))
-		return false;
-
-	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
-
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
-	/*
-	 * The host runs with the workaround always present. If the
-	 * guest wants it disabled, so be it...
-	 */
-	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
-		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
-}
-
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
-	/*
-	 * If the guest has disabled the workaround, bring it back on.
-	 */
-	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
-		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
-}
-
 static inline void __kvm_unexpected_el2_exception(void)
 {
 	extern char __guest_exit_panic[];
...
arch/arm64/kvm/hyp/nvhe/.gitignore
0 → 100644

# SPDX-License-Identifier: GPL-2.0-only
hyp.lds
arch/arm64/kvm/hyp/nvhe/Makefile
...
@@ -10,40 +10,46 @@ obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
	 ../fpsimd.o ../hyp-entry.o

-obj-y := $(patsubst %.o,%.hyp.o,$(obj-y))
-extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))
+##
+## Build rules for compiling nVHE hyp code
+## Output of this folder is `kvm_nvhe.o`, a partially linked object
+## file containing all nVHE hyp code and data.
+##

-$(obj)/%.hyp.tmp.o: $(src)/%.c FORCE
+hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
+obj-y := kvm_nvhe.o
+extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds
+
+# 1) Compile all source files to `.nvhe.o` object files. The file extension
+#    avoids file name clashes for files shared with VHE.
+$(obj)/%.nvhe.o: $(src)/%.c FORCE
	$(call if_changed_rule,cc_o_c)
-$(obj)/%.hyp.tmp.o: $(src)/%.S FORCE
+$(obj)/%.nvhe.o: $(src)/%.S FORCE
	$(call if_changed_rule,as_o_S)
-$(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
-	$(call if_changed,hypcopy)

-# Disable reordering functions by GCC (enabled at -O2).
-# This pass puts functions into '.text.*' sections to aid the linker
-# in optimizing ELF layout. See HYPCOPY comment below for more info.
-ccflags-y += $(call cc-option,-fno-reorder-functions)
+# 2) Compile linker script.
+$(obj)/hyp.lds: $(src)/hyp.lds.S FORCE
+	$(call if_changed_dep,cpp_lds_S)
+
+# 3) Partially link all '.nvhe.o' files and apply the linker script.
+#    Prefixes names of ELF sections with '.hyp', eg. '.hyp.text'.
+#    Note: The following rule assumes that the 'ld' rule puts LDFLAGS before
+#          the list of dependencies to form '-T $(obj)/hyp.lds'. This is to
+#          keep the dependency on the target while avoiding an error from
+#          GNU ld if the linker script is passed to it twice.
+LDFLAGS_kvm_nvhe.tmp.o := -r -T
+$(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
+	$(call if_changed,ld)
+
+# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
+#    Prefixes names of ELF symbols with '__kvm_nvhe_'.
+$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE
+	$(call if_changed,hypcopy)

 # The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
-# and relevant ELF section names to avoid clashes with VHE code/data.
-#
-# Hyp code is assumed to be in the '.text' section of the input object
-# files (with the exception of specialized sections such as
-# '.hyp.idmap.text'). This assumption may be broken by a compiler that
-# divides code into sections like '.text.unlikely' so as to optimize
-# ELF layout. HYPCOPY checks that no such sections exist in the input
-# using `objdump`, otherwise they would be linked together with other
-# kernel code and not memory-mapped correctly at runtime.
+# to avoid clashes with VHE code/data.
 quiet_cmd_hypcopy = HYPCOPY $@
-      cmd_hypcopy =							\
-	if $(OBJDUMP) -h $< | grep -F '.text.';				\
-	then								\
-		echo "$@: function reordering not supported in nVHE hyp code" >&2; \
-		/bin/false;						\
-	fi;								\
-	$(OBJCOPY)	--prefix-symbols=__kvm_nvhe_			\
-			--rename-section=.text=.hyp.text		\
-			$< $@
+      cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@

 # Remove ftrace and Shadow Call Stack CFLAGS.
 # This is equivalent to the 'notrace' and '__noscs' annotations.
...
arch/arm64/kvm/hyp/nvhe/hyp.lds.S
0 → 100644

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Google LLC.
 * Written by David Brazdil <dbrazdil@google.com>
 *
 * Linker script used for partial linking of nVHE EL2 object files.
 */

#include <asm/hyp_image.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/memory.h>

SECTIONS {
	HYP_SECTION(.text)
	HYP_SECTION_NAME(.data..percpu) : {
		PERCPU_INPUT(L1_CACHE_BYTES)
	}
}
arch/arm64/kvm/hyp/nvhe/switch.c
...
@@ -27,6 +27,11 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>

+/* Non-VHE specific context */
+DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
...
@@ -42,7 +47,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	}

 	write_sysreg(val, cptr_el2);
-	write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);
+	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
...
@@ -176,7 +181,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		pmr_sync();
 	}

-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
...
@@ -203,8 +208,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__debug_switch_to_guest(vcpu);

-	__set_guest_arch_workaround_state(vcpu);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu);
...
@@ -212,8 +215,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));

-	__set_host_arch_workaround_state(vcpu);
-
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
 	__timer_disable_traps(vcpu);
...
@@ -254,7 +255,7 @@ void __noreturn hyp_panic(void)
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_vcpu *vcpu;

-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	vcpu = host_ctxt->__hyp_running_vcpu;

 	if (vcpu) {
...
arch/arm64/kvm/hyp/vhe/switch.c
...
@@ -28,6 +28,11 @@
 const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

+/* VHE specific context */
+DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
...
@@ -108,7 +113,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt;
 	u64 exit_code;

-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
...
@@ -131,8 +136,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	sysreg_restore_guest_state_vhe(guest_ctxt);
 	__debug_switch_to_guest(vcpu);

-	__set_guest_arch_workaround_state(vcpu);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu);
...
@@ -140,8 +143,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));

-	__set_host_arch_workaround_state(vcpu);
-
 	sysreg_save_guest_state_vhe(guest_ctxt);

 	__deactivate_traps(vcpu);
...
@@ -197,7 +198,7 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_vcpu *vcpu;

-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	vcpu = host_ctxt->__hyp_running_vcpu;

 	__deactivate_traps(vcpu);
...
arch/arm64/kvm/hyp/vhe/sysreg-sr.c
...
@@ -66,7 +66,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;

-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	__sysreg_save_user_state(host_ctxt);

 	/*
...
@@ -100,7 +100,7 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;

-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	deactivate_traps_vhe_put();

 	__sysreg_save_el1_state(guest_ctxt);
...
arch/arm64/kvm/hypercalls.c
View file @ 14ef9d04
...
@@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 		feature = smccc_get_arg1(vcpu);
 		switch (feature) {
 		case ARM_SMCCC_ARCH_WORKAROUND_1:
-			switch (kvm_arm_harden_branch_predictor()) {
-			case KVM_BP_HARDEN_UNKNOWN:
+			switch (arm64_get_spectre_v2_state()) {
+			case SPECTRE_VULNERABLE:
 				break;
-			case KVM_BP_HARDEN_WA_NEEDED:
+			case SPECTRE_MITIGATED:
 				val = SMCCC_RET_SUCCESS;
 				break;
-			case KVM_BP_HARDEN_NOT_REQUIRED:
+			case SPECTRE_UNAFFECTED:
 				val = SMCCC_RET_NOT_REQUIRED;
 				break;
 			}
 			break;
 		case ARM_SMCCC_ARCH_WORKAROUND_2:
-			switch (kvm_arm_have_ssbd()) {
-			case KVM_SSBD_FORCE_DISABLE:
-			case KVM_SSBD_UNKNOWN:
+			switch (arm64_get_spectre_v4_state()) {
+			case SPECTRE_VULNERABLE:
 				break;
-			case KVM_SSBD_KERNEL:
-				val = SMCCC_RET_SUCCESS;
-				break;
-			case KVM_SSBD_FORCE_ENABLE:
-			case KVM_SSBD_MITIGATED:
+			case SPECTRE_MITIGATED:
+				/*
+				 * SSBS everywhere: Indicate no firmware
+				 * support, as the SSBS support will be
+				 * indicated to the guest and the default is
+				 * safe.
+				 *
+				 * Otherwise, expose a permanent mitigation
+				 * to the guest, and hide SSBS so that the
+				 * guest stays protected.
+				 */
+				if (cpus_have_final_cap(ARM64_SSBS))
+					break;
+				fallthrough;
+			case SPECTRE_UNAFFECTED:
 				val = SMCCC_RET_NOT_REQUIRED;
 				break;
 			}
...
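The rewritten handler maps the generic Spectre state reported by arm64_get_spectre_v2_state()/arm64_get_spectre_v4_state() onto SMCCC discovery results. A self-contained sketch of the ARCH_WORKAROUND_2 decision, assuming the initial value means "not supported" and using placeholder constants rather than the real SMCCC ones:

#include <stdio.h>

/* Mirrors the mitigation states referenced in the hunk above. */
enum mitigation_state { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };

/* Placeholder values; the real constants live in the SMCCC headers. */
#define RET_NOT_SUPPORTED	(-1L)
#define RET_NOT_REQUIRED	(-2L)

/*
 * Sketch of the ARCH_WORKAROUND_2 discovery decision: with SSBS available
 * everywhere, report "not supported" so the guest relies on SSBS; otherwise
 * a mitigated or unaffected host answers "not required".
 */
static long workaround_2_response(enum mitigation_state v4, int ssbs_everywhere)
{
	long val = RET_NOT_SUPPORTED;

	switch (v4) {
	case SPECTRE_VULNERABLE:
		break;
	case SPECTRE_MITIGATED:
		if (ssbs_everywhere)
			break;
		/* fall through */
	case SPECTRE_UNAFFECTED:
		val = RET_NOT_REQUIRED;
		break;
	}
	return val;
}

int main(void)
{
	printf("%ld %ld\n",
	       workaround_2_response(SPECTRE_MITIGATED, 1),
	       workaround_2_response(SPECTRE_UNAFFECTED, 0));
	return 0;
}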
arch/arm64/kvm/pmu.c
View file @ 14ef9d04
...
@@ -31,9 +31,9 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
  */
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

-	if (!kvm_pmu_switch_needed(attr))
+	if (!ctx || !kvm_pmu_switch_needed(attr))
 		return;

 	if (!attr->exclude_host)
...
@@ -47,7 +47,10 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
  */
 void kvm_clr_pmu_events(u32 clr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

+	if (!ctx)
+		return;
+
 	ctx->pmu_events.events_host &= ~clr;
 	ctx->pmu_events.events_guest &= ~clr;
...
@@ -173,7 +176,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 		return;

 	preempt_disable();
-	host = this_cpu_ptr(&kvm_host_data);
+	host = this_cpu_ptr_hyp_sym(kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
...
@@ -193,7 +196,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 	if (!has_vhe())
 		return;

-	host = this_cpu_ptr(&kvm_host_data);
+	host = this_cpu_ptr_hyp_sym(kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
...
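The PMU helpers now obtain the per-CPU kvm_host_data through this_cpu_ptr_hyp_sym(), which, per the added checks, can yield no context (for instance before KVM is set up), so the callers bail out early. A hedged userspace sketch of that defensive pattern (lookup_host_ctx() and the "initialised" flag are invented for illustration):

#include <stdio.h>
#include <stddef.h>

struct pmu_events {
	unsigned int events_host;
	unsigned int events_guest;
};

struct host_ctx {
	struct pmu_events pmu_events;
};

/* Stand-in for this_cpu_ptr_hyp_sym(): may yield nothing before init. */
static struct host_ctx *lookup_host_ctx(int initialised)
{
	static struct host_ctx ctx;

	return initialised ? &ctx : NULL;
}

static void clr_pmu_events(unsigned int clr, int initialised)
{
	struct host_ctx *ctx = lookup_host_ctx(initialised);

	if (!ctx)	/* nothing to do yet, mirroring the added early return */
		return;

	ctx->pmu_events.events_host &= ~clr;
	ctx->pmu_events.events_guest &= ~clr;
}

int main(void)
{
	clr_pmu_events(0x3, 0);	/* safe no-op before the context exists */
	clr_pmu_events(0x3, 1);	/* actually clears the bits */
	printf("done\n");
	return 0;
}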
arch/arm64/kvm/psci.c
View file @ 14ef9d04
...
@@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid)
 {
 	switch (regid) {
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-		switch (kvm_arm_harden_branch_predictor()) {
-		case KVM_BP_HARDEN_UNKNOWN:
+		switch (arm64_get_spectre_v2_state()) {
+		case SPECTRE_VULNERABLE:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
-		case KVM_BP_HARDEN_WA_NEEDED:
+		case SPECTRE_MITIGATED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
-		case KVM_BP_HARDEN_NOT_REQUIRED:
+		case SPECTRE_UNAFFECTED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
 		}
 		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
-		switch (kvm_arm_have_ssbd()) {
-		case KVM_SSBD_FORCE_DISABLE:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
-		case KVM_SSBD_KERNEL:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
-		case KVM_SSBD_FORCE_ENABLE:
-		case KVM_SSBD_MITIGATED:
+		switch (arm64_get_spectre_v4_state()) {
+		case SPECTRE_MITIGATED:
+			/*
+			 * As for the hypercall discovery, we pretend we
+			 * don't have any FW mitigation if SSBS is there at
+			 * all times.
+			 */
+			if (cpus_have_final_cap(ARM64_SSBS))
+				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+			fallthrough;
+		case SPECTRE_UNAFFECTED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
-		case KVM_SSBD_UNKNOWN:
-		default:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+		case SPECTRE_VULNERABLE:
+			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 		}
 	}
...
@@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		val = kvm_psci_version(vcpu, vcpu->kvm);
 		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
 		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
-		if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-		    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
-			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
 		break;
 	default:
 		return -ENOENT;
...
@@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
 			return -EINVAL;

-		wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
-		if (get_kernel_wa_level(reg->id) < wa_level)
-			return -EINVAL;
-
 		/* The enabled bit must not be set unless the level is AVAIL. */
-		if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-		    wa_level != val)
+		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
 			return -EINVAL;

-		/* Are we finished or do we need to check the enable bit ? */
-		if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
-			return 0;
-
 		/*
-		 * If this kernel supports the workaround to be switched on
-		 * or off, make sure it matches the requested setting.
+		 * Map all the possible incoming states to the only two we
+		 * really want to deal with.
 		 */
-		switch (wa_level) {
-		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
-			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
-			    val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 			break;
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
 		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
-			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
 			break;
+		default:
+			return -EINVAL;
 		}

+		/*
+		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+		 * other way around.
+		 */
+		if (get_kernel_wa_level(reg->id) < wa_level)
+			return -EINVAL;
+
 		return 0;
 	default:
 		return -ENOENT;
...
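kvm_arm_set_fw_reg() now normalises whatever level userspace writes into one of two canonical values and only then compares it against what get_kernel_wa_level() reports, rejecting attempts to claim more than the kernel provides. A standalone sketch of that normalise-then-compare flow, with an illustrative level encoding standing in for the real KVM_REG_ARM_SMCCC_* constants:

#include <stdio.h>

/* Illustrative level encoding, ordered so a "lesser" level compares lower. */
enum wa_level {
	WA_NOT_AVAIL = 0,
	WA_UNKNOWN = 1,
	WA_AVAIL = 2,
	WA_NOT_REQUIRED = 3,
};

/* Map all the possible incoming levels onto the only two we care about. */
static int normalise_level(int in, enum wa_level *out)
{
	switch (in) {
	case WA_NOT_AVAIL:
	case WA_UNKNOWN:
		*out = WA_NOT_AVAIL;
		return 0;
	case WA_AVAIL:
	case WA_NOT_REQUIRED:
		*out = WA_NOT_REQUIRED;
		return 0;
	default:
		return -1;	/* would be -EINVAL in the kernel */
	}
}

static int set_fw_level(int requested, enum wa_level kernel_level)
{
	enum wa_level wa_level;

	if (normalise_level(requested, &wa_level))
		return -1;

	/* NOT_AVAIL on a NOT_REQUIRED kernel is fine, but not the reverse. */
	if (kernel_level < wa_level)
		return -1;

	return 0;
}

int main(void)
{
	printf("%d %d\n",
	       set_fw_level(WA_NOT_AVAIL, WA_NOT_REQUIRED),	/* accepted: 0 */
	       set_fw_level(WA_NOT_REQUIRED, WA_NOT_AVAIL));	/* rejected: -1 */
	return 0;
}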
arch/arm64/kvm/reset.c
View file @ 14ef9d04
...
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		vcpu->arch.reset_state.reset = false;
 	}

-	/* Default workaround setup is enabled (if supported) */
-	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
 	/* Reset timer */
 	ret = kvm_timer_vcpu_reset(vcpu);
 out:
...
arch/arm64/kvm/sys_regs.c
View file @ 14ef9d04
...
@@ -1128,6 +1128,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		if (!vcpu_has_sve(vcpu))
 			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
 		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
+		if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
+		    arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+			val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
 		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
...
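The added lines advertise CSV2 in the guest's view of ID_AA64PFR0_EL1 when the field is currently clear and the host is unaffected by Spectre-v2. A small sketch of the same bit-field manipulation on a plain 64-bit value (the shift value below is an assumption; the real ID_AA64PFR0_CSV2_SHIFT comes from the arm64 sysreg headers):

#include <stdio.h>
#include <stdint.h>

/* Assumed field position for illustration only. */
#define CSV2_SHIFT 56

/* Set CSV2 to 1 when the field is zero and the host is unaffected. */
static uint64_t maybe_advertise_csv2(uint64_t val, int spectre_v2_unaffected)
{
	const uint64_t mask = (uint64_t)0xf << CSV2_SHIFT;

	if (!(val & mask) && spectre_v2_unaffected)
		val |= (uint64_t)1 << CSV2_SHIFT;
	return val;
}

int main(void)
{
	uint64_t reg = 0;	/* pretend the sanitised register reads as zero */

	reg = maybe_advertise_csv2(reg, 1);
	printf("CSV2 field = %u\n", (unsigned)((reg >> CSV2_SHIFT) & 0xf));
	return 0;
}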