Commit e4e57f20 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull more arm64 updates from Will Deacon:
 "A few late updates to address some issues arising from conflicts with
  other trees:

   - Removal of Qualcomm-specific Spectre-v2 mitigation in favour of the
     generic SMCCC-based firmware call

   - Fix EL2 hardening capability checking, which was bodged to reduce
     conflicts with the KVM tree

   - Add some currently unused assembler macros for managing SIMD
     registers which will be used by some crypto code in the next merge
     window"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: assembler: add macros to conditionally yield the NEON under PREEMPT
  arm64: assembler: add utility macros to push/pop stack frames
  arm64: Move the content of bpi.S to hyp-entry.S
  arm64: Get rid of __smccc_workaround_1_hvc_*
  arm64: capabilities: Rework EL2 vector hardening entry
  arm64: KVM: Use SMCCC_ARCH_WORKAROUND_1 for Falkor BP hardening
parents 6c21e433 24534b35
@@ -565,4 +565,140 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
#endif
.endm
/*
* frame_push - Push @regcount callee saved registers to the stack,
* starting at x19, as well as x29/x30, and set x29 to
* the new value of sp. Add @extra bytes of stack space
* for locals.
*/
.macro frame_push, regcount:req, extra
__frame st, \regcount, \extra
.endm
/*
* frame_pop - Pop the callee saved registers from the stack that were
* pushed in the most recent call to frame_push, as well
* as x29/x30 and any extra stack space that may have been
* allocated.
*/
.macro frame_pop
__frame ld
.endm
.macro __frame_regs, reg1, reg2, op, num
.if .Lframe_regcount == \num
\op\()r \reg1, [sp, #(\num + 1) * 8]
.elseif .Lframe_regcount > \num
\op\()p \reg1, \reg2, [sp, #(\num + 1) * 8]
.endif
.endm
.macro __frame, op, regcount, extra=0
.ifc \op, st
.if (\regcount) < 0 || (\regcount) > 10
.error "regcount should be in the range [0 ... 10]"
.endif
.if ((\extra) % 16) != 0
.error "extra should be a multiple of 16 bytes"
.endif
.ifdef .Lframe_regcount
.if .Lframe_regcount != -1
.error "frame_push/frame_pop may not be nested"
.endif
.endif
.set .Lframe_regcount, \regcount
.set .Lframe_extra, \extra
.set .Lframe_local_offset, ((\regcount + 3) / 2) * 16
stp x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
mov x29, sp
.endif
__frame_regs x19, x20, \op, 1
__frame_regs x21, x22, \op, 3
__frame_regs x23, x24, \op, 5
__frame_regs x25, x26, \op, 7
__frame_regs x27, x28, \op, 9
.ifc \op, ld
.if .Lframe_regcount == -1
.error "frame_push/frame_pop may not be nested"
.endif
ldp x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
.set .Lframe_regcount, -1
.endif
.endm
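For illustration only (not part of this diff): a routine that needs two callee-saved registers could use the new macros roughly as follows. The routine and callee names are hypothetical, and the comments simply restate the macro semantics defined above.

ENTRY(example_func)			// hypothetical routine, not in the tree
	frame_push	2		// push x29/x30 and x19/x20, set x29 to sp
	mov	x19, x0			// keep the first argument across the call
	bl	other_func		// hypothetical callee (may clobber x0-x18)
	add	x0, x0, x19		// combine its result with the saved argument
	frame_pop			// restore x19/x20 and x29/x30, pop the frame
	ret
ENDPROC(example_func)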
/*
* Check whether to yield to another runnable task from kernel mode NEON code
* (which runs with preemption disabled).
*
* if_will_cond_yield_neon
* // pre-yield patchup code
* do_cond_yield_neon
* // post-yield patchup code
* endif_yield_neon <label>
*
* where <label> is optional, and marks the point where execution will resume
* after a yield has been performed. If omitted, execution resumes right after
* the endif_yield_neon invocation. Note that the entire sequence, including
* the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
* is not defined.
*
* As a convenience, in the case where no patchup code is required, the above
* sequence may be abbreviated to
*
* cond_yield_neon <label>
*
* Note that the patchup code does not support assembler directives that change
* the output section, any use of such directives is undefined.
*
* The yield itself consists of the following:
* - Check whether the preempt count is exactly 1, in which case disabling
* preemption once will make the task preemptible. If this is not the case,
* yielding is pointless.
* - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
* kernel mode NEON (which will trigger a reschedule), and branch to the
* yield fixup code.
*
* This macro sequence may clobber all CPU state that is not guaranteed by the
* AAPCS to be preserved across an ordinary function call.
*/
.macro cond_yield_neon, lbl
if_will_cond_yield_neon
do_cond_yield_neon
endif_yield_neon \lbl
.endm
.macro if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
get_thread_info x0
ldr w1, [x0, #TSK_TI_PREEMPT]
ldr x0, [x0, #TSK_TI_FLAGS]
cmp w1, #PREEMPT_DISABLE_OFFSET
csel x0, x0, xzr, eq
tbnz x0, #TIF_NEED_RESCHED, .Lyield_\@ // needs rescheduling?
/* fall through to endif_yield_neon */
.subsection 1
.Lyield_\@ :
#else
.section ".discard.cond_yield_neon", "ax"
#endif
.endm
.macro do_cond_yield_neon
bl kernel_neon_end
bl kernel_neon_begin
.endm
.macro endif_yield_neon, lbl
.ifnb \lbl
b \lbl
.else
b .Lyield_out_\@
.endif
.previous
.Lyield_out_\@ :
.endm
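As a rough sketch of how the abbreviated cond_yield_neon form might look in a long-running kernel mode NEON loop (hypothetical routine, labels and per-block processing; it assumes the caller runs between kernel_neon_begin() and kernel_neon_end(), and reloads any NEON state it needs at the top of each iteration, since a yield clobbers the SIMD registers):

ENTRY(example_neon_loop)		// hypothetical routine, not in the tree
	frame_push	2
	mov	x19, x0			// source pointer
	mov	x20, x1			// number of 16-byte blocks
0:	ld1	{v0.16b}, [x19], #16	// placeholder for real per-block work
	subs	x20, x20, #1
	b.eq	1f			// all blocks done
	cond_yield_neon	0b		// may reschedule; resumes at 0b if it yielded
	b	0b			// no yield needed, continue directly
1:	frame_pop
	ret
ENDPROC(example_neon_loop)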
#endif /* __ASM_ASSEMBLER_H */
@@ -43,13 +43,12 @@
#define ARM64_SVE 22
#define ARM64_UNMAP_KERNEL_AT_EL0 23
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
#define ARM64_HAS_RAS_EXTN 26
#define ARM64_WORKAROUND_843419 27
#define ARM64_HAS_CACHE_IDC 28
#define ARM64_HAS_CACHE_DIC 29
#define ARM64_HW_DBM 30
#define ARM64_NCAPS 31
#define ARM64_HAS_RAS_EXTN 25
#define ARM64_WORKAROUND_843419 26
#define ARM64_HAS_CACHE_IDC 27
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
#define ARM64_NCAPS 30
#endif /* __ASM_CPUCAPS_H */
@@ -71,8 +71,6 @@ extern u32 __kvm_get_mdcr_el2(void);
extern u32 __init_stage2_translation(void);
extern void __qcom_hyp_sanitize_btac_predictors(void);
#else /* __ASSEMBLY__ */
.macro get_host_ctxt reg, tmp
@@ -55,8 +55,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
arm64-obj-$(CONFIG_KVM_INDIRECT_VECTORS)+= bpi.o
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
head-y := head.o
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/suspend.h>
#include <asm/cpufeature.h>
#include <asm/fixmap.h>
@@ -93,6 +94,8 @@ int main(void)
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
BLANK();
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
/*
* Contains CPU specific branch predictor invalidation sequences
*
* Copyright (C) 2018 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
#include <asm/alternative.h>
#include <asm/mmu.h>
.macro hyp_ventry
.align 7
1: .rept 27
nop
.endr
/*
* The default sequence is to directly branch to the KVM vectors,
* using the computed offset. This applies for VHE as well as
* !ARM64_HARDEN_EL2_VECTORS.
*
* For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
* with:
*
* stp x0, x1, [sp, #-16]!
* movz x0, #(addr & 0xffff)
* movk x0, #((addr >> 16) & 0xffff), lsl #16
* movk x0, #((addr >> 32) & 0xffff), lsl #32
* br x0
*
* Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
* See kvm_patch_vector_branch for details.
*/
alternative_cb kvm_patch_vector_branch
b __kvm_hyp_vector + (1b - 0b)
nop
nop
nop
nop
alternative_cb_end
.endm
.macro generate_vectors
0:
.rept 16
hyp_ventry
.endr
.org 0b + SZ_2K // Safety measure
.endm
.text
.pushsection .hyp.text, "ax"
.align 11
ENTRY(__bp_harden_hyp_vecs_start)
.rept BP_HARDEN_EL2_SLOTS
generate_vectors
.endr
ENTRY(__bp_harden_hyp_vecs_end)
.popsection
ENTRY(__qcom_hyp_sanitize_link_stack_start)
stp x29, x30, [sp, #-16]!
.rept 16
bl . + 4
.endr
ldp x29, x30, [sp], #16
ENTRY(__qcom_hyp_sanitize_link_stack_end)
.macro smccc_workaround_1 inst
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
\inst #0
ldp x2, x3, [sp, #(8 * 0)]
ldp x0, x1, [sp, #(8 * 2)]
add sp, sp, #(8 * 4)
.endm
ENTRY(__smccc_workaround_1_smc_start)
smccc_workaround_1 smc
ENTRY(__smccc_workaround_1_smc_end)
ENTRY(__smccc_workaround_1_hvc_start)
smccc_workaround_1 hvc
ENTRY(__smccc_workaround_1_hvc_end)
@@ -86,13 +86,9 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM
#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
@@ -132,12 +128,8 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
spin_unlock(&bp_lock);
}
#else
#define __qcom_hyp_sanitize_link_stack_start NULL
#define __qcom_hyp_sanitize_link_stack_end NULL
#define __smccc_workaround_1_smc_start NULL
#define __smccc_workaround_1_smc_end NULL
#define __smccc_workaround_1_hvc_start NULL
#define __smccc_workaround_1_hvc_end NULL
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
@@ -145,7 +137,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
{
__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif /* CONFIG_KVM */
#endif /* CONFIG_KVM_INDIRECT_VECTORS */
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
bp_hardening_cb_t fn,
@@ -178,12 +170,25 @@ static void call_hvc_arch_workaround_1(void)
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void qcom_link_stack_sanitization(void)
{
u64 tmp;
asm volatile("mov %0, x30 \n"
".rept 16 \n"
"bl . + 4 \n"
".endr \n"
"mov x30, %0 \n"
: "=&r" (tmp));
}
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cb;
void *smccc_start, *smccc_end;
struct arm_smccc_res res;
u32 midr = read_cpuid_id();
if (!entry->matches(entry, SCOPE_LOCAL_CPU))
return;
@@ -198,8 +203,9 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
if ((int)res.a0 < 0)
return;
cb = call_hvc_arch_workaround_1;
smccc_start = __smccc_workaround_1_hvc_start;
smccc_end = __smccc_workaround_1_hvc_end;
/* This is a guest, no need to patch KVM vectors */
smccc_start = NULL;
smccc_end = NULL;
break;
case PSCI_CONDUIT_SMC:
@@ -216,30 +222,14 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
return;
}
if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
cb = qcom_link_stack_sanitization;
install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
return;
}
static void qcom_link_stack_sanitization(void)
{
u64 tmp;
asm volatile("mov %0, x30 \n"
".rept 16 \n"
"bl . + 4 \n"
".endr \n"
"mov x30, %0 \n"
: "=&r" (tmp));
}
static void
qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
{
install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
__qcom_hyp_sanitize_link_stack_start,
__qcom_hyp_sanitize_link_stack_end);
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
@@ -324,33 +314,23 @@ static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
{},
};
static const struct midr_range qcom_bp_harden_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
{},
};
static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = {
{
CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
.cpu_enable = enable_smccc_arch_workaround_1,
},
{
CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
.cpu_enable = qcom_enable_link_stack_sanitization,
},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
static const struct midr_range arm64_harden_el2_vectors[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
{},
};
#endif
#ifndef ERRATA_MIDR_ALL_VERSIONS
#define ERRATA_MIDR_ALL_VERSIONS(x) MIDR_ALL_VERSIONS(x)
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -495,25 +475,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = multi_entry_cap_matches,
.cpu_enable = multi_entry_cap_cpu_enable,
.match_list = arm64_bp_harden_list,
},
{
.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
.cpu_enable = enable_smccc_arch_workaround_1,
ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
{
.desc = "Cortex-A57 EL2 vector hardening", .desc = "EL2 vector hardening",
.capability = ARM64_HARDEN_EL2_VECTORS,
ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
},
{
.desc = "Cortex-A72 EL2 vector hardening",
.capability = ARM64_HARDEN_EL2_VECTORS,
ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
{
@@ -209,15 +209,3 @@ alternative_endif
eret
ENDPROC(__fpsimd_guest_restore)
ENTRY(__qcom_hyp_sanitize_btac_predictors)
/**
* Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
* 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
* b15-b0: contains SiP functionID
*/
movz x0, #0x1700
movk x0, #0xc200, lsl #16
smc #0
ret
ENDPROC(__qcom_hyp_sanitize_btac_predictors)
/*
* Copyright (C) 2015 - ARM Ltd
* Copyright (C) 2015-2018 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -24,6 +24,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>
.text
.pushsection .hyp.text, "ax"
@@ -237,3 +238,64 @@ ENTRY(__kvm_hyp_vector)
invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
valid_vect el1_error // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
.align 7
1: .rept 27
nop
.endr
/*
* The default sequence is to directly branch to the KVM vectors,
* using the computed offset. This applies for VHE as well as
* !ARM64_HARDEN_EL2_VECTORS.
*
* For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
* with:
*
* stp x0, x1, [sp, #-16]!
* movz x0, #(addr & 0xffff)
* movk x0, #((addr >> 16) & 0xffff), lsl #16
* movk x0, #((addr >> 32) & 0xffff), lsl #32
* br x0
*
* Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
* See kvm_patch_vector_branch for details.
*/
alternative_cb kvm_patch_vector_branch
b __kvm_hyp_vector + (1b - 0b)
nop
nop
nop
nop
alternative_cb_end
.endm
.macro generate_vectors
0:
.rept 16
hyp_ventry
.endr
.org 0b + SZ_2K // Safety measure
.endm
.align 11
ENTRY(__bp_harden_hyp_vecs_start)
.rept BP_HARDEN_EL2_SLOTS
generate_vectors
.endr
ENTRY(__bp_harden_hyp_vecs_end)
.popsection
ENTRY(__smccc_workaround_1_smc_start)
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldp x2, x3, [sp, #(8 * 0)]
ldp x0, x1, [sp, #(8 * 2)]
add sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif
@@ -472,16 +472,6 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
u32 midr = read_cpuid_id();
/* Apply BTAC predictors mitigation to all Falkor chips */
if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
__qcom_hyp_sanitize_btac_predictors();
}
}
fp_enabled = __fpsimd_enabled_nvhe();
__sysreg_save_state_nvhe(guest_ctxt);