Commit e1e315c4 authored by Oliver Upton

Merge branch kvm-arm64/misc into kvmarm/next

* kvm-arm64/misc:
  : Miscellaneous updates
  :
  :  - Avoid trapping CTR_EL0 on systems with FEAT_EVT, as the register is
  :    commonly read by userspace (see the sketch after the commit log)
  :
  :  - Make use of FEAT_BTI at hyp stage-1, setting the Guard Page bit to 1
  :    for executable mappings
  :
  :  - Use a separate set of pointer authentication keys for the hypervisor
  :    when running in protected mode (i.e. pKVM)
  :
  :  - Plug a few holes in timer initialization where KVM fails to free the
  :    timer IRQ(s) (a simplified sketch of the fixed error path follows the
  :    timer hunks below)
  KVM: arm64: Use different pointer authentication keys for pKVM
  KVM: arm64: timers: Fix resource leaks in kvm_timer_hyp_init()
  KVM: arm64: Use BTI for nvhe
  KVM: arm64: Relax trapping of CTR_EL0 when FEAT_EVT is available
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents 89a734b5 8c15c2a0
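As the first item in the log above notes, CTR_EL0 is commonly read outside the kernel: guests and userspace use it to derive cache line sizes for cache maintenance, so trapping every access is costly. With FEAT_EVT, HCR_EL2.TID4 traps the rest of the cache ID group while leaving CTR_EL0 alone. A minimal, illustrative sketch of that kind of read, with hypothetical helper names (not part of this patch); the field layout is architectural: IminLine in bits [3:0], DminLine in bits [19:16], each log2 of the line size in 4-byte words:

#include <stdint.h>

/* Hypothetical helpers (not part of this patch): direct CTR_EL0 reads of the
 * sort that no longer trap once KVM sets HCR_EL2.TID4 instead of TID2. */
static inline uint64_t read_ctr_el0(void)
{
	uint64_t ctr;

	/* EL0 access is legal when SCTLR_EL1.UCT is set, which Linux normally permits. */
	asm volatile("mrs %0, ctr_el0" : "=r"(ctr));
	return ctr;
}

static inline unsigned int dcache_line_size(void)
{
	/* CTR_EL0.DminLine, bits [19:16]: log2(line size in 4-byte words). */
	return 4U << ((read_ctr_el0() >> 16) & 0xf);
}

static inline unsigned int icache_line_size(void)
{
	/* CTR_EL0.IminLine, bits [3:0]: log2(line size in 4-byte words). */
	return 4U << (read_ctr_el0() & 0xf);
}

With HCR_EL2.TID2 set, each of these reads would exit to the hypervisor; with TID4 they complete in hardware.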
@@ -18,6 +18,7 @@
 #define HCR_ATA_SHIFT	56
 #define HCR_ATA		(UL(1) << HCR_ATA_SHIFT)
 #define HCR_AMVOFFEN	(UL(1) << 51)
+#define HCR_TID4	(UL(1) << 49)
 #define HCR_FIEN	(UL(1) << 47)
 #define HCR_FWB		(UL(1) << 46)
 #define HCR_API		(UL(1) << 41)
@@ -86,7 +87,7 @@
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
			 HCR_BSU_IS | HCR_FB | HCR_TACR | \
			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID2)
+			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
...
@@ -90,6 +90,12 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}
 
+	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+		vcpu->arch.hcr_el2 |= HCR_TID4;
+	else
+		vcpu->arch.hcr_el2 |= HCR_TID2;
+
	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
...
@@ -558,6 +558,7 @@
			 (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
			 (BIT(29)))
 
+#define SCTLR_EL2_BT	(BIT(36))
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL2	SCTLR_ELx_EE
 #else
...
@@ -2662,6 +2662,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = hvhe_possible,
	},
+	{
+		.desc = "Enhanced Virtualization Traps",
+		.capability = ARM64_HAS_EVT,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR2_EL1_EVT_SHIFT,
+		.field_width = 4,
+		.min_field_value = ID_AA64MMFR2_EL1_EVT_IMP,
+		.matches = has_cpuid_feature,
+	},
	{},
 };
...
@@ -1406,7 +1406,7 @@ int __init kvm_timer_hyp_init(bool has_gic)
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
-			goto out_free_irq;
+			goto out_free_vtimer_irq;
		}
 
		static_branch_enable(&has_gic_active_state);
@@ -1422,7 +1422,7 @@ int __init kvm_timer_hyp_init(bool has_gic)
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
-			return err;
+			goto out_free_vtimer_irq;
		}
 
		if (has_gic) {
@@ -1430,7 +1430,7 @@ int __init kvm_timer_hyp_init(bool has_gic)
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
-				goto out_free_irq;
+				goto out_free_ptimer_irq;
			}
		}
@@ -1439,11 +1439,15 @@ int __init kvm_timer_hyp_init(bool has_gic)
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
-		goto out_free_irq;
+		goto out_free_vtimer_irq;
	}
 
	return 0;
-out_free_irq:
+
+out_free_ptimer_irq:
+	if (info->physical_irq > 0)
+		free_percpu_irq(host_ptimer_irq, kvm_get_running_vcpus());
+out_free_vtimer_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
 }
...
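Taken together, the kvm_timer_hyp_init() hunks above make every failure after the vtimer IRQ has been requested unwind through labels that free whatever was acquired: ptimer first (if it was requested), then vtimer. A condensed, illustrative sketch of that acquire-in-order/release-in-reverse shape, using hypothetical stand-in resources rather than the real IRQ API:

#include <stdlib.h>

/* Hypothetical stand-ins for "request vtimer/ptimer IRQ"; purely illustrative. */
static void *acquire(void) { return malloc(1); }
static void release(void *res) { free(res); }
static int configure(void *res) { return res ? 0 : -1; }	/* may fail */

static int timer_init_like(int have_ptimer)
{
	void *vtimer, *ptimer = NULL;
	int err = -1;

	vtimer = acquire();				/* "request vtimer IRQ" */
	if (!vtimer)
		return err;

	if (configure(vtimer))				/* was: goto out_free_irq */
		goto out_free_vtimer;			/* still frees the vtimer */

	if (have_ptimer) {
		ptimer = acquire();			/* "request ptimer IRQ" */
		if (!ptimer)				/* was: return err (leaked vtimer) */
			goto out_free_vtimer;

		if (configure(ptimer))			/* now unwinds the ptimer too */
			goto out_free_ptimer;
	}

	return 0;

out_free_ptimer:
	release(ptimer);
out_free_vtimer:
	release(vtimer);
	return err;
}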
@@ -51,6 +51,8 @@ DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
+DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+
 static bool vgic_present;
 
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
@@ -2140,6 +2142,26 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
	return 0;
 }
 
+static void pkvm_hyp_init_ptrauth(void)
+{
+	struct kvm_cpu_context *hyp_ctxt;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
+		hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
+		hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
+	}
+}
+
 /* Inits Hyp-mode on all online CPUs */
 static int __init init_hyp_mode(void)
 {
@@ -2301,6 +2323,10 @@ static int __init init_hyp_mode(void)
	kvm_hyp_init_symbols();
 
	if (is_protected_kvm_enabled()) {
+		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
+		    cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH))
+			pkvm_hyp_init_ptrauth();
+
		init_cpu_logical_map();
 
		if (!init_psci_relay()) {
...
@@ -10,6 +10,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_ptrauth.h>
 
	.text
 
@@ -37,10 +38,43 @@ SYM_FUNC_START(__host_exit)
	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
 
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+b	__skip_pauth_save
+alternative_else_nop_endif
+
+alternative_if ARM64_KVM_PROTECTED_MODE
+	/* Save kernel ptrauth keys. */
+	add	x18, x29, #CPU_APIAKEYLO_EL1
+	ptrauth_save_state	x18, x19, x20
+
+	/* Use hyp keys. */
+	adr_this_cpu	x18, kvm_hyp_ctxt, x19
+	add	x18, x18, #CPU_APIAKEYLO_EL1
+	ptrauth_restore_state	x18, x19, x20
+	isb
+alternative_else_nop_endif
+__skip_pauth_save:
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
	bl	handle_trap
 
-	/* Restore host regs x0-x17 */
 __host_enter_restore_full:
+	/* Restore kernel keys. */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+b	__skip_pauth_restore
+alternative_else_nop_endif
+
+alternative_if ARM64_KVM_PROTECTED_MODE
+	add	x18, x29, #CPU_APIAKEYLO_EL1
+	ptrauth_restore_state	x18, x19, x20
+alternative_else_nop_endif
+__skip_pauth_restore:
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+	/* Restore host regs x0-x17 */
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
...
@@ -141,6 +141,13 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
		    SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
 alternative_else_nop_endif
+
+#ifdef CONFIG_ARM64_BTI_KERNEL
+alternative_if ARM64_BTI
+	orr	x0, x0, #SCTLR_EL2_BT
+alternative_else_nop_endif
+#endif /* CONFIG_ARM64_BTI_KERNEL */
+
	msr	sctlr_el2, x0
	isb
@@ -210,6 +217,11 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
 SYM_CODE_END(__kvm_hyp_init_cpu)
 
 SYM_CODE_START(__kvm_handle_stub_hvc)
+	/*
+	 * __kvm_handle_stub_hvc called from __host_hvc through branch instruction(br) so
+	 * we need bti j at beginning.
+	 */
+	bti j
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f
...
@@ -36,7 +36,7 @@
 #define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
 #define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)
 
-#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 51)
+#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)
 
 #define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)
@@ -44,6 +44,8 @@
 #define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)
 
+#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)
+
 #define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)
@@ -386,6 +388,9 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
		if (device)
			return -EINVAL;
+
+		if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
+			attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
	} else {
		attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
	}
...
@@ -25,6 +25,7 @@ HAS_E0PD
 HAS_ECV
 HAS_ECV_CNTPOFF
 HAS_EPAN
+HAS_EVT
 HAS_GENERIC_AUTH
 HAS_GENERIC_AUTH_ARCH_QARMA3
 HAS_GENERIC_AUTH_ARCH_QARMA5
...