Commit 66d5b53e authored by Fuad Tabba, committed by Marc Zyngier

KVM: arm64: Allocate memory mapped at hyp for host sve state in pKVM

Protected mode needs to maintain (save/restore) the host's sve
state, rather than relying on the host kernel to do that. This is
to avoid leaking information to the host about guests and the
type of operations they are performing.

As a first step towards that, allocate memory mapped at hyp, per
cpu, for the host sve state. The following patch will use this
memory to save/restore the host state.
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20240603122852.3923848-6-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent e511e08a
@@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int __ro_after_init kvm_sve_max_vl;
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
@@ -521,6 +522,20 @@ struct kvm_cpu_context {
	u64 *vncr_array;
};
struct cpu_sve_state {
	__u64 zcr_el1;

	/*
	 * Ordering is important since __sve_save_state/__sve_restore_state
	 * relies on it.
	 */
	__u32 fpsr;
	__u32 fpcr;

	/* Must be SVE_VQ_BYTES (128 bit) aligned. */
	__u8 sve_regs[];
};
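An aside on the layout comments above (my reading, not something the patch states): the EL2 save/restore code is handed a single pointer for the floating-point status registers and writes FPSR at that address with FPCR immediately after it, while the Z/P/FFR image lands in sve_regs[]. A minimal sketch of those constraints as compile-time checks, assuming the usual kernel headers (linux/build_bug.h, linux/stddef.h, asm/sigcontext.h) are in scope:

/*
 * Illustrative only: the layout that __sve_save_state()/__sve_restore_state()
 * depend on, spelled out as assertions.
 */
static_assert(offsetof(struct cpu_sve_state, fpcr) ==
	      offsetof(struct cpu_sve_state, fpsr) + sizeof(__u32));
static_assert(offsetof(struct cpu_sve_state, sve_regs) % SVE_VQ_BYTES == 0);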
/*
 * This structure is instantiated on a per-CPU basis, and contains
 * data that is:
@@ -534,7 +549,9 @@ struct kvm_cpu_context {
 */
struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct user_fpsimd_state *fpsimd_state;	/* hyp VA */
	struct cpu_sve_state *sve_state;	/* hyp VA */

	/* Ownership of the FP regs */
	enum {
...
@@ -143,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);

#endif /* __ARM64_KVM_HYP_H__ */
@@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}

static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}

#endif /* __ARM64_KVM_PKVM_H__ */
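To get a feel for what pkvm_host_sve_state_size() returns (the vector length here is an assumption for the example, not something the patch fixes): with a host maximum VL of 2048 bits, sve_vq_from_vl() gives VQ = 16 and SVE_SIG_REGS_SIZE(16) covers the 32 Z registers, the 16 P registers and FFR at that width, roughly 8.5 KiB, so with the small cpu_sve_state header the buffer needs a bit over two 4 KiB pages. A hypothetical helper, not part of the patch, showing how that relates to what the page allocator actually hands out once pkvm_host_sve_state_order() (added below) rounds it up:

/*
 * Hypothetical helper: bytes allocated per CPU, i.e. the state size rounded
 * up to a power-of-two number of pages by get_order().
 */
static inline size_t pkvm_host_sve_state_alloc_size(void)
{
	return PAGE_SIZE << get_order(pkvm_host_sve_state_size());
}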
@@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void)
	return size ? get_order(size) : 0;
}

static size_t pkvm_host_sve_state_order(void)
{
	return get_order(pkvm_host_sve_state_size());
}

/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
@@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void)

static void __init teardown_hyp_mode(void)
{
	bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu) {
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());

		if (free_sve) {
			struct cpu_sve_state *sve_state;

			sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
			free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
		}
	}
}
@@ -2398,6 +2411,50 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
	return 0;
}

static int init_pkvm_host_sve_state(void)
{
	int cpu;

	if (!system_supports_sve())
		return 0;

	/* Allocate pages for host sve state in protected mode. */
	for_each_possible_cpu(cpu) {
		struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());

		if (!page)
			return -ENOMEM;

		per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
	}

	/*
	 * Don't map the pages in hyp since these are only used in protected
	 * mode, which will (re)create its own mapping when initialized.
	 */

	return 0;
}
/*
 * Finalizes the initialization of hyp mode, once everything else is initialized
 * and the initialization process cannot fail.
 */
static void finalize_init_hyp_mode(void)
{
	int cpu;

	if (!is_protected_kvm_enabled() || !system_supports_sve())
		return;

	for_each_possible_cpu(cpu) {
		struct cpu_sve_state *sve_state;

		sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
		per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = kern_hyp_va(sve_state);
	}
}
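A note on why this fixup is deferred (my reading of the code, not spelled out in the commit message): until this point the stored pointer is still the kernel linear-map address returned by page_address(), which is exactly what teardown_hyp_mode() needs if an earlier initialization step fails; only once failure is no longer possible is it rewritten as a hyp VA for EL2's use. Roughly:

/*
 * Illustrative only: lifecycle of the per-CPU sve_state pointer.
 *
 *   init_pkvm_host_sve_state():    sve_state = page_address(page)      (kernel VA)
 *   teardown_hyp_mode(), on error: free_pages((unsigned long)sve_state, ...)
 *   finalize_init_hyp_mode():      sve_state = kern_hyp_va(sve_state)  (hyp VA,
 *                                  only after init can no longer fail)
 */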
static void pkvm_hyp_init_ptrauth(void)
{
	struct kvm_cpu_context *hyp_ctxt;
@@ -2566,6 +2623,10 @@ static int __init init_hyp_mode(void)
		goto out_err;
	}
	err = init_pkvm_host_sve_state();
	if (err)
		goto out_err;

	err = kvm_hyp_init_protection(hyp_va_bits);
	if (err) {
		kvm_err("Failed to init hyp memory protection\n");
@@ -2730,6 +2791,13 @@ static __init int kvm_arm_init(void)
	if (err)
		goto out_subs;

	/*
	 * This should be called after initialization is done and failure isn't
	 * possible anymore.
	 */
	if (!in_hyp_mode)
		finalize_init_hyp_mode();

	kvm_arm_initialised = true;

	return 0;
...
@@ -18,6 +18,8 @@ unsigned long __icache_flags;
/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

unsigned int kvm_host_sve_max_vl;

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
...
@@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size)
	return 0;
}

static int pkvm_create_host_sve_mappings(void)
{
	void *start, *end;
	int ret, i;

	if (!system_supports_sve())
		return 0;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
		struct cpu_sve_state *sve_state = host_data->sve_state;

		start = kern_hyp_va(sve_state);
		end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	return 0;
}
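One detail worth noting (an observation, not something the patch spells out): the host side allocates a power-of-two number of pages per CPU via pkvm_host_sve_state_order(), while the mapping created here only covers PAGE_ALIGN(pkvm_host_sve_state_size()), so any round-up slack from the allocator stays unmapped at EL2. In other words:

/*
 * Illustrative only, per CPU:
 *
 *	size_t mapped  = PAGE_ALIGN(pkvm_host_sve_state_size());
 *	size_t alloced = PAGE_SIZE << get_order(pkvm_host_sve_state_size());
 *
 * mapped <= alloced always holds, since get_order() rounds up to a power
 * of two while PAGE_ALIGN() only rounds up to the next page boundary.
 */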
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
@@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
			return ret;
	}

	pkvm_create_host_sve_mappings();

	/*
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
...
@@ -32,6 +32,7 @@

/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;
unsigned int __ro_after_init kvm_host_sve_max_vl;

/*
 * ARMv8 Reset Values
@@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();
		kvm_host_sve_max_vl = sve_max_vl();
		kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
...
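For readers puzzled by kvm_host_sve_max_vl appearing twice in this diff (my summary, not from the commit message): the host kernel and the nVHE hyp object each carry their own copy of the variable. The host copy is __ro_after_init and set from sve_max_vl() in kvm_arm_init_sve(); the hyp copy, referenced through kvm_nvhe_sym(), is assigned at the same time so that EL2 code sees the same value once it starts running. Roughly:

/*
 * Illustrative only: the two objects behind the one name.
 *
 *	kvm_host_sve_max_vl			host kernel copy, __ro_after_init
 *	kvm_nvhe_sym(kvm_host_sve_max_vl)	hyp (nVHE) copy, assigned by the
 *						host in kvm_arm_init_sve()
 */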