Commit 82897db9 authored by Sean Christopherson

KVM: x86: Move shadow_phys_bits into "kvm_host", as "maxphyaddr"

Move shadow_phys_bits into "struct kvm_host_values", i.e. into KVM's
global "kvm_host" variable, so that it is automatically exported for use
in vendor modules.  Rename the variable/field to maxphyaddr to more
clearly capture what value it holds, now that it's used outside of the
MMU (and because the "shadow" part is more than a bit misleading as the
variable is not at all unique to shadow paging).

Recomputing the raw/true host.MAXPHYADDR on every use can be subtly
expensive, e.g. it will incur a VM-Exit on the CPUID if KVM is running as
a nested hypervisor.  Vendor code already has access to the information,
e.g. by directly doing CPUID or by invoking kvm_get_shadow_phys_bits(), so
there's no tangible benefit to making it MMU-only.

Link: https://lore.kernel.org/r/20240423221521.2923759-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent c043eaaa
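Not part of the patch, but for context on the value being cached: below is a minimal userspace C sketch of the same CPUID.0x80000008.EAX[7:0] lookup that the new kvm_get_host_maxphyaddr() helper performs. The 36-bit fallback is an arbitrary stand-in for the kernel's boot_cpu_data.x86_phys_bits fallback, which userspace can't read. Each such CPUID execution traps to the underlying hypervisor when running nested, which is why the patch computes the value once at module init and caches it in kvm_host.maxphyaddr.

#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace illustration only: read the raw MAXPHYADDR from CPUID.0x80000008.EAX[7:0]. */
static uint8_t raw_maxphyaddr(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the extended leaf is not supported. */
	if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return eax & 0xff;

	/* No leaf 0x80000008; fall back to the legacy 36-bit assumption. */
	return 36;
}

int main(void)
{
	printf("raw MAXPHYADDR: %u bits\n", (unsigned)raw_maxphyaddr());
	return 0;
}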
@@ -57,12 +57,6 @@ static __always_inline u64 rsvd_bits(int s, int e)
 	return ((2ULL << (e - s)) - 1) << s;
 }
 
-/*
- * The number of non-reserved physical address bits irrespective of features
- * that repurpose legal bits, e.g. MKTME.
- */
-extern u8 __ro_after_init shadow_phys_bits;
-
 static inline gfn_t kvm_mmu_max_gfn(void)
 {
 	/*
@@ -76,30 +70,11 @@ static inline gfn_t kvm_mmu_max_gfn(void)
 	 * than hardware's real MAXPHYADDR.  Using the host MAXPHYADDR
 	 * disallows such SPTEs entirely and simplifies the TDP MMU.
 	 */
-	int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;
+	int max_gpa_bits = likely(tdp_enabled) ? kvm_host.maxphyaddr : 52;
 
 	return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
 }
 
-static inline u8 kvm_get_shadow_phys_bits(void)
-{
-	/*
-	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
-	 * in CPU detection code, but the processor treats those reduced bits as
-	 * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
-	 * the physical address bits reported by CPUID.
-	 */
-	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
-		return cpuid_eax(0x80000008) & 0xff;
-
-	/*
-	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
-	 * custom CPUID.  Proceed with whatever the kernel found since these features
-	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
-	 */
-	return boot_cpu_data.x86_phys_bits;
-}
-
 u8 kvm_mmu_get_max_tdp_level(void);
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
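Not part of the patch — a quick sanity check of the kvm_mmu_max_gfn() expression above, assuming TDP is enabled and the host reports a MAXPHYADDR of 52 (PAGE_SHIFT is 12 on x86):

#include <stdio.h>

int main(void)
{
	int max_gpa_bits = 52;	/* e.g. kvm_host.maxphyaddr on a 52-bit host */
	int page_shift = 12;	/* PAGE_SHIFT on x86 */

	/* Same expression as kvm_mmu_max_gfn(): highest addressable guest frame number. */
	unsigned long long max_gfn = (1ULL << (max_gpa_bits - page_shift)) - 1;

	printf("max gfn = 0x%llx\n", max_gfn);	/* 0xffffffffff, i.e. 2^40 - 1 */
	return 0;
}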
@@ -4980,7 +4980,7 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 
 static inline u64 reserved_hpa_bits(void)
 {
-	return rsvd_bits(shadow_phys_bits, 63);
+	return rsvd_bits(kvm_host.maxphyaddr, 63);
 }
 
 /*
@@ -43,7 +43,25 @@ u64 __read_mostly shadow_acc_track_mask;
 u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
 u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
-u8 __ro_after_init shadow_phys_bits;
-
+static u8 __init kvm_get_host_maxphyaddr(void)
+{
+	/*
+	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
+	 * in CPU detection code, but the processor treats those reduced bits as
+	 * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
+	 * the physical address bits reported by CPUID, i.e. the raw MAXPHYADDR,
+	 * when reasoning about CPU behavior with respect to MAXPHYADDR.
+	 */
+	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
+		return cpuid_eax(0x80000008) & 0xff;
+
+	/*
+	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
+	 * custom CPUID.  Proceed with whatever the kernel found since these features
+	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
+	 */
+	return boot_cpu_data.x86_phys_bits;
+}
+
 void __init kvm_mmu_spte_module_init(void)
 {
@@ -56,7 +74,7 @@ void __init kvm_mmu_spte_module_init(void)
 	 */
 	allow_mmio_caching = enable_mmio_caching;
 
-	shadow_phys_bits = kvm_get_shadow_phys_bits();
+	kvm_host.maxphyaddr = kvm_get_host_maxphyaddr();
 }
 
 static u64 generation_mmio_spte_mask(u64 gen)
@@ -494,7 +512,7 @@ void kvm_mmu_reset_all_pte_masks(void)
 	 * 52-bit physical addresses then there are no reserved PA bits in the
 	 * PTEs and so the reserved PA approach must be disabled.
 	 */
-	if (shadow_phys_bits < 52)
+	if (kvm_host.maxphyaddr < 52)
 		mask = BIT_ULL(51) | PT_PRESENT_MASK;
 	else
 		mask = 0;
@@ -8389,18 +8389,16 @@ static void __init vmx_setup_me_spte_mask(void)
 	u64 me_mask = 0;
 
 	/*
-	 * kvm_get_shadow_phys_bits() returns shadow_phys_bits.  Use
-	 * the former to avoid exposing shadow_phys_bits.
-	 *
 	 * On pre-MKTME system, boot_cpu_data.x86_phys_bits equals to
-	 * shadow_phys_bits.  On MKTME and/or TDX capable systems,
+	 * kvm_host.maxphyaddr.  On MKTME and/or TDX capable systems,
 	 * boot_cpu_data.x86_phys_bits holds the actual physical address
-	 * w/o the KeyID bits, and shadow_phys_bits equals to MAXPHYADDR
-	 * reported by CPUID.  Those bits between are KeyID bits.
+	 * w/o the KeyID bits, and kvm_host.maxphyaddr equals to
+	 * MAXPHYADDR reported by CPUID.  Those bits between are KeyID bits.
 	 */
-	if (boot_cpu_data.x86_phys_bits != kvm_get_shadow_phys_bits())
+	if (boot_cpu_data.x86_phys_bits != kvm_host.maxphyaddr)
 		me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
-				    kvm_get_shadow_phys_bits() - 1);
+				    kvm_host.maxphyaddr - 1);
 
 	/*
 	 * Unlike SME, host kernel doesn't support setting up any
 	 * MKTME KeyID on Intel platforms.  No memory encryption
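Not part of the patch — an illustration of how the KeyID mask above falls out of rsvd_bits(), using hypothetical values (6 KeyID bits carved out of a 52-bit raw MAXPHYADDR); the formula is the one from rsvd_bits() in mmu.h shown earlier:

#include <stdio.h>

/* Same formula as rsvd_bits() in mmu.h: set bits [s, e]. */
static unsigned long long rsvd_bits(int s, int e)
{
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	int x86_phys_bits = 46;	/* hypothetical: usable PA bits after the KeyID carve-out */
	int maxphyaddr = 52;	/* hypothetical: raw MAXPHYADDR reported by CPUID */

	/* Bits [46, 51] are KeyID bits, i.e. reserved as far as SPTEs are concerned. */
	printf("me_mask = 0x%llx\n", rsvd_bits(x86_phys_bits, maxphyaddr - 1));
	/* prints: me_mask = 0xfc00000000000 */
	return 0;
}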
@@ -727,7 +727,7 @@ static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
 		return true;
 
 	return allow_smaller_maxphyaddr &&
-	       cpuid_maxphyaddr(vcpu) < kvm_get_shadow_phys_bits();
+	       cpuid_maxphyaddr(vcpu) < kvm_host.maxphyaddr;
 }
 
 static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
@@ -34,6 +34,13 @@ struct kvm_caps {
 };
 
 struct kvm_host_values {
+	/*
+	 * The host's raw MAXPHYADDR, i.e. the number of non-reserved physical
+	 * address bits irrespective of features that repurpose legal bits,
+	 * e.g. MKTME.
+	 */
+	u8 maxphyaddr;
+
 	u64 efer;
 	u64 xcr0;
 	u64 xss;