Commit c258b62b authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: MMU: introduce the framework to check zero bits on sptes

We have abstracted the data structures and functions used to check
reserved bits on guest page tables; now we extend the logic to check
zero bits on shadow page tables.

The zero bits on sptes include not only the hardware reserved bits but also
the bits that SPTEs will never use.  For example, shadow pages will never
use GB pages unless the guest uses them too.
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 81b8eebb
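
To make the idea in the commit message concrete, here is a small, self-contained sketch (not kernel code from this commit; the helper names, the 40-bit MAXPHYADDR example, and the level numbering are illustrative assumptions) of how a per-level zero-bit mask can combine hardware-reserved physical-address bits with bits the shadow page tables will never set, such as the PS bit of a PDPTE when the guest cannot use GB pages:

#include <stdbool.h>
#include <stdint.h>

/* Same shape as KVM's rsvd_bits() helper: a mask with bits s..e set. */
static inline uint64_t rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}

/*
 * Illustrative only: build the "zero bits" mask for one page-table level.
 * Bits above the host MAXPHYADDR are hardware-reserved in every spte; in
 * addition, if the guest cannot use 1GB pages, the PS bit (bit 7) of a
 * level-3 entry (PDPTE) is a software zero bit, because the shadow page
 * table will never map a GB page the guest itself cannot create.
 */
static uint64_t shadow_zero_mask(int level, int maxphyaddr, bool guest_gbpages)
{
        uint64_t mask = rsvd_bits(maxphyaddr, 51);      /* hardware-reserved */

        if (level == 3 && !guest_gbpages)
                mask |= 1ULL << 7;                      /* PS bit never used */

        return mask;
}

/* A valid spte must have none of its level's zero bits set. */
static bool spte_has_zero_bits_set(uint64_t spte, uint64_t mask)
{
        return (spte & mask) != 0;
}

The reset_shadow_zero_bits_mask() functions added below fill a table of such masks, one per level, by reusing the same __reset_rsvds_bits_mask() helpers that already compute the guest's reserved-bit masks.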
@@ -294,6 +294,14 @@ struct kvm_mmu {
         u64 *pae_root;
         u64 *lm_root;
+
+        /*
+         * check zero bits on shadow page table entries; these
+         * bits include not only hardware reserved bits but also
+         * the bits SPTEs will never use.
+         */
+        struct rsvd_bits_validate shadow_zero_check;
+
         struct rsvd_bits_validate guest_rsvd_check;

         /*
...
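
For context, the new shadow_zero_check field reuses the rsvd_bits_validate structure that the earlier guest reserved-bit abstraction introduced for guest_rsvd_check. At this point in the tree it looks roughly like the sketch below (the exact layout is recalled, not taken from this diff, so treat it as an assumption): the first index is bit 7 of the entry, the second is the page-table level minus one.

/* Roughly the structure both guest_rsvd_check and shadow_zero_check use. */
struct rsvd_bits_validate {
        u64 rsvd_bits_mask[2][4];   /* [bit 7 of the entry][level - 1] */
        u64 bad_mt_xwr;             /* illegal EPT memory-type/XWR combos */
};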
@@ -3699,6 +3699,53 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
                                     cpuid_maxphyaddr(vcpu), execonly);
 }

+/*
+ * the page table on the host is the shadow page table for the page
+ * table in the guest or amd nested guest; its mmu features completely
+ * follow the features in the guest.
+ */
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+{
+        __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+                                boot_cpu_data.x86_phys_bits,
+                                context->shadow_root_level, context->nx,
+                                guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+}
+EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
+
+/*
+ * the direct page table on the host uses as many mmu features as
+ * possible; however, kvm currently does not do execution-protection.
+ */
+static void
+reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+                                struct kvm_mmu *context)
+{
+        if (guest_cpuid_is_amd(vcpu))
+                __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+                                        boot_cpu_data.x86_phys_bits,
+                                        context->shadow_root_level, false,
+                                        cpu_has_gbpages, true);
+        else
+                __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+                                            boot_cpu_data.x86_phys_bits,
+                                            false);
+}
+
+/*
+ * same as the comment on reset_shadow_zero_bits_mask(), except this
+ * is the shadow page table for an intel nested guest.
+ */
+static void
+reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+                                struct kvm_mmu *context, bool execonly)
+{
+        __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+                                    boot_cpu_data.x86_phys_bits, execonly);
+}
+
 static void update_permission_bitmask(struct kvm_vcpu *vcpu,
                                       struct kvm_mmu *mmu, bool ept)
 {
@@ -3877,6 +3924,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
         update_permission_bitmask(vcpu, context, false);
         update_last_pte_bitmap(vcpu, context);
+        reset_tdp_shadow_zero_bits_mask(vcpu, context);
 }

 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
@@ -3904,6 +3952,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
         context->base_role.smap_andnot_wp
                 = smap && !is_write_protection(vcpu);
         context->base_role.smm = is_smm(vcpu);
+        reset_shadow_zero_bits_mask(vcpu, context);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@@ -3927,6 +3976,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
         update_permission_bitmask(vcpu, context, true);
         reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+        reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
...
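
This commit only fills shadow_zero_check; the code that consults it when auditing sptes lands in follow-up patches. A hypothetical consumer (the function name and the simplified check below are illustrative, not code from this series) would test each spte against the mask for its level, passing &vcpu->arch.mmu.shadow_zero_check as rsvd_check:

/*
 * Hypothetical sketch: report whether an spte has any of its level's
 * zero bits set.  For brevity this ignores the bad_mt_xwr check that
 * the EPT variant of the masks also carries.
 */
static bool spte_violates_zero_bits(struct rsvd_bits_validate *rsvd_check,
                                    u64 spte, int level)
{
        int bit7 = (spte >> 7) & 1;

        return (spte & rsvd_check->rsvd_bits_mask[bit7][level - 1]) != 0;
}

Note the design split visible in the diff above: reset_tdp_shadow_zero_bits_mask() follows the host's capabilities, so a TDP spte may use any feature the hardware supports, while reset_shadow_zero_bits_mask() and reset_ept_shadow_zero_bits_mask() track what the guest itself could legally create.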
@@ -53,6 +53,9 @@ static inline u64 rsvd_bits(int s, int e)
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+
 /*
  * Return values of handle_mmio_page_fault_common:
  * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
...
@@ -2107,6 +2107,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
         vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
         vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
         vcpu->arch.mmu.shadow_root_level = get_npt_level();
+        reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
 }
...