Commit a37ebdce authored by Lai Jiangshan, committed by Paolo Bonzini

KVM: VMX: Add and use X86_CR4_PDPTR_BITS when !enable_ept

In set_cr4_guest_host_mask(), all of the CR4 PDPTR bits are already
intercepted, but in an unclear way.

Add X86_CR4_PDPTR_BITS to make it clear and self-documenting.

No functionality changed.
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211108124407.12187-6-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5ec60aad
@@ -10,6 +10,7 @@
 	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
 
 #define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
+#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)
 
 #define BUILD_KVM_GPR_ACCESSORS(lname, uname)				\
 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
...
@@ -4044,8 +4044,10 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 
 	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
 					  ~vcpu->arch.cr4_guest_rsvd_bits;
-	if (!enable_ept)
+	if (!enable_ept) {
 		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
+		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
+	}
 	if (is_guest_mode(&vmx->vcpu))
 		vcpu->arch.cr4_guest_owned_bits &=
 			~get_vmcs12(vcpu)->cr4_guest_host_mask;
...
@@ -1051,8 +1051,6 @@ EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
-	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-				   X86_CR4_SMEP;
 
 	if (!kvm_is_valid_cr4(vcpu, cr4))
 		return 1;
@@ -1063,7 +1061,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
 			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
-		   && ((cr4 ^ old_cr4) & pdptr_bits)
+		   && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
 				   kvm_read_cr3(vcpu)))
 			return 1;
...
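
For illustration only (not part of the commit): a minimal userspace sketch of the check that kvm_set_cr4() performs with the new macro. The CR4 bit positions are the architectural ones; the main() harness, the sample CR4 values, and the printf message are hypothetical.

#include <stdio.h>

/* Architectural CR4 bit positions. */
#define X86_CR4_PSE  (1UL << 4)   /* page size extensions */
#define X86_CR4_PAE  (1UL << 5)   /* physical address extension */
#define X86_CR4_PGE  (1UL << 7)   /* global pages */
#define X86_CR4_SMEP (1UL << 20)  /* supervisor mode execution prevention */

/*
 * The macro this commit introduces: the CR4 bits that, per
 * kvm_set_cr4(), require the cached PDPTRs to be re-read when
 * they change while the guest uses PAE paging.
 */
#define X86_CR4_PDPTR_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

int main(void)
{
	unsigned long old_cr4 = X86_CR4_PAE;            /* guest in PAE mode */
	unsigned long cr4 = X86_CR4_PAE | X86_CR4_PGE;  /* guest sets CR4.PGE */

	/* Same test as kvm_set_cr4(): act only if a PDPTR-affecting bit flipped. */
	if ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
		printf("PDPTR-affecting CR4 bit toggled: reload the PDPTRs\n");

	return 0;
}

Naming the mask also keeps vmx.c and x86.c in sync: set_cr4_guest_host_mask() must intercept exactly the bits that kvm_set_cr4() tests, and a single shared macro makes that invariant visible.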