Commit 20f632bd authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Read and pass all CR0/CR4 role bits to shadow MMU helper

Grab all CR0/CR4 MMU role bits from current vCPU state when initializing
a non-nested shadow MMU.  Extract the masks from kvm_post_set_cr{0,4}(),
as the CR0/CR4 update masks must exactly match the mmu_role bits, with
one exception (see below).  The "full" CR0/CR4 will be used by future
commits to initialize the MMU and its role, as opposed to the current
approach of pulling everything from vCPU, which is incorrect for certain
flows, e.g. nested NPT.
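
Reading the "full" CR0/CR4 is cheap because the kvm_read_cr{0,4}_bits() helpers simply filter the cached vCPU registers. As a simplified sketch (the real helpers in kvm_cache_regs.h also refresh any requested guest-owned bits from hardware before masking):

/*
 * Simplified sketch: return only the requested CR0/CR4 bits from the
 * cached vCPU state.  The real helpers first re-read requested bits
 * that are guest-owned, i.e. potentially stale, from hardware.
 */
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	return vcpu->arch.cr4 & mask;
}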

CR4.LA57 is an exception, as it can be toggled on VM-Exit (for L1's MMU)
but can't be toggled via MOV CR4 while long mode is active.  I.e. LA57
needs to be in the mmu_role, but technically doesn't need to be checked
by kvm_post_set_cr4().  However, the extra check is completely benign as
the hardware restrictions simply mean LA57 will never be _the_ cause of
a MMU reset during MOV CR4.
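
For reference, the MOV CR4 restriction is enforced before kvm_post_set_cr4() ever runs: kvm_set_cr4() rejects the write up front, roughly as follows (simplified sketch of the existing x86.c check):

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
		/* Hardware disallows toggling LA57 while long mode is active. */
		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
			return 1;
	}

Thus, in long mode, (cr4 ^ old_cr4) & X86_CR4_LA57 is always zero by the time kvm_post_set_cr4() checks it.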

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-18-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

parent 18feaad3
arch/x86/kvm/mmu.h
@@ -44,6 +44,12 @@
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
+#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | \
+			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE | \
+			       X86_CR4_LA57)
+
+#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
+
 static __always_inline u64 rsvd_bits(int s, int e)
 {
 	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
arch/x86/kvm/mmu/mmu.c
@@ -4778,8 +4778,8 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 
 	kvm_init_shadow_mmu(vcpu,
-			    kvm_read_cr0_bits(vcpu, X86_CR0_PG),
-			    kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
+			    kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
+			    kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
 			    vcpu->arch.efer);
 	context->get_guest_pgd = get_cr3;
arch/x86/kvm/x86.c
@@ -851,14 +851,12 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
 
 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
 {
-	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
-
 	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
 		kvm_clear_async_pf_completion_queue(vcpu);
 		kvm_async_pf_hash_reset(vcpu);
 	}
 
-	if ((cr0 ^ old_cr0) & update_bits)
+	if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
 		kvm_mmu_reset_context(vcpu);
 
 	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
@@ -1037,10 +1035,7 @@ EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);
 
 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
 {
-	unsigned long mmu_role_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-				      X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
-
-	if (((cr4 ^ old_cr4) & mmu_role_bits) ||
+	if (((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) ||
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 }
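
A purely hypothetical illustration of the new masks in action: only role bits force an MMU context reset, so toggling a non-role bit such as CR0.TS leaves the context untouched.

	/* Hypothetical example, not part of the patch. */
	unsigned long old_cr0 = X86_CR0_PG | X86_CR0_PE;
	unsigned long new_cr0 = old_cr0 | X86_CR0_TS;	/* TS is not a role bit */

	if ((new_cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);	/* not reached */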