Commit 0aab33e4 authored by Junaid Shahid, committed by Paolo Bonzini

kvm: x86: Add support for fast CR3 switch across different MMU modes

This generalizes the lockless CR3 switch path so that it works
across different MMU modes (e.g. nested vs. non-nested) by checking
that the expected page role of the new root page matches the page role
of the previously stored root page, in addition to checking that the new
CR3 matches the previous CR3. Furthermore, the hardware CR3 is no longer
loaded in fast_cr3_switch(); it is now loaded in vcpu_enter_guest(),
by which time the MMU context is up-to-date with the VCPU mode.
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6e42782f
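For context, kvm_mmu_page_role is a union that packs all the properties determining a shadow page's format into a single word, which is what makes the one-shot role comparison in the diff below possible. A minimal sketch of that union (abridged; the real definition in the KVM headers carries many more role bits, and guest_mode was added earlier in this same series):

/*
 * Sketch of the role union underlying the new check (abridged).
 * Because all role bits share storage with 'word', a full role
 * comparison is a single integer compare:
 *
 *     new_role.word == page_header(mmu->root_hpa)->role.word
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;	/* paging level of the shadow page */
		unsigned guest_mode:1;	/* built while running a nested guest? */
		/* ... remaining role bits elided ... */
	};
};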
@@ -4038,7 +4038,8 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->nx = false;
 }
 
-static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3)
+static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+			    union kvm_mmu_page_role new_role)
 {
 	struct kvm_mmu *mmu = &vcpu->arch.mmu;
 
@@ -4057,7 +4058,10 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3)
 		swap(mmu->root_hpa, mmu->prev_root.hpa);
 		mmu->prev_root.cr3 = kvm_read_cr3(vcpu);
 
-		if (new_cr3 == prev_cr3 && VALID_PAGE(mmu->root_hpa)) {
+		if (new_cr3 == prev_cr3 &&
+		    VALID_PAGE(mmu->root_hpa) &&
+		    page_header(mmu->root_hpa) != NULL &&
+		    new_role.word == page_header(mmu->root_hpa)->role.word) {
 			/*
 			 * It is possible that the cached previous root page is
 			 * obsolete because of a change in the MMU
@@ -4066,12 +4070,11 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3)
 			 * have set here and allocate a new one.
 			 */
 
+			kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
 			__clear_sp_write_flooding_count(
 				page_header(mmu->root_hpa));
-			mmu->set_cr3(vcpu, mmu->root_hpa);
-
 			return true;
 		}
 	}
@@ -4079,12 +4082,18 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3)
 	return false;
 }
 
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3)
+static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+			      union kvm_mmu_page_role new_role)
 {
-	if (!fast_cr3_switch(vcpu, new_cr3))
+	if (!fast_cr3_switch(vcpu, new_cr3, new_role))
 		kvm_mmu_free_roots(vcpu, false);
 }
 
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3)
+{
+	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu));
+}
+
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr3(vcpu);
...
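The vcpu_enter_guest() side of the change falls in the truncated part of the diff above. A minimal sketch of how the deferred load could look there, assuming the request is consumed just before guest entry (the placement and exact shape are illustrative, not the commit's actual hunk):

/*
 * Illustrative sketch of the consumer side: once the MMU context
 * reflects the current VCPU mode, the KVM_REQ_LOAD_CR3 request
 * posted by fast_cr3_switch() is consumed and the hardware CR3 is
 * loaded from the MMU root.
 */
if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu)) {
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
		vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
}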