Commit d8dd54e0 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Rename kvm_mmu->get_cr3() to ->get_guest_pgd()

Rename kvm_mmu->get_cr3() to call out that it is retrieving a guest
value, as opposed to kvm_mmu->set_cr3(), which sets a host value, and to
note that it will return something other than CR3 when nested EPT is in
use.  Hopefully the new name will also make it more obvious that L1's
nested_cr3 is returned in SVM's nested NPT case.
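
For illustration only (not part of this patch): after the rename, ->get_guest_pgd
points at one of three existing helpers depending on which MMU is active. The
bodies below are paraphrased from the surrounding KVM code and simplified; the
exact field names (e.g. nested.nested_cr3, ept_pointer) are assumptions here,
not verbatim excerpts from the tree.

/* Ordinary paging / non-nested TDP: the guest's root really is CR3. */
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr3(vcpu);
}

/* Nested NPT (SVM): return L1's nested_cr3, not the CR3 of the running L2. */
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->nested.nested_cr3;
}

/* Nested EPT (VMX): return the EPTP from vmcs12, which is not a CR3 at all. */
static unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->ept_pointer;
}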

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ac6389ab
@@ -385,7 +385,7 @@ struct kvm_mmu_root_info {
  */
 struct kvm_mmu {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
-	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+	unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
 			  bool prefault);
...
@@ -3733,7 +3733,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	gfn_t root_gfn, root_cr3;
 	int i;
 
-	root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+	root_cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
 	root_gfn = root_cr3 >> PAGE_SHIFT;
 
 	if (mmu_check_root(vcpu, root_gfn))
@@ -4070,7 +4070,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
 	arch.gfn = gfn;
 	arch.direct_map = vcpu->arch.mmu->direct_map;
-	arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
 
 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
@@ -4929,7 +4929,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
 	context->direct_map = true;
 	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
-	context->get_cr3 = get_cr3;
+	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
@@ -5076,7 +5076,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 	kvm_init_shadow_mmu(vcpu);
 	context->set_cr3 = kvm_x86_ops->set_cr3;
-	context->get_cr3 = get_cr3;
+	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
 }
@@ -5090,7 +5090,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 		return;
 
 	g_context->mmu_role.as_u64 = new_role.as_u64;
-	g_context->get_cr3 = get_cr3;
+	g_context->get_guest_pgd = get_cr3;
 	g_context->get_pdptr = kvm_pdptr_read;
 	g_context->inject_page_fault = kvm_inject_page_fault;
...
@@ -333,7 +333,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
 	walker->level = mmu->root_level;
-	pte = mmu->get_cr3(vcpu);
+	pte = mmu->get_guest_pgd(vcpu);
 	have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
 
 #if PTTYPE == 64
...
@@ -3012,7 +3012,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
 	kvm_init_shadow_mmu(vcpu);
 	vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3;
-	vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3;
+	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
 	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
 	vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
...
@@ -355,7 +355,7 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 				nested_ept_ad_enabled(vcpu),
 				nested_ept_get_eptp(vcpu));
 	vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
-	vcpu->arch.mmu->get_cr3 = nested_ept_get_eptp;
+	vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
 	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
 	vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
...
@@ -10165,7 +10165,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 		return;
 
 	if (!vcpu->arch.mmu->direct_map &&
 	      work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
-	      work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
+	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
 		return;
 
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
...