Commit be01e8e2 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Replace "cr3" with "pgd" in "new cr3/pgd" related code

Rename functions and variables in kvm_mmu_new_cr3() and related code to
replace "cr3" with "pgd", i.e. continue the work started by commit
727a7e27 ("KVM: x86: rename set_cr3 callback and related flags to
load_mmu_pgd").  kvm_mmu_new_cr3() and company are not always loading a
new CR3, e.g. when nested EPT is enabled "cr3" is actually an EPTP.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-37-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ce8fe7b7
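
Not part of the patch: as a quick orientation for the mmu.c hunks below (is_root_usable(), cached_root_available(), fast_pgd_switch()), here is a minimal user-space sketch of the root cache being renamed. It uses simplified types and hypothetical names, and omits the MMU-role check; the point is only that the cache key is the guest's root table address, now called "pgd" because it may hold either a CR3 or, with nested EPT, an EPTP.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PREV_ROOTS 3
#define INVALID_PAGE   (~0ULL)

struct root_info {
        uint64_t pgd;   /* guest root pointer: a CR3 or, with nested EPT, an EPTP */
        uint64_t hpa;   /* host physical address of the shadow/TDP root */
};

struct mmu {
        uint64_t root_pgd;
        uint64_t root_hpa;
        struct root_info prev_roots[NUM_PREV_ROOTS];
};

static bool is_root_usable(const struct root_info *root, uint64_t pgd)
{
        /* KVM also compares the MMU role; that check is omitted here. */
        return pgd == root->pgd && root->hpa != INVALID_PAGE;
}

/* Same shape as cached_root_available(): the current root is rotated into
 * the previous-roots cache while searching for a reusable entry. */
static bool cached_root_available(struct mmu *mmu, uint64_t new_pgd)
{
        struct root_info root = { .pgd = mmu->root_pgd, .hpa = mmu->root_hpa };
        unsigned int i;

        if (is_root_usable(&root, new_pgd))
                return true;

        for (i = 0; i < NUM_PREV_ROOTS; i++) {
                struct root_info tmp = root;

                root = mmu->prev_roots[i];      /* open-coded swap() */
                mmu->prev_roots[i] = tmp;
                if (is_root_usable(&root, new_pgd))
                        break;
        }

        mmu->root_hpa = root.hpa;
        mmu->root_pgd = root.pgd;
        return i < NUM_PREV_ROOTS;
}

int main(void)
{
        struct mmu mmu = {
                .root_pgd = 0x1000, .root_hpa = 0xaa000,
                .prev_roots = {
                        { .pgd = 0x2000,       .hpa = 0xbb000 },
                        { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE },
                        { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE },
                },
        };

        /* A guest CR3 write (or a new EPTP) arrives as "new_pgd". */
        printf("switch to 0x2000: cached? %d\n", cached_root_available(&mmu, 0x2000));
        printf("switch to 0x3000: cached? %d\n", cached_root_available(&mmu, 0x3000));
        return 0;
}

Compiled standalone, the first switch reuses the cached root while the second falls back to the LRU slot, mirroring the true/false contract described in the comment above cached_root_available() in the diff.
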
@@ -375,12 +375,12 @@ struct rsvd_bits_validate {
 };
 
 struct kvm_mmu_root_info {
-        gpa_t cr3;
+        gpa_t pgd;
         hpa_t hpa;
 };
 
 #define KVM_MMU_ROOT_INFO_INVALID \
-        ((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })
+        ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })
 
 #define KVM_MMU_NUM_PREV_ROOTS 3
@@ -406,7 +406,7 @@ struct kvm_mmu {
         void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            u64 *spte, const void *pte);
         hpa_t root_hpa;
-        gpa_t root_cr3;
+        gpa_t root_pgd;
         union kvm_mmu_role mmu_role;
         u8 root_level;
         u8 shadow_root_level;
@@ -1524,7 +1524,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                             gva_t gva, hpa_t root_hpa);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
                      bool skip_mmu_sync);
 void kvm_configure_mmu(bool enable_tdp, int tdp_page_level);
@@ -3665,7 +3665,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                            &invalid_list);
                         mmu->root_hpa = INVALID_PAGE;
                 }
-                mmu->root_cr3 = 0;
+                mmu->root_pgd = 0;
         }
 
         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3722,8 +3722,8 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
         } else
                 BUG();
 
-        /* root_cr3 is ignored for direct MMUs. */
-        vcpu->arch.mmu->root_cr3 = 0;
+        /* root_pgd is ignored for direct MMUs. */
+        vcpu->arch.mmu->root_pgd = 0;
 
         return 0;
 }
@@ -3732,11 +3732,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
         struct kvm_mmu_page *sp;
         u64 pdptr, pm_mask;
-        gfn_t root_gfn, root_cr3;
+        gfn_t root_gfn, root_pgd;
         int i;
 
-        root_cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
-        root_gfn = root_cr3 >> PAGE_SHIFT;
+        root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
+        root_gfn = root_pgd >> PAGE_SHIFT;
 
         if (mmu_check_root(vcpu, root_gfn))
                 return 1;
@@ -3761,7 +3761,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                 ++sp->root_count;
                 spin_unlock(&vcpu->kvm->mmu_lock);
                 vcpu->arch.mmu->root_hpa = root;
-                goto set_root_cr3;
+                goto set_root_pgd;
         }
 
         /*
@@ -3827,8 +3827,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
         }
 
-set_root_cr3:
-        vcpu->arch.mmu->root_cr3 = root_cr3;
+set_root_pgd:
+        vcpu->arch.mmu->root_pgd = root_pgd;
 
         return 0;
 }
@@ -4244,49 +4244,49 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
         context->nx = false;
 }
 
-static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t cr3,
+static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
                                   union kvm_mmu_page_role role)
 {
-        return (role.direct || cr3 == root->cr3) &&
+        return (role.direct || pgd == root->pgd) &&
                VALID_PAGE(root->hpa) && page_header(root->hpa) &&
                role.word == page_header(root->hpa)->role.word;
 }
 
 /*
- * Find out if a previously cached root matching the new CR3/role is available.
+ * Find out if a previously cached root matching the new pgd/role is available.
  * The current root is also inserted into the cache.
  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
  * returned.
  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
  * false is returned. This root should now be freed by the caller.
  */
-static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
                                   union kvm_mmu_page_role new_role)
 {
         uint i;
         struct kvm_mmu_root_info root;
         struct kvm_mmu *mmu = vcpu->arch.mmu;
 
-        root.cr3 = mmu->root_cr3;
+        root.pgd = mmu->root_pgd;
         root.hpa = mmu->root_hpa;
 
-        if (is_root_usable(&root, new_cr3, new_role))
+        if (is_root_usable(&root, new_pgd, new_role))
                 return true;
 
         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                 swap(root, mmu->prev_roots[i]);
 
-                if (is_root_usable(&root, new_cr3, new_role))
+                if (is_root_usable(&root, new_pgd, new_role))
                         break;
         }
 
         mmu->root_hpa = root.hpa;
-        mmu->root_cr3 = root.cr3;
+        mmu->root_pgd = root.pgd;
 
         return i < KVM_MMU_NUM_PREV_ROOTS;
 }
 
-static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
                             union kvm_mmu_page_role new_role)
 {
         struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -4298,17 +4298,17 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
          */
         if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
             mmu->root_level >= PT64_ROOT_4LEVEL)
-                return !mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT) &&
-                       cached_root_available(vcpu, new_cr3, new_role);
+                return !mmu_check_root(vcpu, new_pgd >> PAGE_SHIFT) &&
+                       cached_root_available(vcpu, new_pgd, new_role);
 
         return false;
 }
 
-static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
                               union kvm_mmu_page_role new_role,
                               bool skip_tlb_flush, bool skip_mmu_sync)
 {
-        if (!fast_cr3_switch(vcpu, new_cr3, new_role)) {
+        if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
                 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
                 return;
         }
@@ -4337,13 +4337,13 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
         __clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
 }
 
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
                      bool skip_mmu_sync)
 {
-        __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
+        __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
                           skip_tlb_flush, skip_mmu_sync);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
+EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
@@ -5034,7 +5034,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                 kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
                                                    execonly, level);
 
-        __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, true, true);
+        __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
 
         if (new_role.as_u64 == context->mmu_role.as_u64)
                 return;
@@ -5551,7 +5551,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
-                    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
+                    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
                         mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
                         tlb_flush = true;
                 }
@@ -5705,13 +5705,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 
         vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
-        vcpu->arch.root_mmu.root_cr3 = 0;
+        vcpu->arch.root_mmu.root_pgd = 0;
         vcpu->arch.root_mmu.translate_gpa = translate_gpa;
         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                 vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 
         vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
-        vcpu->arch.guest_mmu.root_cr3 = 0;
+        vcpu->arch.guest_mmu.root_pgd = 0;
         vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                 vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
@@ -1148,7 +1148,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
          * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
          */
         if (!nested_ept)
-                kvm_mmu_new_cr3(vcpu, cr3, true,
+                kvm_mmu_new_pgd(vcpu, cr3, true,
                                 !nested_vmx_transition_mmu_sync(vcpu));
 
         vcpu->arch.cr3 = cr3;
@@ -5228,13 +5228,13 @@ static int handle_invept(struct kvm_vcpu *vcpu)
                                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
                 roots_to_free = 0;
-                if (nested_ept_root_matches(mmu->root_hpa, mmu->root_cr3,
+                if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd,
                                             operand.eptp))
                         roots_to_free |= KVM_MMU_ROOT_CURRENT;
 
                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                         if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
-                                                    mmu->prev_roots[i].cr3,
+                                                    mmu->prev_roots[i].pgd,
                                                     operand.eptp))
                                 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
                 }
@@ -5476,7 +5476,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
                 }
 
                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-                        if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
+                        if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
                             == operand.pcid)
                                 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
@@ -1031,7 +1031,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
             !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                 return 1;
 
-        kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
+        kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
         vcpu->arch.cr3 = cr3;
         kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);