Commit 44dd3ffa authored by Vitaly Kuznetsov, committed by Paolo Bonzini

x86/kvm/mmu: make vcpu->mmu a pointer to the current MMU

In preparation for the full MMU split between L1 and L2, make vcpu->arch.mmu
a pointer to the currently used MMU. For now, this is always
vcpu->arch.root_mmu. No functional change.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
parent 0e0a53c5
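
For orientation, here is a small stand-alone sketch of the pattern this commit sets up. It is a toy model, not kernel code: the struct and field names below only mirror the real ones, "second_mmu" is a hypothetical placeholder for a future L2 context, and the actual pointer initialization lives in the collapsed part of the diff. The point is that vcpu->arch.mmu becomes a pointer selecting the active MMU context, root_mmu holds the L1 state it points to for now, and every user is converted from '.' access (and &vcpu->arch.mmu) to '->' (and the bare pointer).

/* Toy model of the new layout; all types here are stand-ins. */
#include <stdio.h>

struct kvm_mmu {
        unsigned long root_hpa;
};

struct vcpu_arch {
        struct kvm_mmu *mmu;        /* currently active MMU context */
        struct kvm_mmu root_mmu;    /* non-nested MMU for L1 */
        struct kvm_mmu second_mmu;  /* hypothetical future L2 context */
};

int main(void)
{
        struct vcpu_arch arch = { .root_mmu = { .root_hpa = 0x1000 } };

        /* As of this commit the pointer always selects root_mmu, so there
         * is no functional change ... */
        arch.mmu = &arch.root_mmu;
        printf("active root_hpa = 0x%lx\n", arch.mmu->root_hpa);

        /* ... but once a second context exists, switching MMUs becomes a
         * single pointer swap and none of the converted call sites change. */
        arch.second_mmu.root_hpa = 0x2000;
        arch.mmu = &arch.second_mmu;
        printf("active root_hpa = 0x%lx\n", arch.mmu->root_hpa);
        return 0;
}
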
@@ -543,7 +543,10 @@ struct kvm_vcpu_arch {
 	 * the paging mode of the l1 guest. This context is always used to
 	 * handle faults.
 	 */
-	struct kvm_mmu mmu;
+	struct kvm_mmu *mmu;
+
+	/* Non-nested MMU for L1 */
+	struct kvm_mmu root_mmu;
 
 	/*
 	 * Paging state of an L2 guest (used for nested npt)
...
This diff is collapsed.
@@ -75,7 +75,7 @@ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
-	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
+	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
 		return 0;
 
 	return kvm_mmu_load(vcpu);
@@ -97,9 +97,9 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
 
 static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
 {
-	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa |
-					     kvm_get_active_pcid(vcpu));
+	if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
+		vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
+					      kvm_get_active_pcid(vcpu));
 }
 
 /*
...
@@ -59,19 +59,19 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
 	int i;
 	struct kvm_mmu_page *sp;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 		return;
 
-	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
-		hpa_t root = vcpu->arch.mmu.root_hpa;
+	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+		hpa_t root = vcpu->arch.mmu->root_hpa;
 
 		sp = page_header(root);
-		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu.root_level);
+		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
 		return;
 	}
 
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu->pae_root[i];
 
 		if (root && VALID_PAGE(root)) {
 			root &= PT64_BASE_ADDR_MASK;
@@ -122,7 +122,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 	hpa = pfn << PAGE_SHIFT;
 
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
 		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
-			     "ent %llxn", vcpu->arch.mmu.root_level, pfn,
+			     "ent %llxn", vcpu->arch.mmu->root_level, pfn,
 			     hpa, *sptep);
 }
...
@@ -158,14 +158,15 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu_page *sp, u64 *spte,
 				  u64 gpte)
 {
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+	if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
 		goto no_present;
 
 	if (!FNAME(is_present_gpte)(gpte))
 		goto no_present;
 
 	/* if accessed bit is not supported prefetch non accessed gpte */
-	if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK))
+	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
+	    !(gpte & PT_GUEST_ACCESSED_MASK))
 		goto no_present;
 
 	return false;
@@ -480,7 +481,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 static int FNAME(walk_addr)(struct guest_walker *walker,
 			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
 {
-	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
+	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
 					access);
 }
@@ -509,7 +510,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	gfn = gpte_to_gfn(gpte);
 	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
-	FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
+	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 			no_dirty_log && (pte_access & ACC_WRITE_MASK));
 	if (is_error_pfn(pfn))
@@ -604,7 +605,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	direct_access = gw->pte_access;
 
-	top_level = vcpu->arch.mmu.root_level;
+	top_level = vcpu->arch.mmu->root_level;
 	if (top_level == PT32E_ROOT_LEVEL)
 		top_level = PT32_ROOT_LEVEL;
 	/*
@@ -616,7 +617,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (FNAME(gpte_changed)(vcpu, gw, top_level))
 		goto out_gpte_changed;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 		goto out_gpte_changed;
 
 	for (shadow_walk_init(&it, vcpu, addr);
@@ -1004,7 +1005,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		gfn = gpte_to_gfn(gpte);
 		pte_access = sp->role.access;
 		pte_access &= FNAME(gpte_access)(gpte);
-		FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
+		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
 		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
 				   &nr_present))
...
@@ -2918,18 +2918,18 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
 	WARN_ON(mmu_is_nested(vcpu));
 	kvm_init_shadow_mmu(vcpu);
-	vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
-	vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
-	vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
-	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
-	vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
-	reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
+	vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3;
+	vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3;
+	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
+	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
+	vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
+	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
 	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
 }
 
 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 }
 
 static int nested_svm_check_permissions(struct vcpu_svm *svm)
...
@@ -5111,9 +5111,10 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
 				bool invalidate_gpa)
 {
 	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
-		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 			return;
-		ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
+		ept_sync_context(construct_eptp(vcpu,
+						vcpu->arch.mmu->root_hpa));
 	} else {
 		vpid_sync_context(vpid);
 	}
@@ -9122,7 +9123,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 		}
 
 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3)
+			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
 			    == operand.pcid)
 				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
@@ -11304,16 +11305,16 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 			VMX_EPT_EXECUTE_ONLY_BIT,
 			nested_ept_ad_enabled(vcpu),
 			nested_ept_get_cr3(vcpu));
-	vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
-	vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
-	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
+	vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
+	vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
+	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
 
 	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
 }
 
 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 }
 
 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
...
@@ -503,7 +503,7 @@ static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
-		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
 
 	return fault->nested_page_fault;
 }
@@ -602,7 +602,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
 		if ((pdpte[i] & PT_PRESENT_MASK) &&
 		    (pdpte[i] &
-		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
+		     vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) {
 			ret = 0;
 			goto out;
 		}
@@ -4809,7 +4809,7 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
+	t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
 
 	return t_gpa;
 }
@@ -5895,7 +5895,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
 		return false;
 
-	if (!vcpu->arch.mmu.direct_map) {
+	if (!vcpu->arch.mmu->direct_map) {
 		/*
 		 * Write permission should be allowed since only
 		 * write access need to be emulated.
@@ -5928,7 +5928,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 	kvm_release_pfn_clean(pfn);
 
 	/* The instructions are well-emulated on direct mmu. */
-	if (vcpu->arch.mmu.direct_map) {
+	if (vcpu->arch.mmu->direct_map) {
 		unsigned int indirect_shadow_pages;
 
 		spin_lock(&vcpu->kvm->mmu_lock);
@@ -5995,7 +5995,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	vcpu->arch.last_retry_eip = ctxt->eip;
 	vcpu->arch.last_retry_addr = cr2;
 
-	if (!vcpu->arch.mmu.direct_map)
+	if (!vcpu->arch.mmu->direct_map)
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
 
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -9333,7 +9333,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
 	int r;
 
-	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
 	      work->wakeup_all)
 		return;
@@ -9341,11 +9341,11 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	if (unlikely(r))
 		return;
 
-	if (!vcpu->arch.mmu.direct_map &&
-	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+	if (!vcpu->arch.mmu->direct_map &&
+	      work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
 		return;
 
-	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+	vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true);
 }
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
...