Commit 44dd3ffa authored by Vitaly Kuznetsov, committed by Paolo Bonzini

x86/kvm/mmu: make vcpu->mmu a pointer to the current MMU

In preparation for a full MMU split between L1 and L2, make vcpu->arch.mmu
a pointer to the currently used MMU. For now, this is always
vcpu->arch.root_mmu. No functional change.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
parent 0e0a53c5
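For orientation before the diff, here is a minimal, hypothetical C sketch (simplified types and a made-up helper name, not the kernel definitions themselves) of the pattern this commit applies: the embedded mmu struct becomes a pointer that kvm_mmu_create() aims at the new root_mmu field, and every user switches from "." to "->" accesses.

/* Hypothetical, simplified sketch -- not the actual kernel structs. */
struct kvm_mmu {
	unsigned long root_hpa;
};

struct kvm_vcpu_arch_sketch {
	struct kvm_mmu *mmu;		/* currently used MMU (always L1 for now) */
	struct kvm_mmu root_mmu;	/* non-nested MMU for L1 */
};

static void mmu_create_sketch(struct kvm_vcpu_arch_sketch *arch)
{
	arch->mmu = &arch->root_mmu;	/* wiring as done in kvm_mmu_create() */
	arch->root_mmu.root_hpa = ~0UL;	/* stands in for INVALID_PAGE */
}

/* Call sites change from vcpu->arch.mmu.root_hpa to vcpu->arch.mmu->root_hpa. */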
@@ -543,7 +543,10 @@ struct kvm_vcpu_arch {
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
-	struct kvm_mmu mmu;
+	struct kvm_mmu *mmu;
+
+	/* Non-nested MMU for L1 */
+	struct kvm_mmu root_mmu;
	/*
	 * Paging state of an L2 guest (used for nested npt)
...
@@ -2181,7 +2181,7 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    struct list_head *invalid_list)
 {
	if (sp->role.cr4_pae != !!is_pae(vcpu)
-	    || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
+	    || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}
@@ -2375,14 +2375,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
	int collisions = 0;
	LIST_HEAD(invalid_list);
-	role = vcpu->arch.mmu.base_role;
+	role = vcpu->arch.mmu->base_role;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.cr4_pae = 0;
	role.access = access;
-	if (!vcpu->arch.mmu.direct_map
-	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (!vcpu->arch.mmu->direct_map
+	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
@@ -2457,11 +2457,11 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
 {
	iterator->addr = addr;
	iterator->shadow_addr = root;
-	iterator->level = vcpu->arch.mmu.shadow_root_level;
+	iterator->level = vcpu->arch.mmu->shadow_root_level;
	if (iterator->level == PT64_ROOT_4LEVEL &&
-	    vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
-	    !vcpu->arch.mmu.direct_map)
+	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
+	    !vcpu->arch.mmu->direct_map)
		--iterator->level;
	if (iterator->level == PT32E_ROOT_LEVEL) {
@@ -2469,10 +2469,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
-		BUG_ON(root != vcpu->arch.mmu.root_hpa);
+		BUG_ON(root != vcpu->arch.mmu->root_hpa);
		iterator->shadow_addr
-			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
@@ -2483,7 +2483,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
 {
-	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,
+	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
				    addr);
 }
@@ -3095,7 +3095,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
	int emulate = 0;
	gfn_t pseudo_gfn;
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		return 0;
	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
@@ -3310,7 +3310,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
	u64 spte = 0ull;
	uint retry_count = 0;
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		return false;
	if (!page_fault_can_be_fast(error_code))
@@ -3484,7 +3484,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free)
 {
	int i;
	LIST_HEAD(invalid_list);
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
@@ -3544,20 +3544,20 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
	struct kvm_mmu_page *sp;
	unsigned i;
-	if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) {
+	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
		spin_lock(&vcpu->kvm->mmu_lock);
		if(make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
			return -ENOSPC;
		}
		sp = kvm_mmu_get_page(vcpu, 0, 0,
-				vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
+				vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
-		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
-	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
+		vcpu->arch.mmu->root_hpa = __pa(sp->spt);
+	} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
-			hpa_t root = vcpu->arch.mmu.pae_root[i];
+			hpa_t root = vcpu->arch.mmu->pae_root[i];
			MMU_WARN_ON(VALID_PAGE(root));
			spin_lock(&vcpu->kvm->mmu_lock);
@@ -3570,9 +3570,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
			root = __pa(sp->spt);
			++sp->root_count;
			spin_unlock(&vcpu->kvm->mmu_lock);
-			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
		}
-		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
	} else
		BUG();
@@ -3586,7 +3586,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
	gfn_t root_gfn;
	int i;
-	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
+	root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
	if (mmu_check_root(vcpu, root_gfn))
		return 1;
@@ -3595,8 +3595,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
-	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
-		hpa_t root = vcpu->arch.mmu.root_hpa;
+	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+		hpa_t root = vcpu->arch.mmu->root_hpa;
		MMU_WARN_ON(VALID_PAGE(root));
@@ -3606,11 +3606,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
			return -ENOSPC;
		}
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
+				vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
-		vcpu->arch.mmu.root_hpa = root;
+		vcpu->arch.mmu->root_hpa = root;
		return 0;
	}
@@ -3620,17 +3620,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK;
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
+	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu->pae_root[i];
		MMU_WARN_ON(VALID_PAGE(root));
-		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
+		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
+			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
			if (!(pdptr & PT_PRESENT_MASK)) {
-				vcpu->arch.mmu.pae_root[i] = 0;
+				vcpu->arch.mmu->pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
@@ -3648,16 +3648,16 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
-		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
+		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
	}
-	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
	/*
	 * If we shadow a 32 bit page table with a long mode page
	 * table we enter this path.
	 */
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
-		if (vcpu->arch.mmu.lm_root == NULL) {
+	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+		if (vcpu->arch.mmu->lm_root == NULL) {
			/*
			 * The additional page necessary for this is only
			 * allocated on demand.
@@ -3669,12 +3669,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
			if (lm_root == NULL)
				return 1;
-			lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
-			vcpu->arch.mmu.lm_root = lm_root;
+			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
+			vcpu->arch.mmu->lm_root = lm_root;
		}
-		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
+		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
	}
	return 0;
@@ -3682,7 +3682,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.mmu.direct_map)
+	if (vcpu->arch.mmu->direct_map)
		return mmu_alloc_direct_roots(vcpu);
	else
		return mmu_alloc_shadow_roots(vcpu);
@@ -3693,17 +3693,16 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
	int i;
	struct kvm_mmu_page *sp;
-	if (vcpu->arch.mmu.direct_map)
+	if (vcpu->arch.mmu->direct_map)
		return;
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		return;
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
-	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
-		hpa_t root = vcpu->arch.mmu.root_hpa;
-
+	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+		hpa_t root = vcpu->arch.mmu->root_hpa;
		sp = page_header(root);
		/*
@@ -3734,7 +3733,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu->pae_root[i];
		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
@@ -3808,7 +3807,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
	int root, leaf;
	bool reserved = false;
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		goto exit;
	walk_shadow_page_lockless_begin(vcpu);
@@ -3825,7 +3824,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
		if (!is_shadow_present_pte(spte))
			break;
-		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
+		reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
						    iterator.level);
	}
@@ -3904,7 +3903,7 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		return;
	walk_shadow_page_lockless_begin(vcpu);
@@ -3931,7 +3930,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
	if (r)
		return r;
-	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
	return nonpaging_map(vcpu, gva & PAGE_MASK,
@@ -3944,8 +3943,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
	arch.gfn = gfn;
-	arch.direct_map = vcpu->arch.mmu.direct_map;
-	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
+	arch.direct_map = vcpu->arch.mmu->direct_map;
+	arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
@@ -4051,7 +4050,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
	int write = error_code & PFERR_WRITE_MASK;
	bool map_writable;
-	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return RET_PF_EMULATE;
@@ -4127,7 +4126,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 {
	uint i;
	struct kvm_mmu_root_info root;
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
	root.cr3 = mmu->get_cr3(vcpu);
	root.hpa = mmu->root_hpa;
@@ -4150,7 +4149,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
			    union kvm_mmu_page_role new_role,
			    bool skip_tlb_flush)
 {
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
	/*
	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
@@ -4219,7 +4218,7 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 static void inject_page_fault(struct kvm_vcpu *vcpu,
			      struct x86_exception *fault)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
 }
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
@@ -4740,7 +4739,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.mmu;
	context->base_role.word = mmu_base_role_mask.word &
				  kvm_calc_tdp_mmu_root_page_role(vcpu).word;
@@ -4812,7 +4811,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.mmu;
	if (!is_paging(vcpu))
		nonpaging_init_context(vcpu, context);
@@ -4832,7 +4831,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 static union kvm_mmu_page_role
 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
 {
-	union kvm_mmu_page_role role = vcpu->arch.mmu.base_role;
+	union kvm_mmu_page_role role = vcpu->arch.mmu->base_role;
	role.level = PT64_ROOT_4LEVEL;
	role.direct = false;
@@ -4846,7 +4845,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.mmu;
	union kvm_mmu_page_role root_page_role =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
@@ -4873,7 +4872,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.mmu;
	kvm_init_shadow_mmu(vcpu);
	context->set_cr3 = kvm_x86_ops->set_cr3;
@@ -4891,7 +4890,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
	g_context->inject_page_fault = kvm_inject_page_fault;
	/*
-	 * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
+	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
@@ -4930,10 +4929,10 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
	if (reset_roots) {
		uint i;
-		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
	}
	if (mmu_is_nested(vcpu))
@@ -4982,7 +4981,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
	kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL);
-	WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
@@ -4996,7 +4995,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
	}
	++vcpu->kvm->stat.mmu_pte_updated;
-	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
+	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
 }
 static bool need_remote_flush(u64 old, u64 new)
@@ -5176,7 +5175,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		entry = *spte;
		mmu_page_zap_pte(vcpu->kvm, sp, spte);
		if (gentry &&
-		      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
+		      !((sp->role.word ^ vcpu->arch.mmu->base_role.word)
		      & mmu_base_role_mask.word) && rmap_can_add(vcpu))
			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
		if (need_remote_flush(entry, *spte))
@@ -5194,7 +5193,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
	gpa_t gpa;
	int r;
-	if (vcpu->arch.mmu.direct_map)
+	if (vcpu->arch.mmu->direct_map)
		return 0;
	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -5230,10 +5229,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 {
	int r, emulation_type = 0;
	enum emulation_result er;
-	bool direct = vcpu->arch.mmu.direct_map;
+	bool direct = vcpu->arch.mmu->direct_map;
	/* With shadow page tables, fault_address contains a GVA or nGPA. */
-	if (vcpu->arch.mmu.direct_map) {
+	if (vcpu->arch.mmu->direct_map) {
		vcpu->arch.gpa_available = true;
		vcpu->arch.gpa_val = cr2;
	}
@@ -5246,8 +5245,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
	}
	if (r == RET_PF_INVALID) {
-		r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
-					      false);
+		r = vcpu->arch.mmu->page_fault(vcpu, cr2,
+					       lower_32_bits(error_code),
+					       false);
		WARN_ON(r == RET_PF_INVALID);
	}
@@ -5263,7 +5263,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
	 * paging in both guests. If true, we simply unprotect the page
	 * and resume the guest.
	 */
-	if (vcpu->arch.mmu.direct_map &&
+	if (vcpu->arch.mmu->direct_map &&
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
		return 1;
@@ -5311,7 +5311,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
	int i;
	/* INVLPG on a * non-canonical address is a NOP according to the SDM. */
@@ -5342,7 +5342,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 {
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool tlb_flush = false;
	uint i;
@@ -5386,8 +5386,8 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
-	free_page((unsigned long)vcpu->arch.mmu.pae_root);
-	free_page((unsigned long)vcpu->arch.mmu.lm_root);
+	free_page((unsigned long)vcpu->arch.mmu->pae_root);
+	free_page((unsigned long)vcpu->arch.mmu->lm_root);
 }
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -5407,9 +5407,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
	if (!page)
		return -ENOMEM;
-	vcpu->arch.mmu.pae_root = page_address(page);
+	vcpu->arch.mmu->pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
-		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
+		vcpu->arch.mmu->pae_root[i] = INVALID_PAGE;
	return 0;
 }
@@ -5418,20 +5418,21 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
	uint i;
-	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
-	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
-	vcpu->arch.mmu.translate_gpa = translate_gpa;
+	vcpu->arch.mmu = &vcpu->arch.root_mmu;
+	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-		vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
	return alloc_mmu_pages(vcpu);
 }
 void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
-	MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
	/*
	 * kvm_mmu_setup() is called only on vCPU initialization.
...
@@ -75,7 +75,7 @@ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
-	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
+	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;
	return kvm_mmu_load(vcpu);
@@ -97,9 +97,9 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
 static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
 {
-	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa |
+	if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
+		vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
				       kvm_get_active_pcid(vcpu));
 }
 /*
...
@@ -59,19 +59,19 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
	int i;
	struct kvm_mmu_page *sp;
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		return;
-	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
-		hpa_t root = vcpu->arch.mmu.root_hpa;
+	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+		hpa_t root = vcpu->arch.mmu->root_hpa;
		sp = page_header(root);
-		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu.root_level);
+		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
		return;
	}
	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu->pae_root[i];
		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
@@ -122,7 +122,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
-			     "ent %llxn", vcpu->arch.mmu.root_level, pfn,
+			     "ent %llxn", vcpu->arch.mmu->root_level, pfn,
			     hpa, *sptep);
 }
...
@@ -158,14 +158,15 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 u64 gpte)
 {
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+	if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;
	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;
	/* if accessed bit is not supported prefetch non accessed gpte */
-	if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK))
+	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
+	    !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;
	return false;
@@ -480,7 +481,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
 {
-	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
+	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
					access);
 }
@@ -509,7 +510,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
-	FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
+	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
@@ -604,7 +605,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
	direct_access = gw->pte_access;
-	top_level = vcpu->arch.mmu.root_level;
+	top_level = vcpu->arch.mmu->root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
@@ -616,7 +617,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		goto out_gpte_changed;
	for (shadow_walk_init(&it, vcpu, addr);
@@ -1004,7 +1005,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(gpte);
-		FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
+		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
				   &nr_present))
...
@@ -2918,18 +2918,18 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
	WARN_ON(mmu_is_nested(vcpu));
	kvm_init_shadow_mmu(vcpu);
-	vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
-	vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
-	vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
-	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
-	vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
-	reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
+	vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3;
+	vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3;
+	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
+	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
+	vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
+	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
 }
 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 }
 static int nested_svm_check_permissions(struct vcpu_svm *svm)
...
@@ -5111,9 +5111,10 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				   bool invalidate_gpa)
 {
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
-		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
			return;
-		ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
+		ept_sync_context(construct_eptp(vcpu,
+						vcpu->arch.mmu->root_hpa));
	} else {
		vpid_sync_context(vpid);
	}
@@ -9122,7 +9123,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
		}
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3)
+			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
			    == operand.pcid)
				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
@@ -11304,16 +11305,16 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
			VMX_EPT_EXECUTE_ONLY_BIT,
			nested_ept_ad_enabled(vcpu),
			nested_ept_get_cr3(vcpu));
-	vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
-	vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
-	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
+	vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
+	vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
+	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
 }
 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 }
 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
...
@@ -503,7 +503,7 @@ static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fau
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
-		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
	return fault->nested_page_fault;
 }
@@ -602,7 +602,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] &
-		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
+		     vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
@@ -4809,7 +4809,7 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
	/* NPT walks are always user-walks */
	access |= PFERR_USER_MASK;
-	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
+	t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
	return t_gpa;
 }
@@ -5895,7 +5895,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
		return false;
-	if (!vcpu->arch.mmu.direct_map) {
+	if (!vcpu->arch.mmu->direct_map) {
		/*
		 * Write permission should be allowed since only
		 * write access need to be emulated.
@@ -5928,7 +5928,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
	kvm_release_pfn_clean(pfn);
	/* The instructions are well-emulated on direct mmu. */
-	if (vcpu->arch.mmu.direct_map) {
+	if (vcpu->arch.mmu->direct_map) {
		unsigned int indirect_shadow_pages;
		spin_lock(&vcpu->kvm->mmu_lock);
@@ -5995,7 +5995,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2;
-	if (!vcpu->arch.mmu.direct_map)
+	if (!vcpu->arch.mmu->direct_map)
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -9333,7 +9333,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
	int r;
-	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
	      work->wakeup_all)
		return;
@@ -9341,11 +9341,11 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
	if (unlikely(r))
		return;
-	if (!vcpu->arch.mmu.direct_map &&
-	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+	if (!vcpu->arch.mmu->direct_map &&
+	      work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
		return;
-	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+	vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true);
 }
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
...