Commit 2a7266a8 authored by Yu Zhang, committed by Paolo Bonzini

KVM: MMU: Rename PT64_ROOT_LEVEL to PT64_ROOT_4LEVEL.

Now that 64-bit long mode can use either a 4-level or a 5-level page table,
rename PT64_ROOT_LEVEL to PT64_ROOT_4LEVEL so that PT64_ROOT_5LEVEL can later
be used for the 5-level case. This makes the code clearer.

Also introduce PT64_ROOT_MAX_LEVEL, defined as 4 for now, so that it can
simply be redefined to 5 when 5-level paging support is added.

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d1cd3ce9
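
To illustrate the relationship the commit message describes, here is a minimal
stand-alone C sketch (not kernel code): PT64_ROOT_4LEVEL keeps the old value of 4,
PT64_ROOT_MAX_LEVEL sizes the per-level arrays, and PT64_ROOT_5LEVEL appears only
as the name the commit message says will be added later; the struct and variable
names below are purely illustrative.

    /* Stand-alone sketch, not kernel code: shows how the renamed constants
     * are intended to relate.  PT64_ROOT_5LEVEL is hypothetical here; this
     * commit only renames PT64_ROOT_LEVEL and adds PT64_ROOT_MAX_LEVEL. */
    #include <stdio.h>

    #define PT64_ROOT_4LEVEL    4   /* was PT64_ROOT_LEVEL */
    #define PT64_ROOT_5LEVEL    5   /* future constant named in the commit message */
    #define PT64_ROOT_MAX_LEVEL 4   /* to be redefined to 5 when 5-level paging lands */

    /* Per-level buffers are sized by the maximum, so only the MAX constant
     * has to change when 5-level paging support is added. */
    struct walk_example {
            unsigned long sptes[PT64_ROOT_MAX_LEVEL];
    };

    int main(void)
    {
            struct walk_example w = { { 0 } };
            int root_level = PT64_ROOT_4LEVEL;  /* per-MMU value: 4 today, 4 or 5 later */

            printf("buffer covers %zu levels, root level %d\n",
                   sizeof(w.sptes) / sizeof(w.sptes[0]), root_level);
            return 0;
    }
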
@@ -315,8 +315,10 @@ struct kvm_pio_request {
 	int size;
 };

+#define PT64_ROOT_MAX_LEVEL 4
+
 struct rsvd_bits_validate {
-	u64 rsvd_bits_mask[2][4];
+	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
 	u64 bad_mt_xwr;
 };

@@ -2167,8 +2167,8 @@ static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
 }

 struct mmu_page_path {
-	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
-	unsigned int idx[PT64_ROOT_LEVEL];
+	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
+	unsigned int idx[PT64_ROOT_MAX_LEVEL];
 };

 #define for_each_sp(pvec, sp, parents, i)		\

@@ -2383,8 +2383,8 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
 	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
 	iterator->level = vcpu->arch.mmu.shadow_root_level;

-	if (iterator->level == PT64_ROOT_LEVEL &&
-	    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
+	if (iterator->level == PT64_ROOT_4LEVEL &&
+	    vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
 	    !vcpu->arch.mmu.direct_map)
 		--iterator->level;

@@ -3322,8 +3322,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;

-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
-	    (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL &&
+	    (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL ||
 	     vcpu->arch.mmu.direct_map)) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;

@@ -3375,13 +3375,13 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	struct kvm_mmu_page *sp;
 	unsigned i;

-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
 		spin_lock(&vcpu->kvm->mmu_lock);
 		if(make_mmu_pages_available(vcpu) < 0) {
 			spin_unlock(&vcpu->kvm->mmu_lock);
 			return 1;
 		}
-		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
+		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_4LEVEL, 1, ACC_ALL);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu.root_hpa = __pa(sp->spt);

@@ -3425,7 +3425,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * Do we shadow a long mode page table? If so we need to
 	 * write-protect the guests page table root.
 	 */
-	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;

 		MMU_WARN_ON(VALID_PAGE(root));

@@ -3435,7 +3435,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			spin_unlock(&vcpu->kvm->mmu_lock);
 			return 1;
 		}
-		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
+		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_4LEVEL,
 				      0, ACC_ALL);
 		root = __pa(sp->spt);
 		++sp->root_count;

@@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
 	pm_mask = PT_PRESENT_MASK;
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

 	for (i = 0; i < 4; ++i) {

@@ -3486,7 +3486,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * If we shadow a 32 bit page table with a long mode page
 	 * table we enter this path.
 	 */
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
 		if (vcpu->arch.mmu.lm_root == NULL) {
 			/*
 			 * The additional page necessary for this is only

@@ -3531,7 +3531,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
-	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 		sp = page_header(root);
 		mmu_sync_children(vcpu, sp);

@@ -3614,7 +3614,7 @@ static bool
 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
+	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
 	int root, leaf;
 	bool reserved = false;

@@ -4057,7 +4057,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 		rsvd_check->rsvd_bits_mask[1][0] =
 			rsvd_check->rsvd_bits_mask[0][0];
 		break;
-	case PT64_ROOT_LEVEL:
+	case PT64_ROOT_4LEVEL:
 		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
 			rsvd_bits(maxphyaddr, 51);

@@ -4367,7 +4367,7 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
 static void paging64_init_context(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu *context)
 {
-	paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
+	paging64_init_context_common(vcpu, context, PT64_ROOT_4LEVEL);
 }

 static void paging32_init_context(struct kvm_vcpu *vcpu,

@@ -4422,7 +4422,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 		context->root_level = 0;
 	} else if (is_long_mode(vcpu)) {
 		context->nx = is_nx(vcpu);
-		context->root_level = PT64_ROOT_LEVEL;
+		context->root_level = PT64_ROOT_4LEVEL;
 		reset_rsvds_bits_mask(vcpu, context);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 	} else if (is_pae(vcpu)) {

@@ -4533,7 +4533,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
 	} else if (is_long_mode(vcpu)) {
 		g_context->nx = is_nx(vcpu);
-		g_context->root_level = PT64_ROOT_LEVEL;
+		g_context->root_level = PT64_ROOT_4LEVEL;
 		reset_rsvds_bits_mask(vcpu, g_context);
 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
 	} else if (is_pae(vcpu)) {

@@ -37,7 +37,7 @@
 #define PT32_DIR_PSE36_MASK \
 	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

-#define PT64_ROOT_LEVEL 4
+#define PT64_ROOT_4LEVEL 4
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3

@@ -62,11 +62,11 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;

-	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;

 		sp = page_header(root);
-		__mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
+		__mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_4LEVEL);
 		return;
 	}

@@ -584,7 +584,7 @@ static inline void invlpga(unsigned long addr, u32 asid)
 static int get_npt_level(void)
 {
 #ifdef CONFIG_X86_64
-	return PT64_ROOT_LEVEL;
+	return PT64_ROOT_4LEVEL;
 #else
 	return PT32E_ROOT_LEVEL;
 #endif