Commit 92f94f1e authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: MMU: rename has_wrprotected_page to mmu_gfn_lpage_is_disallowed

kvm_lpage_info->write_count is used to detect whether the large page mapping
for the gfn on the specified level is allowed; rename it to disallow_lpage
to reflect its purpose. We also rename has_wrprotected_page() to
mmu_gfn_lpage_is_disallowed() to make the code clearer.

Later we will extend this mechanism for page tracking: if the gfn is
tracked, then large mappings for that gfn on any level are not allowed.
The new name is more straightforward.
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4d99ba89
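
To illustrate the mechanism the rename is about, here is a minimal user-space
sketch of the counting scheme (hypothetical names modeled on, but not taken
verbatim from, the kernel code): every gfn/level pair carries a count of
reasons why a large page is disallowed there, and a large mapping may be
instantiated only while that count is zero.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Modeled on struct kvm_lpage_info; one entry per gfn per large-page level. */
struct lpage_info {
        int disallow_lpage;     /* > 0: large mapping forbidden at this level */
};

/* Counterpart of mmu_gfn_lpage_is_disallowed(): a predicate, not a count. */
static bool lpage_is_disallowed(const struct lpage_info *linfo)
{
        return !!linfo->disallow_lpage;
}

/* Shadowing (write-protecting) a page adds one reason to disallow ... */
static void account_shadowed(struct lpage_info *linfo)
{
        linfo->disallow_lpage += 1;
}

/* ... and unshadowing removes it; the count must never go negative. */
static void unaccount_shadowed(struct lpage_info *linfo)
{
        linfo->disallow_lpage -= 1;
        assert(linfo->disallow_lpage >= 0);
}

int main(void)
{
        struct lpage_info linfo = { 0 };

        account_shadowed(&linfo);
        printf("disallowed: %d\n", lpage_is_disallowed(&linfo));       /* 1 */
        unaccount_shadowed(&linfo);
        printf("disallowed: %d\n", lpage_is_disallowed(&linfo));       /* 0 */
        return 0;
}

A count rather than a flag is needed because several independent shadow pages
can overlap the same gfn; only after every one of them is unaccounted may a
large mapping be used again.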
@@ -391,11 +391,11 @@ To instantiate a large spte, four constraints must be satisfied:
   write-protected pages
 - the guest page must be wholly contained by a single memory slot
 
-To check the last two conditions, the mmu maintains a ->write_count set of
+To check the last two conditions, the mmu maintains a ->disallow_lpage set of
 arrays for each memory slot and large page size. Every write protected page
-causes its write_count to be incremented, thus preventing instantiation of
+causes its disallow_lpage to be incremented, thus preventing instantiation of
 a large spte. The frames at the end of an unaligned memory slot have
-artificially inflated ->write_counts so they can never be instantiated.
+artificially inflated ->disallow_lpages so they can never be instantiated.
 
 Zapping all pages (page generation count)
 =========================================
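
For orientation, each memory slot keeps one such array per large-page size,
indexed by large-page frame number relative to the slot base. A hedged sketch
of that indexing (simplified stand-ins for the kernel's gfn_to_index() and
lpage_info_slot(); on x86 each level covers 9 more gfn bits):

#include <stdio.h>

/* 4K -> 2M -> 1G: each large-page level covers 9 more gfn bits on x86. */
static unsigned long hpage_gfn_shift(int level)
{
        return (level - 1) * 9;
}

/* Index of a gfn's lpage_info entry within its slot's per-level array. */
static unsigned long gfn_to_index(unsigned long gfn, unsigned long base_gfn,
                                  int level)
{
        return (gfn >> hpage_gfn_shift(level)) -
               (base_gfn >> hpage_gfn_shift(level));
}

int main(void)
{
        /* gfn 0x400 sits in the third 2M frame of a slot based at gfn 0 */
        printf("%lu\n", gfn_to_index(0x400, 0x0, 2));   /* prints 2 */
        return 0;
}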
...
@@ -644,7 +644,7 @@ struct kvm_vcpu_arch {
 };
 
 struct kvm_lpage_info {
-        int write_count;
+        int disallow_lpage;
 };
 
 struct kvm_arch_memory_slot {
...
@@ -789,7 +789,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
         slot = __gfn_to_memslot(slots, gfn);
         for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
                 linfo = lpage_info_slot(gfn, slot, i);
-                linfo->write_count += 1;
+                linfo->disallow_lpage += 1;
         }
         kvm->arch.indirect_shadow_pages++;
 }
@@ -807,31 +807,32 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
         slot = __gfn_to_memslot(slots, gfn);
         for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
                 linfo = lpage_info_slot(gfn, slot, i);
-                linfo->write_count -= 1;
-                WARN_ON(linfo->write_count < 0);
+                linfo->disallow_lpage -= 1;
+                WARN_ON(linfo->disallow_lpage < 0);
         }
         kvm->arch.indirect_shadow_pages--;
 }
 
-static int __has_wrprotected_page(gfn_t gfn, int level,
-                                  struct kvm_memory_slot *slot)
+static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
+                                          struct kvm_memory_slot *slot)
 {
         struct kvm_lpage_info *linfo;
 
         if (slot) {
                 linfo = lpage_info_slot(gfn, slot, level);
-                return linfo->write_count;
+                return !!linfo->disallow_lpage;
         }
 
-        return 1;
+        return true;
 }
 
-static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
+static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
+                                        int level)
 {
         struct kvm_memory_slot *slot;
 
         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-        return __has_wrprotected_page(gfn, level, slot);
+        return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
 }
 
 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
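
One detail of the refactoring above worth calling out: the helpers now return
bool instead of a raw count, and the !! idiom normalizes any non-zero
disallow_lpage to exactly 0 or 1. A tiny standalone illustration (plain C,
nothing kernel-specific):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        int disallow_lpage = 3;                 /* three independent reasons */
        bool disallowed = !!disallow_lpage;     /* any non-zero becomes 1 */

        printf("%d\n", disallowed);             /* prints 1, not 3 */
        return 0;
}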
@@ -897,7 +898,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
         max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
 
         for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-                if (__has_wrprotected_page(large_gfn, level, slot))
+                if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
                         break;
 
         return level - 1;
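
The selection loop above deliberately overshoots: it walks up from the
smallest large-page level, stops at the first level where the mapping is
disallowed, and returns the level just below. A runnable sketch of that logic
(the predicate is a hypothetical stand-in for __mmu_gfn_lpage_is_disallowed();
level 1 = 4K, 2 = 2M, 3 = 1G on x86):

#include <stdbool.h>
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL     1       /* 4K */
#define PT_DIRECTORY_LEVEL      2       /* 2M */

/* Hypothetical stand-in for __mmu_gfn_lpage_is_disallowed(). */
static bool lpage_disallowed_at(int level)
{
        return level >= 3;      /* pretend 1G is disallowed, 2M is fine */
}

static int pick_mapping_level(int max_level)
{
        int level;

        for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
                if (lpage_disallowed_at(level))
                        break;

        /* the loop stops one past the last allowed level: back off by one */
        return level - 1;
}

int main(void)
{
        printf("level %d\n", pick_mapping_level(3));    /* level 2: a 2M page */
        return 0;
}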
@@ -2503,7 +2504,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                  * be fixed if guest refault.
                  */
                 if (level > PT_PAGE_TABLE_LEVEL &&
-                    has_wrprotected_page(vcpu, gfn, level))
+                    mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
                         goto done;
 
                 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
@@ -2768,7 +2769,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
             level == PT_PAGE_TABLE_LEVEL &&
             PageTransCompound(pfn_to_page(pfn)) &&
-            !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
+            !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
                 unsigned long mask;
                 /*
                  * mmu_notifier_retry was successful and we hold the
...
@@ -7879,6 +7879,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
         int i;
 
         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+                struct kvm_lpage_info *linfo;
                 unsigned long ugfn;
                 int lpages;
                 int level = i + 1;
@@ -7893,15 +7894,16 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                 if (i == 0)
                         continue;
 
-                slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
-                                        sizeof(*slot->arch.lpage_info[i - 1]));
-                if (!slot->arch.lpage_info[i - 1])
+                linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
+                if (!linfo)
                         goto out_free;
+
+                slot->arch.lpage_info[i - 1] = linfo;
+
                 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-                        slot->arch.lpage_info[i - 1][0].write_count = 1;
+                        linfo[0].disallow_lpage = 1;
                 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-                        slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
+                        linfo[lpages - 1].disallow_lpage = 1;
                 ugfn = slot->userspace_addr >> PAGE_SHIFT;
                 /*
                  * If the gfn and userspace address are not aligned wrt each
@@ -7913,7 +7915,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                         unsigned long j;
 
                         for (j = 0; j < lpages; ++j)
-                                slot->arch.lpage_info[i - 1][j].write_count = 1;
+                                linfo[j].disallow_lpage = 1;
                 }
         }
...
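
The two x86.c hunks above make the documentation's "artificially inflated"
rule concrete: when a slot is created, its first and last lpage_info entries
are pre-set to 1 if the slot does not start or end on a large-page boundary.
A standalone sketch with made-up numbers (assuming 512 4K pages per 2M large
page, as on x86):

#include <stdio.h>

#define PAGES_PER_2M    512UL   /* 4K pages per 2M large page */

int main(void)
{
        unsigned long base_gfn = 0x12345;       /* slot start, not 512-aligned */
        unsigned long npages = 2048;
        int first_disallowed = 0, last_disallowed = 0;

        /* slot begins mid-way through a 2M frame: first entry stays disallowed */
        if (base_gfn & (PAGES_PER_2M - 1))
                first_disallowed = 1;

        /* slot ends mid-way through a 2M frame: last entry stays disallowed */
        if ((base_gfn + npages) & (PAGES_PER_2M - 1))
                last_disallowed = 1;

        printf("first: %d, last: %d\n", first_disallowed, last_disallowed);
        return 0;
}

Because these edge entries start at 1 and account/unaccount only add and
remove balanced increments on top, they can never drop to zero, so a large
spte is never instantiated across the slot boundary.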