Commit 547ffaed authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: MMU: introduce kvm_mmu_gfn_{allow,disallow}_lpage

Abstract the common operations from account_shadowed() and
unaccount_shadowed(), then introduce kvm_mmu_gfn_disallow_lpage()
and kvm_mmu_gfn_allow_lpage().

These two functions will be used by page tracking in a later patch.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 92f94f1e
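
The counting scheme behind the new helpers can be illustrated with a minimal
user-space model (an illustration only, under assumptions: NR_LEVELS stands in
for the PT_DIRECTORY_LEVEL..PT_MAX_HUGEPAGE_LEVEL range, a single gfn is
modeled, and none of this is kernel code). It shows that
kvm_mmu_gfn_disallow_lpage()/kvm_mmu_gfn_allow_lpage() act as a balanced
per-level reference count, so large pages stay disallowed until every
independent user has released the gfn.

/*
 * Toy user-space model of the disallow_lpage counting -- not kernel code.
 * NR_LEVELS and the single implicit gfn are assumptions for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define NR_LEVELS 2			/* stands in for the 2M and 1G levels */

static int disallow_lpage[NR_LEVELS];	/* models linfo->disallow_lpage */

static void update_gfn_disallow_lpage_count(int count)
{
	int i;

	for (i = 0; i < NR_LEVELS; i++) {
		disallow_lpage[i] += count;
		assert(disallow_lpage[i] >= 0);	/* models the WARN_ON() */
	}
}

int main(void)
{
	update_gfn_disallow_lpage_count(1);	/* e.g. account_shadowed() */
	update_gfn_disallow_lpage_count(1);	/* e.g. a second, independent user */
	update_gfn_disallow_lpage_count(-1);	/* one user releases the gfn */

	/* still 1: large pages remain disallowed until every user releases */
	printf("disallow_lpage[0] = %d\n", disallow_lpage[0]);

	update_gfn_disallow_lpage_count(-1);	/* now fully released */
	return 0;
}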
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -776,21 +776,39 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
 	return &slot->arch.lpage_info[level - 2][idx];
 }
 
+static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
+					    gfn_t gfn, int count)
+{
+	struct kvm_lpage_info *linfo;
+	int i;
+
+	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->disallow_lpage += count;
+		WARN_ON(linfo->disallow_lpage < 0);
+	}
+}
+
+void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	update_gfn_disallow_lpage_count(slot, gfn, 1);
+}
+
+void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	update_gfn_disallow_lpage_count(slot, gfn, -1);
+}
+
 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
 	gfn_t gfn;
-	int i;
 
 	gfn = sp->gfn;
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		linfo = lpage_info_slot(gfn, slot, i);
-		linfo->disallow_lpage += 1;
-	}
+	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 	kvm->arch.indirect_shadow_pages++;
 }
 
@@ -798,18 +816,12 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
 	gfn_t gfn;
-	int i;
 
 	gfn = sp->gfn;
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		linfo = lpage_info_slot(gfn, slot, i);
-		linfo->disallow_lpage -= 1;
-		WARN_ON(linfo->disallow_lpage < 0);
-	}
+	kvm_mmu_gfn_allow_lpage(slot, gfn);
 	kvm->arch.indirect_shadow_pages--;
 }
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -174,4 +174,7 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
 
+void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+
 #endif
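
To show why the helpers are exported in mmu.h, a hedged sketch of how the
later page-tracking patch could call them follows. The
kvm_slot_page_track_add_page() name and body are assumptions about the
follow-up series, not part of this commit.

/*
 * Hedged sketch only: the function name and body are assumptions about
 * how a later page-tracking patch might use the exported helpers; they
 * are not defined by this commit.
 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn)
{
	/* ... record that accesses to this gfn must be intercepted ... */

	/*
	 * A tracked gfn must not be covered by a large-page mapping, so
	 * take a disallow reference on every hugepage level for it.
	 */
	kvm_mmu_gfn_disallow_lpage(slot, gfn);
}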