Commit 3ed1a478 authored by Paolo Bonzini

KVM: x86: pass struct kvm_mmu_page to account/unaccount_shadowed

Prepare for multiple address spaces this way, since a VCPU is not available
where unaccount_shadowed is called.  We will get to the right kvm_memslots
struct through the role field in struct kvm_mmu_page.
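As an illustrative sketch only (not part of this patch): once multiple
address spaces land, the lookup can be derived from the role alone, with
no vCPU in sight.  This assumes a later patch adds an smm bit to
kvm_mmu_page_role and a per-address-space memslots array in struct kvm,
both hypothetical at this point in the series:

	/*
	 * Hypothetical sketch -- not part of this patch.  Picks the
	 * memslots for a shadow page from its role, so callers such as
	 * unaccount_shadowed need only the struct kvm_mmu_page.
	 */
	static struct kvm_memslots *memslots_for_sp(struct kvm *kvm,
						    struct kvm_mmu_page *sp)
	{
		/* sp->role identifies the address space; no vCPU needed. */
		return kvm->memslots[sp->role.smm];
	}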
Reviewed-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Reviewed-by: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e37afc6e
@@ -804,12 +804,14 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
 	return &slot->arch.lpage_info[level - 2][idx];
 }
 
-static void account_shadowed(struct kvm *kvm, gfn_t gfn)
+static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
+	gfn_t gfn;
 	int i;
 
+	gfn = sp->gfn;
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
@@ -818,12 +820,14 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 	kvm->arch.indirect_shadow_pages++;
 }
 
-static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
+static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
+	gfn_t gfn;
 	int i;
 
+	gfn = sp->gfn;
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
@@ -2131,7 +2135,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
 			kvm_sync_pages(vcpu, gfn);
 
-		account_shadowed(vcpu->kvm, gfn);
+		account_shadowed(vcpu->kvm, sp);
 	}
 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
 	init_shadow_page_table(sp);
@@ -2312,7 +2316,7 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	kvm_mmu_unlink_parents(kvm, sp);
 
 	if (!sp->role.invalid && !sp->role.direct)
-		unaccount_shadowed(kvm, sp->gfn);
+		unaccount_shadowed(kvm, sp);
 
 	if (sp->unsync)
 		kvm_unlink_unsync_page(kvm, sp);