Commit 2032a93d authored by Lai Jiangshan, committed by Avi Kivity

KVM: MMU: Don't allocate gfns page for direct mmu pages

When sp->role.direct is set, sp->gfns does not contain any essential
information: the leaf sptes reachable from this sp map a contiguous
(linear) range of guest physical memory, so sp->gfns[i], if it were
set, would simply equal sp->gfn + i at PT_PAGE_TABLE_LEVEL. Since the
value can be computed whenever it is needed, there is no point in
storing it.
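
For illustration, here is the same calculation as a minimal user-space
sketch (only the formula comes from this patch; PT64_LEVEL_BITS mirrors
the kernel's value of 9, and direct_gfn() is a hypothetical stand-in for
the real helper, not the patch's code):

#include <assert.h>
#include <stdint.h>

typedef uint64_t gfn_t;

#define PT64_LEVEL_BITS 9	/* 512 entries per page-table page */

/* gfn mapped by entry 'index' of a direct shadow page at 'level'
 * (level 1 == PT_PAGE_TABLE_LEVEL, the leaf level). */
static gfn_t direct_gfn(gfn_t sp_gfn, int level, int index)
{
	return sp_gfn + ((gfn_t)index << ((level - 1) * PT64_LEVEL_BITS));
}

int main(void)
{
	/* Leaf level: sp->gfns[i] would simply be sp->gfn + i. */
	assert(direct_gfn(0x1000, 1, 0) == 0x1000);
	assert(direct_gfn(0x1000, 1, 7) == 0x1007);

	/* One level up, each entry spans 512 gfns. */
	assert(direct_gfn(0x1000, 2, 1) == 0x1000 + 512);
	return 0;
}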

This means sp->gfns is not needed when sp->role.direct=1, which saves
one page of memory for every direct kvm_mmu_page.

Note:
  Accesses to sp->gfns must be wrapped by kvm_mmu_page_get_gfn()
  or kvm_mmu_page_set_gfn(). The only place sp->gfns is still
  dereferenced directly is FNAME(sync_page), which is safe because a
  direct kvm_mmu_page can never be unsync (now enforced by a BUG_ON).
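
As a rough, self-contained model of why those wrappers are needed (a
user-space sketch, not the kernel code: sp_model, model_get_gfn() and
model_set_gfn() are made-up stand-ins for kvm_mmu_page and the two
helpers, and assert() stands in for BUG_ON()):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;
#define PT64_LEVEL_BITS 9

/* Toy stand-in for the kvm_mmu_page fields the wrappers touch. */
struct sp_model {
	bool   direct;
	int    level;
	gfn_t  gfn;	/* first gfn covered by this shadow page */
	gfn_t *gfns;	/* NULL for direct pages: the array is never allocated */
};

static gfn_t model_get_gfn(struct sp_model *sp, int index)
{
	if (!sp->direct)
		return sp->gfns[index];
	return sp->gfn + ((gfn_t)index << ((sp->level - 1) * PT64_LEVEL_BITS));
}

static void model_set_gfn(struct sp_model *sp, int index, gfn_t gfn)
{
	if (sp->direct)
		assert(gfn == model_get_gfn(sp, index));	/* BUG_ON() in the patch */
	else
		sp->gfns[index] = gfn;
}

int main(void)
{
	gfn_t backing[512] = { 0 };
	struct sp_model indirect = { .direct = false, .level = 1, .gfns = backing };
	struct sp_model direct   = { .direct = true,  .level = 1, .gfn = 0x2000 };

	model_set_gfn(&indirect, 3, 0xabc);	/* stored in the backing array */
	assert(model_get_gfn(&indirect, 3) == 0xabc);

	model_set_gfn(&direct, 3, 0x2003);	/* nothing stored; value must match */
	assert(model_get_gfn(&direct, 3) == 0x2003);
	return 0;
}
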
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent c8174f7b
@@ -180,7 +180,9 @@ Shadow pages contain the following information:
     guest pages as leaves.
   gfns:
     An array of 512 guest frame numbers, one for each present pte.  Used to
-    perform a reverse map from a pte to a gfn.
+    perform a reverse map from a pte to a gfn. When role.direct is set, any
+    element of this array can be calculated from the gfn field when used, in
+    this case, the array of gfns is not allocated. See role.direct and gfn.
   slot_bitmap:
     A bitmap containing one bit per memory slot.  If the page contains a pte
     mapping a page from memory slot n, then bit n of slot_bitmap will be set
......
@@ -397,6 +397,22 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 	kmem_cache_free(rmap_desc_cache, rd);
 }
 
+static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
+{
+	if (!sp->role.direct)
+		return sp->gfns[index];
+
+	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
+}
+
+static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
+{
+	if (sp->role.direct)
+		BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
+	else
+		sp->gfns[index] = gfn;
+}
+
 /*
  * Return the pointer to the largepage write count for a given
  * gfn, handling slots that are not large page aligned.
@@ -547,7 +563,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 		return count;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
-	sp->gfns[spte - sp->spt] = gfn;
+	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 	if (!*rmapp) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
@@ -605,6 +621,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	struct kvm_rmap_desc *prev_desc;
 	struct kvm_mmu_page *sp;
 	pfn_t pfn;
+	gfn_t gfn;
 	unsigned long *rmapp;
 	int i;
@@ -616,7 +633,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writable_pte(*spte))
 		kvm_set_pfn_dirty(pfn);
-	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
+	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
@@ -900,7 +918,8 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	ASSERT(is_empty_shadow_page(sp->spt));
 	list_del(&sp->link);
 	__free_page(virt_to_page(sp->spt));
-	__free_page(virt_to_page(sp->gfns));
+	if (!sp->role.direct)
+		__free_page(virt_to_page(sp->gfns));
 	kmem_cache_free(mmu_page_header_cache, sp);
 	++kvm->arch.n_free_mmu_pages;
 }
@@ -911,13 +930,15 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 }
 
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
-					       u64 *parent_pte)
+					       u64 *parent_pte, int direct)
 {
 	struct kvm_mmu_page *sp;
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
-	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+	if (!direct)
+		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
+						  PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
@@ -1386,7 +1407,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		return sp;
 	}
 	++vcpu->kvm->stat.mmu_cache_miss;
-	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
+	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
 	if (!sp)
 		return sp;
 	sp->gfn = gfn;
@@ -3403,7 +3424,7 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 
 	if (*sptep & PT_WRITABLE_MASK) {
 		rev_sp = page_header(__pa(sptep));
-		gfn = rev_sp->gfns[sptep - rev_sp->spt];
+		gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
 
 		if (!gfn_to_memslot(kvm, gfn)) {
 			if (!printk_ratelimit())
@@ -3417,8 +3438,7 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 			return;
 		}
 
-		rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
-				    rev_sp->role.level);
+		rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
 		if (!*rmapp) {
 			if (!printk_ratelimit())
 				return;
......
@@ -582,6 +582,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 	offset = nr_present = 0;
 
+	/* direct kvm_mmu_page can not be unsync. */
+	BUG_ON(sp->role.direct);
+
 	if (PTTYPE == 32)
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;
 
......