Commit b4231d61 authored by Izik Eidus, committed by Avi Kivity

KVM: MMU: Selectively set PageDirty when releasing guest memory

Improve dirty bit setting for pages that KVM releases: until now, every page we released was marked dirty; from now on, only pages that could potentially have been dirtied (e.g. those mapped writable) are marked dirty.
Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 2065b372
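
For context, a minimal sketch (not part of this commit) of the intended usage pattern: a caller that pinned a guest page releases it with the dirty variant only if the mapping could have been written through, otherwise with the clean variant, mirroring the rmap_remove() change below. The helper names come from the diff; the wrapper function itself is illustrative only.

	/* Illustrative only: release a previously pinned guest page.
	 * If the shadow pte allowed writes, the page may have been modified,
	 * so tell the kernel it is dirty; otherwise skip the needless
	 * SetPageDirty(). */
	static void example_release(u64 spte, struct page *page)
	{
		if (is_writeble_pte(spte))
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
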
@@ -393,7 +393,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			    int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
-void kvm_release_page(struct page *page);
+void kvm_release_page_clean(struct page *page);
+void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -543,13 +543,19 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_page(struct page *page)
+void kvm_release_page_clean(struct page *page)
+{
+	put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+
+void kvm_release_page_dirty(struct page *page)
 {
 	if (!PageReserved(page))
 		SetPageDirty(page);
 	put_page(page);
 }
-EXPORT_SYMBOL_GPL(kvm_release_page);
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
 static int next_segment(unsigned long len, int offset)
 {
@@ -1055,7 +1061,7 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
 	/* current->mm->mmap_sem is already held so call lockless version */
 	page = __gfn_to_page(kvm, pgoff);
 	if (is_error_page(page)) {
-		kvm_release_page(page);
+		kvm_release_page_clean(page);
 		return NOPAGE_SIGBUS;
 	}
 	if (type != NULL)
@@ -420,14 +420,18 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	struct kvm_rmap_desc *desc;
 	struct kvm_rmap_desc *prev_desc;
 	struct kvm_mmu_page *page;
+	struct page *release_page;
 	unsigned long *rmapp;
 	int i;
 
 	if (!is_rmap_pte(*spte))
 		return;
 	page = page_header(__pa(spte));
-	kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
-			 PAGE_SHIFT));
+	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+	if (is_writeble_pte(*spte))
+		kvm_release_page_dirty(release_page);
+	else
+		kvm_release_page_clean(release_page);
 	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -893,7 +897,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 {
 	int level = PT32E_ROOT_LEVEL;
 	hpa_t table_addr = vcpu->mmu.root_hpa;
+	struct page *page;
 
+	page = pfn_to_page(p >> PAGE_SHIFT);
 	for (; ; level--) {
 		u32 index = PT64_INDEX(v, level);
 		u64 *table;
@@ -908,7 +914,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 		pte = table[index];
 		was_rmapped = is_rmap_pte(pte);
 		if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
-			kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+			kvm_release_page_clean(page);
 			return 0;
 		}
 		mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
@@ -918,7 +924,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 			if (!was_rmapped)
 				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
 			else
-				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+				kvm_release_page_clean(page);
+
 			return 0;
 		}
@@ -933,7 +940,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 						     1, 3, &table[index]);
 			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
-				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+				kvm_release_page_clean(page);
 				return -ENOMEM;
 			}
@@ -1049,8 +1056,8 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
 	if (is_error_hpa(paddr)) {
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
 		return 1;
 	}
@@ -1580,7 +1587,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 			       " valid guest gva %lx\n", audit_msg, va);
 			page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
 					   >> PAGE_SHIFT);
-			kvm_release_page(page);
+			kvm_release_page_clean(page);
 		}
 	}
@@ -212,8 +212,8 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 	if (is_error_hpa(paddr)) {
 		set_shadow_pte(shadow_pte,
 			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
 		return;
 	}
@@ -259,12 +259,12 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 			page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
 					   >> PAGE_SHIFT);
-			kvm_release_page(page);
+			kvm_release_page_clean(page);
 		}
 	}
 	else
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
 	if (!ptwrite || !*ptwrite)
 		vcpu->last_pte_updated = shadow_pte;
 }
@@ -503,7 +503,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 		else
 			sp->spt[i] = shadow_notrap_nonpresent_pte;
 	kunmap_atomic(gpt, KM_USER0);
-	kvm_release_page(page);
+	kvm_release_page_clean(page);
 }
 
 #undef pt_element_t
@@ -1472,7 +1472,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
 	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
 		if (vcpu->pio.guest_pages[i]) {
-			kvm_release_page(vcpu->pio.guest_pages[i]);
+			kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
 			vcpu->pio.guest_pages[i] = NULL;
 		}
 }