Commit 99894a79 authored by Avi Kivity

KVM: MMU: Fix off-by-one calculating large page count

The large page initialization code concludes there are two large pages spanned
by a slot covering 1 (small) page starting at gfn 1.  This is incorrect, and
also results in incorrect write_count initialization in some cases (base = 1,
npages = 513 for example).

Cc: stable@kernel.org
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 09106974
@@ -920,6 +920,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	int r;
 	gfn_t base_gfn;
 	unsigned long npages;
+	int largepages;
 	unsigned long i;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
@@ -995,11 +996,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		new.userspace_addr = 0;
 	}
 	if (npages && !new.lpage_info) {
-		int largepages = npages / KVM_PAGES_PER_HPAGE;
-		if (npages % KVM_PAGES_PER_HPAGE)
-			largepages++;
-		if (base_gfn % KVM_PAGES_PER_HPAGE)
-			largepages++;
+		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
+		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
 		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment