Commit 39de71ec authored by Dave Hansen, committed by Avi Kivity

KVM: rename x86 kvm->arch.n_alloc_mmu_pages

arch.n_alloc_mmu_pages is a poor choice of name. This value truly
means, "the number of pages which _may_ be allocated".  But,
reading the name, "n_alloc_mmu_pages" implies "the number of allocated
mmu pages", which is dead wrong.

It's really the high watermark, so let's give it a name to match:
n_max_mmu_pages.  This change will make the next few patches
much more obvious and easy to read.
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent e0df7b9f
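
To make the semantics concrete, here is a minimal sketch (illustrative only, not kernel code; the struct and function names are invented, though the field names come from the diff below). The renamed field is a ceiling on allocation; the number of pages actually in use is never stored directly, only derived:

/* Illustrative sketch only -- not kernel code.  Field names match the
 * diff below; the struct and function names here are hypothetical. */
struct mmu_counters {
	unsigned int n_free_mmu_pages;	/* pages still available */
	unsigned int n_max_mmu_pages;	/* high watermark: may be allocated */
};

/* "Pages allocated" only ever appears as a difference, which is why
 * calling the watermark "n_alloc_mmu_pages" was misleading. */
static unsigned int used_mmu_pages(const struct mmu_counters *c)
{
	return c->n_max_mmu_pages - c->n_free_mmu_pages;
}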
arch/x86/include/asm/kvm_host.h
@@ -369,7 +369,7 @@ struct kvm_vcpu_arch {
 struct kvm_arch {
 	unsigned int n_free_mmu_pages;
 	unsigned int n_requested_mmu_pages;
-	unsigned int n_alloc_mmu_pages;
+	unsigned int n_max_mmu_pages;
 	atomic_t invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
arch/x86/kvm/mmu.c
@@ -1696,7 +1696,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	int used_pages;
 	LIST_HEAD(invalid_list);
 
-	used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
+	used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
 	used_pages = max(0, used_pages);
 
 	/*
@@ -1721,9 +1721,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	}
 	else
 		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-					 - kvm->arch.n_alloc_mmu_pages;
+					 - kvm->arch.n_max_mmu_pages;
 
-	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
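
In other words, resizing the watermark has two cases, roughly paraphrased below. This is a sketch, not the kernel source: zap_one_page() is a hypothetical stand-in for the kvm_mmu_prepare_zap_page()/kvm_mmu_commit_zap_page() sequence, and locking is omitted.

/* Rough paraphrase of kvm_mmu_change_mmu_pages() above. */
static void zap_one_page(void) { /* free one shadow page */ }

static void change_mmu_pages_sketch(unsigned int *n_free,
				    unsigned int *n_max,
				    unsigned int new_max)
{
	unsigned int used = *n_max - *n_free;

	if (used > new_max) {
		/* shrinking below the working set: zap until we fit */
		while (used > new_max) {
			zap_one_page();
			used--;
		}
		*n_free = 0;	/* everything still resident is in use */
	} else {
		/* adjust the free count by the delta; unsigned wraparound
		 * handles a shrink that still leaves headroom */
		*n_free += new_max - *n_max;
	}
	*n_max = new_max;
}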
@@ -3141,7 +3141,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
-		npages = kvm->arch.n_alloc_mmu_pages -
+		npages = kvm->arch.n_max_mmu_pages -
 			 kvm_mmu_available_pages(kvm);
 		cache_count += npages;
 		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
arch/x86/kvm/x86.c
@@ -2759,7 +2759,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 
 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
-	return kvm->arch.n_alloc_mmu_pages;
+	return kvm->arch.n_max_mmu_pages;
 }
 
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
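
For reference, kvm_vm_ioctl_get_nr_mmu_pages() is the handler behind the KVM_GET_NR_MMU_PAGES vm ioctl, so after this patch that ioctl reports n_max_mmu_pages. A minimal userspace sketch, assuming the KVM_GET_NR_MMU_PAGES definition from <linux/kvm.h> and with all error handling elided:

/* Minimal sketch: query the shadow-MMU page ceiling for a VM.
 * Assumes KVM_GET_NR_MMU_PAGES from <linux/kvm.h>; no error handling. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	/* the handler returns kvm->arch.n_max_mmu_pages as the ioctl result */
	int nr = ioctl(vm_fd, KVM_GET_NR_MMU_PAGES, 0);
	printf("mmu page watermark: %d\n", nr);
	return 0;
}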