Commit b4028260 authored by Hugh Dickins, committed by Linus Torvalds

ksm: rename kernel_pages_allocated

We're not implementing swapping of KSM pages in its first release;
but when that follows, "kernel_pages_allocated" will be a very poor
name for the sysfs file showing the number of nodes in the stable tree:
rename that to "pages_shared" throughout.

But we already have a "pages_shared", counting those page slots
sharing the shared pages: first rename that to... "pages_sharing".

What will become of "max_kernel_pages" when the pages shared can
be swapped?  I guess it will just be removed, so keep that name.
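
Not part of this patch, just to illustrate the renamed counters: when
three identical pages are merged into a single KSM page, internally
ksm_pages_shared == 1 and ksm_pages_sharing == 3; the sysfs
pages_sharing file subtracts ksm_pages_shared before reporting, so it
shows 2, the number of duplicate pages saved.  A minimal userspace
sketch reading the two files (assuming only the /sys/kernel/mm/ksm/
paths this patch creates):

	#include <stdio.h>

	static unsigned long read_counter(const char *name)
	{
		char path[128];
		unsigned long val = 0;
		FILE *f;

		snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
		f = fopen(path, "r");
		if (f) {
			if (fscanf(f, "%lu", &val) != 1)
				val = 0;
			fclose(f);
		}
		return val;
	}

	int main(void)
	{
		unsigned long shared = read_counter("pages_shared");
		unsigned long sharing = read_counter("pages_sharing");

		/* pages_sharing excludes one slot per shared page, so it
		 * approximates the number of pages KSM has saved. */
		printf("%lu shared pages, %lu duplicate slots saved\n",
		       shared, sharing);
		return 0;
	}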
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 339aa624
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -150,10 +150,10 @@ static struct kmem_cache *rmap_item_cache;
 static struct kmem_cache *mm_slot_cache;

 /* The number of nodes in the stable tree */
-static unsigned long ksm_kernel_pages_allocated;
+static unsigned long ksm_pages_shared;

 /* The number of page slots sharing those nodes */
-static unsigned long ksm_pages_shared;
+static unsigned long ksm_pages_sharing;

 /* Limit on the number of unswappable pages used */
 static unsigned long ksm_max_kernel_pages;
@@ -384,7 +384,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 			next_item->address |= NODE_FLAG;
 		} else {
 			rb_erase(&rmap_item->node, &root_stable_tree);
-			ksm_kernel_pages_allocated--;
+			ksm_pages_shared--;
 		}
 	} else {
 		struct rmap_item *prev_item = rmap_item->prev;
@@ -398,7 +398,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		}

 		rmap_item->next = NULL;
-		ksm_pages_shared--;
+		ksm_pages_sharing--;

 	} else if (rmap_item->address & NODE_FLAG) {
 		unsigned char age;
@@ -748,7 +748,7 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
 	 * is the number of kernel pages that we hold.
 	 */
 	if (ksm_max_kernel_pages &&
-			ksm_max_kernel_pages <= ksm_kernel_pages_allocated)
+			ksm_max_kernel_pages <= ksm_pages_shared)
 		return err;

 	kpage = alloc_page(GFP_HIGHUSER);
@@ -787,7 +787,7 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
 		if (err)
 			break_cow(mm1, addr1);
 		else
-			ksm_pages_shared += 2;
+			ksm_pages_sharing += 2;
 	}

 	put_page(kpage);
@@ -817,7 +817,7 @@ static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
 	up_read(&mm1->mmap_sem);

 	if (!err)
-		ksm_pages_shared++;
+		ksm_pages_sharing++;
 	return err;
 }

@@ -928,7 +928,7 @@ static struct rmap_item *stable_tree_insert(struct page *page,
 		}
 	}

-	ksm_kernel_pages_allocated++;
+	ksm_pages_shared++;

 	rmap_item->address |= NODE_FLAG | STABLE_FLAG;
 	rmap_item->next = NULL;
@@ -1044,7 +1044,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 	tree_rmap_item = stable_tree_search(page, page2, rmap_item);
 	if (tree_rmap_item) {
 		if (page == page2[0]) {			/* forked */
-			ksm_pages_shared++;
+			ksm_pages_sharing++;
 			err = 0;
 		} else
 			err = try_to_merge_with_ksm_page(rmap_item->mm,
@@ -1107,7 +1107,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 			break_cow(tree_rmap_item->mm,
 					tree_rmap_item->address);
 			break_cow(rmap_item->mm, rmap_item->address);
-			ksm_pages_shared -= 2;
+			ksm_pages_sharing -= 2;
 		}
 	}

@@ -1423,7 +1423,7 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 	/*
 	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
 	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
-	 * breaking COW to free the kernel_pages_allocated (but leaves
+	 * breaking COW to free the unswappable pages_shared (but leaves
 	 * mm_slots on the list for when ksmd may be set running again).
 	 */

@@ -1442,22 +1442,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 KSM_ATTR(run);

-static ssize_t pages_shared_show(struct kobject *kobj,
-				 struct kobj_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%lu\n",
-			ksm_pages_shared - ksm_kernel_pages_allocated);
-}
-KSM_ATTR_RO(pages_shared);
-
-static ssize_t kernel_pages_allocated_show(struct kobject *kobj,
-					   struct kobj_attribute *attr,
-					   char *buf)
-{
-	return sprintf(buf, "%lu\n", ksm_kernel_pages_allocated);
-}
-KSM_ATTR_RO(kernel_pages_allocated);
-
 static ssize_t max_kernel_pages_store(struct kobject *kobj,
 				      struct kobj_attribute *attr,
 				      const char *buf, size_t count)
@@ -1481,13 +1465,28 @@ static ssize_t max_kernel_pages_show(struct kobject *kobj,
 }
 KSM_ATTR(max_kernel_pages);

+static ssize_t pages_shared_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", ksm_pages_shared);
+}
+KSM_ATTR_RO(pages_shared);
+
+static ssize_t pages_sharing_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n",
+			ksm_pages_sharing - ksm_pages_shared);
+}
+KSM_ATTR_RO(pages_sharing);
+
 static struct attribute *ksm_attrs[] = {
 	&sleep_millisecs_attr.attr,
 	&pages_to_scan_attr.attr,
 	&run_attr.attr,
-	&pages_shared_attr.attr,
-	&kernel_pages_allocated_attr.attr,
 	&max_kernel_pages_attr.attr,
+	&pages_shared_attr.attr,
+	&pages_sharing_attr.attr,
 	NULL,
 };
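
Again not part of the patch: the counter discipline in the hunks above
can be modeled in isolation.  A toy sketch (hypothetical helper names,
not kernel code) of why the sysfs pages_sharing file subtracts
ksm_pages_shared: the internal ksm_pages_sharing count includes one
slot for every stable node, since the first merge bumps it by 2.

	#include <assert.h>
	#include <stdio.h>

	static unsigned long pages_shared;	/* stable tree nodes */
	static unsigned long pages_sharing;	/* slots mapping those nodes */

	/* Two identical pages merge into one freshly allocated KSM page. */
	static void merge_two_pages(void)
	{
		pages_shared += 1;	/* one new stable tree node */
		pages_sharing += 2;	/* both original slots share it */
	}

	/* A further page merges with an existing KSM page. */
	static void merge_with_ksm_page(void)
	{
		pages_sharing += 1;
	}

	int main(void)
	{
		merge_two_pages();	/* pages A and B */
		merge_with_ksm_page();	/* page C joins the same node */

		/* sysfs pages_sharing reports only the duplicate slots */
		assert(pages_sharing - pages_shared == 2);
		printf("shared=%lu sharing=%lu saved=%lu\n",
		       pages_shared, pages_sharing,
		       pages_sharing - pages_shared);
		return 0;
	}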