Commit 473b0ce4 authored by Hugh Dickins, committed by Linus Torvalds

ksm: pages_unshared and pages_volatile

The pages_shared and pages_sharing counts give a good picture of how
successful KSM is at sharing; but no clue to how much wasted work it's
doing to get there.  Add pages_unshared (count of unique pages waiting
in the unstable tree, hoping to find a mate) and pages_volatile.

pages_volatile is harder to define.  It includes those pages changing
too fast to get into the unstable tree, but also whatever other edge
conditions prevent a page getting into the trees: a high value may
deserve investigation.  Don't try to calculate it from the various
conditions: it's the total of rmap_items less those accounted for.

Also show full_scans: the number of completed scans of everything
registered in the mm list.

The locking for all these counts is simply ksm_thread_mutex.
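
For reference, a minimal userspace sketch (not part of this patch): it reads the counters exported below from /sys/kernel/mm/ksm/ and prints them; the file names match the sysfs attributes added by this change, and a -1 simply means the attribute is not present on the running kernel.

#include <stdio.h>

static long ksm_read(const char *name)
{
	char path[64];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* KSM not configured, or attribute missing */
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	const char *names[] = {
		"pages_shared", "pages_sharing", "pages_unshared",
		"pages_volatile", "full_scans",
	};
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-15s %ld\n", names[i], ksm_read(names[i]));
	return 0;
}
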
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e178dfde
@@ -155,6 +155,12 @@ static unsigned long ksm_pages_shared;
 /* The number of page slots additionally sharing those nodes */
 static unsigned long ksm_pages_sharing;
 
+/* The number of nodes in the unstable tree */
+static unsigned long ksm_pages_unshared;
+
+/* The number of rmap_items in use: to calculate pages_volatile */
+static unsigned long ksm_rmap_items;
+
 /* Limit on the number of unswappable pages used */
 static unsigned long ksm_max_kernel_pages;
@@ -204,11 +210,17 @@ static void __init ksm_slab_free(void)
 
 static inline struct rmap_item *alloc_rmap_item(void)
 {
-	return kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+	struct rmap_item *rmap_item;
+
+	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+	if (rmap_item)
+		ksm_rmap_items++;
+	return rmap_item;
 }
 
 static inline void free_rmap_item(struct rmap_item *rmap_item)
 {
+	ksm_rmap_items--;
 	rmap_item->mm = NULL;	/* debug safety */
 	kmem_cache_free(rmap_item_cache, rmap_item);
 }
@@ -419,6 +431,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		BUG_ON(age > 2);
 		if (!age)
 			rb_erase(&rmap_item->node, &root_unstable_tree);
+		ksm_pages_unshared--;
 	}
 
 	rmap_item->address &= PAGE_MASK;
@@ -995,6 +1008,7 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
 
 	rb_link_node(&rmap_item->node, parent, new);
 	rb_insert_color(&rmap_item->node, &root_unstable_tree);
+	ksm_pages_unshared++;
 	return NULL;
 }
@@ -1091,6 +1105,8 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 		if (!err) {
 			rb_erase(&tree_rmap_item->node, &root_unstable_tree);
 			tree_rmap_item->address &= ~NODE_FLAG;
+			ksm_pages_unshared--;
+
 			/*
 			 * If we fail to insert the page into the stable tree,
 			 * we will have 2 virtual addresses that are pointing
@@ -1474,6 +1490,37 @@ static ssize_t pages_sharing_show(struct kobject *kobj,
 }
 KSM_ATTR_RO(pages_sharing);
 
+static ssize_t pages_unshared_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", ksm_pages_unshared);
+}
+KSM_ATTR_RO(pages_unshared);
+
+static ssize_t pages_volatile_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+	long ksm_pages_volatile;
+
+	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
+				- ksm_pages_sharing - ksm_pages_unshared;
+	/*
+	 * It was not worth any locking to calculate that statistic,
+	 * but it might therefore sometimes be negative: conceal that.
+	 */
+	if (ksm_pages_volatile < 0)
+		ksm_pages_volatile = 0;
+	return sprintf(buf, "%ld\n", ksm_pages_volatile);
+}
+KSM_ATTR_RO(pages_volatile);
+
+static ssize_t full_scans_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
+}
+KSM_ATTR_RO(full_scans);
+
 static struct attribute *ksm_attrs[] = {
 	&sleep_millisecs_attr.attr,
 	&pages_to_scan_attr.attr,
@@ -1481,6 +1528,9 @@ static struct attribute *ksm_attrs[] = {
 	&max_kernel_pages_attr.attr,
 	&pages_shared_attr.attr,
 	&pages_sharing_attr.attr,
+	&pages_unshared_attr.attr,
+	&pages_volatile_attr.attr,
+	&full_scans_attr.attr,
 	NULL,
 };