Commit 43b5fbbd authored by Sasha Levin, committed by Linus Torvalds

mm/huge_memory.c: use new hashtable implementation

Switch hugemem to use the new hashtable implementation.  This removes
generic hashtable code, which is unrelated to hugepages, from
mm/huge_memory.c.

This also removes the dynamic allocation of the hash table.  The upside
is that we save a pointer dereference when accessing the hashtable, but
we lose 8KB (2^10 buckets * 8 bytes per struct hlist_head on 64-bit) if
CONFIG_TRANSPARENT_HUGEPAGE is enabled but the processor doesn't
support hugepages.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a9aacbcc
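
For context, the <linux/hashtable.h> API that this patch adopts follows the pattern sketched below. The struct and function names here are hypothetical illustrations, not code from the patch, and hash_for_each_possible() is shown with the hlist_node cursor argument it took in kernels of this era (that parameter was dropped from the hlist iterators shortly after this patch):

#include <linux/hashtable.h>

/*
 * Hypothetical example structure, standing in for khugepaged's
 * struct mm_slot.  The hlist_node member is what links an object
 * into its hash bucket.
 */
struct item {
	unsigned long key;
	struct hlist_node hash;
};

/* 2^4 = 16 statically allocated buckets; no runtime init required. */
static DEFINE_HASHTABLE(example_hash, 4);

static void example_add(struct item *it, unsigned long key)
{
	it->key = key;
	/* The key is hashed internally; callers pass it raw. */
	hash_add(example_hash, &it->hash, key);
}

static struct item *example_find(unsigned long key)
{
	struct item *it;
	struct hlist_node *node;	/* cursor; later kernels drop this */

	/* Walks only the single bucket the key hashes to. */
	hash_for_each_possible(example_hash, it, node, hash, key)
		if (it->key == key)
			return it;
	return NULL;
}

static void example_remove(struct item *it)
{
	hash_del(&it->hash);
}

Because hash_add() and hash_for_each_possible() run the key through hash_min() (i.e. hash_32()/hash_long()) internally, the patch below can drop the open-coded ((unsigned long)mm / sizeof(struct mm_struct)) % MM_SLOTS_HASH_HEADS bucket computation and pass the mm pointer itself as the key.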
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/migrate.h>
+#include <linux/hashtable.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -62,12 +63,11 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
 
 static int khugepaged(void *none);
-static int mm_slots_hash_init(void);
 static int khugepaged_slab_init(void);
-static void khugepaged_slab_free(void);
 
-#define MM_SLOTS_HASH_HEADS 1024
-static struct hlist_head *mm_slots_hash __read_mostly;
+#define MM_SLOTS_HASH_BITS 10
+static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
+
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
 /**
@@ -634,12 +634,6 @@ static int __init hugepage_init(void)
 	if (err)
 		goto out;
 
-	err = mm_slots_hash_init();
-	if (err) {
-		khugepaged_slab_free();
-		goto out;
-	}
-
 	register_shrinker(&huge_zero_page_shrinker);
 
 	/*
@@ -1908,12 +1902,6 @@ static int __init khugepaged_slab_init(void)
 	return 0;
 }
 
-static void __init khugepaged_slab_free(void)
-{
-	kmem_cache_destroy(mm_slot_cache);
-	mm_slot_cache = NULL;
-}
-
 static inline struct mm_slot *alloc_mm_slot(void)
 {
 	if (!mm_slot_cache)	/* initialization failed */
@@ -1926,47 +1914,23 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 	kmem_cache_free(mm_slot_cache, mm_slot);
 }
 
-static int __init mm_slots_hash_init(void)
-{
-	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
-				GFP_KERNEL);
-	if (!mm_slots_hash)
-		return -ENOMEM;
-	return 0;
-}
-
-#if 0
-static void __init mm_slots_hash_free(void)
-{
-	kfree(mm_slots_hash);
-	mm_slots_hash = NULL;
-}
-#endif
-
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
 	struct mm_slot *mm_slot;
-	struct hlist_head *bucket;
 	struct hlist_node *node;
 
-	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-				% MM_SLOTS_HASH_HEADS];
-	hlist_for_each_entry(mm_slot, node, bucket, hash) {
+	hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm)
 		if (mm == mm_slot->mm)
 			return mm_slot;
-	}
+
 	return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
 				    struct mm_slot *mm_slot)
 {
-	struct hlist_head *bucket;
-
-	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-				% MM_SLOTS_HASH_HEADS];
 	mm_slot->mm = mm;
-	hlist_add_head(&mm_slot->hash, bucket);
+	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
@@ -2035,7 +1999,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 	spin_lock(&khugepaged_mm_lock);
 	mm_slot = get_mm_slot(mm);
 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
-		hlist_del(&mm_slot->hash);
+		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
 		free = 1;
 	}
@@ -2484,7 +2448,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
-		hlist_del(&mm_slot->hash);
+		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
 
 		/*