Commit 83d1674a authored by Gerald Schaefer, committed by Linus Torvalds

mm: make CONFIG_MIGRATION available w/o CONFIG_NUMA

We'd like to support CONFIG_MEMORY_HOTREMOVE on s390, which depends on
CONFIG_MIGRATION.  So far, CONFIG_MIGRATION is only available with NUMA
support.

This patch makes CONFIG_MIGRATION selectable for architectures that define
ARCH_ENABLE_MEMORY_HOTREMOVE.  When MIGRATION is enabled w/o NUMA, the
kernel won't compile because migrate_vmas() does not know about
vm_ops->migrate() and vma_migratable() does not know about policy_zone.
To fix this, those two functions can be restricted to '#ifdef CONFIG_NUMA'
because they are not being used w/o NUMA.  vma_migratable() is moved over
from migrate.h to mempolicy.h.
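For context, an architecture opts into this new path from its own Kconfig by providing ARCH_ENABLE_MEMORY_HOTREMOVE.  A minimal sketch of such an arch-side entry (the actual s390 wiring is not part of this patch; the entry below is only an assumed illustration):

	# assumed arch-side example, not taken from this patch
	config ARCH_ENABLE_MEMORY_HOTREMOVE
		def_bool y

With that symbol defined, the new "depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE" rule in mm/Kconfig (see the hunk below) allows CONFIG_MIGRATION, and in turn CONFIG_MEMORY_HOTREMOVE, to be enabled on a kernel built without NUMA.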

[kosaki.motohiro@jp.fujitsu.com: build fix]
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9ca908f4
include/linux/mempolicy.h
@@ -59,6 +59,7 @@ enum {
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/nodemask.h>
+#include <linux/pagemap.h>
 
 struct mm_struct;
@@ -220,6 +221,24 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
 extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
 			int no_context);
 #endif
+
+/* Check if a vma is migratable */
+static inline int vma_migratable(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
+		return 0;
+	/*
+	 * Migration allocates pages in the highest zone. If we cannot
+	 * do so then migration (at least from node to node) is not
+	 * possible.
+	 */
+	if (vma->vm_file &&
+		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
+								< policy_zone)
+		return 0;
+	return 1;
+}
+
 #else
 
 struct mempolicy {};
include/linux/migrate.h
@@ -3,28 +3,10 @@
 
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
-#include <linux/pagemap.h>
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
 #ifdef CONFIG_MIGRATION
-/* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
-		return 0;
-	/*
-	 * Migration allocates pages in the highest zone. If we cannot
-	 * do so then migration (at least from node to node) is not
-	 * possible.
-	 */
-	if (vma->vm_file &&
-		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
-								< policy_zone)
-		return 0;
-	return 1;
-}
-
 extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
@@ -39,9 +21,6 @@ extern int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
 #else
-static inline int vma_migratable(struct vm_area_struct *vma)
-					{ return 0; }
-
 static inline int isolate_lru_page(struct page *p, struct list_head *list)
 					{ return -ENOSYS; }
 static inline int putback_lru_pages(struct list_head *l) { return 0; }
mm/Kconfig
@@ -174,7 +174,7 @@ config SPLIT_PTLOCK_CPUS
 config MIGRATION
 	bool "Page migration"
 	def_bool y
-	depends on NUMA
+	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
 	help
 	  Allows the migration of the physical location of pages of processes
 	  while the virtual addresses are not changed. This is useful for
mm/migrate.c
@@ -1071,7 +1071,6 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 	mmput(mm);
 	return err;
 }
-#endif
 
 /*
  * Call migration functions in the vma_ops that may prepare
@@ -1093,3 +1092,4 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
 	}
 	return err;
 }
+#endif