Commit 5877231f authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Benjamin Herrenschmidt

mm: Move change_prot_numa outside CONFIG_ARCH_USES_NUMA_PROT_NONE

change_prot_numa should work even if _PAGE_NUMA != _PAGE_PROTNONE.
On archs like ppc64 that don't use _PAGE_PROTNONE and also have
a separate page table outside linux pagetable, we just need to
make sure that when calling change_prot_numa we flush the
hardware page table entry so that the next page access results
in a NUMA fault.

We still need to make sure we use the numa faulting logic only
when CONFIG_NUMA_BALANCING is set. This implies the migrate-on-fault
(Lazy migration) via mbind will only work if CONFIG_NUMA_BALANCING
is set.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 2c49195b
...@@ -1842,7 +1842,7 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) ...@@ -1842,7 +1842,7 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
} }
#endif #endif
#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE #ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end); unsigned long start, unsigned long end);
#endif #endif
......
...@@ -613,7 +613,7 @@ static inline int queue_pages_pgd_range(struct vm_area_struct *vma, ...@@ -613,7 +613,7 @@ static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
return 0; return 0;
} }
#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE #ifdef CONFIG_NUMA_BALANCING
/* /*
* This is used to mark a range of virtual addresses to be inaccessible. * This is used to mark a range of virtual addresses to be inaccessible.
* These are later cleared by a NUMA hinting fault. Depending on these * These are later cleared by a NUMA hinting fault. Depending on these
...@@ -627,7 +627,6 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, ...@@ -627,7 +627,6 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end) unsigned long addr, unsigned long end)
{ {
int nr_updated; int nr_updated;
BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1); nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
if (nr_updated) if (nr_updated)
...@@ -641,7 +640,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma, ...@@ -641,7 +640,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
{ {
return 0; return 0;
} }
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ #endif /* CONFIG_NUMA_BALANCING */
/* /*
* Walk through page tables and collect pages to be migrated. * Walk through page tables and collect pages to be migrated.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment