Commit 2f5f9486 authored by Andi Kleen, committed by Linus Torvalds

mm: change alloc_pages_vma to pass down the policy node for local policy

Currently alloc_pages_vma() always uses the local node as policy node for
the LOCAL policy.  Pass this node down as an argument instead.

No behaviour change from this patch, but it will be needed for follow-on patches.

Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b8bc1dd3
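
To make the signature change concrete, here is a minimal call-site sketch (illustrative only, not part of the commit; the GFP flags and order value are placeholders, and `vma`/`addr` are assumed to come from the caller's context):

	struct page *page;

	/* Before: the policy node for the local policy was implicitly
	 * the node of the CPU doing the allocation (numa_node_id()). */
	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr);

	/* After: the caller passes the policy node explicitly.  This
	 * commit passes numa_node_id() at every call site, so behaviour
	 * is unchanged, but the node is now a parameter. */
	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id());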
@@ -332,16 +332,17 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 	return alloc_pages_current(gfp_mask, order);
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
-			struct vm_area_struct *vma, unsigned long addr);
+			struct vm_area_struct *vma, unsigned long addr,
+			int node);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr)	\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
 	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)	\
-	alloc_pages_vma(gfp_mask, 0, vma, addr)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
...
@@ -653,7 +653,7 @@ static inline struct page *alloc_hugepage_vma(int defrag,
 					      unsigned long haddr)
 {
 	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
-			       HPAGE_PMD_ORDER, vma, haddr);
+			       HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
 }
 
 #ifndef CONFIG_NUMA
...
@@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 }
 
 /* Return a zonelist indicated by gfp for node representing a mempolicy */
-static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
+static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
+	int nd)
 {
-	int nd = numa_node_id();
-
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
 		if (!(policy->flags & MPOL_F_LOCAL))
@@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
 				huge_page_shift(hstate_vma(vma))), gfp_flags);
 	} else {
-		zl = policy_zonelist(gfp_flags, *mpol);
+		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
 		if ((*mpol)->mode == MPOL_BIND)
 			*nodemask = &(*mpol)->v.nodes;
 	}
@@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  */
 struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-		unsigned long addr)
+		unsigned long addr, int node)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
@@ -1836,7 +1835,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		put_mems_allowed();
 		return page;
 	}
-	zl = policy_zonelist(gfp, pol);
+	zl = policy_zonelist(gfp, pol, node);
 	if (unlikely(mpol_needs_cond_ref(pol))) {
 		/*
 		 * slow path: ref counted shared policy
...
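
As a hint at why the new parameter matters, a hypothetical follow-on use (not part of this commit): once the node is explicit, a caller can place a new page on the node of an existing page rather than on the local node. page_to_nid() is the existing helper that returns a page's NUMA node; `old_page` here is assumed from the caller's context:

	/* Hypothetical follow-on usage, NOT in this commit: allocate the
	 * new page on the node of old_page instead of the node the
	 * current CPU happens to be running on. */
	new_page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
				   page_to_nid(old_page));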