Commit 75c70128 authored by Kefeng Wang, committed by Andrew Morton

mm: mempolicy: make mpol_misplaced() take a folio

In preparation for large folio NUMA balancing, make mpol_misplaced() take a
folio; no functional change intended.
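
For context on the conversion pattern, a minimal userspace sketch (not part of
this patch; struct page, struct folio, misplaced() and node_of_page() below are
simplified stand-ins for the kernel types, mpol_misplaced(), and an
unconverted helper such as should_numa_migrate_memory()): a folio embeds its
head page, so a function switched from taking a page to taking a folio can
still hand &folio->page to callees that have not been converted yet, which is
why no functional change is expected.

#include <stdio.h>

struct page  { int nid; };          /* stand-in for struct page */
struct folio { struct page page; }; /* head page embedded, as in the kernel */

/* stand-in for folio_nid(): reads the node id from the head page */
static int folio_nid(const struct folio *folio)
{
	return folio->page.nid;
}

/* unconverted helper that still takes a page */
static int node_of_page(const struct page *page)
{
	return page->nid;
}

/* converted function: takes a folio, passes &folio->page downward */
static int misplaced(struct folio *folio, int policy_nid)
{
	int curnid = folio_nid(folio);          /* was page_to_nid(page) */
	int helper = node_of_page(&folio->page); /* unconverted callee */

	/* -1 plays the role of NUMA_NO_NODE: no migration needed */
	return (curnid == policy_nid && helper == policy_nid) ? -1 : policy_nid;
}

int main(void)
{
	struct folio f = { .page = { .nid = 1 } };

	printf("%d\n", misplaced(&f, 1)); /* -1: already on the right node */
	printf("%d\n", misplaced(&f, 0)); /*  0: migrate toward node 0 */
	return 0;
}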

Link: https://lkml.kernel.org/r/20230921074417.24004-6-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent cda6d936
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -174,7 +174,7 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
 /* Check if a vma is migratable */
 extern bool vma_migratable(struct vm_area_struct *vma);
 
-extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long);
 extern void mpol_put_task_policy(struct task_struct *);
 
 static inline bool mpol_is_preferred_many(struct mempolicy *pol)
@@ -278,7 +278,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
 }
 #endif
 
-static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+static inline int mpol_misplaced(struct folio *folio,
+				 struct vm_area_struct *vma,
 				 unsigned long address)
 {
 	return -1; /* no node preference */
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4738,7 +4738,7 @@ int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
 		*flags |= TNF_FAULT_LOCAL;
 	}
 
-	return mpol_misplaced(&folio->page, vma, addr);
+	return mpol_misplaced(folio, vma, addr);
 }
 
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2564,24 +2564,25 @@ static void sp_free(struct sp_node *n)
 }
 
 /**
- * mpol_misplaced - check whether current page node is valid in policy
+ * mpol_misplaced - check whether current folio node is valid in policy
  *
- * @page: page to be checked
- * @vma: vm area where page mapped
- * @addr: virtual address where page mapped
+ * @folio: folio to be checked
+ * @vma: vm area where folio mapped
+ * @addr: virtual address in @vma for shared policy lookup and interleave policy
  *
- * Lookup current policy node id for vma,addr and "compare to" page's
+ * Lookup current policy node id for vma,addr and "compare to" folio's
  * node id. Policy determination "mimics" alloc_page_vma().
  * Called from fault path where we know the vma and faulting address.
  *
  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
- * policy, or a suitable node ID to allocate a replacement page from.
+ * policy, or a suitable node ID to allocate a replacement folio from.
  */
-int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
+int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma,
+		   unsigned long addr)
 {
 	struct mempolicy *pol;
 	struct zoneref *z;
-	int curnid = page_to_nid(page);
+	int curnid = folio_nid(folio);
 	unsigned long pgoff;
 	int thiscpu = raw_smp_processor_id();
 	int thisnid = cpu_to_node(thiscpu);
@@ -2637,11 +2638,12 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		BUG();
 	}
 
-	/* Migrate the page towards the node whose CPU is referencing it */
+	/* Migrate the folio towards the node whose CPU is referencing it */
 	if (pol->flags & MPOL_F_MORON) {
 		polnid = thisnid;
 
-		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
+		if (!should_numa_migrate_memory(current, &folio->page, curnid,
+						thiscpu))
 			goto out;
 	}
 
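
For reference, the caller-side contract is unchanged: numa_migrate_prep() (see
the mm/memory.c hunk above) returns the result straight to do_numa_page(),
whose check has roughly this shape (paraphrased, not a verbatim quote of the
kernel source):

	/*
	 * NUMA hinting fault path: target_nid selects a destination node;
	 * NUMA_NO_NODE means the folio is already placed correctly under
	 * the current policy, so just remap it in place.
	 */
	target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags);
	if (target_nid == NUMA_NO_NODE)
		goto out_map;	/* no migration needed */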