Commit 46c3a0ac authored by SeongJae Park, committed by Linus Torvalds

mm/damon/vaddr: separate commonly usable functions

This moves the functions of the default virtual address space monitoring
primitives that are commonly usable from other address spaces, such as the
physical address space, into a common source file and header.  Those will be
reused by the physical address space monitoring primitives, which will be
implemented by a following commit.

[sj@kernel.org: include 'highmem.h' to fix a build failure]
  Link: https://lkml.kernel.org/r/20211014110848.5204-1-sj@kernel.org

Link: https://lkml.kernel.org/r/20211012205711.29216-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: Amit Shah <amit@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Brendan Higgins <brendanhiggins@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David Woodhouse <dwmw@amazon.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Leonard Foerster <foersleo@amazon.de>
Cc: Marco Elver <elver@google.com>
Cc: Markus Boehme <markubo@amazon.de>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c2fe4987
mm/damon/Makefile:

 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DAMON) := core.o
-obj-$(CONFIG_DAMON_VADDR) += vaddr.o
+obj-$(CONFIG_DAMON_VADDR) += prmtv-common.o vaddr.o
 obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o
mm/damon/prmtv-common.c (new file):

// SPDX-License-Identifier: GPL-2.0
/*
 * Common Primitives for Data Access Monitoring
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

#include "prmtv-common.h"

/*
 * Get an online page for a pfn if it's in the LRU list.  Otherwise, returns
 * NULL.
 *
 * The body of this function is stolen from the 'page_idle_get_page()'.  We
 * steal rather than reuse it because the code is quite simple.
 */
struct page *damon_get_page(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);

	if (!page || !PageLRU(page) || !get_page_unless_zero(page))
		return NULL;

	if (unlikely(!PageLRU(page))) {
		put_page(page);
		page = NULL;
	}
	return page;
}

void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
{
	bool referenced = false;
	struct page *page = damon_get_page(pte_pfn(*pte));

	if (!page)
		return;

	if (pte_young(*pte)) {
		referenced = true;
		*pte = pte_mkold(*pte);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}

void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bool referenced = false;
	struct page *page = damon_get_page(pmd_pfn(*pmd));

	if (!page)
		return;

	if (pmd_young(*pmd)) {
		referenced = true;
		*pmd = pmd_mkold(*pmd);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				addr + ((1UL) << HPAGE_PMD_SHIFT)))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
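
For orientation, damon_ptep_mkold() is intended to be driven from a page table
walk over a monitoring target's address space.  The sketch below is purely
illustrative and not part of this commit: 'damon_mkold_pte_entry' is an
invented name, and the real caller in vaddr.c (damon_mkold_pmd_entry, visible
in the diff further below) additionally handles transparent huge pages and
takes the PTE lock at the PMD level.

#include <linux/pagewalk.h>

#include "prmtv-common.h"

/* Hypothetical pte_entry callback for walk_page_range(); illustration only. */
static int damon_mkold_pte_entry(pte_t *pte, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	/* Clear the PTE's accessed bit and transfer it to the page flags. */
	if (pte_present(*pte))
		damon_ptep_mkold(pte, walk->mm, addr);
	return 0;
}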
mm/damon/prmtv-common.h (new file):

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Common Primitives for Data Access Monitoring
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#include <linux/damon.h>
#include <linux/random.h>

/* Get a random number in [l, r) */
#define damon_rand(l, r) (l + prandom_u32_max(r - l))

struct page *damon_get_page(unsigned long pfn);

void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
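
To illustrate the reuse the changelog describes, a physical address space
primitive could call these helpers directly on page frame numbers.  The
snippet below is a hypothetical sketch, not code from this patch:
damon_pa_accessed() is an invented name, and it only shows how
damon_get_page() pairs with the page idle/young flags that damon_ptep_mkold()
and damon_pmdp_mkold() maintain.

#include <linux/page_idle.h>

#include "prmtv-common.h"

/* Hypothetical example: has the page backing @pfn been accessed? */
static bool damon_pa_accessed(unsigned long pfn)
{
	struct page *page = damon_get_page(pfn);
	bool accessed;

	if (!page)
		return false;

	accessed = page_is_young(page) || !page_is_idle(page);
	put_page(page);
	return accessed;
}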
mm/damon/vaddr.c:

@@ -8,25 +8,19 @@
 #define pr_fmt(fmt) "damon-va: " fmt
 
 #include <asm-generic/mman-common.h>
-#include <linux/damon.h>
+#include <linux/highmem.h>
 #include <linux/hugetlb.h>
-#include <linux/mm.h>
 #include <linux/mmu_notifier.h>
-#include <linux/highmem.h>
 #include <linux/page_idle.h>
 #include <linux/pagewalk.h>
-#include <linux/random.h>
-#include <linux/sched/mm.h>
-#include <linux/slab.h>
+
+#include "prmtv-common.h"
 
 #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
 #undef DAMON_MIN_REGION
 #define DAMON_MIN_REGION 1
 #endif
 
-/* Get a random number in [l, r) */
-#define damon_rand(l, r) (l + prandom_u32_max(r - l))
-
 /*
  * 't->id' should be the pointer to the relevant 'struct pid' having reference
  * count. Caller must put the returned task, unless it is NULL.
@@ -373,82 +367,6 @@ void damon_va_update(struct damon_ctx *ctx)
 	}
 }
 
-/*
- * Get an online page for a pfn if it's in the LRU list. Otherwise, returns
- * NULL.
- *
- * The body of this function is stolen from the 'page_idle_get_page()'. We
- * steal rather than reuse it because the code is quite simple.
- */
-static struct page *damon_get_page(unsigned long pfn)
-{
-	struct page *page = pfn_to_online_page(pfn);
-
-	if (!page || !PageLRU(page) || !get_page_unless_zero(page))
-		return NULL;
-
-	if (unlikely(!PageLRU(page))) {
-		put_page(page);
-		page = NULL;
-	}
-	return page;
-}
-
-static void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm,
-			     unsigned long addr)
-{
-	bool referenced = false;
-	struct page *page = damon_get_page(pte_pfn(*pte));
-
-	if (!page)
-		return;
-
-	if (pte_young(*pte)) {
-		referenced = true;
-		*pte = pte_mkold(*pte);
-	}
-
-#ifdef CONFIG_MMU_NOTIFIER
-	if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
-		referenced = true;
-#endif /* CONFIG_MMU_NOTIFIER */
-
-	if (referenced)
-		set_page_young(page);
-
-	set_page_idle(page);
-	put_page(page);
-}
-
-static void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm,
-			     unsigned long addr)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	bool referenced = false;
-	struct page *page = damon_get_page(pmd_pfn(*pmd));
-
-	if (!page)
-		return;
-
-	if (pmd_young(*pmd)) {
-		referenced = true;
-		*pmd = pmd_mkold(*pmd);
-	}
-
-#ifdef CONFIG_MMU_NOTIFIER
-	if (mmu_notifier_clear_young(mm, addr,
-			addr + ((1UL) << HPAGE_PMD_SHIFT)))
-		referenced = true;
-#endif /* CONFIG_MMU_NOTIFIER */
-
-	if (referenced)
-		set_page_young(page);
-
-	set_page_idle(page);
-	put_page(page);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-}
-
 static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
...