Commit a28397be authored by SeongJae Park, committed by Linus Torvalds

mm/damon: implement primitives for physical address space monitoring

This implements the monitoring primitives for the physical memory
address space.  Internally, they use the PTE Accessed bit, similar to
the virtual address space monitoring primitives.  Like idle page
tracking, only user memory pages are supported.  If the monitoring
target physical address range contains non-user memory pages, the
access checks do nothing for those pages and simply treat them as not
accessed.
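
For illustration only (this is not part of the patch), a kernel-side
user could plug these primitives into a monitoring context roughly as
below, assuming the existing damon_new_ctx() core API; the debugfs
interface is extended to do this in a later patch of this series:

	struct damon_ctx *ctx = damon_new_ctx();

	if (!ctx)
		return -ENOMEM;
	/* use the physical address space primitives added by this patch */
	damon_pa_set_primitives(ctx);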

Link: https://lkml.kernel.org/r/20211012205711.29216-6-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: Amit Shah <amit@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Brendan Higgins <brendanhiggins@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David Woodhouse <dwmw@amazon.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Leonard Foerster <foersleo@amazon.de>
Cc: Marco Elver <elver@google.com>
Cc: Markus Boehme <markubo@amazon.de>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 46c3a0ac
include/linux/damon.h
@@ -351,4 +351,14 @@ void damon_va_set_primitives(struct damon_ctx *ctx);
#endif /* CONFIG_DAMON_VADDR */
#ifdef CONFIG_DAMON_PADDR
/* Monitoring primitives for the physical memory address space */
void damon_pa_prepare_access_checks(struct damon_ctx *ctx);
unsigned int damon_pa_check_accesses(struct damon_ctx *ctx);
bool damon_pa_target_valid(void *t);
void damon_pa_set_primitives(struct damon_ctx *ctx);
#endif /* CONFIG_DAMON_PADDR */
#endif /* _DAMON_H */

mm/damon/Kconfig
@@ -32,6 +32,14 @@ config DAMON_VADDR
This builds the default data access monitoring primitives for DAMON
that work for virtual address spaces.
config DAMON_PADDR
bool "Data access monitoring primitives for the physical address space"
depends on DAMON && MMU
select PAGE_IDLE_FLAG
help
This builds the default data access monitoring primitives for DAMON
that work for the physical address space.
config DAMON_VADDR_KUNIT_TEST
bool "Test for DAMON primitives" if !KUNIT_ALL_TESTS
depends on DAMON_VADDR && KUNIT=y

mm/damon/Makefile
@@ -2,4 +2,5 @@
obj-$(CONFIG_DAMON) := core.o
obj-$(CONFIG_DAMON_VADDR) += prmtv-common.o vaddr.o
obj-$(CONFIG_DAMON_PADDR) += prmtv-common.o paddr.o
obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o

mm/damon/paddr.c (new file)
// SPDX-License-Identifier: GPL-2.0
/*
* DAMON Primitives for The Physical Address Space
*
* Author: SeongJae Park <sj@kernel.org>
*/
#define pr_fmt(fmt) "damon-pa: " fmt
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include "prmtv-common.h"
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct page_vma_mapped_walk pvmw = {
.page = page,
.vma = vma,
.address = addr,
};
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte)
damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
else
damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
}
return true;
}
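
/*
 * Mark the page of the given physical address as 'old'.  Pages that are
 * unmapped or have no reverse mapping only get PG_idle set; file-backed
 * and KSM pages need the page lock for rmap_walk(), so they are skipped
 * if the lock cannot be taken without blocking.
 */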
static void damon_pa_mkold(unsigned long paddr)
{
struct page *page = damon_get_page(PHYS_PFN(paddr));
struct rmap_walk_control rwc = {
.rmap_one = __damon_pa_mkold,
.anon_lock = page_lock_anon_vma_read,
};
bool need_lock;
if (!page)
return;
if (!page_mapped(page) || !page_rmapping(page)) {
set_page_idle(page);
goto out;
}
need_lock = !PageAnon(page) || PageKsm(page);
if (need_lock && !trylock_page(page))
goto out;
rmap_walk(page, &rwc);
if (need_lock)
unlock_page(page);
out:
put_page(page);
}
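
/*
 * Pick a random sampling address inside the region and clear the access
 * tracking state of its page, so that the next access check can detect
 * a fresh access to it.
 */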
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
struct damon_region *r)
{
r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
damon_pa_mkold(r->sampling_addr);
}
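
/*
 * Prepare the access-tracking state of every region of every target for
 * the next access check.
 */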
void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
struct damon_target *t;
struct damon_region *r;
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t)
__damon_pa_prepare_access_check(ctx, r);
}
}
struct damon_pa_access_chk_result {
unsigned long page_sz;
bool accessed;
};
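
/*
 * rmap_walk() callback for damon_pa_young(): check whether @page was
 * referenced since damon_pa_mkold(), using the PTE/PMD Accessed bit,
 * the PG_idle flag, and the mmu_notifier, and record the mapping size.
 * Returns false to stop the walk as soon as one access is found.
 */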
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct damon_pa_access_chk_result *result = arg;
struct page_vma_mapped_walk pvmw = {
.page = page,
.vma = vma,
.address = addr,
};
result->accessed = false;
result->page_sz = PAGE_SIZE;
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
result->accessed = pte_young(*pvmw.pte) ||
!page_is_idle(page) ||
mmu_notifier_test_young(vma->vm_mm, addr);
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
result->accessed = pmd_young(*pvmw.pmd) ||
!page_is_idle(page) ||
mmu_notifier_test_young(vma->vm_mm, addr);
result->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
#else
WARN_ON_ONCE(1);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
if (result->accessed) {
page_vma_mapped_walk_done(&pvmw);
break;
}
}
/* If accessed, stop walking */
return !result->accessed;
}
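
/*
 * Return whether the page of the given physical address was accessed
 * since the last damon_pa_mkold() on it, and set *page_sz to the size
 * of the mapping that was checked.
 */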
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
struct page *page = damon_get_page(PHYS_PFN(paddr));
struct damon_pa_access_chk_result result = {
.page_sz = PAGE_SIZE,
.accessed = false,
};
struct rmap_walk_control rwc = {
.arg = &result,
.rmap_one = __damon_pa_young,
.anon_lock = page_lock_anon_vma_read,
};
bool need_lock;
if (!page)
return false;
if (!page_mapped(page) || !page_rmapping(page)) {
if (page_is_idle(page))
result.accessed = false;
else
result.accessed = true;
put_page(page);
goto out;
}
need_lock = !PageAnon(page) || PageKsm(page);
if (need_lock && !trylock_page(page)) {
put_page(page);
return false;
}
rmap_walk(page, &rwc);
if (need_lock)
unlock_page(page);
put_page(page);
out:
*page_sz = result.page_sz;
return result.accessed;
}
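
/*
 * Consecutive regions can sample addresses that fall into the same
 * (possibly huge) page; in that case, the cached result of the last
 * check is reused instead of repeating the expensive rmap walk.
 */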
static void __damon_pa_check_access(struct damon_ctx *ctx,
struct damon_region *r)
{
static unsigned long last_addr;
static unsigned long last_page_sz = PAGE_SIZE;
static bool last_accessed;
/* If the region is in the last checked page, reuse the result */
if (ALIGN_DOWN(last_addr, last_page_sz) ==
ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
if (last_accessed)
r->nr_accesses++;
return;
}
last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
if (last_accessed)
r->nr_accesses++;
last_addr = r->sampling_addr;
}
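
/*
 * Check the access to every region of every target, update the access
 * counters, and return the maximum nr_accesses among the regions.
 */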
unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
struct damon_target *t;
struct damon_region *r;
unsigned int max_nr_accesses = 0;
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t) {
__damon_pa_check_access(ctx, r);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
}
}
return max_nr_accesses;
}
bool damon_pa_target_valid(void *t)
{
return true;
}
void damon_pa_set_primitives(struct damon_ctx *ctx)
{
ctx->primitive.init = NULL;
ctx->primitive.update = NULL;
ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks;
ctx->primitive.check_accesses = damon_pa_check_accesses;
ctx->primitive.reset_aggregated = NULL;
ctx->primitive.target_valid = damon_pa_target_valid;
ctx->primitive.cleanup = NULL;
ctx->primitive.apply_scheme = NULL;
}