Commit 95cd2522 authored by Kaixu Xia, committed by Andrew Morton

mm/damon/vaddr: remove comparison between mm and last_mm when checking region accesses

The DAMON regions that belong to the same DAMON target all share the same
'struct mm_struct *mm', so it is unnecessary to compare the mm and last_mm
objects among the regions of a single target when checking accesses.  The
comparison is only needed when the target changes in
'damon_va_check_accesses()', so simplify the whole operation by passing a
bool 'same_target' that indicates whether the target has changed.

Link: https://lkml.kernel.org/r/1661590971-20893-3-git-send-email-kaixuxia@tencent.com
Signed-off-by: Kaixu Xia <kaixuxia@tencent.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 09876ae7
@@ -543,15 +543,14 @@ static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
  * r	the region to be checked
  */
 static void __damon_va_check_access(struct mm_struct *mm,
-				struct damon_region *r)
+				struct damon_region *r, bool same_target)
 {
-	static struct mm_struct *last_mm;
 	static unsigned long last_addr;
 	static unsigned long last_page_sz = PAGE_SIZE;
 	static bool last_accessed;
 
 	/* If the region is in the last checked page, reuse the result */
-	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
+	if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
 				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
 		if (last_accessed)
 			r->nr_accesses++;
@@ -562,7 +561,6 @@ static void __damon_va_check_access(struct mm_struct *mm,
 	if (last_accessed)
 		r->nr_accesses++;
 
-	last_mm = mm;
 	last_addr = r->sampling_addr;
 }
 
@@ -572,14 +570,17 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
 	struct mm_struct *mm;
 	struct damon_region *r;
 	unsigned int max_nr_accesses = 0;
+	bool same_target;
 
 	damon_for_each_target(t, ctx) {
 		mm = damon_get_mm(t);
 		if (!mm)
 			continue;
+		same_target = false;
 		damon_for_each_region(r, t) {
-			__damon_va_check_access(mm, r);
+			__damon_va_check_access(mm, r, same_target);
 			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
+			same_target = true;
 		}
 		mmput(mm);
 	}
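
To see the caching pattern in isolation, here is a minimal userspace sketch.
It is not the kernel code: 'struct region', 'check_access()' and
'page_accessed()' are illustrative stand-ins for 'struct damon_region',
'__damon_va_check_access()' and the page-table check done by
damon_va_young(), and the sketch keeps last_page_sz fixed rather than
letting the access check update it.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Illustrative stand-in for struct damon_region. */
struct region {
	unsigned long sampling_addr;
	unsigned int nr_accesses;
};

/* Stand-in for the real page-table walk; pretend every page is young. */
static bool page_accessed(unsigned long addr)
{
	(void)addr;
	return true;
}

static void check_access(struct region *r, bool same_target)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/*
	 * Reuse the previous result only while we stay on the same target
	 * (same address space) and the sampling address falls in the last
	 * checked page -- the same condition the patched kernel code uses.
	 */
	if (same_target && ALIGN_DOWN(last_addr, last_page_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = page_accessed(r->sampling_addr);
	if (last_accessed)
		r->nr_accesses++;
	last_addr = r->sampling_addr;
}

int main(void)
{
	struct region regions[] = {
		{ .sampling_addr = 0x1000 },	/* fresh check */
		{ .sampling_addr = 0x1800 },	/* same page: cached result */
		{ .sampling_addr = 0x3000 },	/* new page: fresh check */
	};
	bool same_target = false;	/* first region of a target */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		check_access(&regions[i], same_target);
		same_target = true;	/* later regions share the target's mm */
	}
	for (i = 0; i < 3; i++)
		printf("region %u: nr_accesses=%u\n", i, regions[i].nr_accesses);
	return 0;
}

Because same_target is false for the first region of each target and true
for every later one, the cached result can never be reused across address
spaces, which is exactly what the removed 'mm == last_mm' comparison
guarded against; the static last_mm pointer becomes dead weight and can go.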