Commit 2d823764 authored by Kinsey Ho, committed by Andrew Morton

mm/mglru: improve reset_mm_stats()

struct lruvec* is already a field of struct lru_gen_mm_walk.  Remove the
struct lruvec* parameter from functions that already have access to
struct lru_gen_mm_walk*.
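
As an illustration of the refactor (a standalone user-space sketch, not kernel
code; the struct and function names below are invented for the example),
dropping the redundant parameter simply means the callee reads the lruvec from
the walk state it already receives:

#include <stdio.h>

/* Stand-ins for struct lruvec and struct lru_gen_mm_walk; names are invented. */
struct lruvec_like { int id; };

struct walk_like {
        struct lruvec_like *lruvec;   /* the walk already records which lruvec it serves */
        unsigned long max_seq;
};

/* Before: the caller passes the lruvec even though walk already carries it. */
static void reset_stats_old(struct lruvec_like *lruvec, struct walk_like *walk)
{
        printf("old: lruvec %d, seq %lu\n", lruvec->id, walk->max_seq);
}

/* After: the callee derives the lruvec from the walk, shrinking the parameter list. */
static void reset_stats_new(struct walk_like *walk)
{
        struct lruvec_like *lruvec = walk->lruvec;

        printf("new: lruvec %d, seq %lu\n", lruvec->id, walk->max_seq);
}

int main(void)
{
        struct lruvec_like lruvec = { .id = 1 };
        struct walk_like walk = { .lruvec = &lruvec, .max_seq = 4 };

        reset_stats_old(&lruvec, &walk);   /* old calling convention */
        reset_stats_new(&walk);            /* new calling convention */
        return 0;
}

The one call site that reaches reset_batch_size() outside the mm walk path,
evict_folios(), now stores the lruvec into walk->lruvec first, as the last
hunk of the diff below shows.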

Also, the histogram stats do not need to be reset when
!should_walk_mmu(), since no mm walk stats are collected in that case.
Remove the call to reset_mm_stats() in iterate_mm_list_nowalk().
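
The dropped call was the only one that passed a NULL walk, which is why the
diff below also removes the "if (walk)" guard inside reset_mm_stats() and
derives the histogram index from walk->max_seq in both branches.  A rough
standalone sketch of the resulting shape (placeholder types and sizes;
hist_from_seq() stands in for lru_hist_from_seq()):

#include <stdio.h>
#include <string.h>

#define NR_HIST_GENS 4   /* placeholder; the real value depends on the kernel config */
#define NR_MM_STATS  6   /* placeholder number of per-walk counters */

/* Simplified stand-ins for struct lru_gen_mm_state and struct lru_gen_mm_walk. */
struct mm_state_like { unsigned long stats[NR_HIST_GENS][NR_MM_STATS]; };
struct walk_like     { unsigned long max_seq; unsigned long mm_stats[NR_MM_STATS]; };

static unsigned long hist_from_seq(unsigned long seq)
{
        return seq % NR_HIST_GENS;   /* same indexing idea as lru_hist_from_seq() */
}

/* After the patch: walk is never NULL, so there is no guard, and both the flush
 * of the current slot and the clearing of the next slot key off walk->max_seq. */
static void reset_stats(struct mm_state_like *ms, struct walk_like *walk, int last)
{
        unsigned long hist = hist_from_seq(walk->max_seq);

        for (int i = 0; i < NR_MM_STATS; i++) {
                ms->stats[hist][i] += walk->mm_stats[i];   /* flush walk counters */
                walk->mm_stats[i] = 0;
        }

        if (NR_HIST_GENS > 1 && last)   /* start the next slot clean */
                memset(ms->stats[hist_from_seq(walk->max_seq + 1)], 0,
                       sizeof(ms->stats[0]));
}

int main(void)
{
        struct mm_state_like ms = {0};
        struct walk_like walk = { .max_seq = 7, .mm_stats = { 1, 2, 3, 4, 5, 6 } };

        reset_stats(&ms, &walk, 1);
        printf("slot %lu, stat 0 = %lu\n", hist_from_seq(7), ms.stats[hist_from_seq(7)][0]);
        return 0;
}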

Link: https://lkml.kernel.org/r/20240214060538.3524462-4-kinseyho@google.com
Signed-off-by: Kinsey Ho <kinseyho@google.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Donet Tom <donettom@linux.vnet.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 51973cc9
@@ -2879,38 +2879,37 @@ static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
 #endif

-static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
+static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
 {
         int i;
         int hist;
+        struct lruvec *lruvec = walk->lruvec;
         struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);

         lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);

-        if (walk) {
-                hist = lru_hist_from_seq(walk->max_seq);
+        hist = lru_hist_from_seq(walk->max_seq);

-                for (i = 0; i < NR_MM_STATS; i++) {
-                        WRITE_ONCE(mm_state->stats[hist][i],
-                                   mm_state->stats[hist][i] + walk->mm_stats[i]);
-                        walk->mm_stats[i] = 0;
-                }
+        for (i = 0; i < NR_MM_STATS; i++) {
+                WRITE_ONCE(mm_state->stats[hist][i],
+                           mm_state->stats[hist][i] + walk->mm_stats[i]);
+                walk->mm_stats[i] = 0;
         }

         if (NR_HIST_GENS > 1 && last) {
-                hist = lru_hist_from_seq(mm_state->seq + 1);
+                hist = lru_hist_from_seq(walk->max_seq + 1);

                 for (i = 0; i < NR_MM_STATS; i++)
                         WRITE_ONCE(mm_state->stats[hist][i], 0);
         }
 }

-static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
-                            struct mm_struct **iter)
+static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter)
 {
         bool first = false;
         bool last = false;
         struct mm_struct *mm = NULL;
+        struct lruvec *lruvec = walk->lruvec;
         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
         struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
         struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
@@ -2954,7 +2953,7 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
         } while (!(mm = get_next_mm(walk)));
 done:
         if (*iter || last)
-                reset_mm_stats(lruvec, walk, last);
+                reset_mm_stats(walk, last);

         spin_unlock(&mm_list->lock);

@@ -2984,7 +2983,6 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
                 mm_state->head = NULL;
                 mm_state->tail = NULL;
                 WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
-                reset_mm_stats(lruvec, NULL, true);
                 success = true;
         }

@@ -3159,9 +3157,10 @@ static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
         walk->nr_pages[new_gen][type][zone] += delta;
 }

-static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
+static void reset_batch_size(struct lru_gen_mm_walk *walk)
 {
         int gen, type, zone;
+        struct lruvec *lruvec = walk->lruvec;
         struct lru_gen_folio *lrugen = &lruvec->lrugen;

         walk->batched = 0;
@@ -3591,7 +3590,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
         return -EAGAIN;
 }

-static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
+static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
 {
         static const struct mm_walk_ops mm_walk_ops = {
                 .test_walk = should_skip_vma,
@@ -3600,6 +3599,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
         };

         int err;
+        struct lruvec *lruvec = walk->lruvec;
         struct mem_cgroup *memcg = lruvec_memcg(lruvec);

         walk->next_addr = FIRST_USER_ADDRESS;
@@ -3628,7 +3628,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)

                 if (walk->batched) {
                         spin_lock_irq(&lruvec->lru_lock);
-                        reset_batch_size(lruvec, walk);
+                        reset_batch_size(walk);
                         spin_unlock_irq(&lruvec->lru_lock);
                 }

@@ -3856,9 +3856,9 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
         walk->force_scan = force_scan;

         do {
-                success = iterate_mm_list(lruvec, walk, &mm);
+                success = iterate_mm_list(walk, &mm);
                 if (mm)
-                        walk_mm(lruvec, mm, walk);
+                        walk_mm(mm, walk);
         } while (mm);
 done:
         if (success) {
@@ -4558,8 +4558,10 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
         move_folios_to_lru(lruvec, &list);

         walk = current->reclaim_state->mm_walk;
-        if (walk && walk->batched)
-                reset_batch_size(lruvec, walk);
+        if (walk && walk->batched) {
+                walk->lruvec = lruvec;
+                reset_batch_size(walk);
+        }

         item = PGSTEAL_KSWAPD + reclaimer_offset();
         if (!cgroup_reclaim(sc))