Commit 95e73fb1 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2024-02-22-15-02' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "A batch of MM (and one non-MM) hotfixes.

  Ten are cc:stable and the remainder address post-6.7 issues or aren't
  considered appropriate for backporting"

* tag 'mm-hotfixes-stable-2024-02-22-15-02' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  kasan: guard release_free_meta() shadow access with kasan_arch_is_ready()
  mm/damon/lru_sort: fix quota status loss due to online tunings
  mm/damon/reclaim: fix quota status loss due to online tunings
  MAINTAINERS: mailmap: update Shakeel's email address
  mm/damon/sysfs-schemes: handle schemes sysfs dir removal before commit_schemes_quota_goals
  mm: memcontrol: clarify swapaccount=0 deprecation warning
  mm/memblock: add MEMBLOCK_RSRV_NOINIT into flagname[] array
  mm/zswap: invalidate duplicate entry when !zswap_enabled
  lib/Kconfig.debug: TEST_IOV_ITER depends on MMU
  mm/swap: fix race when skipping swapcache
  mm/swap_state: update zswap LRU's protection range with the folio locked
  selftests/mm: uffd-unit-test check if huge page size is 0
  mm/damon/core: check apply interval in damon_do_apply_schemes()
  mm: zswap: fix missing folio cleanup in writeback race path
parents e7768e65 2597c994
--- a/.mailmap
+++ b/.mailmap
@@ -553,6 +553,7 @@ Senthilkumar N L <quic_snlakshm@quicinc.com> <snlakshm@codeaurora.org>
 Serge Hallyn <sergeh@kernel.org> <serge.hallyn@canonical.com>
 Serge Hallyn <sergeh@kernel.org> <serue@us.ibm.com>
 Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
+Shakeel Butt <shakeel.butt@linux.dev> <shakeelb@google.com>
 Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
 Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
 Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5378,7 +5378,7 @@ CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:	Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
 M:	Roman Gushchin <roman.gushchin@linux.dev>
-M:	Shakeel Butt <shakeelb@google.com>
+M:	Shakeel Butt <shakeel.butt@linux.dev>
 R:	Muchun Song <muchun.song@linux.dev>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -549,6 +549,11 @@ static inline int swap_duplicate(swp_entry_t swp)
 	return 0;
 }
 
+static inline int swapcache_prepare(swp_entry_t swp)
+{
+	return 0;
+}
+
 static inline void swap_free(swp_entry_t swp)
 {
 }
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2235,6 +2235,7 @@ config TEST_DIV64
 config TEST_IOV_ITER
 	tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
 	depends on KUNIT
+	depends on MMU
 	default KUNIT_ALL_TESTS
 	help
 	  Enable this to turn on testing of the operation of the I/O iterator
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1026,6 +1026,9 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
 	damon_for_each_scheme(s, c) {
 		struct damos_quota *quota = &s->quota;
 
+		if (c->passed_sample_intervals != s->next_apply_sis)
+			continue;
+
 		if (!s->wmarks.activated)
 			continue;
@@ -1176,10 +1179,6 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
 		if (c->passed_sample_intervals != s->next_apply_sis)
 			continue;
 
-		s->next_apply_sis +=
-			(s->apply_interval_us ? s->apply_interval_us :
-			 c->attrs.aggr_interval) / sample_interval;
-
 		if (!s->wmarks.activated)
 			continue;
@@ -1195,6 +1194,14 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
 		damon_for_each_region_safe(r, next_r, t)
 			damon_do_apply_schemes(c, t, r);
 	}
+
+	damon_for_each_scheme(s, c) {
+		if (c->passed_sample_intervals != s->next_apply_sis)
+			continue;
+		s->next_apply_sis +=
+			(s->apply_interval_us ? s->apply_interval_us :
+			 c->attrs.aggr_interval) / sample_interval;
+	}
 }
 
 /*
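The mm/damon/core.c change above makes both loops key off the same condition: a scheme runs only on the tick where passed_sample_intervals equals its next_apply_sis, and the counter is advanced once, in its own loop, after every scheme has had a chance to run. The unit conversion involved is worth spelling out, since apply intervals are specified in microseconds while scheduling happens in sample intervals. A standalone sketch with invented interval values (only the variable names mirror the kernel fields; the program itself is illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Invented values; the names shadow the DAMON fields involved. */
	unsigned long sample_interval_us = 5000;    /* c->attrs.sample_interval */
	unsigned long aggr_interval_us = 100000;    /* c->attrs.aggr_interval */
	unsigned long apply_interval_us = 0;        /* s->apply_interval_us, 0 = use aggr */
	unsigned long passed_sample_intervals = 20; /* c->passed_sample_intervals */
	unsigned long next_apply_sis = 20;          /* s->next_apply_sis */

	/* A scheme is applied only on the tick where the counters match. */
	if (passed_sample_intervals == next_apply_sis) {
		/* ... apply the scheme to each region here ... */

		/* Advance by the apply interval, measured in sample intervals. */
		next_apply_sis += (apply_interval_us ? apply_interval_us
						     : aggr_interval_us)
				  / sample_interval_us;
	}
	printf("next apply at sample interval %lu\n", next_apply_sis);
	return 0;
}

An apply_interval_us of zero falls back to the aggregation interval, which is why the ternary appears in the kernel hunk; with the values above the scheme next runs 20 sample intervals later, at tick 40.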
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -185,9 +185,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
 	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
 }
 
+static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
+		struct damos_quota *src)
+{
+	dst->total_charged_sz = src->total_charged_sz;
+	dst->total_charged_ns = src->total_charged_ns;
+	dst->charged_sz = src->charged_sz;
+	dst->charged_from = src->charged_from;
+	dst->charge_target_from = src->charge_target_from;
+	dst->charge_addr_from = src->charge_addr_from;
+}
+
 static int damon_lru_sort_apply_parameters(void)
 {
-	struct damos *scheme;
+	struct damos *scheme, *hot_scheme, *cold_scheme;
+	struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
 	unsigned int hot_thres, cold_thres;
 	int err = 0;
@@ -195,18 +207,35 @@ static int damon_lru_sort_apply_parameters(void)
 	if (err)
 		return err;
 
+	damon_for_each_scheme(scheme, ctx) {
+		if (!old_hot_scheme) {
+			old_hot_scheme = scheme;
+			continue;
+		}
+		old_cold_scheme = scheme;
+	}
+
 	hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
 		hot_thres_access_freq / 1000;
-	scheme = damon_lru_sort_new_hot_scheme(hot_thres);
-	if (!scheme)
+	hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+	if (!hot_scheme)
 		return -ENOMEM;
-	damon_set_schemes(ctx, &scheme, 1);
+	if (old_hot_scheme)
+		damon_lru_sort_copy_quota_status(&hot_scheme->quota,
+				&old_hot_scheme->quota);
 
 	cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
-	scheme = damon_lru_sort_new_cold_scheme(cold_thres);
-	if (!scheme)
+	cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+	if (!cold_scheme) {
+		damon_destroy_scheme(hot_scheme);
 		return -ENOMEM;
-	damon_add_scheme(ctx, scheme);
+	}
+	if (old_cold_scheme)
+		damon_lru_sort_copy_quota_status(&cold_scheme->quota,
+				&old_cold_scheme->quota);
+
+	damon_set_schemes(ctx, &hot_scheme, 1);
+	damon_add_scheme(ctx, cold_scheme);
 
 	return damon_set_region_biggest_system_ram_default(target,
 				&monitor_region_start,
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -150,9 +150,20 @@ static struct damos *damon_reclaim_new_scheme(void)
 			&damon_reclaim_wmarks);
 }
 
+static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
+		struct damos_quota *src)
+{
+	dst->total_charged_sz = src->total_charged_sz;
+	dst->total_charged_ns = src->total_charged_ns;
+	dst->charged_sz = src->charged_sz;
+	dst->charged_from = src->charged_from;
+	dst->charge_target_from = src->charge_target_from;
+	dst->charge_addr_from = src->charge_addr_from;
+}
+
 static int damon_reclaim_apply_parameters(void)
 {
-	struct damos *scheme;
+	struct damos *scheme, *old_scheme;
 	struct damos_filter *filter;
 	int err = 0;
@@ -164,6 +175,11 @@ static int damon_reclaim_apply_parameters(void)
 	scheme = damon_reclaim_new_scheme();
 	if (!scheme)
 		return -ENOMEM;
+	if (!list_empty(&ctx->schemes)) {
+		damon_for_each_scheme(old_scheme, ctx)
+			damon_reclaim_copy_quota_status(&scheme->quota,
+					&old_scheme->quota);
+	}
 	if (skip_anon) {
 		filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
 		if (!filter) {
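Both DAMON module fixes above follow the same pattern: applying parameters online rebuilds the damos schemes, and a freshly allocated scheme starts with zeroed quota accounting, so it forgets how much the scheme it replaces had already charged and can exceed the intended quota. A minimal model of the bug and of the copy-based fix; the struct and its fields are simplified stand-ins for damos_quota, not the kernel definitions:

#include <stdio.h>

/* Simplified stand-in for the charged-quota state in struct damos_quota. */
struct quota_state {
	unsigned long charged_sz; /* bytes already charged in this window */
	unsigned long limit_sz;   /* bytes allowed per window */
};

static struct quota_state new_scheme(void)
{
	struct quota_state q = { .charged_sz = 0, .limit_sz = 1000 };
	return q;
}

int main(void)
{
	struct quota_state cur = new_scheme();

	cur.charged_sz = 900; /* the running scheme has nearly used its quota */

	/* Online tuning rebuilds the scheme from scratch... */
	struct quota_state replacement = new_scheme();

	/* ...without the fix, the accounting resets and the quota is overshot: */
	printf("without copy: charged=%lu, room left=%lu\n",
	       replacement.charged_sz,
	       replacement.limit_sz - replacement.charged_sz);

	/* The copy_quota_status helpers carry the accounting over instead. */
	replacement.charged_sz = cur.charged_sz;
	printf("with copy:    charged=%lu, room left=%lu\n",
	       replacement.charged_sz,
	       replacement.limit_sz - replacement.charged_sz);
	return 0;
}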
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -1905,6 +1905,10 @@ void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
 	damon_for_each_scheme(scheme, ctx) {
 		struct damon_sysfs_scheme *sysfs_scheme;
 
+		/* user could have removed the scheme sysfs dir */
+		if (i >= sysfs_schemes->nr)
+			break;
+
 		sysfs_scheme = sysfs_schemes->schemes_arr[i];
 		damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
 				&scheme->quota);
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -522,6 +522,9 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
+	if (!kasan_arch_is_ready())
+		return;
+
 	/* Check if free meta is valid. */
 	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
 		return;
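The guard follows a pattern used elsewhere in generic KASAN: on architectures that bring shadow memory up late, any path that dereferences shadow must first check kasan_arch_is_ready(), because the shadow mapping may not exist yet. A schematic of the idiom as a self-contained mock (the array and helpers below only model the shape of the check; they are not the kasan internals):

#include <stdbool.h>
#include <stdio.h>

static bool arch_ready;          /* mock: has the arch mapped shadow memory? */
static unsigned char shadow[16]; /* mock stand-in for a shadow region */

/* Stand-in for kasan_mem_to_shadow(): here just an index into the mock array. */
static unsigned char *mem_to_shadow(unsigned int object)
{
	return &shadow[object % sizeof(shadow)];
}

static void release_free_meta_like(unsigned int object)
{
	/* The fix: bail out before touching shadow when the arch isn't ready;
	 * without this, the dereference below would hit an unmapped region. */
	if (!arch_ready) {
		printf("shadow not ready, skipping metadata release\n");
		return;
	}
	printf("shadow byte: %u\n", *mem_to_shadow(object));
}

int main(void)
{
	release_free_meta_like(3); /* early boot: guarded, no shadow access */
	arch_ready = true;
	release_free_meta_like(3); /* after init: the shadow access is safe */
	return 0;
}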
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2249,6 +2249,7 @@ static const char * const flagname[] = {
 	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
 	[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
 	[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
+	[ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
 };
 
 static int memblock_debug_show(struct seq_file *m, void *private)
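flagname[] is indexed by the bit position of each flag (ilog2 of a power-of-two mask), so every new MEMBLOCK_* flag needs a matching entry or the debugfs dump has a hole for it; that is the whole content of this one-line fix. A small sketch of the idiom with invented flag values:

#include <stdio.h>

/* Invented flag bits; for a power of two, ilog2() is just the bit index. */
#define FLAG_MIRROR (1u << 0)
#define FLAG_NOMAP  (1u << 1)
#define FLAG_NOINIT (1u << 2)

static const char * const flagname[] = {
	[0] = "MIRROR",
	[1] = "NOMAP",
	[2] = "RSV_NIT", /* omitting this entry would leave a NULL hole */
};

static void print_flags(unsigned int flags)
{
	for (unsigned int bit = 0; bit < 3; bit++)
		if (flags & (1u << bit))
			printf("%s ", flagname[bit] ? flagname[bit] : "UNKNOWN");
	printf("\n");
}

int main(void)
{
	print_flags(FLAG_NOMAP | FLAG_NOINIT); /* prints: NOMAP RSV_NIT */
	return 0;
}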
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7971,7 +7971,11 @@ bool mem_cgroup_swap_full(struct folio *folio)
 static int __init setup_swap_account(char *s)
 {
-	pr_warn_once("The swapaccount= commandline option is deprecated. "
-		     "Please report your usecase to linux-mm@kvack.org if you "
-		     "depend on this functionality.\n");
+	bool res;
+
+	if (!kstrtobool(s, &res) && !res)
+		pr_warn_once("The swapaccount=0 commandline option is deprecated "
+			     "in favor of configuring swap control via cgroupfs. "
+			     "Please report your usecase to linux-mm@kvack.org if you "
+			     "depend on this functionality.\n");
 	return 1;
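The rewritten handler warns only when the option actually parses as false, i.e. swapaccount=0 and its spellings, instead of on any use of swapaccount=. A userspace approximation of the boolean spellings involved, showing which inputs now trigger the warning (parse_bool below is a simplification for illustration, not the kernel's kstrtobool() implementation):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's kstrtobool(): returns 0 on success. */
static int parse_bool(const char *s, bool *res)
{
	if (!s || !s[0])
		return -1;
	switch (s[0]) {
	case 'y': case 'Y': case '1': *res = true;  return 0;
	case 'n': case 'N': case '0': *res = false; return 0;
	case 'o': case 'O': /* "on" / "off" */
		if (s[1] == 'n' || s[1] == 'N') { *res = true;  return 0; }
		if (s[1] == 'f' || s[1] == 'F') { *res = false; return 0; }
		return -1;
	default:
		return -1;
	}
}

int main(void)
{
	const char *inputs[] = { "0", "1", "off", "garbage" };

	for (int i = 0; i < 4; i++) {
		bool res;

		/* Mirrors the fixed condition: warn only on a successful
		 * parse that yields false. */
		if (!parse_bool(inputs[i], &res) && !res)
			printf("swapaccount=%s: would warn\n", inputs[i]);
		else
			printf("swapaccount=%s: silent\n", inputs[i]);
	}
	return 0;
}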
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3799,6 +3799,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	struct page *page;
 	struct swap_info_struct *si = NULL;
 	rmap_t rmap_flags = RMAP_NONE;
+	bool need_clear_cache = false;
 	bool exclusive = false;
 	swp_entry_t entry;
 	pte_t pte;
@@ -3867,6 +3868,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (!folio) {
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
+			/*
+			 * Prevent parallel swapin from proceeding with
+			 * the cache flag. Otherwise, another thread may
+			 * finish swapin first, free the entry, and swapout
+			 * reusing the same entry. It's undetectable as
+			 * pte_same() returns true due to entry reuse.
+			 */
+			if (swapcache_prepare(entry)) {
+				/* Relax a bit to prevent rapid repeated page faults */
+				schedule_timeout_uninterruptible(1);
+				goto out;
+			}
+			need_clear_cache = true;
+
 			/* skip swapcache */
 			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
 						vma, vmf->address, false);
@@ -4117,6 +4132,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
+	/* Clear the swap cache pin for direct swapin after PTL unlock */
+	if (need_clear_cache)
+		swapcache_clear(si, entry);
 	if (si)
 		put_swap_device(si);
 	return ret;
@@ -4131,6 +4149,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		folio_unlock(swapcache);
 		folio_put(swapcache);
 	}
+	if (need_clear_cache)
+		swapcache_clear(si, entry);
 	if (si)
 		put_swap_device(si);
 	return ret;
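The heart of the do_swap_page() fix is using SWAP_HAS_CACHE as a per-entry try-lock: the first faulting thread pins the swap entry before doing a direct (swapcache-skipping) read, and a parallel fault on the same entry backs off and retries rather than racing, since a lost race is undetectable once the entry is freed and reused while pte_same() still matches. A userspace model of that pattern, with a C11 atomic standing in for the cache flag (a model of the idea only, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model: one swap entry, pinned by whoever is doing a direct swapin. */
static atomic_bool entry_pinned;

/* Stand-in for swapcache_prepare(): nonzero if someone else holds the pin. */
static int swapcache_prepare_model(void)
{
	bool expected = false;

	return atomic_compare_exchange_strong(&entry_pinned, &expected, true) ? 0 : -1;
}

/* Stand-in for swapcache_clear(). */
static void swapcache_clear_model(void)
{
	atomic_store(&entry_pinned, false);
}

static void fault_on_entry(const char *who)
{
	if (swapcache_prepare_model()) {
		/* A parallel swapin is in flight: back off and let the fault
		 * retry, instead of reading the same entry a second time. */
		printf("%s: entry busy, will retry the fault\n", who);
		return;
	}
	printf("%s: doing direct swapin\n", who);
	/* ... allocate folio, read from swap, install the PTE ... */
	swapcache_clear_model(); /* drop the pin only after the PTE is in place */
}

int main(void)
{
	fault_on_entry("thread A");        /* uncontended: pins, swaps in, unpins */

	atomic_store(&entry_pinned, true); /* simulate a swapin still in flight */
	fault_on_entry("thread B");        /* contended: backs off */
	return 0;
}

This is also why swapcache_clear() gains a !CONFIG_SWAP stub in mm/swap.h and a real implementation in mm/swapfile.c below: the pin must be droppable from both the success path and the error path of the fault.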
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -41,6 +41,7 @@ void __delete_from_swap_cache(struct folio *folio,
 void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
 				  unsigned long end);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
 struct folio *swap_cache_get_folio(swp_entry_t entry,
 		struct vm_area_struct *vma, unsigned long addr);
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
@@ -97,6 +98,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
 	return 0;
 }
 
+static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+}
+
 static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
 		struct vm_area_struct *vma, unsigned long addr)
 {
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -680,9 +680,10 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	/* The page was likely read above, so no need for plugging here */
 	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 					&page_allocated, false);
-	if (unlikely(page_allocated))
+	if (unlikely(page_allocated)) {
+		zswap_folio_swapin(folio);
 		swap_read_folio(folio, false, NULL);
-	zswap_folio_swapin(folio);
+	}
 	return folio;
 }
@@ -855,9 +856,10 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	/* The folio was likely read above, so no need for plugging here */
 	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
 					&page_allocated, false);
-	if (unlikely(page_allocated))
+	if (unlikely(page_allocated)) {
+		zswap_folio_swapin(folio);
 		swap_read_folio(folio, false, NULL);
-	zswap_folio_swapin(folio);
+	}
 	return folio;
 }
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3365,6 +3365,19 @@ int swapcache_prepare(swp_entry_t entry)
 	return __swap_duplicate(entry, SWAP_HAS_CACHE);
 }
 
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+	struct swap_cluster_info *ci;
+	unsigned long offset = swp_offset(entry);
+	unsigned char usage;
+
+	ci = lock_cluster_or_swap_info(si, offset);
+	usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
+	unlock_cluster_or_swap_info(si, ci);
+	if (!usage)
+		free_swap_slot(entry);
+}
+
 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
 {
 	return swap_type_to_swap_info(swp_type(entry));
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -377,10 +377,9 @@ void zswap_folio_swapin(struct folio *folio)
 {
 	struct lruvec *lruvec;
 
-	if (folio) {
-		lruvec = folio_lruvec(folio);
-		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-	}
+	VM_WARN_ON_ONCE(!folio_test_locked(folio));
+	lruvec = folio_lruvec(folio);
+	atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 }
 
 /*********************************
@@ -1440,6 +1439,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
 		spin_unlock(&tree->lock);
 		delete_from_swap_cache(folio);
+		folio_unlock(folio);
+		folio_put(folio);
 		return -ENOMEM;
 	}
 	spin_unlock(&tree->lock);
@@ -1517,7 +1518,7 @@ bool zswap_store(struct folio *folio)
 	if (folio_test_large(folio))
 		return false;
 
-	if (!zswap_enabled || !tree)
+	if (!tree)
 		return false;
 
 	/*
@@ -1532,6 +1533,10 @@ bool zswap_store(struct folio *folio)
 		zswap_invalidate_entry(tree, dupentry);
 	}
 	spin_unlock(&tree->lock);
+
+	if (!zswap_enabled)
+		return false;
+
 	objcg = get_obj_cgroup_from_folio(folio);
 	if (objcg && !obj_cgroup_may_zswap(objcg)) {
 		memcg = get_mem_cgroup_from_objcg(objcg);
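The zswap_store() reordering is about runtime disabling: even with zswap off, a store must still invalidate any duplicate entry left over from when it was on, otherwise the page's new contents go to the swap device while a later swapin could still find, and trust, the stale zswap entry. A toy model of that stale-read hazard (all names invented):

#include <stdbool.h>
#include <stdio.h>

static bool zswap_on = true;
static int cached_value = -1; /* -1: no zswap entry for this page */

/* Toy store: returns true if zswap kept the page. */
static bool store(int value, bool invalidate_first)
{
	if (invalidate_first)
		cached_value = -1; /* fixed order: drop duplicates first */
	if (!zswap_on)
		return false;      /* page goes to the backing swap device */
	cached_value = value;
	return true;
}

int main(void)
{
	store(1, true);    /* page swapped out while zswap was enabled */
	zswap_on = false;

	/* The same page is swapped out again with new contents. */
	if (!store(2, false)) /* buggy order: bail before invalidating */
		printf("buggy: entry still holds %d, device holds 2 -> stale\n",
		       cached_value);

	if (!store(2, true))  /* fixed order: invalidate, then bail */
		printf("fixed: entry=%d (gone), swapin reads the device\n",
		       cached_value);
	return 0;
}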
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -1517,6 +1517,12 @@ int main(int argc, char *argv[])
 				continue;
 
 			uffd_test_start("%s on %s", test->name, mem_type->name);
+			if ((mem_type->mem_flag == MEM_HUGETLB ||
+			    mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
+			    (default_huge_page_size() == 0)) {
+				uffd_test_skip("huge page size is 0, feature missing?");
+				continue;
+			}
 			if (!uffd_feature_supported(test)) {
 				uffd_test_skip("feature missing");
 				continue;
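default_huge_page_size() in the mm selftests derives the size from the Hugepagesize: line of /proc/meminfo and reports 0 when the kernel exposes none, which is exactly the condition the new skip tests for. A standalone sketch of that probe; the parsing below approximates the selftest helper rather than copying it:

#include <stdio.h>

/* Returns the default huge page size in bytes, or 0 if none is reported. */
static unsigned long probe_huge_page_size(void)
{
	unsigned long kb = 0;
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1)
			break;
	fclose(f);
	return kb * 1024;
}

int main(void)
{
	unsigned long sz = probe_huge_page_size();

	if (sz == 0) /* the condition the new skip checks for */
		printf("huge page size is 0, feature missing?\n");
	else
		printf("default huge page size: %lu bytes\n", sz);
	return 0;
}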