Commit 4c6355b2 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: charge swapin pages on instantiation

Right now, users that are otherwise memory-controlled can easily escape
their containment and allocate significant amounts of memory that they're
not being charged for.  That's because swap readahead pages are not being
charged until somebody actually faults them into their page table.  This
can be exploited with MADV_WILLNEED, which triggers arbitrary readahead
allocations without charging the pages.
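
For illustration, here is a minimal userspace sketch of the trigger described
above (the 256MB size is arbitrary, and the snippet assumes the mapping has
already been pushed out to swap by memory pressure, which it does not arrange
itself):

	#define _DEFAULT_SOURCE
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 256UL << 20;	/* 256MB anonymous mapping */
		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;

		/* Populate the range so its pages are eligible for swap. */
		memset(buf, 0xaa, len);

		/* ... assume memory pressure has since swapped the range out ... */

		/* One call kicks off swap readahead for the entire range. */
		madvise(buf, len, MADV_WILLNEED);
		return 0;
	}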

There are additional problems with the delayed charging of swap pages:

1. To implement refault/workingset detection for anonymous pages, we
   need to have a target LRU available at swapin time, but the LRU is not
   determinable until the page has been charged.

2. To implement per-cgroup LRU locking, we need page->mem_cgroup to be
   stable while the page is isolated from the LRU; otherwise, the locks
   change under us.  But swapcache gets charged after it's already on the
   LRU, and we cannot work around that by isolating the page ourselves
   (since charging is not exactly optional).

The previous patch ensured we always maintain cgroup ownership records for
swap pages.  This patch moves the swapcache charging point from the fault
handler to swapin time to fix all of the above problems.
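
In condensed form, the new ordering on the swapin path looks roughly like this
(a sketch distilled from the mm/swap_state.c hunk below; locking details and
error handling are trimmed, and the goto labels are placeholders):

	/* Allocate before claiming the swap slot, so racers just retry. */
	page = alloc_page_vma(gfp_mask, vma, addr);
	if (swapcache_prepare(entry))
		goto retry_or_fail;		/* slot already claimed or gone */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL))
		goto fail;
	if (mem_cgroup_charge(page, NULL, gfp_mask, false))
		goto fail;			/* charged at swapin, not at fault time */

	SetPageWorkingset(page);
	lru_cache_add_anon(page);		/* only a charged page reaches the LRU */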

v2: simplify swapin error checking (Joonsoo)

[hughd@google.com: fix livelock in __read_swap_cache_async()]
  Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2005212246080.8458@eggly.anvils
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Rafael Aquini <aquini@redhat.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-17-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2d1c4980
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3125,9 +3125,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
 							vmf->address);
 			if (page) {
+				int err;
+
 				__SetPageLocked(page);
 				__SetPageSwapBacked(page);
 				set_page_private(page, entry.val);
+
+				/* Tell memcg to use swap ownership records */
+				SetPageSwapCache(page);
+				err = mem_cgroup_charge(page, vma->vm_mm,
+							GFP_KERNEL, false);
+				ClearPageSwapCache(page);
+				if (err)
+					goto out_page;
+
 				lru_cache_add_anon(page);
 				swap_readpage(page, true);
 			}
@@ -3189,10 +3200,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		goto out_page;
 	}
 
-	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, true)) {
-		ret = VM_FAULT_OOM;
-		goto out_page;
-	}
 	cgroup_throttle_swaprate(page, GFP_KERNEL);
 
 	/*
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -623,14 +623,16 @@ static int shmem_add_to_page_cache(struct page *page,
 	page->mapping = mapping;
 	page->index = index;
 
-	error = mem_cgroup_charge(page, charge_mm, gfp, PageSwapCache(page));
-	if (error) {
-		if (!PageSwapCache(page) && PageTransHuge(page)) {
-			count_vm_event(THP_FILE_FALLBACK);
-			count_vm_event(THP_FILE_FALLBACK_CHARGE);
+	if (!PageSwapCache(page)) {
+		error = mem_cgroup_charge(page, charge_mm, gfp, false);
+		if (error) {
+			if (PageTransHuge(page)) {
+				count_vm_event(THP_FILE_FALLBACK);
+				count_vm_event(THP_FILE_FALLBACK_CHARGE);
+			}
+			goto error;
 		}
-		goto error;
 	}
 	cgroup_throttle_swaprate(page, gfp);
 
 	do {
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -360,12 +360,13 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr,
 			bool *new_page_allocated)
 {
-	struct page *found_page = NULL, *new_page = NULL;
 	struct swap_info_struct *si;
-	int err;
+	struct page *page;
+
 	*new_page_allocated = false;
 
-	do {
+	for (;;) {
+		int err;
 		/*
 		 * First check the swap cache.  Since this is normally
 		 * called after lookup_swap_cache() failed, re-calling
@@ -373,12 +374,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 */
 		si = get_swap_device(entry);
 		if (!si)
-			break;
-		found_page = find_get_page(swap_address_space(entry),
-					   swp_offset(entry));
+			return NULL;
+		page = find_get_page(swap_address_space(entry),
+				     swp_offset(entry));
 		put_swap_device(si);
-		if (found_page)
-			break;
+		if (page)
+			return page;
 
 		/*
 		 * Just skip read ahead for unused swap slot.
@@ -389,54 +390,66 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * else swap_off will be aborted if we return NULL.
 		 */
 		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
-			break;
+			return NULL;
 
 		/*
-		 * Get a new page to read into from swap.
+		 * Get a new page to read into from swap.  Allocate it now,
+		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
+		 * cause any racers to loop around until we add it to cache.
 		 */
-		if (!new_page) {
-			new_page = alloc_page_vma(gfp_mask, vma, addr);
-			if (!new_page)
-				break;		/* Out of memory */
-		}
+		page = alloc_page_vma(gfp_mask, vma, addr);
+		if (!page)
+			return NULL;
 
 		/*
 		 * Swap entry may have been freed since our caller observed it.
 		 */
 		err = swapcache_prepare(entry);
-		if (err == -EEXIST) {
-			/*
-			 * We might race against get_swap_page() and stumble
-			 * across a SWAP_HAS_CACHE swap_map entry whose page
-			 * has not been brought into the swapcache yet.
-			 */
-			cond_resched();
-			continue;
-		} else if (err)		/* swp entry is obsolete ? */
+		if (!err)
 			break;
 
-		/* May fail (-ENOMEM) if XArray node allocation failed. */
-		__SetPageLocked(new_page);
-		__SetPageSwapBacked(new_page);
-		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
-		if (likely(!err)) {
-			/* Initiate read into locked page */
-			SetPageWorkingset(new_page);
-			lru_cache_add_anon(new_page);
-			*new_page_allocated = true;
-			return new_page;
-		}
-		__ClearPageLocked(new_page);
+		put_page(page);
+		if (err != -EEXIST)
+			return NULL;
+
 		/*
-		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
-		 * clear SWAP_HAS_CACHE flag.
+		 * We might race against __delete_from_swap_cache(), and
+		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
+		 * has not yet been cleared.  Or race against another
+		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
+		 * in swap_map, but not yet added its page to swap cache.
 		 */
-		put_swap_page(new_page, entry);
-	} while (err != -ENOMEM);
+		cond_resched();
+	}
 
-	if (new_page)
-		put_page(new_page);
-	return found_page;
+	/*
+	 * The swap entry is ours to swap in. Prepare the new page.
+	 */
+
+	__SetPageLocked(page);
+	__SetPageSwapBacked(page);
+
+	/* May fail (-ENOMEM) if XArray node allocation failed. */
+	if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL)) {
+		put_swap_page(page, entry);
+		goto fail_unlock;
+	}
+
+	if (mem_cgroup_charge(page, NULL, gfp_mask, false)) {
+		delete_from_swap_cache(page);
+		goto fail_unlock;
+	}
+
+	/* Caller will initiate read into locked page */
+	SetPageWorkingset(page);
+	lru_cache_add_anon(page);
+	*new_page_allocated = true;
+	return page;
+
+fail_unlock:
+	unlock_page(page);
+	put_page(page);
+	return NULL;
 }
 
 /*
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1901,11 +1901,6 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	if (unlikely(!page))
 		return -ENOMEM;
 
-	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, true)) {
-		ret = -ENOMEM;
-		goto out_nolock;
-	}
-
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
 		ret = 0;
@@ -1931,7 +1926,6 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	activate_page(page);
 out:
 	pte_unmap_unlock(pte, ptl);
-out_nolock:
 	if (page != swapcache) {
 		unlock_page(page);
 		put_page(page);