Commit c2234eee authored by Andrew Morton, committed by Linus Torvalds

[PATCH] vmscan: ignore swap token when in trouble

The token-based thrashing control patches introduced a problem: when a task
which doesn't hold the token tries to run direct-reclaim, that task is told
that pages which belong to the token-holding mm are referenced, even though
they are not.  This means that it is possible for a huge number of a
non-token-holding mm's pages to be scanned to no effect.  Eventually, we give
up and go and oom-kill something.

So the patch arranges for the thrashing control logic to be defeated if the
caller has reached the highest level of scanning priority.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b356ea4b
...@@ -89,7 +89,7 @@ static inline void page_dup_rmap(struct page *page) ...@@ -89,7 +89,7 @@ static inline void page_dup_rmap(struct page *page)
/* /*
* Called from mm/vmscan.c to handle paging out * Called from mm/vmscan.c to handle paging out
*/ */
int page_referenced(struct page *, int is_locked); int page_referenced(struct page *, int is_locked, int ignore_token);
int try_to_unmap(struct page *); int try_to_unmap(struct page *);
/* /*
...@@ -103,7 +103,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); ...@@ -103,7 +103,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
#define anon_vma_prepare(vma) (0) #define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0) #define anon_vma_link(vma) do {} while (0)
#define page_referenced(page,l) TestClearPageReferenced(page) #define page_referenced(page,l,i) TestClearPageReferenced(page)
#define try_to_unmap(page) SWAP_FAIL #define try_to_unmap(page) SWAP_FAIL
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
......
...@@ -254,7 +254,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) ...@@ -254,7 +254,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
* repeatedly from either page_referenced_anon or page_referenced_file. * repeatedly from either page_referenced_anon or page_referenced_file.
*/ */
static int page_referenced_one(struct page *page, static int page_referenced_one(struct page *page,
struct vm_area_struct *vma, unsigned int *mapcount) struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
unsigned long address; unsigned long address;
...@@ -289,7 +289,7 @@ static int page_referenced_one(struct page *page, ...@@ -289,7 +289,7 @@ static int page_referenced_one(struct page *page,
if (ptep_clear_flush_young(vma, address, pte)) if (ptep_clear_flush_young(vma, address, pte))
referenced++; referenced++;
if (mm != current->mm && has_swap_token(mm)) if (mm != current->mm && !ignore_token && has_swap_token(mm))
referenced++; referenced++;
(*mapcount)--; (*mapcount)--;
...@@ -302,7 +302,7 @@ static int page_referenced_one(struct page *page, ...@@ -302,7 +302,7 @@ static int page_referenced_one(struct page *page,
return referenced; return referenced;
} }
static int page_referenced_anon(struct page *page) static int page_referenced_anon(struct page *page, int ignore_token)
{ {
unsigned int mapcount; unsigned int mapcount;
struct anon_vma *anon_vma; struct anon_vma *anon_vma;
...@@ -315,7 +315,8 @@ static int page_referenced_anon(struct page *page) ...@@ -315,7 +315,8 @@ static int page_referenced_anon(struct page *page)
mapcount = page_mapcount(page); mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
referenced += page_referenced_one(page, vma, &mapcount); referenced += page_referenced_one(page, vma, &mapcount,
ignore_token);
if (!mapcount) if (!mapcount)
break; break;
} }
...@@ -334,7 +335,7 @@ static int page_referenced_anon(struct page *page) ...@@ -334,7 +335,7 @@ static int page_referenced_anon(struct page *page)
* *
* This function is only called from page_referenced for object-based pages. * This function is only called from page_referenced for object-based pages.
*/ */
static int page_referenced_file(struct page *page) static int page_referenced_file(struct page *page, int ignore_token)
{ {
unsigned int mapcount; unsigned int mapcount;
struct address_space *mapping = page->mapping; struct address_space *mapping = page->mapping;
...@@ -372,7 +373,8 @@ static int page_referenced_file(struct page *page) ...@@ -372,7 +373,8 @@ static int page_referenced_file(struct page *page)
referenced++; referenced++;
break; break;
} }
referenced += page_referenced_one(page, vma, &mapcount); referenced += page_referenced_one(page, vma, &mapcount,
ignore_token);
if (!mapcount) if (!mapcount)
break; break;
} }
...@@ -389,7 +391,7 @@ static int page_referenced_file(struct page *page) ...@@ -389,7 +391,7 @@ static int page_referenced_file(struct page *page)
* Quick test_and_clear_referenced for all mappings to a page, * Quick test_and_clear_referenced for all mappings to a page,
* returns the number of ptes which referenced the page. * returns the number of ptes which referenced the page.
*/ */
int page_referenced(struct page *page, int is_locked) int page_referenced(struct page *page, int is_locked, int ignore_token)
{ {
int referenced = 0; int referenced = 0;
...@@ -401,14 +403,15 @@ int page_referenced(struct page *page, int is_locked) ...@@ -401,14 +403,15 @@ int page_referenced(struct page *page, int is_locked)
if (page_mapped(page) && page->mapping) { if (page_mapped(page) && page->mapping) {
if (PageAnon(page)) if (PageAnon(page))
referenced += page_referenced_anon(page); referenced += page_referenced_anon(page, ignore_token);
else if (is_locked) else if (is_locked)
referenced += page_referenced_file(page); referenced += page_referenced_file(page, ignore_token);
else if (TestSetPageLocked(page)) else if (TestSetPageLocked(page))
referenced++; referenced++;
else { else {
if (page->mapping) if (page->mapping)
referenced += page_referenced_file(page); referenced += page_referenced_file(page,
ignore_token);
unlock_page(page); unlock_page(page);
} }
} }
......
...@@ -377,7 +377,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) ...@@ -377,7 +377,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
if (page_mapped(page) || PageSwapCache(page)) if (page_mapped(page) || PageSwapCache(page))
sc->nr_scanned++; sc->nr_scanned++;
referenced = page_referenced(page, 1); referenced = page_referenced(page, 1, sc->priority <= 0);
/* In active use or really unfreeable? Activate it. */ /* In active use or really unfreeable? Activate it. */
if (referenced && page_mapping_inuse(page)) if (referenced && page_mapping_inuse(page))
goto activate_locked; goto activate_locked;
...@@ -715,7 +715,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) ...@@ -715,7 +715,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
if (page_mapped(page)) { if (page_mapped(page)) {
if (!reclaim_mapped || if (!reclaim_mapped ||
(total_swap_pages == 0 && PageAnon(page)) || (total_swap_pages == 0 && PageAnon(page)) ||
page_referenced(page, 0)) { page_referenced(page, 0, sc->priority <= 0)) {
list_add(&page->lru, &l_active); list_add(&page->lru, &l_active);
continue; continue;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment