Commit 4dd4b920 authored by Andrew Morton, committed by Linus Torvalds

revert "kswapd should only wait on IO if there is IO"

Revert commit f1a9ee75:

  Author: Rik van Riel <riel@redhat.com>
  Date:   Thu Feb 7 00:14:08 2008 -0800

    kswapd should only wait on IO if there is IO

    The current kswapd (and try_to_free_pages) code has an oddity where the
    code will wait on IO, even if there is no IO in flight.  This problem is
    notable especially when the system scans through many unfreeable pages,
    causing unnecessary stalls in the VM.

    Additionally, tasks without __GFP_FS or __GFP_IO in the direct reclaim path
    will sleep if a significant number of pages are encountered that should be
    written out.  This gives kswapd a chance to write out those pages, while
    the direct reclaim task sleeps.
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Because of large latencies and interactivity problems reported by Carlos,
here: http://lkml.org/lkml/2008/3/22/211

Cc: Rik van Riel <riel@redhat.com>
Cc: "Carlos R. Mafra" <crmafra2@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 537878d2
@@ -70,13 +70,6 @@ struct scan_control {
 	int order;
 
-	/*
-	 * Pages that have (or should have) IO pending. If we run into
-	 * a lot of these, we're better off waiting a little for IO to
-	 * finish rather than scanning more pages in the VM.
-	 */
-	int nr_io_pages;
-
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
@@ -512,10 +505,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
 			wait_on_page_writeback(page);
-		else {
-			sc->nr_io_pages++;
+		else
 			goto keep_locked;
-		}
 	}
 
 	referenced = page_referenced(page, 1, sc->mem_cgroup);
@@ -554,10 +545,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	if (PageDirty(page)) {
 		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
 			goto keep_locked;
-		if (!may_enter_fs) {
-			sc->nr_io_pages++;
+		if (!may_enter_fs)
 			goto keep_locked;
-		}
 		if (!sc->may_writepage)
 			goto keep_locked;
@@ -568,10 +557,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		case PAGE_ACTIVATE:
 			goto activate_locked;
 		case PAGE_SUCCESS:
-			if (PageWriteback(page) || PageDirty(page)) {
-				sc->nr_io_pages++;
+			if (PageWriteback(page) || PageDirty(page))
 				goto keep;
-			}
 			/*
 			 * A synchronous write - probably a ramdisk. Go
 			 * ahead and try to reclaim the page.
@@ -1344,7 +1331,6 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		sc->nr_scanned = 0;
-		sc->nr_io_pages = 0;
 		if (!priority)
 			disable_swap_token();
 		nr_reclaimed += shrink_zones(priority, zones, sc);
@@ -1379,8 +1365,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 	}
 
 		/* Take a nap, wait for some writeback to complete */
-		if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
-				sc->nr_io_pages > sc->swap_cluster_max)
+		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
 			congestion_wait(WRITE, HZ/10);
 	}
 
 	/* top priority shrink_caches still had more to do? don't OOM, then */
@@ -1514,7 +1499,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 		if (!priority)
 			disable_swap_token();
 
-		sc.nr_io_pages = 0;
 		all_zones_ok = 1;
 
 		/*
@@ -1607,8 +1591,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 		 * OK, kswapd is getting into trouble. Take a nap, then take
 		 * another pass across the zones.
 		 */
-		if (total_scanned && priority < DEF_PRIORITY - 2 &&
-				sc.nr_io_pages > sc.swap_cluster_max)
+		if (total_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(WRITE, HZ/10);
 
 		/*
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment