Commit c24f21bd authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] zoned vm counters: remove useless struct wbs

Remove writeback state

We can now remove the helper functions that were needed to assemble the page
state for writeback control, since these statistics are directly available
via global_page_state().
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d2c5e30c
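In effect, each call site now reads the counters it needs directly at the
point of use instead of first snapshotting all of them into a struct
writeback_state. A condensed before/after sketch of the pattern (illustration
only; "thresh" stands in for whatever limit the caller computes, and the real
call sites follow in the diff below):

    /* Before: snapshot all four counters into a struct, then test. */
    struct writeback_state wbs;
    get_writeback_state(&wbs);
    throttled = wbs.nr_dirty + wbs.nr_unstable > thresh;

    /* After: read exactly the counters needed, where they are needed. */
    throttled = global_page_state(NR_FILE_DIRTY) +
                    global_page_state(NR_UNSTABLE_NFS) > thresh;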
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -99,23 +99,6 @@ EXPORT_SYMBOL(laptop_mode);
 
 static void background_writeout(unsigned long _min_pages);
 
-struct writeback_state
-{
-        unsigned long nr_dirty;
-        unsigned long nr_unstable;
-        unsigned long nr_mapped;
-        unsigned long nr_writeback;
-};
-
-static void get_writeback_state(struct writeback_state *wbs)
-{
-        wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
-        wbs->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-        wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
-                        global_page_state(NR_ANON_PAGES);
-        wbs->nr_writeback = global_page_state(NR_WRITEBACK);
-}
-
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -134,7 +117,7 @@ static void get_writeback_state(struct writeback_state *wbs)
  * clamping level.
  */
 static void
-get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
+get_dirty_limits(long *pbackground, long *pdirty,
                 struct address_space *mapping)
 {
         int background_ratio;           /* Percentages */
@@ -145,8 +128,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
         unsigned long available_memory = total_pages;
         struct task_struct *tsk;
 
-        get_writeback_state(wbs);
-
 #ifdef CONFIG_HIGHMEM
         /*
          * If this mapping can only allocate from low memory,
@@ -157,7 +138,9 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
 #endif
 
 
-        unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
+        unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
+                                global_page_state(NR_ANON_PAGES)) * 100) /
+                                        total_pages;
 
         dirty_ratio = vm_dirty_ratio;
         if (dirty_ratio > unmapped_ratio / 2)
@@ -190,7 +173,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-        struct writeback_state wbs;
         long nr_reclaimable;
         long background_thresh;
         long dirty_thresh;
@@ -208,10 +190,11 @@ static void balance_dirty_pages(struct address_space *mapping)
                         .range_cyclic   = 1,
                 };
 
-                get_dirty_limits(&wbs, &background_thresh,
-                                        &dirty_thresh, mapping);
-                nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-                if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
+                get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+                                        global_page_state(NR_UNSTABLE_NFS);
+                if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
+                        dirty_thresh)
                         break;
 
                 if (!dirty_exceeded)
@@ -225,10 +208,13 @@ static void balance_dirty_pages(struct address_space *mapping)
                  */
                 if (nr_reclaimable) {
                         writeback_inodes(&wbc);
-                        get_dirty_limits(&wbs, &background_thresh,
-                                        &dirty_thresh, mapping);
-                        nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-                        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
+                        get_dirty_limits(&background_thresh,
+                                        &dirty_thresh, mapping);
+                        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+                                        global_page_state(NR_UNSTABLE_NFS);
+                        if (nr_reclaimable +
+                                global_page_state(NR_WRITEBACK)
+                                        <= dirty_thresh)
                                 break;
                         pages_written += write_chunk - wbc.nr_to_write;
                         if (pages_written >= write_chunk)
@@ -237,7 +223,8 @@ static void balance_dirty_pages(struct address_space *mapping)
                 blk_congestion_wait(WRITE, HZ/10);
         }
 
-        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
+        if (nr_reclaimable + global_page_state(NR_WRITEBACK)
+                <= dirty_thresh && dirty_exceeded)
                 dirty_exceeded = 0;
 
         if (writeback_in_progress(bdi))
@@ -300,12 +287,11 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
-        struct writeback_state wbs;
         long background_thresh;
         long dirty_thresh;
 
         for ( ; ; ) {
-                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
+                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page
@@ -313,7 +299,8 @@ void throttle_vm_writeout(void)
                  */
                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 
-                if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
+                if (global_page_state(NR_UNSTABLE_NFS) +
+                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                         break;
                 blk_congestion_wait(WRITE, HZ/10);
         }
@@ -337,12 +324,12 @@ static void background_writeout(unsigned long _min_pages)
         };
 
         for ( ; ; ) {
-                struct writeback_state wbs;
                 long background_thresh;
                 long dirty_thresh;
 
-                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
-                if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
+                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+                if (global_page_state(NR_FILE_DIRTY) +
+                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                 && min_pages <= 0)
                         break;
                 wbc.encountered_congestion = 0;
@@ -366,12 +353,9 @@ static void background_writeout(unsigned long _min_pages)
  */
 int wakeup_pdflush(long nr_pages)
 {
-        if (nr_pages == 0) {
-                struct writeback_state wbs;
-
-                get_writeback_state(&wbs);
-                nr_pages = wbs.nr_dirty + wbs.nr_unstable;
-        }
+        if (nr_pages == 0)
+                nr_pages = global_page_state(NR_FILE_DIRTY) +
+                                global_page_state(NR_UNSTABLE_NFS);
         return pdflush_operation(background_writeout, nr_pages);
 }
 
@@ -402,7 +386,6 @@ static void wb_kupdate(unsigned long arg)
         unsigned long start_jif;
         unsigned long next_jif;
         long nr_to_write;
-        struct writeback_state wbs;
         struct writeback_control wbc = {
                 .bdi            = NULL,
                 .sync_mode      = WB_SYNC_NONE,
@@ -415,11 +398,11 @@ static void wb_kupdate(unsigned long arg)
 
         sync_supers();
 
-        get_writeback_state(&wbs);
         oldest_jif = jiffies - dirty_expire_interval;
         start_jif = jiffies;
         next_jif = start_jif + dirty_writeback_interval;
-        nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
+        nr_to_write = global_page_state(NR_FILE_DIRTY) +
+                        global_page_state(NR_UNSTABLE_NFS) +
                         (inodes_stat.nr_inodes - inodes_stat.nr_unused);
         while (nr_to_write > 0) {
                 wbc.encountered_congestion = 0;
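The reason the snapshot helper is no longer worth keeping: with the zoned VM
counter (ZVC) framework, each of these statistics is maintained as a
ready-made global sum, so reading one is a single atomic load rather than a
walk over zones or per-cpu data. The 2.6.18-era accessor has roughly this
shape (a close paraphrase of include/linux/vmstat.h from the same patch
series, not a verbatim quote):

    extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

    static inline unsigned long global_page_state(enum zone_stat_item item)
    {
            long x = atomic_long_read(&vm_stat[item]);
    #ifdef CONFIG_SMP
            /* per-cpu deltas can transiently drive the sum negative */
            if (x < 0)
                    x = 0;
    #endif
            return x;
    }

Since individual reads are this cheap, batching four of them into a struct
writeback_state bought nothing, and dropping the struct also lets
get_dirty_limits() lose its wbs parameter.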