Commit 4bcb6689 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] Fix PG_launder

Set PG_launder against pages which are under VM writeback.  So page
allocators will throttle against them.
parent 46c709c0
......@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
/**
......@@ -135,7 +136,7 @@ static void __sync_single_inode(struct inode *inode, int wait, int *nr_to_write)
if (mapping->a_ops->writeback_mapping)
mapping->a_ops->writeback_mapping(mapping, nr_to_write);
else
filemap_fdatawrite(mapping);
generic_writeback_mapping(mapping, NULL);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC))
......
......@@ -174,6 +174,7 @@ extern void get_page_state(struct page_state *ret);
#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags)
#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
......
......@@ -659,7 +659,6 @@ EXPORT_SYMBOL(wait_on_page_writeback);
void unlock_page(struct page *page)
{
wait_queue_head_t *waitqueue = page_waitqueue(page);
clear_bit(PG_launder, &(page)->flags);
smp_mb__before_clear_bit();
if (!TestClearPageLocked(page))
BUG();
......@@ -674,7 +673,7 @@ void unlock_page(struct page *page)
void end_page_writeback(struct page *page)
{
wait_queue_head_t *waitqueue = page_waitqueue(page);
clear_bit(PG_launder, &(page)->flags);
ClearPageLaunder(page);
smp_mb__before_clear_bit();
if (!TestClearPageWriteback(page))
BUG();
......
......@@ -354,6 +354,8 @@ int generic_writeback_mapping(struct address_space *mapping, int *nr_to_write)
lock_page(page);
if (TestClearPageDirty(page)) {
if (current->flags & PF_MEMALLOC)
SetPageLaunder(page);
err = writepage(page);
if (!ret)
ret = err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment