Commit 6b5dbcf2 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] cleanup of page->flags manipulations

I've had this patch hanging around for a couple of months (you liked an
earlier version, but I never found time to resubmit it), remove some
unnecessary PageDirty and PageUptodate manipulations.

add_to_page_cache can only receive a dirty page in the add_to_swap
case, so deal with it there.  add_to_swap is better off using
add_to_page_cache directly than add_to_swap_cache.  Keep move_to_ and
_from_swap_cache simple, and don't fiddle with flags without reason.
It's a little less efficient to correct clean->dirty list as an
afterthought, but cuts unusual code from slow path.
parent a2495207
...@@ -228,7 +228,6 @@ int add_to_page_cache(struct page *page, ...@@ -228,7 +228,6 @@ int add_to_page_cache(struct page *page,
if (!error) { if (!error) {
SetPageLocked(page); SetPageLocked(page);
___add_to_page_cache(page, mapping, offset); ___add_to_page_cache(page, mapping, offset);
ClearPageDirty(page);
} else { } else {
page_cache_release(page); page_cache_release(page);
} }
......
...@@ -462,7 +462,6 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s ...@@ -462,7 +462,6 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
info->swapped--; info->swapped--;
} }
spin_unlock(&info->lock); spin_unlock(&info->lock);
SetPageUptodate(page);
/* /*
* Decrement swap count even when the entry is left behind: * Decrement swap count even when the entry is left behind:
* try_to_unuse will skip over mms, then reincrement count. * try_to_unuse will skip over mms, then reincrement count.
...@@ -540,8 +539,6 @@ static int shmem_writepage(struct page * page) ...@@ -540,8 +539,6 @@ static int shmem_writepage(struct page * page)
*entry = swap; *entry = swap;
info->swapped++; info->swapped++;
spin_unlock(&info->lock); spin_unlock(&info->lock);
SetPageUptodate(page);
set_page_dirty(page);
unlock_page(page); unlock_page(page);
return 0; return 0;
} }
......
...@@ -105,7 +105,6 @@ void __delete_from_swap_cache(struct page *page) ...@@ -105,7 +105,6 @@ void __delete_from_swap_cache(struct page *page)
BUG_ON(!PageLocked(page)); BUG_ON(!PageLocked(page));
BUG_ON(!PageSwapCache(page)); BUG_ON(!PageSwapCache(page));
BUG_ON(PageWriteback(page)); BUG_ON(PageWriteback(page));
ClearPageDirty(page);
__remove_from_page_cache(page); __remove_from_page_cache(page);
INC_CACHE_INFO(del_total); INC_CACHE_INFO(del_total);
} }
...@@ -146,30 +145,30 @@ int add_to_swap(struct page * page) ...@@ -146,30 +145,30 @@ int add_to_swap(struct page * page)
pf_flags = current->flags; pf_flags = current->flags;
current->flags &= ~PF_MEMALLOC; current->flags &= ~PF_MEMALLOC;
current->flags |= PF_NOWARN; current->flags |= PF_NOWARN;
ClearPageUptodate(page); /* why? */
/* /*
* Add it to the swap cache and mark it dirty * Add it to the swap cache and mark it dirty
* (adding to the page cache will clear the dirty
* and uptodate bits, so we need to do it again)
*/ */
switch (add_to_swap_cache(page, entry)) { switch (add_to_page_cache(page, &swapper_space, entry.val)) {
case 0: /* Success */ case 0: /* Success */
current->flags = pf_flags; current->flags = pf_flags;
SetPageUptodate(page); SetPageUptodate(page);
ClearPageDirty(page);
set_page_dirty(page); set_page_dirty(page);
swap_free(entry); INC_CACHE_INFO(add_total);
return 1; return 1;
case -ENOMEM: /* radix-tree allocation */ case -EEXIST:
/* Raced with "speculative" read_swap_cache_async */
current->flags = pf_flags; current->flags = pf_flags;
INC_CACHE_INFO(exist_race);
swap_free(entry); swap_free(entry);
return 0; continue;
default: /* ENOENT: raced */ default:
break; /* -ENOMEM radix-tree allocation failure */
}
/* Raced with "speculative" read_swap_cache_async */
current->flags = pf_flags; current->flags = pf_flags;
swap_free(entry); swap_free(entry);
return 0;
}
} }
} }
...@@ -203,33 +202,13 @@ int move_to_swap_cache(struct page *page, swp_entry_t entry) ...@@ -203,33 +202,13 @@ int move_to_swap_cache(struct page *page, swp_entry_t entry)
void **pslot; void **pslot;
int err; int err;
if (!mapping)
BUG();
if (!swap_duplicate(entry)) {
INC_CACHE_INFO(noent_race);
return -ENOENT;
}
write_lock(&swapper_space.page_lock); write_lock(&swapper_space.page_lock);
write_lock(&mapping->page_lock); write_lock(&mapping->page_lock);
err = radix_tree_reserve(&swapper_space.page_tree, entry.val, &pslot); err = radix_tree_reserve(&swapper_space.page_tree, entry.val, &pslot);
if (!err) { if (!err) {
/* Remove it from the page cache */
__remove_from_page_cache(page); __remove_from_page_cache(page);
/* Add it to the swap cache */
*pslot = page; *pslot = page;
/*
* This code used to clear PG_uptodate, PG_error, PG_arch1,
* PG_referenced and PG_checked. What _should_ it clear?
*/
ClearPageUptodate(page);
ClearPageReferenced(page);
SetPageLocked(page);
ClearPageDirty(page);
___add_to_page_cache(page, &swapper_space, entry.val); ___add_to_page_cache(page, &swapper_space, entry.val);
} }
...@@ -237,21 +216,21 @@ int move_to_swap_cache(struct page *page, swp_entry_t entry) ...@@ -237,21 +216,21 @@ int move_to_swap_cache(struct page *page, swp_entry_t entry)
write_unlock(&swapper_space.page_lock); write_unlock(&swapper_space.page_lock);
if (!err) { if (!err) {
if (!swap_duplicate(entry))
BUG();
/* shift page from clean_pages to dirty_pages list */
BUG_ON(PageDirty(page));
set_page_dirty(page);
INC_CACHE_INFO(add_total); INC_CACHE_INFO(add_total);
return 0; } else if (err == -EEXIST)
}
swap_free(entry);
if (err == -EEXIST)
INC_CACHE_INFO(exist_race); INC_CACHE_INFO(exist_race);
return err; return err;
} }
int move_from_swap_cache(struct page *page, unsigned long index, int move_from_swap_cache(struct page *page, unsigned long index,
struct address_space *mapping) struct address_space *mapping)
{ {
swp_entry_t entry;
void **pslot; void **pslot;
int err; int err;
...@@ -259,44 +238,27 @@ int move_from_swap_cache(struct page *page, unsigned long index, ...@@ -259,44 +238,27 @@ int move_from_swap_cache(struct page *page, unsigned long index,
BUG_ON(PageWriteback(page)); BUG_ON(PageWriteback(page));
BUG_ON(page_has_buffers(page)); BUG_ON(page_has_buffers(page));
entry.val = page->index;
write_lock(&swapper_space.page_lock); write_lock(&swapper_space.page_lock);
write_lock(&mapping->page_lock); write_lock(&mapping->page_lock);
err = radix_tree_reserve(&mapping->page_tree, index, &pslot); err = radix_tree_reserve(&mapping->page_tree, index, &pslot);
if (!err) { if (!err) {
swp_entry_t entry;
entry.val = page->index;
__delete_from_swap_cache(page); __delete_from_swap_cache(page);
*pslot = page; *pslot = page;
/*
* This code used to clear PG_uptodate, PG_error, PG_referenced,
* PG_arch_1 and PG_checked. It's not really clear why.
*/
ClearPageUptodate(page);
ClearPageReferenced(page);
/*
* ___add_to_page_cache puts the page on ->clean_pages,
* but it's dirty. If it's on ->clean_pages, it will basically
* never get written out.
*/
SetPageDirty(page);
___add_to_page_cache(page, mapping, index); ___add_to_page_cache(page, mapping, index);
/* fix that up */ }
list_move(&page->list, &mapping->dirty_pages);
write_unlock(&mapping->page_lock); write_unlock(&mapping->page_lock);
write_unlock(&swapper_space.page_lock); write_unlock(&swapper_space.page_lock);
/* Do this outside ->page_lock */ if (!err) {
swap_free(entry); swap_free(entry);
return 0; /* shift page from clean_pages to dirty_pages list */
ClearPageDirty(page);
set_page_dirty(page);
} }
write_unlock(&mapping->page_lock);
write_unlock(&swapper_space.page_lock);
return err; return err;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment