Commit e7c89646 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] shmem fixes

A shmem cleanup/bugfix patch from Hugh Dickins.

- Minor: in try_to_unuse(), only wait on writeout if we actually
  started new writeout.  Otherwise, there is no need because a
  wait_on_page_writeback() has already been executed against this page.
  And it's locked, so no new writeback can start.

- Minor: in shmem_unuse_inode(): remove all the
  wait_on_page_writeback() logic.  We already did that in
  try_to_unuse(), and the page is locked so no new writeback can start.

- Less minor: add a missing page_cache_release() to
  shmem_getpage_locked() in the uncommon case where the page was found
  to be under writeout; a sketch of this retry path follows that hunk below.
parent b6a7f088
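
The first two items lean on the same ordering guarantee: writeback can only be started on a locked page, so once try_to_unuse() holds the page lock and has waited for any writeback already in flight, the page stays clear of writeback until the lock holder itself starts new writeout. The snippet below is a minimal user-space model of that guarantee, not kernel code: struct fake_page, start_writeback() and wait_on_writeback() are hypothetical stand-ins for struct page, ->writepage() and wait_on_page_writeback().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct page: just the two bits of state the
 * changelog reasons about. */
struct fake_page {
        bool locked;            /* PG_locked */
        bool writeback;         /* PG_writeback */
};

/* Model rule: writeback can only be *started* on a locked page, which is
 * how ->writepage() gets invoked in the real kernel. */
static void start_writeback(struct fake_page *p)
{
        assert(p->locked);
        p->writeback = true;
}

/* Model of wait_on_page_writeback(): when it returns, any writeback that
 * was in flight has completed. */
static void wait_on_writeback(struct fake_page *p)
{
        p->writeback = false;
}

int main(void)
{
        struct fake_page page = { .locked = false, .writeback = true };

        page.locked = true;             /* lock_page() in try_to_unuse()        */
        wait_on_writeback(&page);       /* wait_on_page_writeback() right after */

        /*
         * From here until the lock is dropped, nobody else can begin
         * writeback, because starting it needs the lock we hold.  This is
         * why the PageWriteback()/wait_on_page_writeback() checks removed
         * from shmem_unuse_inode() below could never trigger again.
         */
        assert(!page.writeback);

        /* Only a new writeout started by the lock holder can set it again. */
        start_writeback(&page);
        assert(page.writeback);

        printf("new writeback can only start under the page lock\n");
        return 0;
}
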
@@ -426,22 +426,15 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
         swap_free(entry);
         ptr[offset] = (swp_entry_t) {0};
-        while (inode && (PageWriteback(page) ||
-                        move_from_swap_cache(page, idx, inode->i_mapping))) {
+        while (inode && move_from_swap_cache(page, idx, inode->i_mapping)) {
                 /*
                  * Yield for kswapd, and try again - but we're still
                  * holding the page lock - ugh! fix this up later on.
                  * Beware of inode being unlinked or truncated: just
                  * leave try_to_unuse to delete_from_swap_cache if so.
-                 *
-                 * AKPM: We now wait on writeback too. Note that it's
-                 * the page lock which prevents new writeback from starting.
                  */
                 spin_unlock(&info->lock);
-                if (PageWriteback(page))
-                        wait_on_page_writeback(page);
-                else
-                        yield();
+                yield();
                 spin_lock(&info->lock);
                 ptr = shmem_swp_entry(info, idx, 0);
                 if (IS_ERR(ptr))
@@ -607,6 +600,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
                         spin_unlock(&info->lock);
                         wait_on_page_writeback(page);
                         unlock_page(page);
+                        page_cache_release(page);
                         goto repeat;
                 }
                 error = move_from_swap_cache(page, idx, mapping);
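
This hunk is the page_cache_release() item from the changelog: the swap-cache lookup in shmem_getpage_locked() returns the page with its reference count already elevated, so the writeback bail-out path must drop that reference before it unlocks and retries, or each trip around the loop leaks a count. The snippet below is a toy user-space model of that leak-free retry pattern; lookup(), release(), struct fake_page and the retry cap are hypothetical, not the real page-cache API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical refcounted object standing in for a page found in the
 * swap cache. */
struct fake_page {
        int count;
        bool writeback;
};

static struct fake_page slot = { .count = 1, .writeback = true };

/* Like the swap-cache lookup: hands the caller a reference. */
static struct fake_page *lookup(void)
{
        slot.count++;
        return &slot;
}

/* Stands in for page_cache_release(). */
static void release(struct fake_page *p)
{
        p->count--;
}

int main(void)
{
        int tries = 0;

repeat:
        tries++;
        struct fake_page *page = lookup();

        if (page->writeback && tries < 3) {
                /*
                 * Busy: back off and retry.  The real code also waits for
                 * the writeback to finish; the model just caps the retries.
                 * Without this release(), every trip around the loop would
                 * leak one reference -- the leak plugged above by
                 * page_cache_release().
                 */
                release(page);
                goto repeat;
        }

        /* ... use the page ... */
        release(page);

        assert(slot.count == 1);        /* back to baseline: nothing leaked */
        printf("retries: %d, final count: %d\n", tries, slot.count);
        return 0;
}
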
@@ -687,11 +687,10 @@ static int try_to_unuse(unsigned int type)
                 if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
                         swap_writepage(page);
                         lock_page(page);
                 }
-                if (PageSwapCache(page)) {
-                        wait_on_page_writeback(page);
-                        delete_from_swap_cache(page);
-                }
+                if (PageSwapCache(page))
+                        delete_from_swap_cache(page);
                 /*
                  * So we could skip searching mms once swap count went