Commit 5ba2948d authored by Andrew Morton, committed by Richard Henderson

[PATCH] fix set_page_dirty vs truncate&free races

set_page_dirty() is racy if the caller has no reference against
page->mapping->host, and if the page is unlocked.  This is because
another CPU could truncate the page off the mapping and then free the
mapping.

Usually, the page _is_ locked, or the caller is a user-space process which
holds a reference on the inode by having an open file.

The exceptional cases are where the page was obtained via
get_user_pages().  The patch changes those to lock the page around the
set_page_dirty() call.
parent 0c682373
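
For illustration, here is a minimal sketch of the pattern this patch closes, as seen from a get_user_pages() caller. The helper name example_release_pinned_pages and its arguments are invented for this sketch and are not part of the patch; the point is that dirtying happens under the page lock, which is exactly what the new set_page_dirty_lock() helper in the diff below factors out.

/*
 * Illustrative sketch only (not part of the patch): release pages that
 * were pinned with get_user_pages() after a device wrote into them.
 * Calling set_page_dirty() on an unlocked page here could race with
 * truncate removing the page and freeing the mapping; taking the page
 * lock around the call closes that window.
 */
static void example_release_pinned_pages(struct page **pages, int nr_pages,
					 int write_to_vm)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (write_to_vm) {
			lock_page(pages[i]);
			set_page_dirty(pages[i]);
			unlock_page(pages[i]);
		}
		page_cache_release(pages[i]);
	}
}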
@@ -538,6 +538,8 @@ struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
  * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
  * must be the same as passed into bio_map_user(). Must be called with
  * a process context.
+ *
+ * bio_unmap_user() may sleep.
  */
 void bio_unmap_user(struct bio *bio, int write_to_vm)
 {
@@ -561,7 +563,7 @@ void bio_unmap_user(struct bio *bio, int write_to_vm)
 	 */
 	__bio_for_each_segment(bvec, bio, i, 0) {
 		if (write_to_vm)
-			set_page_dirty(bvec->bv_page);
+			set_page_dirty_lock(bvec->bv_page);
 		page_cache_release(bvec->bv_page);
 	}
@@ -601,7 +603,7 @@ void bio_set_pages_dirty(struct bio *bio)
 		struct page *page = bvec[i].bv_page;
 		if (page)
-			set_page_dirty(bvec[i].bv_page);
+			set_page_dirty_lock(bvec[i].bv_page);
 	}
 }
......
@@ -343,7 +343,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 		struct page *page = bvec[page_no].bv_page;
 		if (dio->rw == READ)
-			set_page_dirty(page);
+			set_page_dirty_lock(page);
 		page_cache_release(page);
 	}
 	bio_put(bio);
......
@@ -396,6 +396,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 int __set_page_dirty_buffers(struct page *page);
 int __set_page_dirty_nobuffers(struct page *page);
+int set_page_dirty_lock(struct page *page);
 
 /*
  * Prototype to add a shrinker callback for ageable caches.
......
@@ -562,6 +562,26 @@ int __set_page_dirty_nobuffers(struct page *page)
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
 
+/*
+ * set_page_dirty() is racy if the caller has no reference against
+ * page->mapping->host, and if the page is unlocked.  This is because another
+ * CPU could truncate the page off the mapping and then free the mapping.
+ *
+ * Usually, the page _is_ locked, or the caller is a user-space process which
+ * holds a reference on the inode by having an open file.
+ *
+ * In other cases, the page should be locked before running set_page_dirty().
+ */
+int set_page_dirty_lock(struct page *page)
+{
+	int ret;
+
+	lock_page(page);
+	ret = set_page_dirty(page);
+	unlock_page(page);
+	return ret;
+}
+
 /*
  * Clear a page's dirty flag, while caring for dirty memory accounting.
  * Returns true if the page was previously dirty.
......
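
For context, a hedged sketch of how a get_user_pages() caller would pair pinning with the new helper; example_pin_and_dirty and the surrounding buffer handling are invented for illustration and do not appear in this commit.

/*
 * Illustrative sketch only: pin a user buffer for a device that writes
 * into user memory, then dirty and release the pages once the I/O is
 * done.  set_page_dirty_lock() must be called without the page lock
 * held, since it takes the lock itself.
 */
static int example_pin_and_dirty(unsigned long uaddr, int nr_pages,
				 struct page **pages)
{
	int i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, uaddr, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (got <= 0)
		return got;

	/* ... device transfers data into the pinned pages here ... */

	for (i = 0; i < got; i++) {
		set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);
	}
	return got;
}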