Commit 43ce1d23 authored by Aneesh Kumar K.V, committed by Theodore Ts'o

ext4: Fix mmap/truncate race when blocksize < pagesize && !nodelalloc

This patch fixes the mmap/truncate race, previously fixed for the delayed
allocation path, by merging ext4_{journalled,normal,da}_writepage() into a
single ext4_writepage().
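
The race can be exercised from user space with the sequence that is also
documented in the comment added to ext4_writepage() below. The program here
is only an illustrative sketch, not part of this patch: the scratch file path
is hypothetical, a filesystem with 1K blocksize on a 4K-pagesize machine is
assumed, and the final msync() is added merely to force writeback of the
dirtied page.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/scratch/testfile";	/* hypothetical path */
	char *a;
	int fd;

	fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Allocate only the first 1K block of the 4K page. */
	if (ftruncate(fd, 1024) < 0) {
		perror("ftruncate");
		return 1;
	}

	/* Map the full page and dirty the first byte; page_mkwrite maps
	 * only the first buffer_head of the page. */
	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	a[0] = 'a';

	/* Extend the file: the remaining buffer_heads in the page are now
	 * dirty but still unmapped, which writepage must cope with. */
	if (ftruncate(fd, 4096) < 0) {
		perror("ftruncate");
		return 1;
	}

	msync(a, 4096, MS_SYNC);	/* force writeback of the page */
	munmap(a, 4096);
	close(fd);
	return 0;
}

After the merge, all four sets of address_space operations point their
->writepage at ext4_writepage(), so the ordered, writeback, data-journaling
and delayed-allocation paths all handle such partially mapped pages the same
way.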
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent c364b22c
@@ -47,6 +47,10 @@
 
 #define MPAGE_DA_EXTENT_TAIL	0x01
 
+static int __ext4_journalled_writepage(struct page *page,
+				       struct writeback_control *wbc,
+				       unsigned int len);
+
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
 					      loff_t new_size)
 {
@@ -2392,7 +2396,7 @@ static int __mpage_da_writepage(struct page *page,
			 * We need to try to allocate
			 * unmapped blocks in the same page.
			 * Otherwise we won't make progress
-			 * with the page in ext4_da_writepage
+			 * with the page in ext4_writepage
			 */
			if (ext4_bh_delay_or_unwritten(NULL, bh)) {
				mpage_add_bh_to_extent(mpd, logical,
@@ -2519,13 +2523,47 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
 }
 
 /*
+ * Note that we don't need to start a transaction unless we're journaling data
+ * because we should have holes filled from ext4_page_mkwrite(). We even don't
+ * need to file the inode to the transaction's list in ordered mode because if
+ * we are writing back data added by write(), the inode is already there and if
+ * we are writing back data modified via mmap(), no one guarantees in which
+ * transaction the data will hit the disk. In case we are journaling data, we
+ * cannot start a transaction directly because transaction start ranks above
+ * the page lock, so we have to do some magic.
+ *
  * This function can get called via...
  *   - ext4_da_writepages after taking page lock (have journal handle)
  *   - journal_submit_inode_data_buffers (no journal handle)
  *   - shrink_page_list via pdflush (no journal handle)
  *   - grab_page_cache when doing write_begin (have journal handle)
+ *
+ * We don't do any block allocation in this function. If we have a page with
+ * multiple blocks, we need to write those buffer_heads that are mapped. This
+ * is important for mmap-based writes. So if we do, with blocksize 1K,
+ *	truncate(f, 1024);
+ *	a = mmap(f, 0, 4096);
+ *	a[0] = 'a';
+ *	truncate(f, 4096);
+ * then the first buffer_head in the page is mapped via the page_mkwrite
+ * callback, while the other buffer_heads are unmapped but dirty (dirtied via
+ * do_wp_page), so writepage should write that first block. If we later modify
+ * the mmapped area beyond 1024, we get another page fault and the
+ * page_mkwrite callback does the block allocation and marks the buffer_heads
+ * mapped.
+ *
+ * We redirty the page if it has any buffer_heads that are either delayed or
+ * unwritten.
+ *
+ * We can get called recursively, as shown below:
+ *
+ *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
+ *	ext4_writepage()
+ *
+ * But since we don't do any block allocation we should not deadlock.
+ * The page also has its dirty flag cleared, so we don't take the page lock
+ * recursively.
  */
-static int ext4_da_writepage(struct page *page,
-			     struct writeback_control *wbc)
+static int ext4_writepage(struct page *page,
+			  struct writeback_control *wbc)
 {
	int ret = 0;
@@ -2534,7 +2572,7 @@ static int ext4_da_writepage(struct page *page,
	struct buffer_head *page_bufs;
	struct inode *inode = page->mapping->host;
 
-	trace_ext4_da_writepage(inode, page);
+	trace_ext4_writepage(inode, page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
@@ -2596,6 +2634,15 @@ static int ext4_da_writepage(struct page *page,
		block_commit_write(page, 0, len);
	}
 
+	if (PageChecked(page) && ext4_should_journal_data(inode)) {
+		/*
+		 * It's mmapped pagecache.  Add buffers and journal it.  There
+		 * doesn't seem much point in redirtying the page here.
+		 */
+		ClearPageChecked(page);
+		return __ext4_journalled_writepage(page, wbc, len);
+	}
+
	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
		ret = nobh_writepage(page, noalloc_get_block_write, wbc);
	else
@@ -3135,112 +3182,10 @@ static int bput_one(handle_t *handle, struct buffer_head *bh)
	return 0;
 }
 
-/*
- * Note that we don't need to start a transaction unless we're journaling data
- * because we should have holes filled from ext4_page_mkwrite(). We even don't
- * need to file the inode to the transaction's list in ordered mode because if
- * we are writing back data added by write(), the inode is already there and if
- * we are writing back data modified via mmap(), noone guarantees in which
- * transaction the data will hit the disk. In case we are journaling data, we
- * cannot start transaction directly because transaction start ranks above page
- * lock so we have to do some magic.
- *
- * In all journaling modes block_write_full_page() will start the I/O.
- *
- * Problem:
- *
- * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
- *	ext4_writepage()
- *
- * Similar for:
- *
- * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
- *
- * Same applies to ext4_get_block(). We will deadlock on various things like
- * lock_journal and i_data_sem
- *
- * Setting PF_MEMALLOC here doesn't work - too many internal memory
- * allocations fail.
- *
- * 16May01: If we're reentered then journal_current_handle() will be
- * non-zero. We simply *return*.
- *
- * 1 July 2001: @@@ FIXME:
- *   In journalled data mode, a data buffer may be metadata against the
- *   current transaction.  But the same file is part of a shared mapping
- *   and someone does a writepage() on it.
- *
- *   We will move the buffer onto the async_data list, but *after* it has
- *   been dirtied. So there's a small window where we have dirty data on
- *   BJ_Metadata.
- *
- *   Note that this only applies to the last partial page in the file.  The
- *   bit which block_write_full_page() uses prepare/commit for.  (That's
- *   broken code anyway: it's wrong for msync()).
- *
- *   It's a rare case: affects the final partial page, for journalled data
- *   where the file is subject to bith write() and writepage() in the same
- *   transction.  To fix it we'll need a custom block_write_full_page().
- *   We'll probably need that anyway for journalling writepage() output.
- *
- * We don't honour synchronous mounts for writepage().  That would be
- * disastrous.  Any write() or metadata operation will sync the fs for
- * us.
- *
- */
-static int __ext4_normal_writepage(struct page *page,
-				   struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-
-	if (test_opt(inode->i_sb, NOBH))
-		return nobh_writepage(page, noalloc_get_block_write, wbc);
-	else
-		return block_write_full_page(page, noalloc_get_block_write,
-					     wbc);
-}
-
-static int ext4_normal_writepage(struct page *page,
-				 struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	loff_t size = i_size_read(inode);
-	loff_t len;
-
-	trace_ext4_normal_writepage(inode, page);
-	J_ASSERT(PageLocked(page));
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
-	else
-		len = PAGE_CACHE_SIZE;
-
-	if (page_has_buffers(page)) {
-		/* if page has buffers it should all be mapped
-		 * and allocated. If there are not buffers attached
-		 * to the page we know the page is dirty but it lost
-		 * buffers. That means that at some moment in time
-		 * after write_begin() / write_end() has been called
-		 * all buffers have been clean and thus they must have been
-		 * written at least once. So they are all mapped and we can
-		 * happily proceed with mapping them and writing the page.
-		 */
-		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-					 ext4_bh_delay_or_unwritten));
-	}
-	if (!ext4_journal_current_handle())
-		return __ext4_normal_writepage(page, wbc);
-
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return 0;
-}
-
 static int __ext4_journalled_writepage(struct page *page,
-				       struct writeback_control *wbc)
+				       struct writeback_control *wbc,
+				       unsigned int len)
 {
-	loff_t size;
-	unsigned int len;
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs;
@@ -3248,16 +3193,8 @@ static int __ext4_journalled_writepage(struct page *page,
	int ret = 0;
	int err;
 
-	size = i_size_read(inode);
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
-	else
-		len = PAGE_CACHE_SIZE;
-
-	ret = block_prepare_write(page, 0, len, noalloc_get_block_write);
-	if (ret != 0)
-		goto out_unlock;
-
	page_bufs = page_buffers(page);
+	BUG_ON(!page_bufs);
	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
	/* As soon as we unlock the page, it can go away, but we have
	 * references to buffers so we are safe */
@@ -3282,67 +3219,10 @@ static int __ext4_journalled_writepage(struct page *page,
	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
-	goto out;
-
-out_unlock:
-	unlock_page(page);
 out:
	return ret;
 }
 
-static int ext4_journalled_writepage(struct page *page,
-				     struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	loff_t size = i_size_read(inode);
-	loff_t len;
-
-	trace_ext4_journalled_writepage(inode, page);
-	J_ASSERT(PageLocked(page));
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
-	else
-		len = PAGE_CACHE_SIZE;
-
-	if (page_has_buffers(page)) {
-		/* if page has buffers it should all be mapped
-		 * and allocated. If there are not buffers attached
-		 * to the page we know the page is dirty but it lost
-		 * buffers. That means that at some moment in time
-		 * after write_begin() / write_end() has been called
-		 * all buffers have been clean and thus they must have been
-		 * written at least once. So they are all mapped and we can
-		 * happily proceed with mapping them and writing the page.
-		 */
-		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-					 ext4_bh_delay_or_unwritten));
-	}
-	if (ext4_journal_current_handle())
-		goto no_write;
-
-	if (PageChecked(page)) {
-		/*
-		 * It's mmapped pagecache.  Add buffers and journal it.  There
-		 * doesn't seem much point in redirtying the page here.
-		 */
-		ClearPageChecked(page);
-		return __ext4_journalled_writepage(page, wbc);
-	} else {
-		/*
-		 * It may be a page full of checkpoint-mode buffers.  We don't
-		 * really know unless we go poke around in the buffer_heads.
-		 * But block_write_full_page will do the right thing.
-		 */
-		return block_write_full_page(page, noalloc_get_block_write,
-					     wbc);
-	}
-no_write:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return 0;
-}
-
 static int ext4_readpage(struct file *file, struct page *page)
 {
	return mpage_readpage(page, ext4_get_block);
@@ -3489,7 +3369,7 @@ static int ext4_journalled_set_page_dirty(struct page *page)
 static const struct address_space_operations ext4_ordered_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
-	.writepage		= ext4_normal_writepage,
+	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_ordered_write_end,
@@ -3504,7 +3384,7 @@ static const struct address_space_operations ext4_ordered_aops = {
 static const struct address_space_operations ext4_writeback_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
-	.writepage		= ext4_normal_writepage,
+	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_writeback_write_end,
@@ -3519,7 +3399,7 @@ static const struct address_space_operations ext4_writeback_aops = {
 static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
-	.writepage		= ext4_journalled_writepage,
+	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
@@ -3533,7 +3413,7 @@ static const struct address_space_operations ext4_journalled_aops = {
 static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
-	.writepage		= ext4_da_writepage,
+	.writepage		= ext4_writepage,
	.writepages		= ext4_da_writepages,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_da_write_begin,
...
@@ -190,7 +190,7 @@ TRACE_EVENT(ext4_journalled_write_end,
		  __entry->copied)
 );
 
-TRACE_EVENT(ext4_da_writepage,
+TRACE_EVENT(ext4_writepage,
	TP_PROTO(struct inode *inode, struct page *page),
 
	TP_ARGS(inode, page),
@@ -342,49 +342,6 @@ TRACE_EVENT(ext4_da_write_end,
		  __entry->copied)
 );
 
-TRACE_EVENT(ext4_normal_writepage,
-	TP_PROTO(struct inode *inode, struct page *page),
-
-	TP_ARGS(inode, page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev	)
-		__field(	ino_t,	ino	)
-		__field(	pgoff_t, index	)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->index	= page->index;
-	),
-
-	TP_printk("dev %s ino %lu page_index %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
-);
-
-TRACE_EVENT(ext4_journalled_writepage,
-	TP_PROTO(struct inode *inode, struct page *page),
-
-	TP_ARGS(inode, page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev	)
-		__field(	ino_t,	ino	)
-		__field(	pgoff_t, index	)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->index	= page->index;
-	),
-
-	TP_printk("dev %s ino %lu page_index %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
-);
-
 TRACE_EVENT(ext4_discard_blocks,
	TP_PROTO(struct super_block *sb, unsigned long long blk,
		 unsigned long long count),
...