Commit 34094537 authored by Ryusuke Konishi

nilfs2: fix data loss in mmap page write for hole blocks

From the result of a function test of mmap, mmap write to shared pages
turned out to be broken for hole blocks.  It doesn't write out filled
blocks and the data will be lost after umount.  This is due to a bug
that the target file is not queued for log writer when filling hole
blocks.

Also, nilfs_page_mkwrite function exits normal code path even after
successfully filled hole blocks due to a change of block_page_mkwrite
function; just after nilfs was merged into the mainline,
block_page_mkwrite() started to return VM_FAULT_LOCKED instead of zero
by the patch "mm: close page_mkwrite races" (commit:
b827e496).  The current nilfs_page_mkwrite() is not handling
this value properly.

This corrects nilfs_page_mkwrite() and will resolve the data loss
problem in mmap write.

[This should be applied to every kernel since 2.6.30 but a fix is
 needed for 2.6.37 and prior kernels]
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Tested-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: stable <stable@kernel.org>  [2.6.38]
parent 0ce790e7
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -72,10 +72,9 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/*
 	 * check to see if the page is mapped already (no holes)
 	 */
-	if (PageMappedToDisk(page)) {
-		unlock_page(page);
+	if (PageMappedToDisk(page))
 		goto mapped;
-	}
+
 	if (page_has_buffers(page)) {
 		struct buffer_head *bh, *head;
 		int fully_mapped = 1;
@@ -90,7 +89,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 		if (fully_mapped) {
 			SetPageMappedToDisk(page);
-			unlock_page(page);
 			goto mapped;
 		}
 	}
@@ -105,16 +103,17 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
-	if (unlikely(ret)) {
+	if (ret != VM_FAULT_LOCKED) {
 		nilfs_transaction_abort(inode->i_sb);
 		return ret;
 	}
+	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
 	nilfs_transaction_commit(inode->i_sb);
 
  mapped:
 	SetPageChecked(page);
 	wait_on_page_writeback(page);
-	return 0;
+	return VM_FAULT_LOCKED;
 }
 
 static const struct vm_operations_struct nilfs_file_vm_ops = {
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment