Commit cfbb8488 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] do_no_page() fix

From: David Mosberger <davidm@napali.hpl.hp.com>,
	"Sharma, Arun" <arun.sharma@intel.com>

The truncate race fix assumed that a non-zero vma->vm_ops->nopage implies a
non-zero vma->vm_file.

The ia64 x86 emulation code breaks this assumption, so teach do_no_page() to
handle it.
parent ba869a99
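
To make the failure mode concrete, here is a minimal hypothetical sketch (not part of this patch) of the kind of vma the ia64 x86 emulation layer sets up: ->nopage is populated while ->vm_file stays NULL, so the old unconditional vm_file->f_dentry dereference would oops on the first fault. All names below are illustrative, and the handler follows the 2.6-era ->nopage signature.

	/* Hypothetical example, 2.6-era API; names are illustrative only. */
	#include <linux/mm.h>
	#include <linux/highmem.h>

	static struct page *emu_nopage(struct vm_area_struct *vma,
				       unsigned long address, int unused)
	{
		/* hand back a kernel-allocated page; no address_space involved */
		struct page *page = alloc_page(GFP_HIGHUSER);
		if (!page)
			return NOPAGE_SIGBUS;
		clear_user_highpage(page, address);
		return page;
	}

	static struct vm_operations_struct emu_vm_ops = {
		.nopage	= emu_nopage,
	};

	/*
	 * Installed on a vma whose backing file is left unset:
	 *	vma->vm_ops  = &emu_vm_ops;
	 *	vma->vm_file = NULL;
	 * which is exactly the case the new vma->vm_file check covers.
	 */

With such a vma there is no address_space and hence no truncate_count to sample, which is why the patch initializes mapping to NULL and sequence to 0 and skips the truncate check for it.
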
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1387,10 +1387,10 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
 {
 	struct page * new_page;
-	struct address_space *mapping;
+	struct address_space *mapping = NULL;
 	pte_t entry;
 	struct pte_chain *pte_chain;
-	int sequence;
+	int sequence = 0;
 	int ret;
 
 	if (!vma->vm_ops || !vma->vm_ops->nopage)
@@ -1399,8 +1399,10 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_unmap(page_table);
 	spin_unlock(&mm->page_table_lock);
 
-	mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
-	sequence = atomic_read(&mapping->truncate_count);
+	if (vma->vm_file) {
+		mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+		sequence = atomic_read(&mapping->truncate_count);
+	}
 	smp_rmb(); /* Prevent CPU from reordering lock-free ->nopage() */
 retry:
 	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
@@ -1436,7 +1438,8 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * invalidated this page.  If invalidate_mmap_range got called,
 	 * retry getting the page.
 	 */
-	if (unlikely(sequence != atomic_read(&mapping->truncate_count))) {
+	if (mapping &&
+	      (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
 		sequence = atomic_read(&mapping->truncate_count);
 		spin_unlock(&mm->page_table_lock);
 		page_cache_release(new_page);
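
For context, here is the writer side of the protocol that the retry check pairs with, reconstructed loosely from the comment in the hunk above rather than copied from the tree: invalidate_mmap_range() increments truncate_count before unmapping, so a fault that sampled the counter beforehand sees a mismatch and retries. The function name and body below are an assumption.

	/* Hedged sketch of the invalidation side; illustrative only,
	 * not the actual 2.6 invalidate_mmap_range() body. */
	static void invalidation_sketch(struct address_space *mapping)
	{
		/* bump first, so concurrent faults that already sampled
		 * the counter fail the do_no_page() comparison and retry */
		atomic_inc(&mapping->truncate_count);
		/* ... then zap the page tables for the truncated range;
		 * this ordering pairs with the smp_rmb() that do_no_page()
		 * issues before calling ->nopage() ... */
	}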