Commit f729c8c9 authored by Ross Zwisler, committed by Linus Torvalds

dax: wrprotect pmd_t in dax_mapping_entry_mkclean

Currently dax_mapping_entry_mkclean() fails to clean and write protect
the pmd_t of a DAX PMD entry during an *sync operation.  This can result
in data loss in the following sequence:

1) mmap write to DAX PMD, dirtying PMD radix tree entry and making the
   pmd_t dirty and writeable
2) fsync, flushing out PMD data and cleaning the radix tree entry. We
   currently fail to mark the pmd_t as clean and write protected.
3) more mmap writes to the PMD.  These don't cause any page faults since
   the pmd_t is dirty and writeable.  The radix tree entry remains clean.
4) fsync, which fails to flush the dirty PMD data because the radix tree
   entry was clean.
5) crash - dirty data that should have been fsync'd as part of 4) could
   still have been in the processor cache, and is lost.

Fix this by marking the pmd_t clean and write protected in
dax_mapping_entry_mkclean(), which is called as part of the fsync
operation 2).  This will cause the writes in step 3) above to generate
page faults where we'll re-dirty the PMD radix tree entry, resulting in
flushes in the fsync that happens in step 4).
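
The sequence can be demonstrated from userspace with a sketch along these
lines (hypothetical reproducer, not part of the patch; the DAX mount point
and file name are made up, and actually getting a PMD mapping additionally
depends on 2 MiB alignment and filesystem support):

	/*
	 * Hypothetical reproducer for steps 1)-4); error handling omitted
	 * for brevity.  Assumes /mnt/dax is a DAX-mounted filesystem.
	 */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 2UL << 20;	/* one 2 MiB DAX PMD */
		int fd = open("/mnt/dax/file", O_RDWR);
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);

		memset(p, 1, len);	/* 1) dirty pmd_t and radix tree entry */
		fsync(fd);		/* 2) entry cleaned, pmd_t was not */
		memset(p, 2, len);	/* 3) no fault, entry stays clean */
		fsync(fd);		/* 4) sees a clean entry, flushes nothing */
		return 0;
	}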

Fixes: 4b4bb46d ("dax: clear dirty entry tags on cache flush")
Link: http://lkml.kernel.org/r/1482272586-21177-3-git-send-email-ross.zwisler@linux.intel.com
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 09796395
fs/dax.c

@@ -691,8 +691,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 				      pgoff_t index, unsigned long pfn)
 {
 	struct vm_area_struct *vma;
-	pte_t *ptep;
-	pte_t pte;
+	pte_t pte, *ptep = NULL;
+	pmd_t *pmdp = NULL;
 	spinlock_t *ptl;
 	bool changed;
@@ -707,12 +707,32 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 		address = pgoff_address(index, vma);
 		changed = false;
-		if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
 			continue;
+
+		if (pmdp) {
+#ifdef CONFIG_FS_DAX_PMD
+			pmd_t pmd;
+
+			if (pfn != pmd_pfn(*pmdp))
+				goto unlock_pmd;
+			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+				goto unlock_pmd;
+
+			flush_cache_page(vma, address, pfn);
+			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
+			pmd = pmd_wrprotect(pmd);
+			pmd = pmd_mkclean(pmd);
+			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+			changed = true;
+unlock_pmd:
+			spin_unlock(ptl);
+#endif
+		} else {
 			if (pfn != pte_pfn(*ptep))
-				goto unlock;
+				goto unlock_pte;
 			if (!pte_dirty(*ptep) && !pte_write(*ptep))
-				goto unlock;
+				goto unlock_pte;
 
 			flush_cache_page(vma, address, pfn);
 			pte = ptep_clear_flush(vma, address, ptep);
@@ -720,8 +740,9 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 			pte = pte_mkclean(pte);
 			set_pte_at(vma->vm_mm, address, ptep, pte);
 			changed = true;
-unlock:
+unlock_pte:
 			pte_unmap_unlock(ptep, ptl);
+		}
 
 		if (changed)
 			mmu_notifier_invalidate_page(vma->vm_mm, address);
include/linux/mm.h

@@ -1210,8 +1210,6 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-	       spinlock_t **ptlp);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
mm/memory.c

@@ -3819,8 +3819,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 	return -EINVAL;
 }
 
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-	       spinlock_t **ptlp)
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+			     pte_t **ptepp, spinlock_t **ptlp)
 {
 	int res;
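
For reference, a minimal sketch of the calling convention that
dax_mapping_entry_mkclean() now relies on (illustrative only, not part of
the patch): on success, follow_pte_pmd() sets exactly one of the two output
pointers, and the unlock path differs between the PTE and PMD cases.

	pte_t *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	if (follow_pte_pmd(mm, address, &ptep, &pmdp, &ptl))
		return;			/* no mapping at this address */

	if (pmdp) {
		/* huge mapping: examine *pmdp while holding ptl */
		spin_unlock(ptl);
	} else {
		/* regular mapping: examine *ptep while holding ptl */
		pte_unmap_unlock(ptep, ptl);
	}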