Commit c4d92e6b authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rmap: flush_dcache revisited

From: Hugh Dickins <hugh@veritas.com>

One of the callers of flush_dcache_page is do_generic_mapping_read, where the
file is read without i_sem and without the page lock: concurrent truncation may
at any moment remove the page from the page cache, NULLing ->mapping and making
flush_dcache_page liable to oops.  Put the result of page_mapping in a local
variable and apply mapping_mapped to that (if we were to check for NULL
inside mapping_mapped, it's unclear whether it should say yes or no).

parisc and arm do have other locking unsafety in their i_mmap(_shared)
searches, but that's a larger issue to be dealt with down the line.
parent 1896ae13
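Condensed, the change at each call site follows one before/after pattern (a
minimal sketch drawn from the hunks below, not taken verbatim from any single
file):

	/* Before: ->mapping is read twice; truncation can NULL it in between */
	if (page_mapping(page) && !mapping_mapped(page->mapping))
		set_bit(PG_dcache_dirty, &page->flags);

	/* After: snapshot the mapping once, then test and use only the local */
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);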
@@ -186,19 +186,20 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 void __flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
 
 	__cpuc_flush_dcache_page(page_address(page));
 
-	if (!page_mapping(page))
+	if (!mapping)
 		return;
 
 	/*
 	 * With a VIVT cache, we need to also write back
 	 * and invalidate any user data.
 	 */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;

@@ -224,11 +225,15 @@ void __flush_dcache_page(struct page *page)
 static void
 make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct list_head *l;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long pgoff;
 	int aliases = 0;
 
+	if (!mapping)
+		return;
+
 	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 
 	/*
@@ -236,7 +241,7 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 	 * space, then we need to handle them specially to maintain
 	 * cache coherency.
 	 */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;

@@ -55,9 +55,10 @@ asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
 void flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	unsigned long addr;
 
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	if (mapping && !mapping_mapped(mapping)) {
 		SetPageDcacheDirty(page);
 		return;
 	}

@@ -229,16 +229,17 @@ void disable_sr_hashing(void)
 void __flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
 
 	flush_kernel_dcache_page(page_address(page));
 
-	if (!page_mapping(page))
+	if (!mapping)
 		return;
 
 	/* check shared list first if it's not empty...it's usually
 	 * the shortest */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;

@@ -267,7 +268,7 @@ void __flush_dcache_page(struct page *page)
 	/* then check private mapping list for read only shared mappings
 	 * which are flagged by VM_MAYSHARE */
-	list_for_each(l, &page->mapping->i_mmap) {
+	list_for_each(l, &mapping->i_mmap) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;

@@ -224,10 +224,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 void flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	int dirty = test_bit(PG_dcache_dirty, &page->flags);
 	int dirty_cpu = dcache_dirty_cpu(page);
 
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	if (mapping && !mapping_mapped(mapping)) {
 		if (dirty) {
 			if (dirty_cpu == smp_processor_id())
 				return;

@@ -295,7 +295,9 @@ extern void __flush_dcache_page(struct page *);
 static inline void flush_dcache_page(struct page *page)
 {
-	if (page_mapping(page) && !mapping_mapped(page->mapping))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 		__flush_dcache_page(page);

@@ -69,7 +69,9 @@ extern void __flush_dcache_page(struct page *page);
 static inline void flush_dcache_page(struct page *page)
 {
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping)) {
 		set_bit(PG_dcache_dirty, &page->flags);
 	} else {
 		__flush_dcache_page(page);

@@ -97,12 +97,11 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
 	pte_clear(ptep);
 
 	if (!pte_not_present(pte)) {
-		struct page *page;
 		unsigned long pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (!page_mapping(page) ||
-			    !mapping_writably_mapped(page->mapping))
+			struct page *page = pfn_to_page(pfn);
+			struct address_space *mapping = page_mapping(page);
+			if (!mapping || !mapping_writably_mapped(mapping))
 				__clear_bit(PG_mapped, &page->flags);
 		}
 	}