Commit c4d92e6b authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rmap: flush_dcache revisited

From: Hugh Dickins <hugh@veritas.com>

One of the callers of flush_dcache_page is do_generic_mapping_read, where
the file is read without i_sem and without the page lock: concurrent
truncation may at any moment remove the page from the cache, NULLing
->mapping and leaving flush_dcache_page liable to oops.  Put the result of
page_mapping in a local variable and apply mapping_mapped to that (if we
were to check for NULL within mapping_mapped, it would be unclear whether
to say yes or no).

parisc and arm do have other locking unsafety in their i_mmap(_shared)
searching, but that's a larger issue to be dealt with down the line.
parent 1896ae13
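To make the race concrete, here is a minimal user-space sketch of the
before/after pattern, compiled as ordinary C.  The struct layouts, the
body of mapping_mapped(), and the needs_flush_racy()/needs_flush_fixed()
helpers are simplified stand-ins invented for illustration; only the
shape of the NULL check mirrors the patch below.

/*
 * Hypothetical stand-ins: the real struct address_space and struct page
 * look nothing like this; only the double-read vs. snapshot pattern is
 * the point.
 */
#include <stdbool.h>
#include <stddef.h>

struct address_space { bool mapped; };
struct page { struct address_space *mapping; };

static struct address_space *page_mapping(struct page *page)
{
	return page->mapping;	/* concurrent truncation may NULL this */
}

static bool mapping_mapped(struct address_space *mapping)
{
	/* deliberately no NULL check: yes and no are both wrong here */
	return mapping->mapped;
}

/* Before: ->mapping is read twice; truncation can clear it between the
 * NULL test and the mapping_mapped() call, so the second read may oops. */
static bool needs_flush_racy(struct page *page)
{
	return page_mapping(page) && mapping_mapped(page->mapping);
}

/* After: snapshot page_mapping() once into a local and test only that,
 * so a concurrent truncation cannot pull the mapping out from under us. */
static bool needs_flush_fixed(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	return mapping && mapping_mapped(mapping);
}

int main(void)
{
	struct address_space as = { .mapped = true };
	struct page pg = { .mapping = &as };

	return needs_flush_fixed(&pg) ? 0 : 1;
}

Each hunk below applies this same snapshot-then-test shape at a
different call site.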
@@ -186,19 +186,20 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 void __flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
 
 	__cpuc_flush_dcache_page(page_address(page));
 
-	if (!page_mapping(page))
+	if (!mapping)
 		return;
 
 	/*
	 * With a VIVT cache, we need to also write back
	 * and invalidate any user data.
	 */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;
@@ -224,11 +225,15 @@ void __flush_dcache_page(struct page *page)
 static void
 make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct list_head *l;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long pgoff;
 	int aliases = 0;
 
+	if (!mapping)
+		return;
+
 	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 
 	/*
@@ -236,7 +241,7 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;
......
@@ -55,9 +55,10 @@ asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
 void flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	unsigned long addr;
 
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	if (mapping && !mapping_mapped(mapping)) {
 		SetPageDcacheDirty(page);
 		return;
 	}
......
@@ -229,16 +229,17 @@ void disable_sr_hashing(void)
 void __flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
 
 	flush_kernel_dcache_page(page_address(page));
 
-	if (!page_mapping(page))
+	if (!mapping)
 		return;
 
 	/* check shared list first if it's not empty...it's usually
	 * the shortest */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;
@@ -267,7 +268,7 @@ void __flush_dcache_page(struct page *page)
 	/* then check private mapping list for read only shared mappings
	 * which are flagged by VM_MAYSHARE */
-	list_for_each(l, &page->mapping->i_mmap) {
+	list_for_each(l, &mapping->i_mmap) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;
......
@@ -224,10 +224,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 void flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	int dirty = test_bit(PG_dcache_dirty, &page->flags);
 	int dirty_cpu = dcache_dirty_cpu(page);
 
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	if (mapping && !mapping_mapped(mapping)) {
 		if (dirty) {
 			if (dirty_cpu == smp_processor_id())
 				return;
......
@@ -295,7 +295,9 @@ extern void __flush_dcache_page(struct page *);
 static inline void flush_dcache_page(struct page *page)
 {
-	if (page_mapping(page) && !mapping_mapped(page->mapping))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 		__flush_dcache_page(page);
......
@@ -69,7 +69,9 @@ extern void __flush_dcache_page(struct page *page);
 static inline void flush_dcache_page(struct page *page)
 {
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping)) {
 		set_bit(PG_dcache_dirty, &page->flags);
 	} else {
 		__flush_dcache_page(page);
......
@@ -97,12 +97,11 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
 	pte_clear(ptep);
 	if (!pte_not_present(pte)) {
-		struct page *page;
 		unsigned long pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (!page_mapping(page) ||
-			    !mapping_writably_mapped(page->mapping))
+			struct page *page = pfn_to_page(pfn);
+			struct address_space *mapping = page_mapping(page);
+			if (!mapping || !mapping_writably_mapped(mapping))
 				__clear_bit(PG_mapped, &page->flags);
 		}
 	}
......