Commit f8dae006 authored by John David Anglin, committed by Helge Deller

parisc: Ensure full cache coherency for kmap/kunmap

A few weeks ago, Helge Deller noted problems with the AIO support on
parisc. This change is the result of numerous iterations on how best to
deal with this problem.

The solution adopted here is to provide full cache coherency in a
uniform manner on all parisc systems. This involves calling
flush_dcache_page() on kmap operations and flush_kernel_dcache_page() on
kunmap operations. As a result, the copy_user_page() and
clear_user_page() functions can be removed and the overall code is
simpler.
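
In outline, the kmap/kunmap pair now looks like this (a condensed
sketch of the cacheflush.h hunk below, not the verbatim header):

	static inline void *kmap(struct page *page)
	{
		might_sleep();
		/* flush the user-space alias before handing out the
		   kernel alias */
		flush_dcache_page(page);
		return page_address(page);
	}

	static inline void kunmap(struct page *page)
	{
		/* flush the kernel alias once the mapping is done */
		flush_kernel_dcache_page_addr(page_address(page));
	}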

The change ensures that both userspace and kernel aliases to a mapped
page are invalidated and flushed. This is necessary for the correct
operation of PA8800- and PA8900-based systems, which do not support
inequivalent aliases.

With this change, I have observed no cache-related issues on c8000 and
rp3440. It is now possible, for example, to do kernel builds with
"-j64" on four-way systems.

On systems using XFS file systems, the patch recently posted by Mikulas
Patocka to "fix crash using XFS on loopback" is also needed, to avoid a
hang caused by an uninitialized lock in the page struct being passed to
flush_dcache_page().

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # v3.9+
Signed-off-by: Helge Deller <deller@gmx.de>
parent ceb3b021

--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
 void mark_rodata_ro(void);
 #endif
 
-#ifdef CONFIG_PA8X00
-/* Only pa8800, pa8900 needs this */
-
 #include <asm/kmap_types.h>
 
 #define ARCH_HAS_KMAP
 
-void kunmap_parisc(void *addr);
-
 static inline void *kmap(struct page *page)
 {
 	might_sleep();
+	flush_dcache_page(page);
 	return page_address(page);
 }
 
 static inline void kunmap(struct page *page)
 {
-	kunmap_parisc(page_address(page));
+	flush_kernel_dcache_page_addr(page_address(page));
 }
 
 static inline void *kmap_atomic(struct page *page)
 {
 	pagefault_disable();
+	flush_dcache_page(page);
 	return page_address(page);
 }
 
 static inline void __kunmap_atomic(void *addr)
 {
-	kunmap_parisc(addr);
+	flush_kernel_dcache_page_addr(addr);
 	pagefault_enable();
 }
 
 #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
 #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
-#endif
 
 #endif /* _PARISC_CACHEFLUSH_H */

--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -28,9 +28,8 @@ struct page;
 void clear_page_asm(void *page);
 void copy_page_asm(void *to, void *from);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-			struct page *pg);
+#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
 
 /* #define CONFIG_PARISC_TMPALIAS */

--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
-{
-	clear_page_asm(vto);
-	if (!parisc_requires_coherency())
-		flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-	struct page *pg)
-{
-	/* Copy using kernel mapping.  No coherency is needed (all in
-	   kmap/kunmap) on machines that don't support non-equivalent
-	   aliasing.  However, the `from' page needs to be flushed
-	   before it can be accessed through the kernel mapping. */
-	preempt_disable();
-	flush_dcache_page_asm(__pa(vfrom), vaddr);
-	preempt_enable();
-	copy_page_asm(vto, vfrom);
-	if (!parisc_requires_coherency())
-		flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(copy_user_page);
-
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
-{
-	if (parisc_requires_coherency())
-		flush_kernel_dcache_page_addr(addr);
-}
-EXPORT_SYMBOL(kunmap_parisc);
-
-#endif
-
 void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long flags;