Commit 985c30ef authored by Ralf Baechle

[MIPS] Fix aliasing bug in copy_user_highpage, take 2.

Turns out commit b868868a wasn't quite right.
When called for a page that isn't marked dirty it would artificially
create an alias instead of doing the obvious thing and access the page
via KSEG0.

The same issue also exists in copy_to_user_page and copy_from_user_page,
which was causing the machine to die under rare circumstances — for example
when running ps — if the BUG_ON() assertion added by the earlier fix was
triggered.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 736fad17
...@@ -211,7 +211,7 @@ void copy_user_highpage(struct page *to, struct page *from, ...@@ -211,7 +211,7 @@ void copy_user_highpage(struct page *to, struct page *from,
void *vfrom, *vto; void *vfrom, *vto;
vto = kmap_atomic(to, KM_USER1); vto = kmap_atomic(to, KM_USER1);
if (cpu_has_dc_aliases && !Page_dcache_dirty(from)) { if (cpu_has_dc_aliases && page_mapped(from)) {
vfrom = kmap_coherent(from, vaddr); vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom); copy_page(vto, vfrom);
kunmap_coherent(); kunmap_coherent();
...@@ -234,12 +234,15 @@ void copy_to_user_page(struct vm_area_struct *vma, ...@@ -234,12 +234,15 @@ void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src, struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len) unsigned long len)
{ {
if (cpu_has_dc_aliases) { if (cpu_has_dc_aliases && page_mapped(page)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len); memcpy(vto, src, len);
kunmap_coherent(); kunmap_coherent();
} else } else {
memcpy(dst, src, len); memcpy(dst, src, len);
if (cpu_has_dc_aliases)
SetPageDcacheDirty(page);
}
if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
flush_cache_page(vma, vaddr, page_to_pfn(page)); flush_cache_page(vma, vaddr, page_to_pfn(page));
} }
...@@ -250,13 +253,15 @@ void copy_from_user_page(struct vm_area_struct *vma, ...@@ -250,13 +253,15 @@ void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src, struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len) unsigned long len)
{ {
if (cpu_has_dc_aliases) { if (cpu_has_dc_aliases && page_mapped(page)) {
void *vfrom = void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len); memcpy(dst, vfrom, len);
kunmap_coherent(); kunmap_coherent();
} else } else {
memcpy(dst, src, len); memcpy(dst, src, len);
if (cpu_has_dc_aliases)
SetPageDcacheDirty(page);
}
} }
EXPORT_SYMBOL(copy_from_user_page); EXPORT_SYMBOL(copy_from_user_page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment