Commit de73b6b1 authored by Max Filippov, committed by Chris Zankel

xtensa: avoid mmap cache aliasing

Provide an arch_get_unmapped_area function that aligns shared memory mapping
addresses to the larger of the page size and the cache way size. That
guarantees that corresponding virtual addresses of shared mappings are
cached by the same cache sets.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
parent 475c32d0
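
As a rough illustration of the colour arithmetic (not part of the commit), the standalone sketch below mirrors the COLOUR_ALIGN macro added in the diff. SHMLBA is assumed to be 0x4000 (a 16 KiB cache way) and PAGE_SHIFT is assumed to be 12 purely for the example; these are not necessarily xtensa's real values.

/* Illustrative only: the constants are assumptions for the example. */
#include <stdio.h>

#define EX_SHMLBA     0x4000UL  /* assumed cache way size (16 KiB) */
#define EX_PAGE_SHIFT 12        /* assumed page shift (4 KiB pages) */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	/* Round addr up to a way boundary, then add the colour implied by
	 * the file offset so all mappings of that offset share cache sets. */
	return ((addr + EX_SHMLBA - 1) & ~(EX_SHMLBA - 1)) +
	       ((pgoff << EX_PAGE_SHIFT) & (EX_SHMLBA - 1));
}

int main(void)
{
	/* 0x10001000 rounds up to 0x10004000; pgoff 3 adds colour 0x3000,
	 * so the candidate address becomes 0x10007000. */
	printf("%#lx\n", colour_align(0x10001000UL, 3));
	return 0;
}
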
@@ -410,6 +410,10 @@ typedef pte_t *pte_addr_t;
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
/* We provide our own get_unmapped_area to cope with
* SHM area cache aliasing for userland.
*/
#define HAVE_ARCH_UNMAPPED_AREA
#include <asm-generic/pgtable.h>

@@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
#include <uapi/asm/unistd.h>
};
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
(((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;

@@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
{
	return sys_fadvise64_64(fd, offset, len, advice);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
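
A minimal userland sketch of the effect this allocator is meant to produce (illustrative only; the file path and sizes are made up, not taken from the commit): MAP_SHARED mappings of the same file offset come back at addresses that agree modulo SHMLBA, so they fall into the same cache sets, while a MAP_FIXED | MAP_SHARED request at an address that breaks that rule is refused with EINVAL.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	/* Hypothetical backing file; any shared-writable file would do. */
	int fd = open("/tmp/colour-test", O_RDWR | O_CREAT, 0600);
	if (fd < 0 || ftruncate(fd, 0x8000) < 0)
		return 1;

	/* Both mappings use file offset 0, so with a colour-aligned
	 * allocator their addresses agree modulo SHMLBA. */
	void *a = mmap(NULL, 0x8000, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, 0x8000, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	printf("a = %p, b = %p\n", a, b);

	munmap(a, 0x8000);
	munmap(b, 0x8000);
	close(fd);
	return 0;
}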