Commit 4197692e authored by Russell King

[ARM] Fix shared mmap()ings for ARM VIPT caches.

This allows us to appropriately align shared mappings on VIPT caches
with aliasing issues.
parent bb3c779f
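For readers unfamiliar with cache colouring: the rule this commit introduces guarantees that any two mappings of the same object page end up at virtual addresses that are congruent modulo SHMLBA, so an aliasing VIPT cache indexes them into the same cache set. The standalone sketch below replays the COLOUR_ALIGN() arithmetic from the new mmap.c; the constants are illustrative assumptions (4 KiB pages and a 16 KiB SHMLBA, as on an ARMv6 part with 16 KiB per cache way), not values taken from this commit.

#include <stdio.h>

/* Illustrative assumptions: 4 KiB pages and a 16 KiB SHMLBA. */
#define PAGE_SHIFT	12
#define SHMLBA		(4UL << PAGE_SHIFT)

/* Same arithmetic as the COLOUR_ALIGN() macro added by this commit:
 * round the hint up to an SHMLBA boundary, then add the page's
 * colour offset within the object. */
#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr = 0x40009000UL;	/* arbitrary mmap hint */
	unsigned long pgoff;

	for (pgoff = 0; pgoff < 6; pgoff++) {
		unsigned long a = COLOUR_ALIGN(addr, pgoff);

		printf("pgoff %lu -> %#lx (colour %lu)\n",
		       pgoff, a, (a & (SHMLBA - 1)) >> PAGE_SHIFT);
	}
	return 0;
}

Every address it picks carries the page's colour, i.e. the page's offset within the object modulo SHMLBA, so all mappings of that page share a cache colour.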
arch/arm/mm/Makefile:

@@ -3,7 +3,7 @@
 #
 obj-y		:= consistent.o extable.o fault-armv.o \
-		   fault-common.o init.o ioremap.o mm-armv.o
+		   fault-common.o init.o ioremap.o mmap.o mm-armv.o
 obj-$(CONFIG_MODULES) += proc-syms.o
arch/arm/mm/mmap.c (new file):

/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>

#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 9 and 21 of the
	 * cache type register.
	 */
	cache_type = read_cpuid(CPUID_CACHETYPE);
	if (cache_type != read_cpuid(CPUID_ID)) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 9);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We should enforce the MAP_FIXED case.  However, currently
	 * the generic kernel code doesn't allow us to handle this.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	start_addr = addr = mm->free_area_cache;

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
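Once this is in place, the effect is visible from userspace: two MAP_SHARED mappings of the same file offset always come back colour-equal. A minimal test sketch, assuming a kernel with this change on aliasing-VIPT hardware; the 16 KiB SHMLBA_GUESS is a hypothetical stand-in for the kernel's real SHMLBA:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define SHMLBA_GUESS	(4 * 4096UL)	/* hypothetical; the kernel's SHMLBA rules */

int main(void)
{
	int fd = open("/tmp/colour-test", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("setup");
		return 1;
	}

	/* Map page 0 of the file twice; the colour-aware allocator should
	 * place both mappings at addresses congruent modulo SHMLBA. */
	void *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (a == MAP_FAILED || b == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("a=%p b=%p same colour: %s\n", a, b,
	       ((unsigned long)a & (SHMLBA_GUESS - 1)) ==
	       ((unsigned long)b & (SHMLBA_GUESS - 1)) ? "yes" : "no");
	return 0;
}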
include/asm-arm/pgtable.h:

@@ -395,6 +395,11 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #include <asm-generic/pgtable.h>
 
+/*
+ * We provide our own arch_get_unmapped_area to cope with VIPT caches.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * remap a physical address `phys' of size `size' with page protection `prot'
  * into virtual address `from'
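HAVE_ARCH_UNMAPPED_AREA is a compile-time override: the generic first-fit allocator in mm/mmap.c is only built when the architecture has not declared its own arch_get_unmapped_area(). A compilable toy sketch of that ifndef pattern; the function bodies and addresses here are invented placeholders, not kernel code:

#include <stdio.h>

#define HAVE_ARCH_UNMAPPED_AREA		/* as the pgtable.h hunk above adds */

#ifndef HAVE_ARCH_UNMAPPED_AREA
/* Without the define, a generic, colour-unaware allocator is compiled. */
static unsigned long arch_get_unmapped_area(void)
{
	return 0x40000000UL;
}
#else
/* With the define, the architecture's colour-aware version is used. */
static unsigned long arch_get_unmapped_area(void)
{
	return 0x4000c000UL;	/* placeholder colour-aligned address */
}
#endif

int main(void)
{
	printf("chosen base: %#lx\n", arch_get_unmapped_area());
	return 0;
}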
include/asm-arm/shmparam.h:

@@ -6,10 +6,11 @@
  * or page size, whichever is greater since the cache aliases
  * every size/ways bytes.
  */
-#if __LINUX_ARM_ARCH__ > 5
-#define SHMLBA (4 * PAGE_SIZE)
-#else
-#define SHMLBA PAGE_SIZE	 /* attach addr a multiple of this */
-#endif
+#define SHMLBA (4 * PAGE_SIZE)	 /* attach addr a multiple of this */
+
+/*
+ * Enforce SHMLBA in shmat
+ */
+#define __ARCH_FORCE_SHMLBA
 
 #endif /* _ASMARM_SHMPARAM_H */
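__ARCH_FORCE_SHMLBA makes the SysV shm code enforce SHMLBA alignment, not just page alignment, on caller-supplied attach addresses, so shmat() cannot create a differently-coloured alias either. A minimal usage sketch; passing a NULL address lets the kernel place the segment via the colour-aware arch_get_unmapped_area() above:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* One-page private segment; the kernel chooses the attach address. */
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

	if (id < 0) {
		perror("shmget");
		return 1;
	}

	void *p = shmat(id, NULL, 0);
	if (p == (void *)-1) {
		perror("shmat");
		return 1;
	}
	printf("attached at %p\n", p);

	/* With __ARCH_FORCE_SHMLBA, an explicit non-NULL address must be a
	 * multiple of SHMLBA, or SHM_RND must be set so it is rounded down. */
	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}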