Commit ef3d3246 authored by Dave Kleikamp's avatar Dave Kleikamp Committed by Benjamin Herrenschmidt

powerpc/mm: Add Strong Access Ordering support

Allow an application to enable Strong Access Ordering on specific pages of
memory on Power 7 hardware. Currently, power has a weaker memory model than
x86. Implementing a stronger memory model allows an emulator to more
efficiently translate x86 code into power code, resulting in faster code
execution.

On Power 7 hardware, storing 0b1110 in the WIMG bits of the hpte enables
strong access ordering mode for the memory page.  This patchset allows a
user to specify which pages are thus enabled by passing a new protection
bit through mmap() and mprotect().  I have defined PROT_SAO to be 0x10.
Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 37907049
@@ -143,6 +143,9 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
	struct file * file = NULL;
	unsigned long ret = -EINVAL;

	if (!arch_validate_prot(prot))
		goto out;

	if (shift) {
		if (off & ((1 << shift) - 1))
			goto out;
......
#ifndef _ASM_POWERPC_MMAN_H
#define _ASM_POWERPC_MMAN_H

#include <asm/cputable.h>
#include <asm-generic/mman.h>
#include <linux/mm.h>

/*
 * This program is free software; you can redistribute it and/or

@@ -26,4 +28,32 @@
#define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
#define MAP_NONBLOCK	0x10000		/* do not block on IO */
#ifdef CONFIG_PPC64
/*
* This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits()
* here. How important is the optimization?
*/
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
{
return (prot & PROT_SAO) ? VM_SAO : 0;
}
#define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0;
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
static inline int arch_validate_prot(unsigned long prot)
{
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
return 0;
if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
return 0;
return 1;
}
#define arch_validate_prot(prot) arch_validate_prot(prot)
#endif /* CONFIG_PPC64 */
#endif	/* _ASM_POWERPC_MMAN_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment