Commit 8bd6964c authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm changes from Ingo Molnar:
 "A cleanup, a fix and ASLR support for hugetlb mappings"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/numa: Fix 32-bit kernel NUMA boot
  x86/mm: Implement ASLR for hugetlb mappings
  x86/mm: Unify pte_to_pgoff() and pgoff_to_pte() helpers
parents 2bb2c5e2 f3d815cb
...@@ -71,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr); ...@@ -71,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr);
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
#define __HAVE_ARCH_GATE_AREA 1 #define __HAVE_ARCH_GATE_AREA 1
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */ #endif /* _ASM_X86_PAGE_H */
...@@ -5,10 +5,6 @@ ...@@ -5,10 +5,6 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL #ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long); extern unsigned long __phys_addr(unsigned long);
......
...@@ -55,6 +55,13 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) ...@@ -55,6 +55,13 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif #endif
/*
 * Extract a bit field from @value and reposition it: shift right by
 * @rightshift, keep only the bits selected by @mask, then shift the
 * result left by @leftshift.  Common helper for assembling/taking apart
 * the pte <-> pgoff encodings.
 */
static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
				      unsigned long mask, unsigned int leftshift)
{
	unsigned long field = (value >> rightshift) & mask;

	return field << leftshift;
}
#ifdef CONFIG_MEM_SOFT_DIRTY #ifdef CONFIG_MEM_SOFT_DIRTY
/* /*
...@@ -71,31 +78,34 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) ...@@ -71,31 +78,34 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
#define pte_to_pgoff(pte) \ #define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ #define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
& ((1U << PTE_FILE_BITS1) - 1))) \ #define PTE_FILE_MASK3 ((1U << PTE_FILE_BITS3) - 1)
+ ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
& ((1U << PTE_FILE_BITS2) - 1)) \ #define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
<< (PTE_FILE_BITS1)) \ #define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
+ ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ #define PTE_FILE_LSHIFT4 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)
& ((1U << PTE_FILE_BITS3) - 1)) \
<< (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
+ ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ {
<< (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) return (pgoff_t)
(pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
#define pgoff_to_pte(off) \ pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
((pte_t) { .pte_low = \ pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, PTE_FILE_MASK3, PTE_FILE_LSHIFT3) +
((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ pte_bitop(pte.pte_low, PTE_FILE_SHIFT4, -1UL, PTE_FILE_LSHIFT4));
+ ((((off) >> PTE_FILE_BITS1) \ }
& ((1U << PTE_FILE_BITS2) - 1)) \
<< PTE_FILE_SHIFT2) \ static __always_inline pte_t pgoff_to_pte(pgoff_t off)
+ ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ {
& ((1U << PTE_FILE_BITS3) - 1)) \ return (pte_t){
<< PTE_FILE_SHIFT3) \ .pte_low =
+ ((((off) >> \ pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
(PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
<< PTE_FILE_SHIFT4) \ pte_bitop(off, PTE_FILE_LSHIFT3, PTE_FILE_MASK3, PTE_FILE_SHIFT3) +
+ _PAGE_FILE }) pte_bitop(off, PTE_FILE_LSHIFT4, -1UL, PTE_FILE_SHIFT4) +
_PAGE_FILE,
};
}
#else /* CONFIG_MEM_SOFT_DIRTY */ #else /* CONFIG_MEM_SOFT_DIRTY */
...@@ -115,22 +125,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) ...@@ -115,22 +125,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
#define pte_to_pgoff(pte) \ #define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
((((pte).pte_low >> PTE_FILE_SHIFT1) \ #define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
& ((1U << PTE_FILE_BITS1) - 1)) \
+ ((((pte).pte_low >> PTE_FILE_SHIFT2) \ #define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
& ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \ #define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
+ (((pte).pte_low >> PTE_FILE_SHIFT3) \
<< (PTE_FILE_BITS1 + PTE_FILE_BITS2))) static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
{
#define pgoff_to_pte(off) \ return (pgoff_t)
((pte_t) { .pte_low = \ (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
(((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
+ ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \ pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
<< PTE_FILE_SHIFT2) \ }
+ (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
<< PTE_FILE_SHIFT3) \ static __always_inline pte_t pgoff_to_pte(pgoff_t off)
+ _PAGE_FILE }) {
return (pte_t){
.pte_low =
pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
_PAGE_FILE,
};
}
#endif /* CONFIG_MEM_SOFT_DIRTY */ #endif /* CONFIG_MEM_SOFT_DIRTY */
......
...@@ -87,9 +87,7 @@ int pmd_huge_support(void) ...@@ -87,9 +87,7 @@ int pmd_huge_support(void)
} }
#endif #endif
/* x86_64 also uses this file */ #ifdef CONFIG_HUGETLB_PAGE
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags) unsigned long pgoff, unsigned long flags)
...@@ -99,7 +97,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, ...@@ -99,7 +97,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.flags = 0; info.flags = 0;
info.length = len; info.length = len;
info.low_limit = TASK_UNMAPPED_BASE; info.low_limit = current->mm->mmap_legacy_base;
info.high_limit = TASK_SIZE; info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0; info.align_offset = 0;
...@@ -172,8 +170,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ...@@ -172,8 +170,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return hugetlb_get_unmapped_area_topdown(file, addr, len, return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags); pgoff, flags);
} }
#endif /* CONFIG_HUGETLB_PAGE */
#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt) static __init int setup_hugepagesz(char *opt)
......
...@@ -210,11 +210,15 @@ static void __init setup_node_data(int nid, u64 start, u64 end) ...@@ -210,11 +210,15 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
* Never allocate in DMA zone. * Never allocate in DMA zone.
*/ */
nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
MEMBLOCK_ALLOC_ACCESSIBLE);
if (!nd_pa) { if (!nd_pa) {
pr_err("Cannot find %zu bytes in node %d\n", pr_err("Cannot find %zu bytes in node %d\n",
nd_size, nid); nd_size, nid);
return; return;
} }
}
nd = __va(nd_pa); nd = __va(nd_pa);
/* report and initialize */ /* report and initialize */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.