Commit a6146888 authored by Becky Bruce, committed by Benjamin Herrenschmidt

powerpc: Add gpages reservation code for 64-bit FSL BOOKE

For 64-bit FSL_BOOKE implementations, gigantic pages need to be
reserved at boot time by the memblock code based on the command line.
This adds the call that handles the reservation, and fixes some code
comments.

It also removes the previous pr_err issued when reserve_hugetlb_gpages
is called on a system without hugetlb enabled - the way the code is
structured, the call is unconditional, so the resulting error message
is spurious and confusing.
Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent d1b9b128
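
Note on the call being wired up below: reserve_hugetlb_gpages() runs from early_setup(), after memblock is initialized but long before the generic hugetlb boot code, so it has to take the gigantic-page request from the command line and carve the pages straight out of memblock. A minimal sketch of that pattern, using 2011-era memblock interfaces, is shown here; the command-line parsing is omitted, and gpage_size, gpage_count and add_gpage_stub() are placeholders for the series' actual bookkeeping rather than quotes from it.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

/* Filled in from hugepagesz=/hugepages= on the command line (parsing omitted). */
static phys_addr_t gpage_size;		/* e.g. 1G */
static unsigned long gpage_count;

/*
 * Placeholder for the hugetlb boot-page bookkeeping that later feeds
 * huge_boot_pages; not the series' real helper.
 */
static void __init add_gpage_stub(phys_addr_t addr, phys_addr_t size) { }

void __init reserve_hugetlb_gpages(void)
{
	phys_addr_t base;
	unsigned long i;

	if (!gpage_size || !gpage_count)
		return;

	for (i = 0; i < gpage_count; i++) {
		/*
		 * Grab one naturally aligned gigantic page from memblock;
		 * __memblock_alloc_base() returns 0 if nothing fits.
		 */
		base = __memblock_alloc_base(gpage_size, gpage_size,
					     MEMBLOCK_ALLOC_ANYWHERE);
		if (!base)
			break;
		add_gpage_stub(base, gpage_size);
	}
}

With the call placed in early_setup() as in the second file below, booting with, say, hugepagesz=1G hugepages=4 on the command line reserves those four pages before the buddy allocator ever sees that memory.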
arch/powerpc/include/asm/hugetlb.h
@@ -5,7 +5,6 @@
 #include <asm/page.h>
 
 extern struct kmem_cache *hugepte_cache;
-extern void __init reserve_hugetlb_gpages(void);
 
 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
@@ -153,14 +152,24 @@ static inline void arch_release_hugepage(struct page *page)
 }
 
 #else /* ! CONFIG_HUGETLB_PAGE */
-static inline void reserve_hugetlb_gpages(void)
-{
-	pr_err("Cannot reserve gpages without hugetlb enabled\n");
-}
 static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 				      unsigned long vmaddr)
 {
 }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * FSL Book3E platforms require special gpage handling - the gpages
+ * are reserved early in the boot process by memblock instead of via
+ * the .dts as on IBM platforms.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+extern void __init reserve_hugetlb_gpages(void);
+#else
+static inline void reserve_hugetlb_gpages(void)
+{
+}
+
 #endif
 
 #endif /* _ASM_POWERPC_HUGETLB_H */
arch/powerpc/kernel/setup_64.c
@@ -35,6 +35,8 @@
 #include <linux/pci.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
+#include <linux/hugetlb.h>
+
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -64,6 +66,7 @@
 #include <asm/mmu_context.h>
 #include <asm/code-patching.h>
 #include <asm/kvm_ppc.h>
+#include <asm/hugetlb.h>
 
 #include "setup.h"
 
@@ -217,6 +220,13 @@ void __init early_setup(unsigned long dt_ptr)
 	/* Initialize the hash table or TLB handling */
 	early_init_mmu();
 
+	/*
+	 * Reserve any gigantic pages requested on the command line.
+	 * memblock needs to have been initialized by the time this is
+	 * called since this will reserve memory.
+	 */
+	reserve_hugetlb_gpages();
+
 	DBG(" <- early_setup()\n");
 }
 
arch/powerpc/mm/hugetlbpage.c
@@ -28,10 +28,10 @@ unsigned int HPAGE_SHIFT;
 
 /*
  * Tracks gpages after the device tree is scanned and before the
- * huge_boot_pages list is ready.  On 64-bit implementations, this is
- * just used to track 16G pages and so is a single array.  32-bit
- * implementations may have more than one gpage size due to limitations
- * of the memory allocators, so we need multiple arrays
+ * huge_boot_pages list is ready.  On non-Freescale implementations, this is
+ * just used to track 16G pages and so is a single array.  FSL-based
+ * implementations may have more than one gpage size, so we need multiple
+ * arrays
  */
 #ifdef CONFIG_PPC_FSL_BOOK3E
 #define MAX_NUMBER_GPAGES	128
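
The comment rewrite in this last hunk reflects a real structural difference: server (IBM) 64-bit only ever boot-reserves 16G pages, so one flat array of physical addresses suffices, while FSL Book3E can carry gigantic pages of several sizes at once and needs a free list per page size. Roughly, the tracking in hugetlbpage.c splits along the following lines (a sketch only; the identifiers are approximations, not quoted from the tree):

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Multiple gpage sizes: one small free list per MMU page size. */
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64		gpage_list[MAX_NUMBER_GPAGES];	/* physical addresses */
	unsigned int	nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
/* Non-Freescale 64-bit: 16G pages only, so a single flat array. */
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned long nr_gpages;
#endif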