Commit bacf9cf8 authored by Michael Ellerman

powerpc/mm: Do hash device tree scanning earlier

Currently MMU initialisation (early_init_mmu()) consists of a mixture of
scanning the device tree, setting MMU feature bits, and then also doing
actual initialisation of MMU data structures.

We'd like to decouple the setting of the MMU features from the actual
setup. So split out the device tree scanning, and associated code, and
call it from mmu_early_init_devtree().
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent c610ec60
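For orientation before the diff, here is a minimal sketch of the resulting boot-time call order. It is written as a standalone C program: the function names (radix_enabled(), hash__early_init_devtree(), mmu_early_init_devtree(), hash__early_init_mmu()) come from the patch, but the bodies are placeholder stubs, not the real kernel implementations.

/*
 * Hypothetical sketch only: names match the patch, bodies are stubs.
 */
#include <stdbool.h>
#include <stdio.h>

static bool radix_enabled(void)
{
        return false;                   /* assume a hash-MMU machine */
}

/* Device-tree scanning only: segment sizes, page sizes, 16G hugepage blocks. */
static void hash__early_init_devtree(void)
{
        printf("scan device tree for segment and page sizes\n");
}

/* Runs early; records MMU features without touching MMU data structures. */
static void mmu_early_init_devtree(void)
{
        if (!radix_enabled())
                hash__early_init_devtree();
}

/* Actual MMU setup; now only consumes what the earlier scan recorded. */
static void hash__early_init_mmu(void)
{
        printf("pick mapping page sizes, set up the hash table\n");
}

int main(void)
{
        mmu_early_init_devtree();       /* 1. scan device tree, set features */
        hash__early_init_mmu();         /* 2. initialise MMU data structures */
        return 0;
}

The split means step 1 only reads the device tree and records feature/size information, while step 2 uses that information to build the actual hash MMU structures.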
@@ -108,6 +108,7 @@ extern int mmu_io_psize;
 
 /* MMU initialization */
 void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
...
@@ -363,11 +363,6 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
         return 0;
 }
 
-static void __init htab_init_seg_sizes(void)
-{
-        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
-}
-
 static int __init get_idx_from_shift(unsigned int shift)
 {
         int idx = -1;
@@ -539,7 +534,7 @@ static bool might_have_hea(void)
 
 #endif /* #ifdef CONFIG_PPC_64K_PAGES */
 
-static void __init htab_init_page_sizes(void)
+static void __init htab_scan_page_sizes(void)
 {
         int rc;
@@ -554,17 +549,23 @@ static void __init htab_init_page_sizes(void)
          * Try to find the available page sizes in the device-tree
          */
         rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
-        if (rc != 0)  /* Found */
-                goto found;
-
-        /*
-         * Not in the device-tree, let's fallback on known size
-         * list for 16M capable GP & GR
-         */
-        if (mmu_has_feature(MMU_FTR_16M_PAGE))
+        if (rc == 0 && mmu_has_feature(MMU_FTR_16M_PAGE)) {
+                /*
+                 * Nothing in the device-tree, but the CPU supports 16M pages,
+                 * so let's fallback on a known size list for 16M capable CPUs.
+                 */
                 memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                        sizeof(mmu_psize_defaults_gp));
-found:
+        }
+
+#ifdef CONFIG_HUGETLB_PAGE
+        /* Reserve 16G huge page memory sections for huge pages */
+        of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+#endif /* CONFIG_HUGETLB_PAGE */
+}
+
+static void __init htab_init_page_sizes(void)
+{
         if (!debug_pagealloc_enabled()) {
                 /*
                  * Pick a size for the linear mapping. Currently, we only
@@ -630,11 +631,6 @@ static void __init htab_init_page_sizes(void)
                 ,mmu_psize_defs[mmu_vmemmap_psize].shift
 #endif
                 );
-
-#ifdef CONFIG_HUGETLB_PAGE
-        /* Reserve 16G huge page memory sections for huge pages */
-        of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
-#endif /* CONFIG_HUGETLB_PAGE */
 }
 
 static int __init htab_dt_scan_pftsize(unsigned long node,
@@ -759,12 +755,6 @@ static void __init htab_initialize(void)
 
         DBG(" -> htab_initialize()\n");
 
-        /* Initialize segment sizes */
-        htab_init_seg_sizes();
-
-        /* Initialize page sizes */
-        htab_init_page_sizes();
-
         if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
                 mmu_kernel_ssize = MMU_SEGSIZE_1T;
                 mmu_highuser_ssize = MMU_SEGSIZE_1T;
@@ -885,8 +875,19 @@ static void __init htab_initialize(void)
 #undef KB
 #undef MB
 
+void __init hash__early_init_devtree(void)
+{
+        /* Initialize segment sizes */
+        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+
+        /* Initialize page sizes */
+        htab_scan_page_sizes();
+}
+
 void __init hash__early_init_mmu(void)
 {
+        htab_init_page_sizes();
+
         /*
          * initialize page table size
          */
...
@@ -426,5 +426,8 @@ void __init mmu_early_init_devtree(void)
         /* Disable radix mode based on kernel command line. */
         if (disable_radix)
                 cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
+
+        if (!radix_enabled())
+                hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */