Commit b8f1b4f8 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Convert early cpu/mmu feature check to use the new helpers

This switches early feature checks to use the non static key variant of
the function. In later patches we will be switching cpu_has_feature()
and mmu_has_feature() to use static keys and we can use them only after
static key/jump label is initialized. Any check for feature before jump
label init should be done using this new helper.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent a141cca3
...@@ -128,7 +128,7 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, ...@@ -128,7 +128,7 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size) phys_addr_t first_memblock_size)
{ {
if (radix_enabled()) if (early_radix_enabled())
return radix__setup_initial_memory_limit(first_memblock_base, return radix__setup_initial_memory_limit(first_memblock_base,
first_memblock_size); first_memblock_size);
return hash__setup_initial_memory_limit(first_memblock_base, return hash__setup_initial_memory_limit(first_memblock_base,
......
...@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca) ...@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca)
* if we do a GET_PACA() before the feature fixups have been * if we do a GET_PACA() before the feature fixups have been
* applied * applied
*/ */
if (cpu_has_feature(CPU_FTR_HVMODE)) if (early_cpu_has_feature(CPU_FTR_HVMODE))
mtspr(SPRN_SPRG_HPACA, local_paca); mtspr(SPRN_SPRG_HPACA, local_paca);
#endif #endif
mtspr(SPRN_SPRG_PACA, local_paca); mtspr(SPRN_SPRG_PACA, local_paca);
......
...@@ -227,8 +227,8 @@ static void __init configure_exceptions(void) ...@@ -227,8 +227,8 @@ static void __init configure_exceptions(void)
opal_configure_cores(); opal_configure_cores();
/* Enable AIL if supported, and we are in hypervisor mode */ /* Enable AIL if supported, and we are in hypervisor mode */
if (cpu_has_feature(CPU_FTR_HVMODE) && if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
cpu_has_feature(CPU_FTR_ARCH_207S)) { early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
unsigned long lpcr = mfspr(SPRN_LPCR); unsigned long lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
} }
......
...@@ -549,7 +549,7 @@ static void __init htab_scan_page_sizes(void) ...@@ -549,7 +549,7 @@ static void __init htab_scan_page_sizes(void)
* Try to find the available page sizes in the device-tree * Try to find the available page sizes in the device-tree
*/ */
rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL); rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
if (rc == 0 && mmu_has_feature(MMU_FTR_16M_PAGE)) { if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
/* /*
* Nothing in the device-tree, but the CPU supports 16M pages, * Nothing in the device-tree, but the CPU supports 16M pages,
* so let's fallback on a known size list for 16M capable CPUs. * so let's fallback on a known size list for 16M capable CPUs.
......
...@@ -427,7 +427,7 @@ void __init mmu_early_init_devtree(void) ...@@ -427,7 +427,7 @@ void __init mmu_early_init_devtree(void)
if (disable_radix) if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
if (radix_enabled()) if (early_radix_enabled())
radix__early_init_devtree(); radix__early_init_devtree();
else else
hash__early_init_devtree(); hash__early_init_devtree();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment