Commit b1923caa authored by Benjamin Herrenschmidt, committed by Michael Ellerman

powerpc: Merge 32-bit and 64-bit setup_arch()

There are few enough differences now.

mpe: Add a/p/k/setup.h to contain the prototypes and empty versions of
functions we need, rather than using weak functions. Add a few other
empty versions to avoid as many #ifdefs as possible in the code.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 009776ba
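
Before the diff: the pattern the commit message describes is to give each helper that only exists in one configuration a real prototype under the matching #ifdef and an empty static inline stub otherwise, so the merged setup_arch() can call it unconditionally. A condensed sketch of that pattern (comments added here, not verbatim from the patch), using setup_power_save() as it appears in the new setup.h and in setup-common.c below; the elided lines stand for the rest of setup_arch():

/* New kernel-private setup.h: real prototype on 32-bit, empty stub elsewhere. */
#ifdef CONFIG_PPC32
void setup_power_save(void);			/* implemented in setup_32.c */
#else
static inline void setup_power_save(void) { }	/* compiles away on 64-bit */
#endif

/* Merged setup_arch() in setup-common.c: no #ifdef at the call site;
 * on 64-bit the empty stub is simply inlined away. */
void __init setup_arch(char **cmdline_p)
{
	/* ... */
	setup_power_save();
	/* ... */
}

The same shape covers check_smt_enabled(), setup_tlb_core_data(), exc_lvl_early_init(), emergency_stack_init() and kvm_cma_reserve(), which is what lets the separate 32-bit and 64-bit setup_arch() bodies collapse into the single copy added to setup-common.c.
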
@@ -351,7 +351,6 @@ extern bool rtas_indicator_present(int token, int *maxindex);
 extern int rtas_set_indicator(int indicator, int index, int new_value);
 extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
 extern void rtas_progress(char *s, unsigned short hex);
-extern void rtas_initialize(void);
 extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
 extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
 extern int rtas_online_cpus_mask(cpumask_var_t cpus);
@@ -460,9 +459,11 @@ static inline int page_is_rtas_user_buf(unsigned long pfn)
 /* Not the best place to put pSeries_coalesce_init, will be fixed when we
  * move some of the rtas suspend-me stuff to pseries */
 extern void pSeries_coalesce_init(void);
+void rtas_initialize(void);
 #else
 static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
 static inline void pSeries_coalesce_init(void) { }
+static inline void rtas_initialize(void) { };
 #endif
 
 extern int call_rtas(const char *, int, int, unsigned long *, ...);
...
@@ -160,9 +160,6 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
 {
 	paca[cpu].hw_cpu_id = phys;
 }
-
-extern void smp_release_cpus(void);
-
 #else
 /* 32-bit */
 #ifndef CONFIG_SMP
@@ -179,6 +176,12 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
 #endif /* !CONFIG_SMP */
 #endif /* !CONFIG_PPC64 */
 
+#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC))
+extern void smp_release_cpus(void);
+#else
+static inline void smp_release_cpus(void) { };
+#endif
+
 extern int smt_enabled_at_boot;
 
 extern void smp_mpic_probe(void);
...
@@ -35,6 +35,7 @@
 #include <linux/percpu.h>
 #include <linux/memblock.h>
 #include <linux/of_platform.h>
+#include <linux/hugetlb.h>
 #include <asm/io.h>
 #include <asm/paca.h>
 #include <asm/prom.h>
@@ -61,6 +62,12 @@
 #include <asm/cputhreads.h>
 #include <mm/mmu_decl.h>
 #include <asm/fadump.h>
+#include <asm/udbg.h>
+#include <asm/hugetlb.h>
+#include <asm/livepatch.h>
+#include <asm/mmu_context.h>
+#include "setup.h"
 
 #ifdef DEBUG
 #include <asm/udbg.h>
@@ -758,3 +765,169 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
 	pdev->dev.dma_mask = &pdev->archdata.dma_mask;
 	set_dma_ops(&pdev->dev, &dma_direct_ops);
 }
+
+static __init void print_system_info(void)
+{
+	pr_info("-----------------------------------------------------\n");
+#ifdef CONFIG_PPC_STD_MMU_64
+	pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_32
+	pr_info("Hash_size = 0x%lx\n", Hash_size);
+#endif
+	pr_info("phys_mem_size = 0x%llx\n",
+		(unsigned long long)memblock_phys_mem_size());
+	pr_info("dcache_bsize = 0x%x\n", dcache_bsize);
+	pr_info("icache_bsize = 0x%x\n", icache_bsize);
+	if (ucache_bsize != 0)
+		pr_info("ucache_bsize = 0x%x\n", ucache_bsize);
+	pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
+	pr_info(" possible = 0x%016lx\n",
+		(unsigned long)CPU_FTRS_POSSIBLE);
+	pr_info(" always = 0x%016lx\n",
+		(unsigned long)CPU_FTRS_ALWAYS);
+	pr_info("cpu_user_features = 0x%08x 0x%08x\n",
+		cur_cpu_spec->cpu_user_features,
+		cur_cpu_spec->cpu_user_features2);
+	pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
+#ifdef CONFIG_PPC64
+	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_64
+	if (htab_address)
+		pr_info("htab_address = 0x%p\n", htab_address);
+	if (htab_hash_mask)
+		pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_32
+	if (Hash)
+		pr_info("Hash = 0x%p\n", Hash);
+	if (Hash_mask)
+		pr_info("Hash_mask = 0x%lx\n", Hash_mask);
+#endif
+	if (PHYSICAL_START > 0)
+		pr_info("physical_start = 0x%llx\n",
+			(unsigned long long)PHYSICAL_START);
+	pr_info("-----------------------------------------------------\n");
+}
+
+/*
+ * Called into from start_kernel this initializes memblock, which is used
+ * to manage page allocation until mem_init is called.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+	*cmdline_p = boot_command_line;
+
+	/* Set a half-reasonable default so udelay does something sensible */
+	loops_per_jiffy = 500000000 / HZ;
+
+	/* Unflatten the device-tree passed by prom_init or kexec */
+	unflatten_device_tree();
+
+	/*
+	 * Initialize cache line/block info from device-tree (on ppc64) or
+	 * just cputable (on ppc32).
+	 */
+	initialize_cache_info();
+
+	/* Initialize RTAS if available. */
+	rtas_initialize();
+
+	/* Check if we have an initrd provided via the device-tree. */
+	check_for_initrd();
+
+	/* Probe the machine type, establish ppc_md. */
+	probe_machine();
+
+	/* Setup panic notifier if requested by the platform. */
+	setup_panic();
+
+	/*
+	 * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
+	 * it from their respective probe() function.
+	 */
+	setup_power_save();
+
+	/* Discover standard serial ports. */
+	find_legacy_serial_ports();
+
+	/* Register early console with the printk subsystem. */
+	register_early_udbg_console();
+
+	/* Setup the various CPU maps based on the device-tree. */
+	smp_setup_cpu_maps();
+
+	/* Initialize xmon. */
+	xmon_setup();
+
+	/* Check the SMT related command line arguments (ppc64). */
+	check_smt_enabled();
+
+	/* On BookE, setup per-core TLB data structures. */
+	setup_tlb_core_data();
+
+	/*
+	 * Release secondary cpus out of their spinloops at 0x60 now that
+	 * we can map physical -> logical CPU ids.
+	 *
+	 * Freescale Book3e parts spin in a loop provided by firmware,
+	 * so smp_release_cpus() does nothing for them.
+	 */
+#ifdef CONFIG_SMP
+	smp_release_cpus();
+#endif
+
+	/* Print various info about the machine that has been gathered so far. */
+	print_system_info();
+
+	/* Reserve large chunks of memory for use by CMA for KVM. */
+	kvm_cma_reserve();
+
+	/*
+	 * Reserve any gigantic pages requested on the command line.
+	 * memblock needs to have been initialized by the time this is
+	 * called since this will reserve memory.
+	 */
+	reserve_hugetlb_gpages();
+
+	klp_init_thread_info(&init_thread_info);
+
+	init_mm.start_code = (unsigned long)_stext;
+	init_mm.end_code = (unsigned long) _etext;
+	init_mm.end_data = (unsigned long) _edata;
+	init_mm.brk = klimit;
+
+#ifdef CONFIG_PPC_64K_PAGES
+	init_mm.context.pte_frag = NULL;
+#endif
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+	mm_iommu_init(&init_mm.context);
+#endif
+	irqstack_early_init();
+	exc_lvl_early_init();
+	emergency_stack_init();
+
+	initmem_init();
+
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp = &dummy_con;
+#endif
+
+	if (ppc_md.setup_arch)
+		ppc_md.setup_arch();
+
+	paging_init();
+
+	/* Initialize the MMU context management stuff. */
+	mmu_context_init();
+
+#ifdef CONFIG_PPC64
+	/* Interrupt code needs to be 64K-aligned. */
+	if ((unsigned long)_stext & 0xffff)
+		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
+		      (unsigned long)_stext);
+#endif
+}
...
+/*
+ * Prototypes for functions that are shared between setup_(32|64|common).c
+ *
+ * Copyright 2016 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ARCH_POWERPC_KERNEL_SETUP_H
+#define __ARCH_POWERPC_KERNEL_SETUP_H
+
+void initialize_cache_info(void);
+void irqstack_early_init(void);
+
+#ifdef CONFIG_PPC32
+void setup_power_save(void);
+#else
+static inline void setup_power_save(void) { };
+#endif
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+void check_smt_enabled(void);
+#else
+static inline void check_smt_enabled(void) { };
+#endif
+
+#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+void setup_tlb_core_data(void);
+#else
+static inline void setup_tlb_core_data(void) { };
+#endif
+
+#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+void exc_lvl_early_init(void);
+#else
+static inline void exc_lvl_early_init(void) { };
+#endif
+
+#ifdef CONFIG_PPC64
+void emergency_stack_init(void);
+#else
+static inline void emergency_stack_init(void) { };
+#endif
+
+/*
+ * Having this in kvm_ppc.h makes include dependencies too
+ * tricky to solve for setup-common.c so have it here.
+ */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void kvm_cma_reserve(void);
+#else
+static inline void kvm_cma_reserve(void) { };
+#endif
+
+#endif /* __ARCH_POWERPC_KERNEL_SETUP_H */
...
@@ -36,7 +36,6 @@
 #include <asm/time.h>
 #include <asm/serial.h>
 #include <asm/udbg.h>
-#include <asm/mmu_context.h>
 #include <asm/code-patching.h>
 
 #define DBG(fmt...)
@@ -191,7 +190,7 @@ int __init ppc_init(void)
 arch_initcall(ppc_init);
 
-static void __init irqstack_early_init(void)
+void __init irqstack_early_init(void)
 {
 	unsigned int i;
@@ -206,7 +205,7 @@ static void __init irqstack_early_init(void)
 }
 
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-static void __init exc_lvl_early_init(void)
+void __init exc_lvl_early_init(void)
 {
 	unsigned int i, hw_cpu;
@@ -229,11 +228,9 @@ static void __init exc_lvl_early_init(void)
 #endif
 	}
 }
-#else
-#define exc_lvl_early_init()
 #endif
 
-static void setup_power_save(void)
+void __init setup_power_save(void)
 {
 #ifdef CONFIG_6xx
 	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
@@ -248,7 +245,7 @@ static void setup_power_save(void)
 #endif
 }
 
-static __init void initialize_cache_info(void)
+__init void initialize_cache_info(void)
 {
 	/*
 	 * Set cache line size based on type of cpu as a default.
@@ -261,57 +258,3 @@ static __init void initialize_cache_info(void)
 	if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
 		ucache_bsize = icache_bsize = dcache_bsize;
 }
-
-/* Warning, IO base is not yet inited */
-void __init setup_arch(char **cmdline_p)
-{
-	*cmdline_p = boot_command_line;
-
-	/* so udelay does something sensible, assume <= 1000 bogomips */
-	loops_per_jiffy = 500000000 / HZ;
-
-	unflatten_device_tree();
-	initialize_cache_info();
-	check_for_initrd();
-	probe_machine();
-	setup_panic();
-	setup_power_save();
-	find_legacy_serial_ports();
-
-	/* Register early console */
-	register_early_udbg_console();
-
-	smp_setup_cpu_maps();
-	xmon_setup();
-
-	init_mm.start_code = (unsigned long)_stext;
-	init_mm.end_code = (unsigned long) _etext;
-	init_mm.end_data = (unsigned long) _edata;
-	init_mm.brk = klimit;
-
-	exc_lvl_early_init();
-	irqstack_early_init();
-
-	initmem_init();
-	if ( ppc_md.progress ) ppc_md.progress("setup_arch: initmem", 0x3eab);
-
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
-
-	if (ppc_md.setup_arch)
-		ppc_md.setup_arch();
-	if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
-
-	paging_init();
-
-	/* Initialize the MMU context management stuff */
-	mmu_context_init();
-}
...
@@ -35,7 +35,6 @@
 #include <linux/pci.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
-#include <linux/hugetlb.h>
 #include <linux/memory.h>
 #include <linux/nmi.h>
@@ -64,12 +63,10 @@
 #include <asm/xmon.h>
 #include <asm/udbg.h>
 #include <asm/kexec.h>
-#include <asm/mmu_context.h>
 #include <asm/code-patching.h>
-#include <asm/kvm_ppc.h>
-#include <asm/hugetlb.h>
 #include <asm/livepatch.h>
 #include <asm/opal.h>
+#include <asm/cputhreads.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -100,7 +97,7 @@ int icache_bsize;
 int ucache_bsize;
 
 #if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
-static void setup_tlb_core_data(void)
+void __init setup_tlb_core_data(void)
 {
 	int cpu;
@@ -133,10 +130,6 @@ static void setup_tlb_core_data(void)
 		}
 	}
 }
-#else
-static void setup_tlb_core_data(void)
-{
-}
 #endif
 
 #ifdef CONFIG_SMP
@@ -144,7 +137,7 @@ static void setup_tlb_core_data(void)
 static char *smt_enabled_cmdline;
 
 /* Look for ibm,smt-enabled OF option */
-static void check_smt_enabled(void)
+void __init check_smt_enabled(void)
 {
 	struct device_node *dn;
 	const char *smt_option;
@@ -193,8 +186,6 @@ static int __init early_smt_enabled(char *p)
 }
 early_param("smt-enabled", early_smt_enabled);
 
-#else
-#define check_smt_enabled()
 #endif /* CONFIG_SMP */
 
 /** Fix up paca fields required for the boot cpu */
@@ -408,7 +399,7 @@ void smp_release_cpus(void)
  * cache informations about the CPU that will be used by cache flush
  * routines and/or provided to userland
  */
-static void __init initialize_cache_info(void)
+void __init initialize_cache_info(void)
 {
 	struct device_node *np;
 	unsigned long num_cpus = 0;
@@ -480,38 +471,6 @@ static void __init initialize_cache_info(void)
 	DBG(" <- initialize_cache_info()\n");
 }
-
-static __init void print_system_info(void)
-{
-	pr_info("-----------------------------------------------------\n");
-	pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
-	pr_info("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
-	if (ppc64_caches.dline_size != 0x80)
-		pr_info("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
-	if (ppc64_caches.iline_size != 0x80)
-		pr_info("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
-	pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
-	pr_info(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
-	pr_info(" always = 0x%016lx\n", CPU_FTRS_ALWAYS);
-	pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
-		cur_cpu_spec->cpu_user_features2);
-	pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
-	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
-#ifdef CONFIG_PPC_STD_MMU_64
-	if (htab_address)
-		pr_info("htab_address = 0x%p\n", htab_address);
-	pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#endif
-	if (PHYSICAL_START > 0)
-		pr_info("physical_start = 0x%llx\n",
-			(unsigned long long)PHYSICAL_START);
-	pr_info("-----------------------------------------------------\n");
-}
 
 /* This returns the limit below which memory accesses to the linear
  * mapping are guarnateed not to cause a TLB or SLB miss. This is
  * used to allocate interrupt or emergency stacks for which our
@@ -533,7 +492,7 @@ static __init u64 safe_stack_limit(void)
 #endif
 }
 
-static void __init irqstack_early_init(void)
+void __init irqstack_early_init(void)
 {
 	u64 limit = safe_stack_limit();
 	unsigned int i;
@@ -553,7 +512,7 @@ static void __init irqstack_early_init(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-static void __init exc_lvl_early_init(void)
+void __init exc_lvl_early_init(void)
 {
 	unsigned int i;
 	unsigned long sp;
@@ -575,8 +534,6 @@ static void __init exc_lvl_early_init(void)
 	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
 		patch_exception(0x040, exc_debug_debug_book3e);
 }
-#else
-#define exc_lvl_early_init()
 #endif
 
 /*
@@ -584,7 +541,7 @@ static void __init exc_lvl_early_init(void)
  * early in SMP boots before relocation is enabled. Exclusive emergency
  * stack for machine checks.
  */
-static void __init emergency_stack_init(void)
+void __init emergency_stack_init(void)
 {
 	u64 limit;
 	unsigned int i;
@@ -615,124 +572,6 @@ static void __init emergency_stack_init(void)
 	}
 }
-
-/*
- * Called into from start_kernel this initializes memblock, which is used
- * to manage page allocation until mem_init is called.
- */
-void __init setup_arch(char **cmdline_p)
-{
-	*cmdline_p = boot_command_line;
-
-	/*
-	 * Unflatten the device-tree passed by prom_init or kexec
-	 */
-	unflatten_device_tree();
-
-	/*
-	 * Fill the ppc64_caches & systemcfg structures with informations
-	 * retrieved from the device-tree.
-	 */
-	initialize_cache_info();
-
-#ifdef CONFIG_PPC_RTAS
-	/*
-	 * Initialize RTAS if available
-	 */
-	rtas_initialize();
-#endif /* CONFIG_PPC_RTAS */
-
-	/*
-	 * Check if we have an initrd provided via the device-tree
-	 */
-	check_for_initrd();
-
-	/* Probe the machine type */
-	probe_machine();
-
-	setup_panic();
-
-	/*
-	 * We can discover serial ports now since the above did setup the
-	 * hash table management for us, thus ioremap works. We do that early
-	 * so that further code can be debugged
-	 */
-	find_legacy_serial_ports();
-
-	/*
-	 * Register early console
-	 */
-	register_early_udbg_console();
-
-	smp_setup_cpu_maps();
-
-	/*
-	 * Initialize xmon
-	 */
-	xmon_setup();
-
-	check_smt_enabled();
-	setup_tlb_core_data();
-
-	/*
-	 * Freescale Book3e parts spin in a loop provided by firmware,
-	 * so smp_release_cpus() does nothing for them
-	 */
-#if defined(CONFIG_SMP)
-	/*
-	 * Release secondary cpus out of their spinloops at 0x60 now that
-	 * we can map physical -> logical CPU ids
-	 */
-	smp_release_cpus();
-#endif
-
-	/* Print various info about the machine that has been gathered so far. */
-	print_system_info();
-
-	/* Reserve large chunks of memory for use by CMA for KVM */
-	kvm_cma_reserve();
-
-	/*
-	 * Reserve any gigantic pages requested on the command line.
-	 * memblock needs to have been initialized by the time this is
-	 * called since this will reserve memory.
-	 */
-	reserve_hugetlb_gpages();
-
-	klp_init_thread_info(&init_thread_info);
-
-	init_mm.start_code = (unsigned long)_stext;
-	init_mm.end_code = (unsigned long) _etext;
-	init_mm.end_data = (unsigned long) _edata;
-	init_mm.brk = klimit;
-
-#ifdef CONFIG_PPC_64K_PAGES
-	init_mm.context.pte_frag = NULL;
-#endif
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&init_mm.context);
-#endif
-	irqstack_early_init();
-	exc_lvl_early_init();
-	emergency_stack_init();
-
-	initmem_init();
-
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
-
-	if (ppc_md.setup_arch)
-		ppc_md.setup_arch();
-
-	paging_init();
-
-	/* Initialize the MMU context management stuff */
-	mmu_context_init();
-
-	/* Interrupt code needs to be 64K-aligned */
-	if ((unsigned long)_stext & 0xffff)
-		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
-		      (unsigned long)_stext);
-}
 
 #ifdef CONFIG_SMP
 #define PCPU_DYN_SIZE ()
...