Commit edb48757 authored by David Mosberger, committed by Tony Luck

[IA64] Move allocation of per-CPU MCA data out of per_cpu_init()

This patch moves the per-CPU MCA data allocation out of per_cpu_init()
so the code can be shared for contig and discontig memory
architectures.  Also, it means we can revert to the old way
of doing per_cpu_init() on UP.

Also clean up the initialization of ar.k3 in setup.c.  We need to use
ia64_tpa() rather than __pa() because the address will be in region 5
on a UP build.
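
For illustration only (not part of the patch): __pa() assumes an
identity-mapped region 7 address and produces the physical address by
simple arithmetic, while ia64_tpa() asks the hardware (the tpa
instruction) to translate, so it also works for region 5 addresses such
as the static per-CPU area inside the kernel image that a UP build ends
up using.  Below is a minimal user-space sketch of that idea, with
made-up base addresses and hypothetical helpers fake_pa()/fake_tpa()
standing in for the real macros:

    #include <stdio.h>
    #include <stdint.h>

    #define REGION7_BASE          0xe000000000000000ULL /* identity-mapped kernel region */
    #define REGION5_BASE          0xa000000000000000ULL /* region holding the kernel image */
    #define FAKE_KERNEL_PHYS_BASE 0x4000000ULL          /* made-up physical load address   */

    /* Hypothetical stand-in for __pa(): assumes an identity-mapped (region 7)
     * address and just strips the region offset. */
    static uint64_t fake_pa(uint64_t vaddr)
    {
            return vaddr - REGION7_BASE;
    }

    /* Hypothetical stand-in for ia64_tpa(): performs a real translation (the
     * way the tpa instruction consults the TLB), so region 5 addresses also
     * come out right in this toy model. */
    static uint64_t fake_tpa(uint64_t vaddr)
    {
            if (vaddr >= REGION7_BASE)
                    return vaddr - REGION7_BASE;        /* identity mapping */
            return (vaddr - REGION5_BASE) + FAKE_KERNEL_PHYS_BASE;
    }

    int main(void)
    {
            /* On a UP build, cpu_data points into the static per-CPU area in
             * the kernel image, i.e. a region 5 address (value made up). */
            uint64_t cpu_data = REGION5_BASE + 0x12340ULL;

            printf("__pa-style : 0x%016llx  (bogus for a region 5 address)\n",
                   (unsigned long long)fake_pa(cpu_data));
            printf("tpa-style  : 0x%016llx  (plausible physical address)\n",
                   (unsigned long long)fake_tpa(cpu_data));
            return 0;
    }

Built as an ordinary C program, the __pa-style arithmetic prints a
nonsense value for the region 5 address while the tpa-style translation
yields the expected physical address, which is the point of the switch
to ia64_tpa() above.
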
Signed-off-by: David Mosberger-Tang <davidm@hpl.hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent d998bfd9
arch/ia64/kernel/mca.c
@@ -1209,6 +1209,18 @@ ia64_mca_cpu_init(void *cpu_data)
{
void *pal_vaddr;
if (smp_processor_id() == 0) {
void *mca_data;
int cpu;
mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
* NR_CPUS);
for (cpu = 0; cpu < NR_CPUS; cpu++) {
__per_cpu_mca[cpu] = __pa(mca_data);
mca_data += sizeof(struct ia64_mca_cpu);
}
}
/*
* The MCA info structure was allocated earlier and its
* physical address saved in __per_cpu_mca[cpu]. Copy that
arch/ia64/kernel/setup.c
@@ -609,7 +609,13 @@ cpu_init (void)
cpu_data = per_cpu_init();
ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data - (void *) __per_cpu_start));
/*
* We set ar.k3 so that assembly code in MCA handler can compute
* physical addresses of per cpu variables with a simple:
* phys = ar.k3 + &per_cpu_var
*/
ia64_set_kr(IA64_KR_PER_CPU_DATA,
ia64_tpa(cpu_data) - (long) __per_cpu_start);
get_max_cacheline_size();
arch/ia64/mm/contig.c
@@ -169,6 +169,7 @@ find_memory (void)
find_initrd();
}
#ifdef CONFIG_SMP
/**
* per_cpu_init - setup per-cpu variables
*
@@ -177,18 +178,15 @@ find_memory (void)
void *
per_cpu_init (void)
{
void *mca_data, *my_data;
void *cpu_data;
int cpu;
#ifdef CONFIG_SMP
/*
* get_free_pages() cannot be used before cpu_init() done. BSP
* allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
* get_zeroed_page().
*/
if (smp_processor_id() == 0) {
void *cpu_data;
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -198,20 +196,9 @@ per_cpu_init (void)
per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
}
}
my_data = __per_cpu_start + __per_cpu_offset[smp_processor_id()];
#else
my_data = (void *) __phys_per_cpu_start;
#endif
if (smp_processor_id() == 0) {
mca_data = alloc_bootmem(sizeof (struct ia64_mca_cpu) * NR_CPUS);
for (cpu = 0; cpu < NR_CPUS; cpu++) {
__per_cpu_mca[cpu] = __pa(mca_data);
mca_data += sizeof (struct ia64_mca_cpu);
}
}
return my_data;
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */
static int
count_pages (u64 start, u64 end, void *arg)
arch/ia64/mm/discontig.c
@@ -26,7 +26,6 @@
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/mca.h>
/*
* Track per-node information needed to setup the boot memory allocator, the
@@ -294,9 +293,6 @@ static int early_nr_cpus_node(int node)
* |------------------------|
* | local ia64_node_data |
* |------------------------|
* | MCA/INIT data * |
* | cpus_on_this_node |
* |------------------------|
* | ??? |
* |________________________|
*
@@ -310,7 +306,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
{
unsigned long epfn, cpu, cpus, phys_cpus;
unsigned long pernodesize = 0, pernode, pages, mapsize;
void *cpu_data, *mca_data_phys;
void *cpu_data;
struct bootmem_data *bdp = &mem_data[node].bootmem_data;
epfn = (start + len) >> PAGE_SHIFT;
@@ -339,7 +335,6 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
pernodesize += node * L1_CACHE_BYTES;
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_mca_cpu)) * phys_cpus;
pernodesize = PAGE_ALIGN(pernodesize);
pernode = NODEDATA_ALIGN(start, node);
@@ -362,9 +357,6 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
mem_data[node].pgdat->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
mca_data_phys = (void *)pernode;
pernode += L1_CACHE_ALIGN(sizeof(struct ia64_mca_cpu)) * phys_cpus;
/*
* Copy the static per-cpu data into the region we
* just set aside and then setup __per_cpu_offset
@@ -374,18 +366,6 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
if (node == node_cpuid[cpu].nid) {
memcpy(__va(cpu_data), __phys_per_cpu_start,
__per_cpu_end - __per_cpu_start);
if ((cpu == 0) || (node_cpuid[cpu].phys_id > 0)) {
/*
* The memory for the cpuinfo structure is allocated
* here, but the data in the structure is initialized
* later. Save the physical address of the MCA save
* area in __per_cpu_mca[cpu]. When the cpuinfo struct
* is initialized, the value in __per_cpu_mca[cpu]
* will be put in the cpuinfo structure.
*/
__per_cpu_mca[cpu] = __pa(mca_data_phys);
mca_data_phys += L1_CACHE_ALIGN(sizeof(struct ia64_mca_cpu));
}
__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
__per_cpu_start;
cpu_data += PERCPU_PAGE_SIZE;
include/asm-ia64/percpu.h
@@ -46,11 +46,13 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void);
#else /* ! SMP */
#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define per_cpu_init() (__phys_per_cpu_start)
#endif /* SMP */
@@ -65,8 +67,6 @@ extern void setup_per_cpu_areas (void);
*/
#define __ia64_per_cpu_var(var) (per_cpu__##var)
extern void *per_cpu_init(void);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PERCPU_H */