Commit c8e409a3 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/irq: use memblock functions returning virtual address

Since only the virtual address of the allocated blocks is used,
let's use functions that directly return the virtual address.

Those functions have the advantage of also zeroing the block.
Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent eafd825e
...@@ -725,18 +725,15 @@ void exc_lvl_ctx_init(void) ...@@ -725,18 +725,15 @@ void exc_lvl_ctx_init(void)
#endif #endif
#endif #endif
memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
tp = critirq_ctx[cpu_nr]; tp = critirq_ctx[cpu_nr];
tp->cpu = cpu_nr; tp->cpu = cpu_nr;
tp->preempt_count = 0; tp->preempt_count = 0;
#ifdef CONFIG_BOOKE #ifdef CONFIG_BOOKE
memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
tp = dbgirq_ctx[cpu_nr]; tp = dbgirq_ctx[cpu_nr];
tp->cpu = cpu_nr; tp->cpu = cpu_nr;
tp->preempt_count = 0; tp->preempt_count = 0;
memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
tp = mcheckirq_ctx[cpu_nr]; tp = mcheckirq_ctx[cpu_nr];
tp->cpu = cpu_nr; tp->cpu = cpu_nr;
tp->preempt_count = HARDIRQ_OFFSET; tp->preempt_count = HARDIRQ_OFFSET;
...@@ -754,12 +751,10 @@ void irq_ctx_init(void) ...@@ -754,12 +751,10 @@ void irq_ctx_init(void)
int i; int i;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
tp = softirq_ctx[i]; tp = softirq_ctx[i];
tp->cpu = i; tp->cpu = i;
klp_init_thread_info(tp); klp_init_thread_info(tp);
memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
tp = hardirq_ctx[i]; tp = hardirq_ctx[i];
tp->cpu = i; tp->cpu = i;
klp_init_thread_info(tp); klp_init_thread_info(tp);
......
...@@ -196,6 +196,17 @@ static int __init ppc_init(void) ...@@ -196,6 +196,17 @@ static int __init ppc_init(void)
} }
arch_initcall(ppc_init); arch_initcall(ppc_init);
/*
 * Allocate one zeroed, THREAD_SIZE-aligned stack from memblock.
 *
 * memblock_alloc() returns memory that is already cleared, so callers
 * no longer need an explicit memset().  Allocation failure this early
 * in boot is unrecoverable, so panic, reporting the caller via
 * _RET_IP_ to identify which stack could not be set up.
 */
static void *__init alloc_stack(void)
{
	void *stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);

	if (!stack)
		panic("cannot allocate %d bytes for stack at %pS\n",
		      THREAD_SIZE, (void *)_RET_IP_);

	return stack;
}
void __init irqstack_early_init(void) void __init irqstack_early_init(void)
{ {
unsigned int i; unsigned int i;
...@@ -203,10 +214,8 @@ void __init irqstack_early_init(void) ...@@ -203,10 +214,8 @@ void __init irqstack_early_init(void)
/* interrupt stacks must be in lowmem, we get that for free on ppc32 /* interrupt stacks must be in lowmem, we get that for free on ppc32
* as the memblock is limited to lowmem by default */ * as the memblock is limited to lowmem by default */
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *) softirq_ctx[i] = alloc_stack();
__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); hardirq_ctx[i] = alloc_stack();
hardirq_ctx[i] = (struct thread_info *)
__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
} }
} }
...@@ -224,13 +233,10 @@ void __init exc_lvl_early_init(void) ...@@ -224,13 +233,10 @@ void __init exc_lvl_early_init(void)
hw_cpu = 0; hw_cpu = 0;
#endif #endif
critirq_ctx[hw_cpu] = (struct thread_info *) critirq_ctx[hw_cpu] = alloc_stack();
__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE #ifdef CONFIG_BOOKE
dbgirq_ctx[hw_cpu] = (struct thread_info *) dbgirq_ctx[hw_cpu] = alloc_stack();
__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); mcheckirq_ctx[hw_cpu] = alloc_stack();
mcheckirq_ctx[hw_cpu] = (struct thread_info *)
__va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
#endif #endif
} }
} }
......
...@@ -634,19 +634,17 @@ __init u64 ppc64_bolted_size(void) ...@@ -634,19 +634,17 @@ __init u64 ppc64_bolted_size(void)
static void *__init alloc_stack(unsigned long limit, int cpu) static void *__init alloc_stack(unsigned long limit, int cpu)
{ {
unsigned long pa; void *ptr;
BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16); BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit, ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE,
early_cpu_to_node(cpu), MEMBLOCK_NONE); MEMBLOCK_LOW_LIMIT, limit,
if (!pa) { early_cpu_to_node(cpu));
pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); if (!ptr)
if (!pa) panic("cannot allocate stacks");
panic("cannot allocate stacks");
}
return __va(pa); return ptr;
} }
void __init irqstack_early_init(void) void __init irqstack_early_init(void)
...@@ -739,20 +737,17 @@ void __init emergency_stack_init(void) ...@@ -739,20 +737,17 @@ void __init emergency_stack_init(void)
struct thread_info *ti; struct thread_info *ti;
ti = alloc_stack(limit, i); ti = alloc_stack(limit, i);
memset(ti, 0, THREAD_SIZE);
emerg_stack_init_thread_info(ti, i); emerg_stack_init_thread_info(ti, i);
paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE; paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
#ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_BOOK3S_64
/* emergency stack for NMI exception handling. */ /* emergency stack for NMI exception handling. */
ti = alloc_stack(limit, i); ti = alloc_stack(limit, i);
memset(ti, 0, THREAD_SIZE);
emerg_stack_init_thread_info(ti, i); emerg_stack_init_thread_info(ti, i);
paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE; paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
/* emergency stack for machine check exception handling. */ /* emergency stack for machine check exception handling. */
ti = alloc_stack(limit, i); ti = alloc_stack(limit, i);
memset(ti, 0, THREAD_SIZE);
emerg_stack_init_thread_info(ti, i); emerg_stack_init_thread_info(ti, i);
paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE; paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment