Commit 42c4aaad authored by Benjamin Herrenschmidt, committed by Paul Mackerras

[POWERPC] Consolidate feature fixup code

There are currently two versions of the functions that apply the
feature fixups, one for CPU features and one for firmware features. In
addition, both are written in assembly, with separate implementations
for 32-bit and 64-bit. identify_cpu() is likewise implemented in
assembly, separately for 32-bit and 64-bit.

This patch replaces them with a pair of C functions. On ppc64, the
call sites also move slightly so that they are called from C instead of
from assembly; this is a very small change and thus shouldn't cause any
problems.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent fb20f65a
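
Background for the diff below, for readers new to the mechanism: the
BEGIN_FTR_SECTION/END_FTR_SECTION_IFSET macros (and their firmware
equivalents) record, for each alternative code section, an entry of the
form (mask, value, section start, section end) in the __ftr_fixup /
__fw_ftr_fixup tables; at boot, the fixup pass walks the table and nops
out every section whose feature condition doesn't hold. What follows is
a minimal, stand-alone user-space sketch of that walk, not the kernel
implementation — the demo_* names are invented for illustration, and
the PTRRELOC relocation handling and cache maintenance that the real
do_feature_fixups() performs are omitted.

/*
 * Sketch: nop out instruction ranges whose feature condition fails.
 * Each entry names a feature mask/value and a range of instruction
 * words; ranges that don't apply are overwritten with PowerPC nops.
 */
#include <stdio.h>

#define PPC_NOP 0x60000000u

struct fixup_entry {
        unsigned long mask;     /* which feature bits this entry tests */
        unsigned long value;    /* required state of those bits */
        unsigned int *start;    /* first instruction word of the section */
        unsigned int *end;      /* one past the last instruction word */
};

static void demo_do_feature_fixups(unsigned long features,
                                   struct fixup_entry *fcur,
                                   struct fixup_entry *fend)
{
        for (; fcur < fend; fcur++) {
                unsigned int *p;

                /* Section applies to this CPU: leave the code alone. */
                if ((features & fcur->mask) == fcur->value)
                        continue;

                /* Otherwise nop out the whole alternative section. */
                for (p = fcur->start; p < fcur->end; p++)
                        *p = PPC_NOP;
        }
}

int main(void)
{
        /* Pretend code: sync, isync, nop, blr. */
        unsigned int code[4] = { 0x7c0004ac, 0x4c00012c,
                                 0x60000000, 0x4e800020 };
        struct fixup_entry table[] = {
                /* Pretend feature bit 0 guards the first two words. */
                { .mask = 1, .value = 1,
                  .start = &code[0], .end = &code[2] },
        };

        demo_do_feature_fixups(0 /* feature bit 0 not set */,
                               table, table + 1);
        for (int i = 0; i < 4; i++)
                printf("%08x\n", code[i]); /* first two become nops */
        return 0;
}

Compiled and run, this prints 60000000 for the first two words and
leaves the rest untouched — the same effect do_feature_fixups() in the
diff below has on kernel text, with relocation and i-cache/d-cache
maintenance added on top.
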
@@ -18,6 +18,7 @@
 #include <asm/oprofile_impl.h>
 #include <asm/cputable.h>
+#include <asm/prom.h>		/* for PTRRELOC on ARCH=ppc */
 
 struct cpu_spec* cur_cpu_spec = NULL;
 EXPORT_SYMBOL(cur_cpu_spec);
@@ -73,7 +74,7 @@ extern void __restore_cpu_ppc970(void);
 #define PPC_FEATURE_SPE_COMP	0
 #endif
 
-struct cpu_spec	cpu_specs[] = {
+static struct cpu_spec cpu_specs[] = {
 #ifdef CONFIG_PPC64
 	{	/* Power3 */
 		.pvr_mask		= 0xffff0000,
@@ -1167,3 +1168,72 @@ struct cpu_spec cpu_specs[] = {
 #endif /* !CLASSIC_PPC */
 #endif /* CONFIG_PPC32 */
 };
+
+struct cpu_spec *identify_cpu(unsigned long offset)
+{
+	struct cpu_spec *s = cpu_specs;
+	struct cpu_spec **cur = &cur_cpu_spec;
+	unsigned int pvr = mfspr(SPRN_PVR);
+	int i;
+
+	s = PTRRELOC(s);
+	cur = PTRRELOC(cur);
+
+	if (*cur != NULL)
+		return PTRRELOC(*cur);
+
+	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
+		if ((pvr & s->pvr_mask) == s->pvr_value) {
+			*cur = cpu_specs + i;
+#ifdef CONFIG_PPC64
+			/* ppc64 expects identify_cpu to also call setup_cpu
+			 * for that processor. I will consolidate that at a
+			 * later time, for now, just use our friend #ifdef.
+			 * we also don't need to PTRRELOC the function pointer
+			 * on ppc64 as we are running at 0 in real mode.
+			 */
+			if (s->cpu_setup) {
+				s->cpu_setup(offset, s);
+			}
+#endif /* CONFIG_PPC64 */
+			return s;
+		}
+	BUG();
+	return NULL;
+}
+
+void do_feature_fixups(unsigned long offset, unsigned long value,
+		       void *fixup_start, void *fixup_end)
+{
+	struct fixup_entry {
+		unsigned long	mask;
+		unsigned long	value;
+		unsigned int	*start;
+		unsigned int	*end;
+	} *fcur, *fend;
+
+	fcur = fixup_start;
+	fend = fixup_end;
+
+	for (; fcur < fend; fcur++) {
+		unsigned int *pstart, *pend, *p;
+
+		if ((value & fcur->mask) == fcur->value)
+			continue;
+
+		/* These PTRRELOCs will disappear once the new scheme for
+		 * modules and vdso is implemented
+		 */
+		pstart = PTRRELOC(fcur->start);
+		pend = PTRRELOC(fcur->end);
+
+		for (p = pstart; p < pend; p++) {
+			*p = 0x60000000u;	/* nop */
+			asm volatile ("dcbst 0, %0" : : "r" (p));
+		}
+		asm volatile ("sync" : : : "memory");
+		for (p = pstart; p < pend; p++)
+			asm volatile ("icbi 0,%0" : : "r" (p));
+		asm volatile ("sync; isync" : : : "memory");
+	}
+}
...@@ -1580,11 +1580,6 @@ _STATIC(__start_initialization_iSeries) ...@@ -1580,11 +1580,6 @@ _STATIC(__start_initialization_iSeries)
li r0,0 li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1) stdu r0,-STACK_FRAME_OVERHEAD(r1)
LOAD_REG_IMMEDIATE(r3,cpu_specs)
LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
li r5,0
bl .identify_cpu
LOAD_REG_IMMEDIATE(r2,__toc_start) LOAD_REG_IMMEDIATE(r2,__toc_start)
addi r2,r2,0x4000 addi r2,r2,0x4000
addi r2,r2,0x4000 addi r2,r2,0x4000
...@@ -1964,13 +1959,6 @@ _STATIC(start_here_multiplatform) ...@@ -1964,13 +1959,6 @@ _STATIC(start_here_multiplatform)
addi r2,r2,0x4000 addi r2,r2,0x4000
add r2,r2,r26 add r2,r2,r26
LOAD_REG_IMMEDIATE(r3, cpu_specs)
add r3,r3,r26
LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
add r4,r4,r26
mr r5,r26
bl .identify_cpu
/* Do very early kernel initializations, including initial hash table, /* Do very early kernel initializations, including initial hash table,
* stab and slb setup before we turn on relocation. */ * stab and slb setup before we turn on relocation. */
...@@ -2000,13 +1988,6 @@ _STATIC(start_here_common) ...@@ -2000,13 +1988,6 @@ _STATIC(start_here_common)
li r0,0 li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1) stdu r0,-STACK_FRAME_OVERHEAD(r1)
/* Apply the CPUs-specific fixups (nop out sections not relevant
* to this CPU
*/
li r3,0
bl .do_cpu_ftr_fixups
bl .do_fw_ftr_fixups
/* ptr to current */ /* ptr to current */
LOAD_REG_IMMEDIATE(r4, init_task) LOAD_REG_IMMEDIATE(r4, init_task)
std r4,PACACURRENT(r13) std r4,PACACURRENT(r13)
......
@@ -101,80 +101,6 @@ _GLOBAL(reloc_got2)
 	mtlr	r11
 	blr
 
-/*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
-	addis	r8,r3,cpu_specs@ha
-	addi	r8,r8,cpu_specs@l
-	mfpvr	r7
-1:
-	lwz	r5,CPU_SPEC_PVR_MASK(r8)
-	and	r5,r5,r7
-	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
-	cmplw	0,r6,r5
-	beq	1f
-	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
-	b	1b
-1:
-	addis	r6,r3,cur_cpu_spec@ha
-	addi	r6,r6,cur_cpu_spec@l
-	sub	r8,r8,r3
-	stw	r8,0(r6)
-	blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-	/* Get CPU 0 features */
-	addis	r6,r3,cur_cpu_spec@ha
-	addi	r6,r6,cur_cpu_spec@l
-	lwz	r4,0(r6)
-	add	r4,r4,r3
-	lwz	r4,CPU_SPEC_FEATURES(r4)
-	/* Get the fixup table */
-	addis	r6,r3,__start___ftr_fixup@ha
-	addi	r6,r6,__start___ftr_fixup@l
-	addis	r7,r3,__stop___ftr_fixup@ha
-	addi	r7,r7,__stop___ftr_fixup@l
-	/* Do the fixup */
-1:	cmplw	0,r6,r7
-	bgelr
-	addi	r6,r6,16
-	lwz	r8,-16(r6)	/* mask */
-	and	r8,r8,r4
-	lwz	r9,-12(r6)	/* value */
-	cmplw	0,r8,r9
-	beq	1b
-	lwz	r8,-8(r6)	/* section begin */
-	lwz	r9,-4(r6)	/* section end */
-	subf.	r9,r8,r9
-	beq	1b
-	/* write nops over the section of code */
-	/* todo: if large section, add a branch at the start of it */
-	srwi	r9,r9,2
-	mtctr	r9
-	add	r8,r8,r3
-	lis	r0,0x60000000@h	/* nop */
-3:	stw	r0,0(r8)
-	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-	beq	2f
-	dcbst	0,r8		/* suboptimal, but simpler */
-	sync
-	icbi	0,r8
-2:	addi	r8,r8,4
-	bdnz	3b
-	sync			/* additional sync needed on g4 */
-	isync
-	b	1b
-
 /*
  * call_setup_cpu - call the setup_cpu function for this cpu
  * r3 = data offset, r24 = cpu number
......
@@ -246,130 +246,6 @@ _GLOBAL(__flush_dcache_icache)
 	isync
 	blr
 
-/*
- * identify_cpu and calls setup_cpu
- * In:	r3 = base of the cpu_specs array
- *	r4 = address of cur_cpu_spec
- *	r5 = relocation offset
- */
-_GLOBAL(identify_cpu)
-	mfpvr	r7
-1:
-	lwz	r8,CPU_SPEC_PVR_MASK(r3)
-	and	r8,r8,r7
-	lwz	r9,CPU_SPEC_PVR_VALUE(r3)
-	cmplw	0,r9,r8
-	beq	1f
-	addi	r3,r3,CPU_SPEC_ENTRY_SIZE
-	b	1b
-1:
-	sub	r0,r3,r5
-	std	r0,0(r4)
-	ld	r4,CPU_SPEC_SETUP(r3)
-	cmpdi	0,r4,0
-	add	r4,r4,r5
-	beqlr
-	ld	r4,0(r4)
-	add	r4,r4,r5
-	mtctr	r4
-	/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
-	mr	r4,r3
-	mr	r3,r5
-	bctr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-	/* Get CPU 0 features */
-	LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
-	sub	r6,r6,r3
-	ld	r4,0(r6)
-	sub	r4,r4,r3
-	ld	r4,CPU_SPEC_FEATURES(r4)
-	/* Get the fixup table */
-	LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
-	sub	r6,r6,r3
-	LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
-	sub	r7,r7,r3
-	/* Do the fixup */
-1:	cmpld	r6,r7
-	bgelr
-	addi	r6,r6,32
-	ld	r8,-32(r6)	/* mask */
-	and	r8,r8,r4
-	ld	r9,-24(r6)	/* value */
-	cmpld	r8,r9
-	beq	1b
-	ld	r8,-16(r6)	/* section begin */
-	ld	r9,-8(r6)	/* section end */
-	subf.	r9,r8,r9
-	beq	1b
-	/* write nops over the section of code */
-	/* todo: if large section, add a branch at the start of it */
-	srwi	r9,r9,2
-	mtctr	r9
-	sub	r8,r8,r3
-	lis	r0,0x60000000@h	/* nop */
-3:	stw	r0,0(r8)
-	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-	beq	2f
-	dcbst	0,r8		/* suboptimal, but simpler */
-	sync
-	icbi	0,r8
-2:	addi	r8,r8,4
-	bdnz	3b
-	sync			/* additional sync needed on g4 */
-	isync
-	b	1b
-
-/*
- * do_fw_ftr_fixups - goes through the list of firmware feature fixups
- * and writes nop's over sections of code that don't apply for this firmware.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_fw_ftr_fixups)
-	/* Get firmware features */
-	LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
-	sub	r6,r6,r3
-	ld	r4,0(r6)
-	/* Get the fixup table */
-	LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
-	sub	r6,r6,r3
-	LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
-	sub	r7,r7,r3
-	/* Do the fixup */
-1:	cmpld	r6,r7
-	bgelr
-	addi	r6,r6,32
-	ld	r8,-32(r6)	/* mask */
-	and	r8,r8,r4
-	ld	r9,-24(r6)	/* value */
-	cmpld	r8,r9
-	beq	1b
-	ld	r8,-16(r6)	/* section begin */
-	ld	r9,-8(r6)	/* section end */
-	subf.	r9,r8,r9
-	beq	1b
-	/* write nops over the section of code */
-	/* todo: if large section, add a branch at the start of it */
-	srwi	r9,r9,2
-	mtctr	r9
-	sub	r8,r8,r3
-	lis	r0,0x60000000@h	/* nop */
-3:	stw	r0,0(r8)
-BEGIN_FTR_SECTION
-	dcbst	0,r8		/* suboptimal, but simpler */
-	sync
-	icbi	0,r8
-END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
-	addi	r8,r8,4
-	bdnz	3b
-	sync			/* additional sync needed on g4 */
-	isync
-	b	1b
-
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 /*
......
@@ -91,6 +91,7 @@ int ucache_bsize;
 unsigned long __init early_init(unsigned long dt_ptr)
 {
 	unsigned long offset = reloc_offset();
+	struct cpu_spec *spec;
 
 	/* First zero the BSS -- use memset_io, some platforms don't have
 	 * caches on yet */
@@ -100,8 +101,11 @@ unsigned long __init early_init(unsigned long dt_ptr)
 	 * Identify the CPU type and fix up code sections
 	 * that depend on which cpu we have.
 	 */
-	identify_cpu(offset, 0);
-	do_cpu_ftr_fixups(offset);
+	spec = identify_cpu(offset);
+
+	do_feature_fixups(offset, spec->cpu_features,
+			  PTRRELOC(&__start___ftr_fixup),
+			  PTRRELOC(&__stop___ftr_fixup));
 
 	return KERNELBASE + offset;
 }
......
@@ -170,6 +170,9 @@ void __init setup_paca(int cpu)
 
 void __init early_setup(unsigned long dt_ptr)
 {
+	/* Identify CPU type */
+	identify_cpu(0);
+
 	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
 	setup_paca(0);
 
@@ -348,6 +351,14 @@ void __init setup_system(void)
 {
 	DBG(" -> setup_system()\n");
 
+	/* Apply the CPUs-specific and firmware specific fixups to kernel
+	 * text (nop out sections not relevant to this CPU or this firmware)
+	 */
+	do_feature_fixups(0, cur_cpu_spec->cpu_features,
+			  &__start___ftr_fixup, &__stop___ftr_fixup);
+	do_feature_fixups(0, powerpc_firmware_features,
+			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+
 	/*
 	 * Unflatten the device-tree passed by prom_init or kexec
 	 */
......
@@ -694,6 +694,11 @@ void * __init iSeries_early_setup(void)
 {
 	unsigned long phys_mem_size;
 
+	/* Identify CPU type. This is done again by the common code later
+	 * on but calling this function multiple times is fine.
+	 */
+	identify_cpu(0);
+
 	powerpc_firmware_features |= FW_FEATURE_ISERIES;
 	powerpc_firmware_features |= FW_FEATURE_LPAR;
......
@@ -109,80 +109,6 @@ _GLOBAL(reloc_got2)
 	mtlr	r11
 	blr
 
-/*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
-	addis	r8,r3,cpu_specs@ha
-	addi	r8,r8,cpu_specs@l
-	mfpvr	r7
-1:
-	lwz	r5,CPU_SPEC_PVR_MASK(r8)
-	and	r5,r5,r7
-	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
-	cmplw	0,r6,r5
-	beq	1f
-	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
-	b	1b
-1:
-	addis	r6,r3,cur_cpu_spec@ha
-	addi	r6,r6,cur_cpu_spec@l
-	sub	r8,r8,r3
-	stw	r8,0(r6)
-	blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-	/* Get CPU 0 features */
-	addis	r6,r3,cur_cpu_spec@ha
-	addi	r6,r6,cur_cpu_spec@l
-	lwz	r4,0(r6)
-	add	r4,r4,r3
-	lwz	r4,CPU_SPEC_FEATURES(r4)
-	/* Get the fixup table */
-	addis	r6,r3,__start___ftr_fixup@ha
-	addi	r6,r6,__start___ftr_fixup@l
-	addis	r7,r3,__stop___ftr_fixup@ha
-	addi	r7,r7,__stop___ftr_fixup@l
-	/* Do the fixup */
-1:	cmplw	0,r6,r7
-	bgelr
-	addi	r6,r6,16
-	lwz	r8,-16(r6)	/* mask */
-	and	r8,r8,r4
-	lwz	r9,-12(r6)	/* value */
-	cmplw	0,r8,r9
-	beq	1b
-	lwz	r8,-8(r6)	/* section begin */
-	lwz	r9,-4(r6)	/* section end */
-	subf.	r9,r8,r9
-	beq	1b
-	/* write nops over the section of code */
-	/* todo: if large section, add a branch at the start of it */
-	srwi	r9,r9,2
-	mtctr	r9
-	add	r8,r8,r3
-	lis	r0,0x60000000@h	/* nop */
-3:	stw	r0,0(r8)
-	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-	beq	2f
-	dcbst	0,r8		/* suboptimal, but simpler */
-	sync
-	icbi	0,r8
-2:	addi	r8,r8,4
-	bdnz	3b
-	sync			/* additional sync needed on g4 */
-	isync
-	b	1b
-
 /*
  * call_setup_cpu - call the setup_cpu function for this cpu
  * r3 = data offset, r24 = cpu number
......
@@ -38,6 +38,7 @@
 #include <asm/nvram.h>
 #include <asm/xmon.h>
 #include <asm/ocp.h>
+#include <asm/prom.h>
 
 #define USES_PPC_SYS	(defined(CONFIG_85xx) || defined(CONFIG_83xx) || \
 			 defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \
@@ -53,8 +54,6 @@
 extern void platform_init(unsigned long r3, unsigned long r4,
 		unsigned long r5, unsigned long r6, unsigned long r7);
 
-extern void identify_cpu(unsigned long offset, unsigned long cpu);
-extern void do_cpu_ftr_fixups(unsigned long offset);
 extern void reloc_got2(unsigned long offset);
 
 extern void ppc6xx_idle(void);
@@ -301,6 +300,7 @@ early_init(int r3, int r4, int r5)
 {
 	unsigned long phys;
 	unsigned long offset = reloc_offset();
+	struct cpu_spec *spec;
 
 	/* Default */
 	phys = offset + KERNELBASE;
@@ -313,8 +313,10 @@ early_init(int r3, int r4, int r5)
 	 * Identify the CPU type and fix up code sections
 	 * that depend on which cpu we have.
 	 */
-	identify_cpu(offset, 0);
-	do_cpu_ftr_fixups(offset);
+	spec = identify_cpu(offset);
+	do_feature_fixups(offset, spec->cpu_features,
+			  PTRRELOC(&__start___ftr_fixup),
+			  PTRRELOC(&__stop___ftr_fixup));
 
 	return phys;
 }
......
@@ -89,8 +89,11 @@ struct cpu_spec {
 
 extern struct cpu_spec		*cur_cpu_spec;
 
-extern void identify_cpu(unsigned long offset, unsigned long cpu);
-extern void do_cpu_ftr_fixups(unsigned long offset);
+extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+
+extern struct cpu_spec *identify_cpu(unsigned long offset);
+extern void do_feature_fixups(unsigned long offset, unsigned long value,
+			      void *fixup_start, void *fixup_end);
 
 #endif /* __ASSEMBLY__ */
......
@@ -96,6 +96,8 @@ extern void machine_check_fwnmi(void);
 /* This is true if we are using the firmware NMI handler (typically LPAR) */
 extern int fwnmi_active;
 
+extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
+
 #else /* __ASSEMBLY__ */
 
 #define BEGIN_FW_FTR_SECTION		96:
......