Commit 8934d673 authored by Benjamin Herrenschmidt, committed by Paul Mackerras

PPC32: Extend CPU and idle handling for new 7xx/7xxx PPC cpus.

parent e8c25900
@@ -43,6 +43,9 @@ head-$(CONFIG_4xx) := head_4xx.o
head-$(CONFIG_440) := head_44x.o
HEAD := arch/ppc/kernel/$(head-y)
+ifdef CONFIG_6xx
+HEAD += arch/ppc/kernel/idle_6xx.o
+endif
core-y += arch/ppc/kernel/ arch/ppc/platforms/ \
arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/
......
@@ -14,6 +14,7 @@ HEAD-y := head.o
HEAD-$(CONFIG_40x) := head_4xx.o
HEAD-$(CONFIG_8xx) := head_8xx.o
HEAD-$(CONFIG_PPC_ISERIES) := iSeries_head.o
+HEAD-$(CONFIG_6xx) += idle_6xx.o
EXTRA_TARGETS := $(HEAD-y)
@@ -23,8 +24,7 @@ obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
process.o signal.o ptrace.o align.o \
semaphore.o syscalls.o setup.o \
cputable.o ppc_htab.o
-obj-$(CONFIG_6xx) += l2cr.o ppc6xx_idle.o
-obj-$(CONFIG_ALL_PPC) += ppc6xx_idle.o
+obj-$(CONFIG_6xx) += l2cr.o
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
obj-$(CONFIG_PCI) += pci.o
ifneq ($(CONFIG_PPC_ISERIES),y)
......
@@ -18,18 +18,19 @@
struct cpu_spec* cur_cpu_spec[NR_CPUS];
-extern void __setup_cpu_601(int cpu_nr);
-extern void __setup_cpu_603(int cpu_nr);
-extern void __setup_cpu_604(int cpu_nr);
-extern void __setup_cpu_750(int cpu_nr);
-extern void __setup_cpu_7400(int cpu_nr);
-extern void __setup_cpu_7410(int cpu_nr);
-extern void __setup_cpu_7450(int cpu_nr);
-extern void __setup_cpu_7450_23(int cpu_nr);
-extern void __setup_cpu_7455(int cpu_nr);
-extern void __setup_cpu_power3(int cpu_nr);
-extern void __setup_cpu_8xx(int cpu_nr);
-extern void __setup_cpu_generic(int cpu_nr);
+extern void __setup_cpu_601(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_603(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_604(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_750(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_750cx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_7400(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_7410(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_7450(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_7455(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_power3(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_8xx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
!defined(CONFIG_POWER3) && !defined(CONFIG_PPC_ISERIES))
@@ -136,7 +137,7 @@ struct cpu_spec cpu_specs[] = {
CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_CAN_NAP,
COMMON_PPC,
32, 32,
-__setup_cpu_750
+__setup_cpu_750cx
},
{ /* 750CX (82201 and 82202) */
0xfffffff0, 0x00082200, "750CX",
@@ -144,7 +145,7 @@ struct cpu_spec cpu_specs[] = {
CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_CAN_NAP,
COMMON_PPC,
32, 32,
-__setup_cpu_750
+__setup_cpu_750cx
},
{ /* 750CXe (82214) */
0xfffffff0, 0x00082210, "750CXe",
@@ -152,7 +153,7 @@ struct cpu_spec cpu_specs[] = {
CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_CAN_NAP,
COMMON_PPC,
32, 32,
-__setup_cpu_750
+__setup_cpu_750cx
},
{ /* 750FX rev 2.0 must disable HID0[DPM] */
0xffffffff, 0x70000200, "750FX",
@@ -166,16 +167,17 @@ struct cpu_spec cpu_specs[] = {
{ /* 750FX (All revs except 2.0) */
0xffff0000, 0x70000000, "750FX",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_CAN_DOZE | CPU_FTR_USE_TB |
-CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_CAN_NAP,
+CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_CAN_NAP |
+CPU_FTR_DUAL_PLL_750FX,
COMMON_PPC,
32, 32,
-__setup_cpu_750
+__setup_cpu_750fx
},
{ /* 740/750 (L2CR bit need fixup for 740) */
0xffff0000, 0x00080000, "740/750",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_CAN_DOZE | CPU_FTR_USE_TB |
-CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE,
+CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_CAN_NAP,
COMMON_PPC,
32, 32,
__setup_cpu_750
@@ -210,7 +212,7 @@ struct cpu_spec cpu_specs[] = {
{ /* 7450 2.0 - no doze/nap */
0xffffffff, 0x80000200, "7450",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
32, 32,
@@ -220,7 +222,8 @@ struct cpu_spec cpu_specs[] = {
0xffffffff, 0x80000201, "7450",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
+CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
+CPU_FTR_L3_DISABLE_NAP,
COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
32, 32,
__setup_cpu_7450
@@ -229,16 +232,35 @@ struct cpu_spec cpu_specs[] = {
0xffff0000, 0x80000000, "7450",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
-COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
-32, 32,
-__setup_cpu_7450_23
-},
-{ /* 7455 */
+CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR,
+COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
+32, 32,
+__setup_cpu_7450
+},
+{ /* 7455 rev 1.x */
+0xffffff00, 0x80010100, "7455",
+CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
+COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
+32, 32,
+__setup_cpu_7455
+},
+{ /* 7455 rev 2.0 */
+0xffffffff, 0x80010200, "7455",
+CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
+CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
+CPU_FTR_L3_DISABLE_NAP,
+COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
+32, 32,
+__setup_cpu_7455
+},
+{ /* 7455 others */
0xffff0000, 0x80010000, "7455",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
+CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR,
COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
32, 32,
__setup_cpu_7455
......
@@ -67,7 +67,21 @@ transfer_to_handler:
addi r11,r1,STACK_FRAME_OVERHEAD
stw r11,PT_REGS(r12)
b 3f
-2: /* if from kernel, check for stack overflow */
+2: /* if from kernel, check interrupted DOZE/NAP mode and
+ * check for stack overflow
+ */
+#ifdef CONFIG_6xx
+mfspr r11,SPRN_HID0
+mtcr r11
+BEGIN_FTR_SECTION
+bt- 8,power_save_6xx_restore /* Check DOZE */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+BEGIN_FTR_SECTION
+bt- 9,power_save_6xx_restore /* Check NAP */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#endif /* CONFIG_6xx */
+.globl transfer_to_handler_cont
+transfer_to_handler_cont:
lwz r11,THREAD_INFO-THREAD(r12)
cmplw r1,r11 /* if r1 <= current->thread_info */
ble- stack_ovf /* then the kernel stack overflowed */
......
@@ -168,6 +168,18 @@ __after_mmu_off:
bl setup_disp_bat
#endif
+/*
+ * Call setup_cpu for CPU 0 and initialize 6xx Idle
+ */
+bl reloc_offset
+li r24,0 /* cpu# */
+bl call_setup_cpu /* Call setup_cpu for this CPU */
+#ifdef CONFIG_6xx
+bl reloc_offset
+bl init_idle_6xx
+#endif /* CONFIG_6xx */
#ifndef CONFIG_APUS
/*
* We need to run with _start at physical address 0.
@@ -344,6 +356,10 @@ i##n: \
*/
. = 0x200
MachineCheck:
+BEGIN_FTR_SECTION
+dssall
+sync
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtspr SPRG0,r10
mtspr SPRG1,r11
mfcr r10
@@ -1182,6 +1198,10 @@ __secondary_start:
mr r4,r24
bl identify_cpu
bl call_setup_cpu /* Call setup_cpu for this CPU */
+#ifdef CONFIG_6xx
+lis r3,-KERNELBASE@h
+bl init_idle_6xx
+#endif /* CONFIG_6xx */
/* get current_thread_info and current */
lis r1,secondary_ti@ha
@@ -1236,14 +1256,30 @@ _GLOBAL(__setup_cpu_750)
bl setup_750_7400_hid0
mtlr r4
blr
+_GLOBAL(__setup_cpu_750cx)
+mflr r4
+bl setup_common_caches
+bl setup_750_7400_hid0
+bl setup_750cx
+mtlr r4
+blr
+_GLOBAL(__setup_cpu_750fx)
+mflr r4
+bl setup_common_caches
+bl setup_750_7400_hid0
+bl setup_750fx
+mtlr r4
+blr
_GLOBAL(__setup_cpu_7400)
mflr r4
+bl setup_7400_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_7410)
mflr r4
+bl setup_7410_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
li r3,0
@@ -1253,19 +1289,13 @@ _GLOBAL(__setup_cpu_7410)
_GLOBAL(__setup_cpu_7450)
mflr r4
bl setup_common_caches
-bl setup_7450_hid0
-mtlr r4
-blr
-_GLOBAL(__setup_cpu_7450_23)
-mflr r4
-bl setup_common_caches
-bl setup_7450_23_hid0
+bl setup_745x_specifics
mtlr r4
blr
_GLOBAL(__setup_cpu_7455)
mflr r4
bl setup_common_caches
-bl setup_7455_hid0
+bl setup_745x_specifics
mtlr r4
blr
_GLOBAL(__setup_cpu_power3)
@@ -1308,6 +1338,47 @@ setup_604_hid0:
isync
blr
+/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
+ * errata we work around here.
+ * Moto MPC710CE.pdf describes them, those are errata
+ * #3, #4 and #5
+ * Note that we assume the firmware didn't choose to
+ * apply other workarounds (there are other ones documented
+ * in the .pdf). It appears that Apple firmware only works
+ * around #3 and with the same fix we use. We may want to
+ * check if the CPU is using 60x bus mode in which case
+ * the workaround for errata #4 is useless. Also, we may
+ * want to explicitly clear HID0_NOPDST as this is not
+ * needed once we have applied workaround #5 (though it's
+ * not set by Apple's firmware at least).
+ */
+setup_7400_workarounds:
+mfpvr r3
+rlwinm r3,r3,0,20,31
+cmpwi 0,r3,0x0207
+ble 1f
+blr
+setup_7410_workarounds:
+mfpvr r3
+rlwinm r3,r3,0,20,31
+cmpwi 0,r3,0x0100
+bnelr
+1:
+mfspr r11,SPRN_MSSSR0
+/* Errata #3: Set L1OPQ_SIZE to 0x10 */
+rlwinm r11,r11,0,9,6
+oris r11,r11,0x0100
+/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
+oris r11,r11,0x0002
+/* Errata #5: Set DRLT_SIZE to 0x01 */
+rlwinm r11,r11,0,5,2
+oris r11,r11,0x0800
+sync
+mtspr SPRN_MSSSR0,r11
+sync
+isync
+blr
/* 740/750/7400/7410
* Enable Store Gathering (SGE), Address Broadcast (ABE),
* Branch History Table (BHTE), Branch Target ICache (BTIC)
@@ -1330,7 +1401,31 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
isync
blr
-/* 7450
+/* 750cx specific
+ * Looks like we have to disable NAP feature for some PLL settings...
+ * (waiting for confirmation)
+ */
+setup_750cx:
+mfspr r10, SPRN_HID1
+rlwinm r10,r10,4,28,31
+cmpi cr0,r10,7
+cmpi cr1,r10,9
+cmpi cr2,r10,11
+cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
+cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
+bnelr
+lwz r6,CPU_SPEC_FEATURES(r5)
+li r7,CPU_FTR_CAN_NAP
+andc r6,r6,r7
+stw r6,CPU_SPEC_FEATURES(r5)
+blr
+/* 750fx specific
+ */
+setup_750fx:
+blr
+/* MPC 745x
* Enable Store Gathering (SGE), Branch Folding (FOLD)
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
@@ -1338,8 +1433,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
* Timebase has to be running or we wouldn't have made it here,
* just ensure we don't disable it.
* Clear Instruction cache throttling (ICTC)
+ * Enable L2 HW prefetch
*/
-setup_7450_hid0:
+setup_745x_specifics:
/* We check for the presence of an L3 cache setup by
* the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier
@@ -1347,12 +1443,13 @@ setup_7450_hid0:
mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h
beq 1f
-li r7,CPU_FTR_CAN_NAP
lwz r6,CPU_SPEC_FEATURES(r5)
+andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
+beq 1f
+li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
1:
-setup_7450_23_hid0:
mfspr r11,HID0
/* All of the bits we have to set.....
@@ -1373,34 +1470,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr HID0,r11
sync
isync
-blr
-/* 7455
- * Enable Store Gathering (SGE), Branch Folding (FOLD)
- * Branch History Table (BHTE), Branch Target ICache (BTIC)
- * Dynamic Power Management (DPM), Speculative (SPD)
- * Ensure our data cache instructions really operate.
- * Timebase has to be running or we wouldn't have made it here,
- * just ensure we don't disable it.
- * Clear Instruction cache throttling (ICTC)
- */
-setup_7455_hid0:
-mfspr r11,HID0
-sync
-/* All of the bits we have to set.....
- */
-ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
-oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
-/* All of the bits we have to clear....
- */
-li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
-andc r11,r11,r3 /* clear SPD: enable speculative */
-li r3,0
-mtspr ICTC,r3 /* Instruction Cache Throttling off */
-isync
-mtspr HID0,r11
-sync
-isync
+/* Enable L2 HW prefetch
+ */
+mfspr r3,SPRN_MSSCR0
+ori r3,r3,3
+sync
+mtspr SPRN_MSSCR0,r3
+sync
+isync
blr
@@ -1446,11 +1522,6 @@ load_up_mmu:
* This is where the main kernel code starts.
*/
start_here:
-/* Call setup_cpu for CPU 0 */
-li r3,0 /* data offset */
-li r24,0 /* cpu# */
-bl call_setup_cpu
/* ptr to current */
lis r2,init_task@h
ori r2,r2,init_task@l
@@ -1562,7 +1633,7 @@ BEGIN_FTR_SECTION
dssall
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-3:
+3: isync
#ifdef CONFIG_PPC64BRIDGE
slbie r4
#endif /* CONFIG_PPC64BRIDGE */
......
/*
* This file contains the power_save function for 6xx & 7xxx CPUs
* rewritten in assembler
*
* Warning! This code assumes that if your machine has a 750fx
* it will have PLL 1 set to low speed mode (used during NAP/DOZE).
* If this is not the case, some additional changes will have to
* be made to check a runtime variable (a bit like powersave-nap).
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#undef DEBUG
.text
/*
* Init idle, called at early CPU setup time from head.S for each CPU
* Make sure no leftover NAP mode remains in HID0, and save default
* values for some CPU specific registers. Called with r24
* containing the CPU number and r3 the reloc offset
*/
.globl init_idle_6xx
init_idle_6xx:
BEGIN_FTR_SECTION
mfspr r4,SPRN_HID0
rlwinm r4,r4,0,10,8 /* Clear NAP */
mtspr SPRN_HID0, r4
b 1f
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
blr
1:
slwi r5,r24,2
add r5,r5,r3
BEGIN_FTR_SECTION
mfspr r4,SPRN_MSSCR0
addis r6,r5, nap_save_msscr0@ha
stw r4,nap_save_msscr0@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
mfspr r4,SPRN_HID1
addis r6,r5,nap_save_hid1@ha
stw r4,nap_save_hid1@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
blr
/*
* Here is the power_save_6xx function. This could eventually be
* split into several functions, with the function pointer changed
* depending on the various features.
*/
.globl ppc6xx_idle
ppc6xx_idle:
/* Check if we can nap or doze, put HID0 mask in r3
*/
lis r3, 0
BEGIN_FTR_SECTION
lis r3,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
/* We must dynamically check for the NAP feature as it
* can be cleared by CPU init after the fixups are done
*/
lis r4,cur_cpu_spec@ha
lwz r4,cur_cpu_spec@l(r4)
lwz r4,CPU_SPEC_FEATURES(r4)
andi. r0,r4,CPU_FTR_CAN_NAP
beq 1f
/* Now check if user or arch enabled NAP mode */
lis r4,powersave_nap@ha
lwz r4,powersave_nap@l(r4)
cmpi 0,r4,0
beq 1f
lis r3,HID0_NAP@h
1:
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
cmpi 0,r3,0
beqlr
/* Clear MSR:EE */
mfmsr r7
rlwinm r0,r7,0,17,15
mtmsr r0
/* Check current_thread_info()->flags */
rlwinm r4,r1,0,0,18
lwz r4,TI_FLAGS(r4)
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
mtmsr r7 /* out of line this ? */
blr
1:
/* Some pre-nap cleanups needed on some CPUs */
andis. r0,r3,HID0_NAP@h
beq 2f
BEGIN_FTR_SECTION
/* Disable L2 prefetch on some 745x and try to ensure
* L2 prefetch engines are idle. As explained by the errata
* text, we can't be sure they are; we just hope very hard
* that it will be enough (sic!). At least I noticed Apple
* doesn't even bother doing the dcbf's here...
*/
mfspr r4,SPRN_MSSCR0
rlwinm r4,r4,0,0,29
sync
mtspr SPRN_MSSCR0,r4
sync
isync
lis r4,KERNELBASE@h
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
#ifdef DEBUG
lis r6,nap_enter_count@ha
lwz r4,nap_enter_count@l(r6)
addi r4,r4,1
stw r4,nap_enter_count@l(r6)
#endif
2:
BEGIN_FTR_SECTION
/* Go to low speed mode on some 750FX */
lis r4,powersave_lowspeed@ha
lwz r4,powersave_lowspeed@l(r4)
cmpi 0,r4,0
beq 1f
mfspr r4,SPRN_HID1
oris r4,r4,0x0001
mtspr SPRN_HID1,r4
1:
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
/* Go to NAP or DOZE now */
mfspr r4,SPRN_HID0
lis r5,(HID0_NAP|HID0_SLEEP)@h
BEGIN_FTR_SECTION
oris r5,r5,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
andc r4,r4,r5
or r4,r4,r3
BEGIN_FTR_SECTION
oris r4,r4,HID0_DPM@h /* that should be done once for all */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr SPRN_HID0,r4
BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
ori r7,r7,MSR_EE /* Could be omitted (already set) */
oris r7,r7,MSR_POW@h
sync
isync
mtmsr r7
isync
sync
blr
/*
* Return from NAP/DOZE mode and restore some CPU specific registers;
* we are called with DR/IR still off and r2 containing physical
* address of current.
*/
.globl power_save_6xx_restore
power_save_6xx_restore:
mfspr r11,SPRN_HID0
rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
BEGIN_FTR_SECTION
rlwinm r11,r11,0,9,7 /* Clear DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
mtspr SPRN_HID0, r11
#ifdef DEBUG
beq cr1,1f
lis r11,(nap_return_count-KERNELBASE)@ha
lwz r9,nap_return_count@l(r11)
addi r9,r9,1
stw r9,nap_return_count@l(r11)
1:
#endif
rlwinm r9,r1,0,0,18
tophys(r9,r9)
lwz r11,TI_CPU(r9)
slwi r11,r11,2
/* TODO: make sure all these are in the same page
* and load r22 (@ha part + CPU offset) only once
*/
BEGIN_FTR_SECTION
beq cr1,1f
addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
lwz r9,nap_save_msscr0@l(r9)
mtspr SPRN_MSSCR0, r9
sync
isync
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
lwz r9,nap_save_hid1@l(r9)
mtspr SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
b transfer_to_handler_cont
.data
.globl nap_save_msscr0
nap_save_msscr0:
.space 4*NR_CPUS
.globl nap_save_hid1
nap_save_hid1:
.space 4*NR_CPUS
.globl powersave_nap
powersave_nap:
.long 0
.globl powersave_lowspeed
powersave_lowspeed:
.long 0
#ifdef DEBUG
.globl nap_enter_count
nap_enter_count:
.space 4
.globl nap_return_count
nap_return_count:
.space 4
#endif
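The idle code above keys off two data words exported by this file, powersave_nap and powersave_lowspeed. A rough C-side view of those knobs, assuming only that the extern names match the .globl symbols above (how a given platform actually flips them, e.g. from board setup code or a proc interface, is not defined by this commit):

extern unsigned long powersave_nap;       /* non-zero: allow HID0[NAP] in idle */
extern unsigned long powersave_lowspeed;  /* non-zero: drop a 750FX to the low-speed PLL before napping */

/* Hypothetical platform hook; the function name is illustrative only. */
static void enable_deep_idle_example(void)
{
	powersave_nap = 1;        /* let ppc6xx_idle pick NAP when CPU_FTR_CAN_NAP is set */
	powersave_lowspeed = 1;   /* also request the 750FX low-speed PLL while napping */
}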
@@ -188,8 +188,10 @@ _GLOBAL(do_cpu_ftr_fixups)
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset, r24 = cpu number
*
- * Don't change register layout, the setup function may rely
- * on r5 containing a relocated pointer to the current cpu spec.
+ * Setup function is called with:
+ * r3 = data offset
+ * r4 = CPU number
+ * r5 = ptr to CPU spec (relocated)
*/
_GLOBAL(call_setup_cpu)
addis r5,r3,cur_cpu_spec@ha
@@ -200,7 +202,7 @@ _GLOBAL(call_setup_cpu)
lwz r6,CPU_SPEC_SETUP(r5)
add r6,r6,r3
mtctr r6
-mr r3,r24
+mr r4,r24
bctr
#ifndef CONFIG_PPC_ISERIES /* iSeries version is in iSeries_misc.S */
......
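In C terms, the convention documented above amounts to looking up this CPU's cur_cpu_spec entry and calling its cpu_setup hook with the relocation offset, the CPU number and the spec pointer. A simplified sketch, which deliberately ignores the physical-address relocation the real assembly must do before the MMU is up:

#include <asm/cputable.h>	/* struct cpu_spec, cur_cpu_spec[] */

/* Rough C equivalent of call_setup_cpu after this change; illustrative only. */
static void call_setup_cpu_sketch(unsigned long offset, int cpu_nr)
{
	struct cpu_spec *spec = cur_cpu_spec[cpu_nr];

	if (spec != NULL && spec->cpu_setup != NULL)
		spec->cpu_setup(offset, cpu_nr, spec);	/* r3, r4, r5 in the assembly */
}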
/*
* BK Id: %F% %I% %G% %U% %#%
*/
/*
* power_save() routine for classic PPC CPUs.
*
* Written by Cort Dougan (cort@cs.nmt.edu)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stringify.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/processor.h>
unsigned long powersave_nap = 0;
#define DSSALL .long (0x1f<<26)+(0x10<<21)+(0x336<<1)
void
ppc6xx_idle(void)
{
unsigned long hid0;
int nap = powersave_nap;
/* 7450 has no DOZE mode; we return if powersave_nap
* isn't enabled
*/
if (!(nap || (cur_cpu_spec[smp_processor_id()]->cpu_features
& CPU_FTR_CAN_DOZE)))
return;
/*
* Disable interrupts to prevent a lost wakeup
* when going to sleep. This is necessary even with
* RTLinux since we are not guaranteed an interrupt
* didn't come in and is waiting for a __sti() before
* emulating one. This way, we really do hard disable.
*
* We assume that we're sti-ed when we come in here. We
* are in the idle loop so if we're cli-ed then it's a bug
* anyway.
* -- Cort
*/
_nmask_and_or_msr(MSR_EE, 0);
if (!need_resched()) {
__asm__ __volatile__("mfspr %0,1008":"=r"(hid0):);
hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
hid0 |= (powersave_nap ? HID0_NAP : HID0_DOZE);
if (!(cur_cpu_spec[smp_processor_id()]->cpu_features
& CPU_FTR_NO_DPM))
hid0 |= HID0_DPM;
__asm__ __volatile__("mtspr 1008,%0"::"r"(hid0));
/* Flush pending data streams; consider this instruction
* to exist on all AltiVec capable CPUs
*/
__asm__ __volatile__("98: " __stringify(DSSALL) "\n"
" sync\n"
"99:\n"
".section __ftr_fixup,\"a\"\n"
" .long %0\n"
" .long %1\n"
" .long 98b\n"
" .long 99b\n"
".previous"::"i"
(CPU_FTR_ALTIVEC), "i"(CPU_FTR_ALTIVEC));
/* set the POW bit in the MSR, and enable interrupts
* so we wake up sometime! */
_nmask_and_or_msr(0, MSR_POW | MSR_EE);
}
_nmask_and_or_msr(0, MSR_EE);
}
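As a quick arithmetic check, the word built by the DSSALL macro in the file above is (0x1f<<26) + (0x10<<21) + (0x336<<1) = 0x7c000000 + 0x02000000 + 0x66c = 0x7e00066c, the dssall instruction word. A tiny standalone check of that arithmetic:

#include <assert.h>

int main(void)
{
	/* Same arithmetic as the DSSALL macro: (0x1f<<26)+(0x10<<21)+(0x336<<1) */
	unsigned long dssall = (0x1fUL << 26) + (0x10UL << 21) + (0x336UL << 1);

	assert(dssall == 0x7e00066cUL);
	return 0;
}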
@@ -29,6 +29,10 @@
/* This structure can grow; its real size is used by head.S code
* via the mkdefs mechanism.
*/
+struct cpu_spec;
+typedef void (*cpu_setup_t)(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
struct cpu_spec {
/* CPU is matched via (PVR & pvr_mask) == pvr_value */
unsigned int pvr_mask;
@@ -45,7 +49,7 @@ struct cpu_spec {
/* this is called to initialize various CPU bits like L1 cache,
* BHT, SPD, etc... from head.S before branching to identify_machine
*/
-void (*cpu_setup)(int cpu_nr);
+cpu_setup_t cpu_setup;
};
extern struct cpu_spec cpu_specs[];
@@ -66,6 +70,9 @@ extern struct cpu_spec *cur_cpu_spec[];
#define CPU_FTR_HPTE_TABLE 0x00000200
#define CPU_FTR_CAN_NAP 0x00000400
#define CPU_FTR_L3CR 0x00000800
+#define CPU_FTR_L3_DISABLE_NAP 0x00001000
+#define CPU_FTR_NAP_DISABLE_L2_PR 0x00002000
+#define CPU_FTR_DUAL_PLL_750FX 0x00004000
#define CPU_FTR_NO_DPM 0x00008000
#ifdef __ASSEMBLY__
......
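To see how the header pieces fit together, here is a hypothetical cpu_specs[] entry wired up through the new cpu_setup_t hook. The values follow the 750FX fragments visible in the cputable.c hunks above, but the field names beyond pvr_mask/pvr_value (cpu_name, icache_bsize, dcache_bsize, ...) are assumptions, since the header is only shown in part:

extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec *spec);

/* Hypothetical table entry; field names past pvr_mask/pvr_value are assumed. */
static struct cpu_spec example_750fx_spec = {
	.pvr_mask		= 0xffff0000,
	.pvr_value		= 0x70000000,
	.cpu_name		= "750FX",
	.cpu_features		= CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_CAN_DOZE | CPU_FTR_USE_TB |
				  CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE |
				  CPU_FTR_CAN_NAP | CPU_FTR_DUAL_PLL_750FX,
	.cpu_user_features	= COMMON_PPC,
	.icache_bsize		= 32,
	.dcache_bsize		= 32,
	.cpu_setup		= __setup_cpu_750fx,	/* new cpu_setup_t signature */
};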