Commit 76b6688a authored by Benjamin Herrenschmidt, committed by Paul Mackerras

PPC32: Factor out common code for reading/setting various SPRs.

parent 28afe878
@@ -21,7 +21,7 @@ obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
process.o signal.o ptrace.o align.o \
semaphore.o syscalls.o setup.o \
cputable.o ppc_htab.o
obj-$(CONFIG_6xx) += l2cr.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_PCI) += pci-dma.o
......
/*
* This file contains low level CPU setup functions.
* Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/offsets.h>
_GLOBAL(__setup_cpu_601)
blr
_GLOBAL(__setup_cpu_603)
b setup_common_caches
_GLOBAL(__setup_cpu_604)
mflr r4
bl setup_common_caches
bl setup_604_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_750)
mflr r4
bl setup_common_caches
bl setup_750_7400_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_750cx)
mflr r4
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750cx
mtlr r4
blr
_GLOBAL(__setup_cpu_750fx)
mflr r4
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750fx
mtlr r4
blr
_GLOBAL(__setup_cpu_7400)
mflr r4
bl setup_7400_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_7410)
mflr r4
bl setup_7410_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
li r3,0
mtspr SPRN_L2CR2,r3
mtlr r4
blr
_GLOBAL(__setup_cpu_7450)
mflr r4
bl setup_common_caches
bl setup_745x_specifics
mtlr r4
blr
_GLOBAL(__setup_cpu_7455)
mflr r4
bl setup_common_caches
bl setup_745x_specifics
mtlr r4
blr
/* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches:
mfspr r11,HID0
andi. r0,r11,HID0_DCE
#ifdef CONFIG_DCACHE_DISABLE
ori r11,r11,HID0_ICE
#else
ori r11,r11,HID0_ICE|HID0_DCE
#endif
ori r8,r11,HID0_ICFI
bne 1f /* don't invalidate the D-cache */
ori r8,r8,HID0_DCI /* unless it wasn't enabled */
1: sync
mtspr HID0,r8 /* enable and invalidate caches */
sync
mtspr HID0,r11 /* enable caches */
sync
isync
blr
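For readers less at home in PPC assembly, the sequence above is equivalent to the following C-style sketch (mfspr()/mtspr()/sync()/isync() are illustrative stand-ins for the instructions, not real kernel functions):

	/* Sketch of setup_common_caches, for illustration only */
	unsigned long hid0 = mfspr(HID0);
	int dcache_was_on = hid0 & HID0_DCE;
#ifdef CONFIG_DCACHE_DISABLE
	hid0 |= HID0_ICE;			/* enable the I-cache only */
#else
	hid0 |= HID0_ICE | HID0_DCE;		/* enable both caches */
#endif
	unsigned long inval = hid0 | HID0_ICFI;	/* I-cache flash invalidate */
	if (!dcache_was_on)
		inval |= HID0_DCI;	/* only invalidate a D-cache that was off */
	sync();
	mtspr(HID0, inval);		/* enable and invalidate caches */
	sync();
	mtspr(HID0, hid0);		/* drop the invalidate bits, leave caches on */
	sync();
	isync();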
/* 604, 604e, 604ev, ...
* Enable superscalar execution & branch history table
*/
setup_604_hid0:
mfspr r11,HID0
ori r11,r11,HID0_SIED|HID0_BHTE
ori r8,r11,HID0_BTCD
sync
mtspr HID0,r8 /* flush branch target address cache */
sync /* on 604e/604r */
mtspr HID0,r11
sync
isync
blr
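In the same C-style sketch, HID0_BTCD is pulsed high and then cleared again, which flushes the branch target address cache on 604e/604r (mfspr()/mtspr() are stand-ins as before):

	unsigned long hid0 = mfspr(HID0) | HID0_SIED | HID0_BHTE;
	sync();
	mtspr(HID0, hid0 | HID0_BTCD);	/* BTCD set: BTAC flushed */
	sync();
	mtspr(HID0, hid0);		/* BTCD cleared again */
	sync();
	isync();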
/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
* errata we work around here.
* Moto MPC710CE.pdf describes them, those are errata
* #3, #4 and #5
* Note that we assume the firmware didn't choose to
* apply other workarounds (there are other ones documented
* in the .pdf). It appears that Apple firmware only works
* around #3, with the same fix we use. We may want to
* check if the CPU is using 60x bus mode in which case
* the workaround for errata #4 is useless. Also, we may
* want to explicitly clear HID0_NOPDST as this is not
* needed once we have applied workaround #5 (though it's
* not set by Apple's firmware at least).
*/
setup_7400_workarounds:
mfpvr r3
rlwinm r3,r3,0,20,31
cmpwi 0,r3,0x0207
ble 1f
blr
setup_7410_workarounds:
mfpvr r3
rlwinm r3,r3,0,20,31
cmpwi 0,r3,0x0100
bnelr
1:
mfspr r11,SPRN_MSSSR0
/* Errata #3: Set L1OPQ_SIZE to 0x10 */
rlwinm r11,r11,0,9,6
oris r11,r11,0x0100
/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
oris r11,r11,0x0002
/* Errata #5: Set DRLT_SIZE to 0x01 */
rlwinm r11,r11,0,5,2
oris r11,r11,0x0800
sync
mtspr SPRN_MSSSR0,r11
sync
isync
blr
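The revision gating and the MSSSR0 field updates condense to the C sketch below (fixup_msssr0 is a hypothetical helper; mfpvr()/mfspr()/mtspr() stand in for the instructions):

	static void fixup_msssr0(int is_7410)	/* hypothetical helper */
	{
		unsigned int rev = mfpvr() & 0xfff;	/* low 12 bits = revision */
		if (is_7410 ? rev != 0x0100 : rev > 0x0207)
			return;				/* unaffected revision */
		unsigned long v = mfspr(SPRN_MSSSR0);
		v = (v & ~0x01800000) | 0x01000000;	/* #3: L1OPQ_SIZE = 0x10 */
		v |= 0x00020000;			/* #4: L2MQ_SIZE = 1 */
		v = (v & ~0x18000000) | 0x08000000;	/* #5: DRLT_SIZE = 0x01 */
		sync();
		mtspr(SPRN_MSSSR0, v);
		sync();
		isync();
	}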
/* 740/750/7400/7410
* Enable Store Gathering (SGE), Address Broadcast (ABE),
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
* Clear Instruction cache throttling (ICTC)
*/
setup_750_7400_hid0:
mfspr r11,HID0
ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
li r3,HID0_SPD
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
mtspr ICTC,r3 /* Instruction Cache Throttling off */
isync
mtspr HID0,r11
sync
isync
blr
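The HID0 manipulation reads naturally as C (sketch with stand-in mfspr()/mtspr(); the BEGIN_FTR_SECTION block is runtime-patched away on CPUs with CPU_FTR_NO_DPM):

	unsigned long hid0 = mfspr(HID0);
	hid0 |= HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC;
	if (!(cpu_features & CPU_FTR_NO_DPM))	/* feature-section equivalent */
		hid0 |= HID0_DPM;	/* enable dynamic power management */
	hid0 &= ~HID0_SPD;		/* allow speculative accesses */
	mtspr(ICTC, 0);			/* I-cache throttling off */
	isync();
	mtspr(HID0, hid0);
	sync();
	isync();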
/* 750cx specific
* Looks like we have to disable the NAP feature for some PLL settings...
* (waiting for confirmation)
*/
setup_750cx:
mfspr r10, SPRN_HID1
rlwinm r10,r10,4,28,31
cmpi cr0,r10,7
cmpi cr1,r10,9
cmpi cr2,r10,11
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
bnelr
lwz r6,CPU_SPEC_FEATURES(r5)
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
blr
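The HID1 trick above reads the PLL configuration from the top nibble and compares it against three values at once via three condition-register fields; a C sketch of the same logic (spec stands for the cputable entry passed in r5):

	unsigned int pll_cfg = mfspr(SPRN_HID1) >> 28;	/* top 4 bits of HID1 */
	if (pll_cfg == 7 || pll_cfg == 9 || pll_cfg == 11)
		spec->cpu_features &= ~CPU_FTR_CAN_NAP;	/* NAP unsafe here */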
/* 750fx specific
*/
setup_750fx:
blr
/* MPC 745x
* Enable Store Gathering (SGE), Branch Folding (FOLD)
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
* Ensure our data cache instructions really operate.
* Timebase has to be running or we wouldn't have made it here,
* just ensure we don't disable it.
* Clear Instruction cache throttling (ICTC)
* Enable L2 HW prefetch
*/
setup_745x_specifics:
/* We check for the presence of an L3 cache set up by
* the firmware. If one is present, we disable the NAP
* capability, as it's known to be bogus on rev 2.1 and earlier
*/
mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h
beq 1f
lwz r6,CPU_SPEC_FEATURES(r5)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
1:
mfspr r11,HID0
/* All of the bits we have to set.....
*/
ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
/* All of the bits we have to clear....
*/
li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
mtspr ICTC,r3 /* Instruction Cache Throttling off */
isync
mtspr HID0,r11
sync
isync
/* Enable L2 HW prefetch
*/
mfspr r3,SPRN_MSSCR0
ori r3,r3,3
sync
mtspr SPRN_MSSCR0,r3
sync
isync
blr
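The L3/NAP interaction condenses to a few lines of C (sketch; spec again stands for the cputable entry passed in r5):

	/* Drop NAP capability if the firmware enabled an L3 on a
	 * revision where NAP is known to be broken */
	if ((mfspr(SPRN_L3CR) & L3CR_L3E) &&
	    (spec->cpu_features & CPU_FTR_L3_DISABLE_NAP))
		spec->cpu_features &= ~CPU_FTR_CAN_NAP;

	/* ... HID0 set/clear much as in the 750/7400 sketch, plus: */
	mtspr(SPRN_MSSCR0, mfspr(SPRN_MSSCR0) | 3);	/* L2 HW prefetch on */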
/* Definitions for the table used to save CPU states */
#define CS_HID0 0
#define CS_HID1 4
#define CS_MSSCR0 8
#define CS_MSSSR0 12
#define CS_ICTRL 16
#define CS_LDSTCR 20
#define CS_LDSTDB 24
#define CS_SIZE 28
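For illustration, the CS_* byte offsets describe the same layout as this hypothetical C struct (the assembly addresses the fields by raw byte offset instead):

	struct cpu_state_save {		/* hypothetical C view of the save area */
		unsigned long hid0;	/* CS_HID0   = 0  */
		unsigned long hid1;	/* CS_HID1   = 4  */
		unsigned long msscr0;	/* CS_MSSCR0 = 8  */
		unsigned long msssr0;	/* CS_MSSSR0 = 12 */
		unsigned long ictrl;	/* CS_ICTRL  = 16 */
		unsigned long ldstcr;	/* CS_LDSTCR = 20 */
		unsigned long ldstdb;	/* CS_LDSTDB = 24 */
	};				/* sizeof == CS_SIZE == 28 on 32-bit */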
.data
.balign 4
cpu_state_storage:
.space CS_SIZE
.text
/* Called in normal context to back up CPU 0 state. It is
* also called for machine sleep. This does not include
* cache settings or the MMU setup, BATs, etc... but rather
* the "special" registers like HID0, HID1, MSSCR0, etc...
*/
_GLOBAL(__save_cpu_setup)
/* Get storage ptr */
lis r5,cpu_state_storage@h
ori r5,r5,cpu_state_storage@l
/* Save HID0 (common to all CONFIG_6xx cpus) */
mfspr r3,SPRN_HID0
stw r3,CS_HID0(r5)
/* Now deal with CPU type dependent registers */
mfspr r3,PVR
srwi r3,r3,16
cmpli cr0,r3,0x8000 /* 7450 */
cmpli cr1,r3,0x000c /* 7400 */
cmpli cr2,r3,0x800c /* 7410 */
cmpli cr3,r3,0x8001 /* 7455 */
cmpli cr4,r3,0x8002 /* 7457 */
cmpli cr5,r3,0x7000 /* 750FX */
/* cr1 is 7400 || 7410 */
cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
/* cr0 is 74xx */
cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
bne 1f
/* Backup 74xx specific regs */
mfspr r4,SPRN_MSSCR0
stw r4,CS_MSSCR0(r5)
mfspr r4,SPRN_MSSSR0
stw r4,CS_MSSSR0(r5)
beq cr1,1f
/* Backup 745x specific registers */
mfspr r4,SPRN_HID1
stw r4,CS_HID1(r5)
mfspr r4,SPRN_ICTRL
stw r4,CS_ICTRL(r5)
mfspr r4,SPRN_LDSTCR
stw r4,CS_LDSTCR(r5)
mfspr r4,SPRN_LDSTDB
stw r4,CS_LDSTDB(r5)
1:
bne cr5,1f
/* Backup 750FX specific registers */
mfspr r4,SPRN_HID1
stw r4,CS_HID1(r5)
1:
blr
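The cror chains above just build two booleans out of six PVR compares; as a C sketch (save points at cpu_state_storage, viewed through the hypothetical struct shown earlier):

	unsigned int pvr = mfspr(PVR) >> 16;	/* upper half selects the family */
	int is_745x = (pvr == 0x8000 || pvr == 0x8001 || pvr == 0x8002);
	int is_74xx = is_745x || pvr == 0x000c || pvr == 0x800c;

	save->hid0 = mfspr(SPRN_HID0);		/* common to all 6xx */
	if (is_74xx) {
		save->msscr0 = mfspr(SPRN_MSSCR0);
		save->msssr0 = mfspr(SPRN_MSSSR0);
		if (is_745x) {			/* 7450/7455/7457 extras */
			save->hid1   = mfspr(SPRN_HID1);
			save->ictrl  = mfspr(SPRN_ICTRL);
			save->ldstcr = mfspr(SPRN_LDSTCR);
			save->ldstdb = mfspr(SPRN_LDSTDB);
		}
	}
	if (pvr == 0x7000)		/* 750FX: PLL config lives in HID1 */
		save->hid1 = mfspr(SPRN_HID1);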
/* Called with no MMU context (typically MSR:IR/DR off) to
* restore CPU state as backed up by the previous
* function. This does not include cache settings.
*/
_GLOBAL(__restore_cpu_setup)
/* Get storage ptr */
lis r5,(cpu_state_storage-KERNELBASE)@h
ori r5,r5,cpu_state_storage@l
/* Restore HID0 */
lwz r3,CS_HID0(r5)
sync
isync
mtspr SPRN_HID0,r3
sync
isync
/* Now deal with CPU type dependent registers */
mfspr r3,PVR
srwi r3,r3,16
cmpli cr0,r3,0x8000 /* 7450 */
cmpli cr1,r3,0x000c /* 7400 */
cmpli cr2,r3,0x800c /* 7410 */
cmpli cr3,r3,0x8001 /* 7455 */
cmpli cr4,r3,0x8002 /* 7457 */
cmpli cr5,r3,0x7000 /* 750FX */
/* cr1 is 7400 || 7410 */
cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
/* cr0 is 74xx */
cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
bne 2f
/* Restore 74xx specific regs */
lwz r4,CS_MSSCR0(r5)
sync
mtspr SPRN_MSSCR0,r4
sync
isync
lwz r4,CS_MSSSR0(r5)
sync
mtspr SPRN_MSSSR0,r4
sync
isync
bne cr2,1f
/* Clear 7410 L2CR2 */
li r4,0
mtspr SPRN_L2CR2,r4
1: beq cr1,2f
/* Restore 745x specific registers */
lwz r4,CS_HID1(r5)
sync
mtspr SPRN_HID1,r4
isync
sync
lwz r4,CS_ICTRL(r5)
sync
mtspr SPRN_ICTRL,r4
isync
sync
lwz r4,CS_LDSTCR(r5)
sync
mtspr SPRN_LDSTCR,r4
isync
sync
lwz r4,CS_LDSTDB(r5)
sync
mtspr SPRN_LDSTDB,r4
isync
sync
2: bne cr5,1f
/* Restore 750FX specific registers
* that is, restore the PLL config and switch
* to PLL 0
*/
lwz r4,CS_HID1(r5)
rlwinm r5,r4,0,16,14
mtspr SPRN_HID1,r5
/* Wait for PLL to stabilize */
mftbl r5
3: mftbl r6
sub r6,r6,r5
cmpli cr0,r6,10000
ble 3b
/* Setup final PLL */
mtspr SPRN_HID1,r4
1:
blr
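The 750FX tail is the subtle part: the PLL-select bit of HID1 (bit 15, mask 0x00010000, cleared by the rlwinm ...,0,16,14) is dropped so the part runs on PLL 0, the code spins on the timebase for 10000 ticks to let the target PLL stabilize, and only then is the saved HID1 written back. As a C sketch:

	unsigned long hid1 = save->hid1;
	mtspr(SPRN_HID1, hid1 & ~0x00010000UL);	/* force PLL 0 first */
	unsigned long start = mftbl();
	while (mftbl() - start <= 10000)
		;			/* wait for the PLL to stabilize */
	mtspr(SPRN_HID1, hid1);		/* switch to the saved PLL config */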
@@ -890,7 +890,7 @@ load_up_altivec:
*/
mfmsr r5
oris r5,r5,MSR_VEC@h
mtmsr r5 /* enable use of AltiVec now */
MTMSRD(r5) /* enable use of AltiVec now */
isync
/*
* For SMP, we don't do lazy AltiVec switching because it just gets too
@@ -962,7 +962,7 @@ giveup_altivec:
mfmsr r5
oris r5,r5,MSR_VEC@h
SYNC
mtmsr r5 /* enable use of AltiVec now */
MTMSRD(r5) /* enable use of AltiVec now */
isync
cmpi 0,r3,0
beqlr- /* if no previous owner, done */
@@ -999,7 +999,7 @@ giveup_fpu:
ori r5,r5,MSR_FP
SYNC_601
ISYNC_601
mtmsr r5 /* enable use of fpu now */
MTMSRD(r5) /* enable use of fpu now */
SYNC_601
isync
cmpi 0,r3,0
@@ -1191,6 +1191,8 @@ __secondary_start:
MTMSRD(r0)
isync
#endif
/* Copy some CPU settings from CPU 0 */
bl __restore_cpu_setup
lis r3,-KERNELBASE@h
mr r4,r24
@@ -1236,248 +1238,21 @@ __secondary_start:
#endif /* CONFIG_SMP */
/*
* Enable caches and 604-specific features if necessary.
* Those generic dummy functions are kept for CPUs not
* included in CONFIG_6xx
*/
_GLOBAL(__setup_cpu_601)
blr
_GLOBAL(__setup_cpu_603)
b setup_common_caches
_GLOBAL(__setup_cpu_604)
mflr r4
bl setup_common_caches
bl setup_604_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_750)
mflr r4
bl setup_common_caches
bl setup_750_7400_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_750cx)
mflr r4
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750cx
mtlr r4
blr
_GLOBAL(__setup_cpu_750fx)
mflr r4
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750fx
mtlr r4
blr
_GLOBAL(__setup_cpu_7400)
mflr r4
bl setup_7400_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_7410)
mflr r4
bl setup_7410_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
li r3,0
mtspr SPRN_L2CR2,r3
mtlr r4
blr
_GLOBAL(__setup_cpu_7450)
mflr r4
bl setup_common_caches
bl setup_745x_specifics
mtlr r4
blr
_GLOBAL(__setup_cpu_7455)
mflr r4
bl setup_common_caches
bl setup_745x_specifics
mtlr r4
blr
_GLOBAL(__setup_cpu_power3)
blr
_GLOBAL(__setup_cpu_generic)
blr
/* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches:
mfspr r11,HID0
andi. r0,r11,HID0_DCE
#ifdef CONFIG_DCACHE_DISABLE
ori r11,r11,HID0_ICE
#else
ori r11,r11,HID0_ICE|HID0_DCE
#endif
ori r8,r11,HID0_ICFI
bne 1f /* don't invalidate the D-cache */
ori r8,r8,HID0_DCI /* unless it wasn't enabled */
1: sync
mtspr HID0,r8 /* enable and invalidate caches */
sync
mtspr HID0,r11 /* enable caches */
sync
isync
#ifndef CONFIG_6xx
_GLOBAL(__save_cpu_setup)
blr
/* 604, 604e, 604ev, ...
* Enable superscalar execution & branch history table
*/
setup_604_hid0:
mfspr r11,HID0
ori r11,r11,HID0_SIED|HID0_BHTE
ori r8,r11,HID0_BTCD
sync
mtspr HID0,r8 /* flush branch target address cache */
sync /* on 604e/604r */
mtspr HID0,r11
sync
isync
_GLOBAL(__restore_cpu_setup)
blr
#endif /* CONFIG_6xx */
/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
* errata we work around here.
* Moto MPC710CE.pdf describes them, those are errata
* #3, #4 and #5
* Note that we assume the firmware didn't choose to
* apply other workarounds (there are other ones documented
* in the .pdf). It appears that Apple firmware only works
* around #3, with the same fix we use. We may want to
* check if the CPU is using 60x bus mode in which case
* the workaround for errata #4 is useless. Also, we may
* want to explicitly clear HID0_NOPDST as this is not
* needed once we have applied workaround #5 (though it's
* not set by Apple's firmware at least).
*/
setup_7400_workarounds:
mfpvr r3
rlwinm r3,r3,0,20,31
cmpwi 0,r3,0x0207
ble 1f
blr
setup_7410_workarounds:
mfpvr r3
rlwinm r3,r3,0,20,31
cmpwi 0,r3,0x0100
bnelr
1:
mfspr r11,SPRN_MSSSR0
/* Errata #3: Set L1OPQ_SIZE to 0x10 */
rlwinm r11,r11,0,9,6
oris r11,r11,0x0100
/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
oris r11,r11,0x0002
/* Errata #5: Set DRLT_SIZE to 0x01 */
rlwinm r11,r11,0,5,2
oris r11,r11,0x0800
sync
mtspr SPRN_MSSSR0,r11
sync
isync
blr
/* 740/750/7400/7410
* Enable Store Gathering (SGE), Address Broadcast (ABE),
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
* Clear Instruction cache throttling (ICTC)
*/
setup_750_7400_hid0:
mfspr r11,HID0
ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
li r3,HID0_SPD
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
mtspr ICTC,r3 /* Instruction Cache Throttling off */
isync
mtspr HID0,r11
sync
isync
blr
/* 750cx specific
* Looks like we have to disable the NAP feature for some PLL settings...
* (waiting for confirmation)
*/
setup_750cx:
mfspr r10, SPRN_HID1
rlwinm r10,r10,4,28,31
cmpi cr0,r10,7
cmpi cr1,r10,9
cmpi cr2,r10,11
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
bnelr
lwz r6,CPU_SPEC_FEATURES(r5)
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
blr
/* 750fx specific
*/
setup_750fx:
blr
/* MPC 745x
* Enable Store Gathering (SGE), Branch Folding (FOLD)
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
* Ensure our data cache instructions really operate.
* Timebase has to be running or we wouldn't have made it here,
* just ensure we don't disable it.
* Clear Instruction cache throttling (ICTC)
* Enable L2 HW prefetch
*/
setup_745x_specifics:
/* We check for the presence of an L3 cache set up by
* the firmware. If one is present, we disable the NAP
* capability, as it's known to be bogus on rev 2.1 and earlier
*/
mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h
beq 1f
lwz r6,CPU_SPEC_FEATURES(r5)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
1:
mfspr r11,HID0
/* All of the bits we have to set.....
*/
ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
/* All of the bits we have to clear....
*/
li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
mtspr ICTC,r3 /* Instruction Cache Throttling off */
isync
mtspr HID0,r11
sync
isync
/* Enable L2 HW prefetch
*/
mfspr r3,SPRN_MSSCR0
ori r3,r3,3
sync
mtspr SPRN_MSSCR0,r3
sync
isync
blr
/*
* Load stuff into the MMU. Intended to be called with
......
@@ -68,6 +68,9 @@ void smp_call_function_interrupt(void);
static int __smp_call_function(void (*func) (void *info), void *info,
int wait, int target);
/* Low level assembly function used to back up CPU 0 state */
extern void __save_cpu_setup(void);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
*
* Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
@@ -349,6 +352,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
num_cpus = smp_ops->probe();
cpu_possible_map = (1 << num_cpus)-1;
/* Backup CPU 0 state */
__save_cpu_setup();
if (smp_ops->space_timers)
smp_ops->space_timers(num_cpus);
}
......
@@ -19,12 +19,11 @@ endif
obj-$(CONFIG_ALL_PPC) += pmac_pic.o pmac_setup.o pmac_time.o \
pmac_feature.o pmac_pci.o chrp_setup.o\
chrp_time.o chrp_pci.o prep_pci.o \
prep_time.o prep_setup.o
prep_time.o prep_setup.o pmac_sleep.o
ifeq ($(CONFIG_ALL_PPC),y)
obj-$(CONFIG_NVRAM) += pmac_nvram.o
endif
obj-$(CONFIG_PMAC_BACKLIGHT) += pmac_backlight.o
obj-$(CONFIG_PMAC_PBOOK) += sleep.o
obj-$(CONFIG_PPC_RTAS) += error_log.o proc_rtas.o
obj-$(CONFIG_PREP_RESIDUAL) += residual.o
obj-$(CONFIG_ADIR) += adir_setup.o adir_pic.o adir_pci.o
......
@@ -10,10 +10,13 @@
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/offsets.h>
#define MAGIC 0x4c617273 /* 'Lars' */
@@ -34,21 +37,16 @@
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
#define SL_TB 0x60
#define SL_HID0 0x68
#define SL_HID1 0x6c
#define SL_MSSCR0 0x70
#define SL_MSSSR0 0x74
#define SL_ICTRL 0x78
#define SL_LDSTCR 0x7c
#define SL_LDSTDB 0x80
#define SL_R2 0x84
#define SL_CR 0x88
#define SL_R12 0x8c /* r12 to r31 */
#define SL_R2 0x68
#define SL_CR 0x6c
#define SL_R12 0x70 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)
.text
.section .text
.align 5
#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ_PMAC)
/* This gets called by via-pmu.c late during the sleep process.
* The PMU has already been sent the sleep command and will shut us down
* soon. We need to save all that is needed and set up the wakeup
@@ -122,34 +120,9 @@ _GLOBAL(low_sleep_handler)
mfibatl r4,3
stw r4,SL_IBAT3+4(r1)
/* Save HID0 */
mfspr r4,HID0
stw r4,SL_HID0(r1)
/* Save 7400/7410/7450 specific registers */
mfspr r3,PVR
srwi r3,r3,16
cmpli cr0,r3,0x8000
cmpli cr1,r3,0x000c
cmpli cr2,r3,0x800c
cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
bne 1f
mfspr r4,SPRN_MSSCR0
stw r4,SL_MSSCR0(r1)
mfspr r4,SPRN_MSSSR0
stw r4,SL_MSSSR0(r1)
/* Save 7450 specific registers */
beq cr1,1f
mfspr r4,HID1
stw r4,SL_HID1(r1)
mfspr r4,SPRN_ICTRL
stw r4,SL_ICTRL(r1)
mfspr r4,SPRN_LDSTCR
stw r4,SL_LDSTCR(r1)
mfspr r4,SPRN_LDSTDB
stw r4,SL_LDSTDB(r1)
1:
/* Backup various CPU config stuffs */
bl __save_cpu_setup
/* The ROM can wake us up via 2 different vectors:
* - On wallstreet & lombard, we must write a magic
* value 'Lars' at address 4 and a pointer to a
@@ -184,6 +157,10 @@ _GLOBAL(low_sleep_handler)
addi r3,r3,sleep_storage@l
stw r5,0(r3)
BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/*
* Flush the L1 data cache by reading the first 128kB of RAM
@@ -298,60 +275,11 @@ grackle_wake_up:
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
/* Restore the remaining bits of the HID0 register. */
subi r1,r1,SL_PC
lwz r3,SL_HID0(r1)
sync
isync
mtspr HID0,r3
sync
isync
/* Restore 7400/7410/7450 specific registers */
mfspr r3,PVR
srwi r3,r3,16
cmpli cr0,r3,0x8000
cmpli cr1,r3,0x000c
cmpli cr2,r3,0x800c
cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
bne 1f
lwz r4,SL_MSSCR0(r1)
sync
mtspr SPRN_MSSCR0,r4
sync
isync
lwz r4,SL_MSSSR0(r1)
sync
mtspr SPRN_MSSSR0,r4
sync
isync
bne cr2,1f
li r4,0
mtspr SPRN_L2CR2,r4
/* Restore 7450 specific registers */
beq cr1,1f
lwz r4,SL_HID1(r1)
sync
mtspr HID1,r4
isync
sync
lwz r4,SPRN_ICTRL(r1)
sync
mtspr SPRN_ICTRL,r4
isync
sync
lwz r4,SPRN_LDSTCR(r1)
sync
mtspr SPRN_LDSTCR,r4
isync
sync
lwz r4,SL_LDSTDB(r1)
sync
mtspr SPRN_LDSTDB,r4
isync
sync
1:
/* Restore various CPU config stuffs */
bl __restore_cpu_setup
/* Restore the BATs, and SDR1. Then we can turn on the MMU. */
lwz r4,SL_SDR1(r1)
mtsdr1 r4
@@ -438,6 +366,9 @@ turn_on_mmu:
isync
rfi
#endif /* defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ) */
.data
.globl sleep_storage
sleep_storage:
......
@@ -106,14 +106,16 @@ volatile static long int core99_l2_cache;
volatile static long int core99_l3_cache;
static void __init
core99_init_caches(void)
core99_init_caches(int cpu)
{
int cpu = smp_processor_id();
/* Check cache presence on cpu 0; we assume all CPUs have
* the same features here. We also assume that if we don't
* have L2CR, we don't have L3CR either
*/
if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR))
return;
if (cpu == 0){
if (cpu == 0) {
core99_l2_cache = _get_L2CR();
printk("CPU0: L2CR is %lx\n", core99_l2_cache);
} else {
@@ -137,106 +139,6 @@ core99_init_caches(void)
}
}
/* Some CPU registers have to be saved from the first CPU and
* applied to the others. Note that we intentionally override
* what is set up by the cputable.
*/
#define reg_hid0 0
#define reg_hid1 1
#define reg_msscr0 2
#define reg_msssr0 3
#define reg_ictrl 4
#define reg_ldstcr 5
#define reg_ldstdb 6
#define reg_count 7
static unsigned long cpu_regs[reg_count];
static void __pmac
cpu_setup_grab(void)
{
unsigned int pvers = mfspr(SPRN_PVR)>>16;
/* Read cache setting of CPU 0 */
core99_init_caches();
/* 7400/7410/7450 */
if (pvers == 0x8000 || pvers == 0x000c || pvers == 0x800c) {
cpu_regs[reg_hid0] = mfspr(SPRN_HID0);
cpu_regs[reg_msscr0] = mfspr(SPRN_MSSCR0);
cpu_regs[reg_msssr0] = mfspr(SPRN_MSSSR0);
}
/* 7450 only */
if (pvers == 0x8000) {
cpu_regs[reg_hid1] = mfspr(SPRN_HID1);
cpu_regs[reg_ictrl] = mfspr(SPRN_ICTRL);
cpu_regs[reg_ldstcr] = mfspr(SPRN_LDSTCR);
cpu_regs[reg_ldstdb] = mfspr(SPRN_LDSTDB);
}
flush_dcache_range((unsigned long)cpu_regs, (unsigned long)&cpu_regs[reg_count]);
}
static void __pmac
cpu_setup_apply(int cpu_nr)
{
unsigned int pvers = mfspr(SPRN_PVR)>>16;
/* Apply cache setting from CPU 0 */
core99_init_caches();
/* 7400/7410/7450 */
if (pvers == 0x8000 || pvers == 0x000c || pvers == 0x800c) {
unsigned long tmp;
__asm__ __volatile__ (
"lwz %0,4*"stringify(reg_hid0)"(%1)\n"
"sync\n"
"mtspr "stringify(SPRN_HID0)", %0\n"
"isync;sync\n"
"lwz %0, 4*"stringify(reg_msscr0)"(%1)\n"
"sync\n"
"mtspr "stringify(SPRN_MSSCR0)", %0\n"
"isync;sync\n"
// "lwz %0, "stringify(reg_msssr0)"(%1)\n"
// "sync\n"
// "mtspr "stringify(SPRN_MSSSR0)", %0\n"
// "isync;sync\n"
: "=&r" (tmp) : "r" (cpu_regs));
}
/* 7410 only */
if (pvers == 0x800c) {
unsigned long tmp;
__asm__ __volatile__ (
"li %0, 0\n"
"sync\n"
"mtspr "stringify(SPRN_L2CR2)", %0\n"
"isync;sync\n"
: "=&r" (tmp));
}
/* 7450 only */
if (pvers == 0x8000) {
unsigned long tmp;
__asm__ __volatile__ (
"lwz %0, 4*"stringify(reg_hid1)"(%1)\n"
"sync\n"
"mtspr "stringify(SPRN_HID1)", %0\n"
"isync;sync\n"
"lwz %0, 4*"stringify(reg_ictrl)"(%1)\n"
"sync\n"
"mtspr "stringify(SPRN_ICTRL)", %0\n"
"isync;sync\n"
"lwz %0, 4*"stringify(reg_ldstcr)"(%1)\n"
"sync\n"
"mtspr "stringify(SPRN_LDSTCR)", %0\n"
"isync;sync\n"
"lwz %0, 4*"stringify(reg_ldstdb)"(%1)\n"
"sync\n"
"mtspr "stringify(SPRN_LDSTDB)", %0\n"
"isync;sync\n"
: "=&r" (tmp) : "r" (cpu_regs));
}
}
/*
* Set and clear IPIs for powersurge.
*/
@@ -501,7 +403,7 @@ smp_psurge_setup_cpu(int cpu_nr)
/* reset the entry point so if we get another intr we won't
* try to start up again */
out_be32(psurge_start, 0x100);
if (request_irq(30, psurge_primary_intr, 0, "primary IPI", 0))
if (request_irq(30, psurge_primary_intr, SA_INTERRUPT, "primary IPI", 0))
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
@@ -526,8 +428,10 @@ smp_core99_probe(void)
openpic_request_IPIs();
for (i = 1; i < ncpus; ++i)
smp_hw_index[i] = i;
#ifdef CONFIG_6xx
powersave_nap = 0;
cpu_setup_grab();
#endif
core99_init_caches(0);
}
return ncpus;
@@ -593,7 +497,7 @@ smp_core99_setup_cpu(int cpu_nr)
{
/* Setup some registers */
if (cpu_nr != 0)
cpu_setup_apply(cpu_nr);
core99_init_caches(cpu_nr);
/* Setup openpic */
do_openpic_setup_cpu();
@@ -605,20 +509,20 @@ smp_core99_setup_cpu(int cpu_nr)
/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops __pmacdata = {
smp_psurge_message_pass,
smp_psurge_probe,
smp_psurge_kick_cpu,
smp_psurge_setup_cpu,
.message_pass = smp_psurge_message_pass,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
.setup_cpu = smp_psurge_setup_cpu,
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
};
/* Core99 Macs (dual G4s) */
struct smp_ops_t core99_smp_ops __pmacdata = {
smp_openpic_message_pass,
smp_core99_probe,
smp_core99_kick_cpu,
smp_core99_setup_cpu,
.message_pass = smp_openpic_message_pass,
.probe = smp_core99_probe,
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
};
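The two smp_ops tables above also moved from positional to C99 designated initializers, which stay correct if struct fields are ever reordered and make each slot self-documenting. The idiom, reduced to a hypothetical self-contained example:

	struct ops {				/* hypothetical example struct */
		int  (*probe)(void);
		void (*setup_cpu)(int cpu_nr);
	};

	static int  my_probe(void)       { return 1; }
	static void my_setup(int cpu_nr) { (void)cpu_nr; }

	static struct ops my_ops = {
		.probe     = my_probe,		/* order no longer matters */
		.setup_cpu = my_setup,
	};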