ppc32: Update/cleanup low level POWER4 & G5 CPU support

parent 6791a22a
......@@ -22,6 +22,7 @@ obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
semaphore.o syscalls.o setup.o \
cputable.o ppc_htab.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
obj-$(CONFIG_POWER4) += cpu_setup_power4.o
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_PCI) += pci-dma.o
......
/*
* This file contains low level CPU setup functions.
* Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/offsets.h>
#include <asm/cache.h>
_GLOBAL(__power4_cpu_preinit)
/*
* On the PPC970, we have to turn off real-mode cache inhibit
* early, before we first turn the MMU off.
*/
mfspr r0,SPRN_PVR
srwi r0,r0,16
cmpwi r0,0x39
bnelr
li r0,0
sync
mtspr SPRN_HID4,r0
isync
sync
mtspr SPRN_HID5,r0
isync
mfspr r0,SPRN_HID1
li r11,0x1200 /* enable i-fetch cacheability */
sldi r11,r11,44 /* and prefetch */
or r0,r0,r11
mtspr SPRN_HID1,r0
mtspr SPRN_HID1,r0
isync
li r0,0
sync
mtspr SPRN_HIOR,r0 /* Clear interrupt prefix */
isync
blr
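For clarity, the guard at the top of __power4_cpu_preinit only lets PVR version 0x0039 (the PPC970) through; any other POWER4-class CPU simply returns. A rough, hypothetical C equivalent of that guard, assuming the usual mfspr()/PVR_VER() helpers, would be:
	/* sketch only: the PVR check performed above in assembly */
	if (PVR_VER(mfspr(SPRN_PVR)) != 0x0039)	/* not a PPC970 */
		return;
	/* ... 970-only HID4/HID5/HID1/HIOR setup follows ... */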
_GLOBAL(__setup_cpu_power4)
blr
_GLOBAL(__setup_cpu_ppc970)
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
rldimi r0,r11,52,8 /* set NAP and DPM */
mtspr SPRN_HID0,r0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
sync
isync
blr
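The rldimi above drops the low four bits of r11 into HID0 bits 8..11 (IBM numbering), i.e. the DOZE/NAP/SLEEP/DPM group on the 970, and the run of mfspr reads that follows is the 970's "read HID0 back several times" sequence after an HID0 update. As a hypothetical C rendering of the same bit manipulation (LSB-0 bit positions derived from the comments above):
	u64 hid0 = mfspr(SPRN_HID0);
	hid0 &= ~((1ull << 55) | (1ull << 53));	/* clear DOZE and SLEEP */
	hid0 |=   (1ull << 54) | (1ull << 52);	/* set NAP and DPM */
	mtspr(SPRN_HID0, hid0);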
/* Definitions for the table used to save CPU state */
#define CS_HID0 0
#define CS_HID1 8
#define CS_HID4 16
#define CS_HID5 24
#define CS_SIZE 32
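Viewed as a hypothetical C structure (not part of the patch), the save area these offsets describe is simply four 64-bit slots:
	struct cpu_state_save {
		u64 hid0;	/* CS_HID0, offset  0 */
		u64 hid1;	/* CS_HID1, offset  8 */
		u64 hid4;	/* CS_HID4, offset 16 */
		u64 hid5;	/* CS_HID5, offset 24 */
	};			/* CS_SIZE == 32 bytes */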
.data
.balign L1_CACHE_LINE_SIZE
cpu_state_storage:
.space CS_SIZE
.balign L1_CACHE_LINE_SIZE,0
.text
/* Called in normal context to back up CPU 0 state. This
* does not include cache settings. This function is also
* called for machine sleep. It does not cover the MMU
* setup, BATs, etc... but rather the "special" registers
* like HID0, HID1, HID4, etc...
*/
_GLOBAL(__save_cpu_setup)
/* Some CR fields are volatile, so back them all up */
mfcr r7
/* Get storage ptr */
lis r5,cpu_state_storage@h
ori r5,r5,cpu_state_storage@l
/* We only deal with 970 for now */
mfspr r0,SPRN_PVR
srwi r0,r0,16
cmpwi r0,0x39
bne 1f
/* Save HID0,1,4 and 5 */
mfspr r3,SPRN_HID0
std r3,CS_HID0(r5)
mfspr r3,SPRN_HID1
std r3,CS_HID1(r5)
mfspr r3,SPRN_HID4
std r3,CS_HID4(r5)
mfspr r3,SPRN_HID5
std r3,CS_HID5(r5)
1:
mtcr r7
blr
/* Called with no MMU context (typically MSR:IR/DR off) to
* restore CPU state as backed up by the previous
* function. This does not include cache settings.
*/
_GLOBAL(__restore_cpu_setup)
/* Some CR fields are volatile, so back them all up */
mfcr r7
/* Get storage ptr */
lis r5,(cpu_state_storage-KERNELBASE)@h
ori r5,r5,cpu_state_storage@l
/* We only deal with 970 for now */
mfspr r0,SPRN_PVR
srwi r0,r0,16
cmpwi r0,0x39
bne 1f
/* Clear interrupt prefix */
li r0,0
sync
mtspr SPRN_HIOR,r0
isync
/* Restore HID0 */
ld r3,CS_HID0(r5)
sync
isync
mtspr SPRN_HID0,r3
mfspr r3,SPRN_HID0
mfspr r3,SPRN_HID0
mfspr r3,SPRN_HID0
mfspr r3,SPRN_HID0
mfspr r3,SPRN_HID0
mfspr r3,SPRN_HID0
sync
isync
/* Restore HID1 */
ld r3,CS_HID1(r5)
sync
isync
mtspr SPRN_HID1,r3
mtspr SPRN_HID1,r3
sync
isync
/* Restore HID4 */
ld r3,CS_HID4(r5)
sync
isync
mtspr SPRN_HID4,r3
sync
isync
/* Restore HID5 */
ld r3,CS_HID5(r5)
sync
isync
mtspr SPRN_HID5,r3
sync
isync
1:
mtcr r7
blr
......@@ -141,17 +141,6 @@ __start:
mr r27,r7
li r24,0 /* cpu # */
#ifdef CONFIG_POWER4
/*
* On the PPC970, we have to turn off real-mode cache inhibit
* early, before we first turn the MMU off.
*/
mfspr r0,SPRN_PVR
srwi r0,r0,16
cmpwi r0,0x39
beql ppc970_setup_hid
#endif /* CONFIG_POWER4 */
/*
* early_init() does the early machine identification, performs the
* necessary low-level setup and clears the BSS
......@@ -159,6 +148,14 @@ __start:
*/
bl early_init
/*
* On POWER4, we first need to tweak some CPU configuration registers
* like real mode cache inhibit or exception base
*/
#ifdef CONFIG_POWER4
bl __power4_cpu_preinit
#endif /* CONFIG_POWER4 */
#ifdef CONFIG_APUS
/* On APUS the __va/__pa constants need to be set to the correct
* values before continuing.
......@@ -1216,7 +1213,7 @@ __secondary_start_psurge99:
__secondary_start:
#ifdef CONFIG_PPC64BRIDGE
mfmsr r0
clrldi r0,r0,1 /* make sure it's in 32-bit mode */
SYNC
MTMSRD(r0)
isync
......@@ -1278,26 +1275,15 @@ __secondary_start:
*/
_GLOBAL(__setup_cpu_power3)
blr
_GLOBAL(__setup_cpu_power4)
blr
_GLOBAL(__setup_cpu_ppc970)
blr
_GLOBAL(__setup_cpu_generic)
blr
#ifndef CONFIG_6xx
#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
_GLOBAL(__save_cpu_setup)
blr
_GLOBAL(__restore_cpu_setup)
#ifdef CONFIG_POWER4
/* turn off real-mode cache inhibit on the PPC970 */
mfspr r0,SPRN_PVR
srwi r0,r0,16
cmpwi r0,0x39
beq ppc970_setup_hid
#endif
blr
#endif /* CONFIG_6xx */
#endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */
/*
......@@ -1633,10 +1619,14 @@ initial_mm_power4:
lis r4,0x2000 /* set pseudo-segment reg 12 */
ori r5,r4,0x0ccc
mtsr 12,r5
#if 0
ori r5,r4,0x0888 /* set pseudo-segment reg 8 */
mtsr 8,r5 /* (for access to serial port) */
ori r5,r4,0x0999 /* set pseudo-segment reg 9 */
#endif
#ifdef CONFIG_BOOTX_TEXT
ori r5,r4,0x0999 /* set pseudo-segment reg 9 */
mtsr 9,r5 /* (for access to screen) */
#endif
mfmsr r0
clrldi r0,r0,1
sync
......@@ -1644,43 +1634,8 @@ initial_mm_power4:
isync
blr
/*
* On 970 (G5), we pre-set a few bits in HID0 & HID1
*/
ppc970_setup_hid:
li r0,0
sync
mtspr 0x3f4,r0
isync
sync
mtspr 0x3f6,r0
isync
mfspr r0,SPRN_HID0
li r11,1 /* clear DOZE, NAP and SLEEP */
rldimi r0,r11,52,8 /* set DPM */
mtspr SPRN_HID0,r0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
sync
isync
mfspr r0,SPRN_HID1
li r11,0x1200 /* enable i-fetch cacheability */
sldi r11,r11,44 /* and prefetch */
or r0,r0,r11
mtspr SPRN_HID1,r0
mtspr SPRN_HID1,r0
isync
li r0,0
sync
mtspr 0x137,0
isync
blr
#endif /* CONFIG_POWER4 */
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
* We first disable the MMU, and then jump to the ROM reset address.
......
......@@ -28,17 +28,11 @@
/*
* Init idle, called at early CPU setup time from head.S for each CPU
* Make sure no remnant of NAP mode remains in HID0, and save default
* values for some CPU-specific registers. Called with r24
* containing the CPU number and r3 the reloc offset
* So there is nothing to do here for now. Called with r24 containing
* the CPU number and r3 the reloc offset
*/
.globl init_idle_power4
init_idle_power4:
BEGIN_FTR_SECTION
mfspr r4,SPRN_HID0
rlwinm r4,r4,0,10,8 /* Clear NAP */
mtspr SPRN_HID0, r4
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
blr
/*
......@@ -48,10 +42,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
*/
.globl power4_idle
power4_idle:
/* Check if we can nap or doze, put HID0 mask in r3
*/
lis r3, 0
BEGIN_FTR_SECTION
blr
END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
/* We must dynamically check for the NAP feature as it
* can be cleared by CPU init after the fixups are done
*/
......@@ -59,16 +52,11 @@ BEGIN_FTR_SECTION
lwz r4,cur_cpu_spec@l(r4)
lwz r4,CPU_SPEC_FEATURES(r4)
andi. r0,r4,CPU_FTR_CAN_NAP
beq 1f
beqlr
/* Now check if user or arch enabled NAP mode */
lis r4,powersave_nap@ha
lwz r4,powersave_nap@l(r4)
cmpi 0,r4,0
beq 1f
lis r3,HID0_NAP@h
1:
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
cmpi 0,r3,0
beqlr
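In rough, hypothetical C (with spec standing for whatever the assembly loads through cur_cpu_spec), the gate that must be passed before the nap sequence is entered is:
	if (!(spec->cpu_features & CPU_FTR_CAN_NAP))	/* feature may be cleared at CPU init */
		return;
	if (!powersave_nap)				/* user/platform did not enable NAP */
		return;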
/* Clear MSR:EE */
......@@ -85,18 +73,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
blr
1:
/* Go to NAP now */
mfspr r4,SPRN_HID0
lis r5,(HID0_NAP|HID0_SLEEP)@h
andc r4,r4,r5
or r4,r4,r3
oris r4,r4,HID0_DPM@h /* that should be done once for all */
mtspr SPRN_HID0,r4
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
BEGIN_FTR_SECTION
DSSALL
sync
......
......@@ -201,7 +201,7 @@ _GLOBAL(call_setup_cpu)
mr r4,r24
bctr
#ifdef CONFIG_CPU_FREQ_PMAC
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
/* This gets called by via-pmu.c to switch the PLL selection
* on 750fx CPU. This function should really be moved to some
......@@ -253,7 +253,7 @@ _GLOBAL(low_choose_750fx_pll)
mtmsr r7
blr
#endif /* CONFIG_CPU_FREQ_PMAC */
#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
/* void local_save_flags_ptr(unsigned long *flags) */
_GLOBAL(local_save_flags_ptr)
......
......@@ -417,21 +417,6 @@ _GLOBAL(hash_page_patch_C)
lwz r6,next_slot@l(r4)
addi r6,r6,PTE_SIZE
andi. r6,r6,7*PTE_SIZE
#ifdef CONFIG_POWER4
/*
* Since we don't have BATs on POWER4, we rely on always having
* PTEs in the hash table to map the hash table and the code
* that manipulates it in virtual mode, namely flush_hash_page and
* flush_hash_segments. Otherwise we can get a DSI inside those
* routines which leads to a deadlock on the hash_table_lock on
* SMP machines. We avoid this by never overwriting the first
* PTE of each PTEG if it is already valid.
* -- paulus.
*/
bne 102f
li r6,PTE_SIZE
102:
#endif /* CONFIG_POWER4 */
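Rendered as hypothetical C, the removed special case simply skipped slot 0 when rotating through the eight PTE slots of a PTEG (PTE_SIZE being a power of two makes the andi. act as a modulo), so the always-valid first PTE was never overwritten:
	next_slot = (next_slot + PTE_SIZE) & (7 * PTE_SIZE);	/* rotate over the 8 slots */
	if (next_slot == 0)	/* POWER4 only: keep the first PTE of the PTEG intact */
		next_slot = PTE_SIZE;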
stw r6,next_slot@l(r4)
add r4,r3,r6
......
......@@ -83,6 +83,9 @@ unsigned long p_mapped_by_bats(unsigned long pa)
unsigned long __init mmu_mapin_ram(void)
{
#ifdef CONFIG_POWER4
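/* POWER4/970 have no BATs, so nothing can be block-mapped here */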
return 0;
#else
unsigned long tot, bl, done;
unsigned long max_size = (256<<20);
unsigned long align;
......@@ -119,6 +122,7 @@ unsigned long __init mmu_mapin_ram(void)
}
return done;
#endif
}
/*
......@@ -244,9 +248,10 @@ void __init MMU_init_hw(void)
Hash = mem_pieces_find(Hash_size, Hash_size);
cacheable_memzero(Hash, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
#endif /* CONFIG_POWER4 */
Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
total_memory >> 20, Hash_size >> 10, Hash);
......
......@@ -91,6 +91,7 @@
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
......@@ -179,7 +180,10 @@
#define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */
#define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */
#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
......