Commit 9bad068c authored by Kumar Gala, committed by Linus Torvalds

[PATCH] ppc32: support for e500 and 85xx

Here are both a GNU-style and a BK patch for adding support for the e500 core and
the 85xx platform to 2.6.  This is pretty much a direct port from 2.4 with a bit
of cleanup around the edges.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f866d89a
@@ -72,8 +72,21 @@ config POWER4
config 8xx
bool "8xx"
config E500
bool "e500"
endchoice
config BOOKE
bool
depends on E500
default y
config FSL_BOOKE
bool
depends on E500
default y
config PTE_64BIT
bool
depends on 44x
@@ -96,6 +109,21 @@ config ALTIVEC
If in doubt, say Y here.
config SPE
bool "SPE Support"
depends on E500
---help---
This option enables kernel support for the Signal Processing
Extensions (SPE) to the PowerPC processor. The kernel currently
supports saving and restoring SPE registers, and turning on the
'spe enable' bit so user processes can execute SPE instructions.
This option is only useful if you have a processor that supports
SPE (e500, otherwise known as the 85xx series), but it has no
effect on a non-SPE CPU (it does, however, add code to the kernel).
If in doubt, say Y here.
config TAU
bool "Thermal Management Support"
depends on 6xx && !8260
@@ -142,7 +170,7 @@ config TAU_AVERAGE
config MATH_EMULATION
bool "Math emulation"
depends on 4xx || 8xx
depends on 4xx || 8xx || E500
---help---
Some PowerPC chips designed for embedded applications do not have
a floating-point unit and therefore do not implement the
@@ -198,6 +226,7 @@ config PPC601_SYNC_FIX
If in doubt, say Y here.
source arch/ppc/platforms/4xx/Kconfig
source arch/ppc/platforms/85xx/Kconfig
config PPC64BRIDGE
bool
@@ -974,8 +1003,8 @@ config MCA
bool
config PCI
bool "PCI support" if 40x || 8260
default y if !40x && !8260 && !8xx && !APUS
bool "PCI support" if 40x || 8260 || 85xx
default y if !40x && !8260 && !8xx && !APUS && !85xx
default PCI_PERMEDIA if !4xx && !8260 && !8xx && APUS
default PCI_QSPAN if !4xx && !8260 && 8xx
help
@@ -1273,7 +1302,7 @@ config SERIAL_TEXT_DEBUG
config PPC_OCP
bool
depends on IBM_OCP
depends on IBM_OCP || FSL_OCP
default y
endmenu
@@ -17,19 +17,24 @@ LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic
CPPFLAGS += -Iarch/$(ARCH)
AFLAGS += -Iarch/$(ARCH)
cflags-y += -Iarch/$(ARCH) -msoft-float -pipe \
-ffixed-r2 -Wno-uninitialized -mmultiple -mstring
-ffixed-r2 -Wno-uninitialized -mmultiple
CPP = $(CC) -E $(CFLAGS)
ifndef CONFIG_E500
cflags-y += -mstring
endif
cflags-$(CONFIG_4xx) += -Wa,-m405
cflags-$(CONFIG_E500) += -Wa,-me500
cflags-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
CFLAGS += $(cflags-y)
head-y := arch/ppc/kernel/head.o
head-$(CONFIG_8xx) := arch/ppc/kernel/head_8xx.o
head-$(CONFIG_4xx) := arch/ppc/kernel/head_4xx.o
head-$(CONFIG_44x) := arch/ppc/kernel/head_44x.o
head-$(CONFIG_E500) := arch/ppc/kernel/head_e500.o
head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o
head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
@@ -37,6 +42,7 @@ head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
core-y += arch/ppc/kernel/ arch/ppc/platforms/ \
arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/
core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/
core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/
core-$(CONFIG_MATH_EMULATION) += arch/ppc/math-emu/
core-$(CONFIG_XMON) += arch/ppc/xmon/
core-$(CONFIG_APUS) += arch/ppc/amiga/
@@ -8,10 +8,14 @@ endif
ifdef CONFIG_4xx
EXTRA_AFLAGS := -Wa,-m405
endif
ifdef CONFIG_E500
EXTRA_AFLAGS := -Wa,-me500
endif
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_E500) := head_e500.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-$(CONFIG_6xx) += idle_6xx.o
extra-$(CONFIG_POWER4) += idle_power4.o
@@ -21,7 +21,7 @@ struct aligninfo {
unsigned char flags;
};
#if defined(CONFIG_4xx) || defined(CONFIG_POWER4)
#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE)
#define OPCD(inst) (((inst) & 0xFC000000) >> 26)
#define RS(inst) (((inst) & 0x03E00000) >> 21)
#define RA(inst) (((inst) & 0x001F0000) >> 16)
@@ -184,7 +184,7 @@ int
fix_alignment(struct pt_regs *regs)
{
int instr, nb, flags;
#if defined(CONFIG_4xx) || defined(CONFIG_POWER4)
#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE)
int opcode, f1, f2, f3;
#endif
int i, t;
@@ -199,8 +199,8 @@ fix_alignment(struct pt_regs *regs)
CHECK_FULL_REGS(regs);
#if defined(CONFIG_4xx) || defined(CONFIG_POWER4)
/* The 4xx-family processors have no DSISR register,
#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE)
/* The 4xx-family & Book-E processors have no DSISR register,
* so we emulate it.
* The POWER4 has a DSISR register but doesn't set it on
* an alignment fault. -- paulus
@@ -250,7 +250,7 @@ fix_alignment(struct pt_regs *regs)
flags = aligninfo[instr].flags;
/* For the 4xx-family processors, the 'dar' field of the
/* For the 4xx-family & Book-E processors, the 'dar' field of the
* pt_regs structure is overloaded and is really from the DEAR.
*/
@@ -44,7 +44,7 @@ main(void)
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#ifdef CONFIG_4xx
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
DEFINE(PT_PTRACED, PT_PTRACED);
#endif
@@ -54,6 +54,12 @@ main(void)
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
@@ -34,7 +34,8 @@ extern void __setup_cpu_8xx(unsigned long offset, int cpu_nr, struct cpu_spec* s
extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
!defined(CONFIG_POWER3) && !defined(CONFIG_POWER4))
!defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
!defined(CONFIG_BOOKE))
/* This table only contains "desktop" CPUs; it needs to be filled with embedded
* ones as well...
@@ -561,6 +562,16 @@ struct cpu_spec cpu_specs[] = {
0, /*__setup_cpu_440 */
},
#endif /* CONFIG_44x */
#ifdef CONFIG_E500
{ /* e500 */
0xffff0000, 0x80200000, "e500",
/* xxx - galak: add CPU_FTR_CAN_DOZE */
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
32, 32,
0, /*__setup_cpu_e500 */
},
#endif
#if !CLASSIC_PPC
{ /* default match */
0x00000000, 0x00000000, "(generic PPC)",
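
The new e500 entry above is selected the same way as the other cpu_specs entries: the table is scanned and the first entry whose masked PVR equals pvr_value wins. A minimal illustrative sketch in C (not the kernel's identify_cpu() code; the names here are stand-ins):

struct cpu_spec_sketch {
	unsigned int pvr_mask;		/* e.g. 0xffff0000 for the e500 entry */
	unsigned int pvr_value;		/* e.g. 0x80200000 */
	const char *cpu_name;		/* e.g. "e500" */
};

static const struct cpu_spec_sketch *
match_cpu_spec(const struct cpu_spec_sketch *specs, int n, unsigned int pvr)
{
	int i;

	/* First masked match wins; the real table ends with a catch-all
	 * "(generic PPC)" entry so the scan always terminates with a hit. */
	for (i = 0; i < n; i++)
		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
			return &specs[i];
	return 0;
}
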
@@ -36,7 +36,7 @@
#undef SHOW_SYSCALLS_TASK
/*
* MSR_KERNEL is > 0x10000 on 4xx since it include MSR_CE.
* MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
*/
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
@@ -45,7 +45,7 @@
#endif
#ifdef CONFIG_BOOKE
#define COR r8
#define COR r8 /* Critical Offset Register (COR) */
#define BOOKE_LOAD_COR lis COR,crit_save@ha
#define BOOKE_REST_COR mfspr COR,SPRG2
#define BOOKE_SAVE_COR mtspr SPRG2,COR
@@ -241,11 +241,11 @@ ret_from_syscall:
andi. r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne- syscall_exit_work
syscall_exit_cont:
#ifdef CONFIG_4xx
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* If the process has its own DBCR0 value, load it up */
lwz r0,PTRACE(r2)
andi. r0,r0,PT_PTRACED
bnel- load_4xx_dbcr0
bnel- load_dbcr0
#endif
stwcx. r0,0,r1 /* to clear the reservation */
lwz r4,_LINK(r1)
@@ -510,7 +510,12 @@ BEGIN_FTR_SECTION
stw r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
and. r0,r0,r11 /* FP or altivec enabled? */
#ifdef CONFIG_SPE
oris r0,r0,MSR_SPE@h /* Disable SPE */
mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
stw r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
and. r0,r0,r11 /* FP or altivec or SPE enabled? */
beq+ 1f
andc r11,r11,r0
MTMSRD(r11)
@@ -543,6 +548,10 @@ BEGIN_FTR_SECTION
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
lwz r0,THREAD+THREAD_SPEFSCR(r2)
mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
#endif /* CONFIG_SPE */
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
@@ -589,11 +598,11 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
bne do_work
restore_user:
#ifdef CONFIG_4xx
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* Check whether this process has its own DBCR0 value */
lwz r0,PTRACE(r2)
andi. r0,r0,PT_PTRACED
bnel- load_4xx_dbcr0
bnel- load_dbcr0
#endif
#ifdef CONFIG_PREEMPT
@@ -645,7 +654,7 @@ restore:
PPC405_ERR77(0,r1)
stwcx. r0,0,r1 /* to clear the reservation */
#ifndef CONFIG_4xx
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
lwz r9,_MSR(r1)
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */
@@ -681,9 +690,9 @@ exc_exit_restart_end:
SYNC
RFI
#else /* CONFIG_4xx */
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
/*
* This is a bit different on 4xx because 4xx doesn't have
* This is a bit different on 4xx/Book-E because it doesn't have
* the RI bit in the MSR.
* The TLB miss handler checks if we have interrupted
* the exception exit path and restarts it if so
@@ -720,6 +729,9 @@ exc_exit_restart_end:
* give the wrong answer).
* We have to restore various SPRs that may have been in use at the
* time of the critical interrupt.
*
* Note that SPRG6 is used for machine check on CONFIG_BOOKE parts and
* thus not saved in the critical handler
*/
.globl ret_from_crit_exc
ret_from_crit_exc:
@@ -866,7 +878,7 @@ ret_from_mcheck_exc:
* Load the DBCR0 value for a task that is being ptraced,
* having first saved away the global DBCR0.
*/
load_4xx_dbcr0:
load_dbcr0:
mfmsr r0 /* first disable debug exceptions */
rlwinm r0,r0,0,~MSR_DE
mtmsr r0
@@ -885,7 +897,7 @@ load_4xx_dbcr0:
blr
.comm global_dbcr0,8
#endif /* CONFIG_4xx */
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
andi. r0,r9,_TIF_NEED_RESCHED
/*
* arch/ppc/kernel/head_e500.S
*
* Kernel execution entry point code.
*
* Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
* Initial PowerPC version.
* Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
* Rewritten for PReP
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Low-level exception handers, MMU support, and rewrite.
* Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
* PowerPC 8xx modifications.
* Copyright (c) 1998-1999 TiVo, Inc.
* PowerPC 403GCX modifications.
* Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
* PowerPC 403GCX/405GP modifications.
* Copyright 2000 MontaVista Software Inc.
* PPC405 modifications
* PowerPC 403GCX/405GP modifications.
* Author: MontaVista Software, Inc.
* frank_rowand@mvista.com or source@mvista.com
* debbie_chu@mvista.com
* Copyright 2002-2004 MontaVista Software, Inc.
* PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
* Copyright 2004 Freescale Semiconductor, Inc
* PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
/*
* Macros
*/
#define SET_IVOR(vector_number, vector_label) \
li r26,vector_label@l; \
mtspr SPRN_IVOR##vector_number,r26; \
sync
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
* optional, information:
*
* r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
* r4 - Starting address of the init RAM disk
* r5 - Ending address of the init RAM disk
* r6 - Start of kernel command line string (e.g. "mem=128")
* r7 - End of kernel command line string
*
*/
.text
_GLOBAL(_stext)
_GLOBAL(_start)
/*
* Reserve a word at a fixed location to store the address
* of abatron_pteptrs
*/
nop
/*
* Save parameters we are passed
*/
mr r31,r3
mr r30,r4
mr r29,r5
mr r28,r6
mr r27,r7
li r24,0 /* CPU number */
/* We try to not make any assumptions about how the boot loader
* set up or used the TLBs. We invalidate all mappings from the
* boot loader and load a single entry in TLB1[0] to map the
* first 16M of kernel memory. Any boot info passed from the
* bootloader needs to live in this first 16M.
*
* Requirement on bootloader:
* - The page we're executing in needs to reside in TLB1 and
* have IPROT=1. If not, an invalidate broadcast could
* evict the entry we're currently executing in.
*
* r3 = Index of TLB1 we're executing in
* r4 = Current MSR[IS]
* r5 = Index of TLB1 temp mapping
*
* Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
* if needed
*/
/* 1. Find the index of the entry we're executing in */
bl invstr /* Find our address */
invstr: mflr r6 /* Make it accessible */
mfmsr r7
rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
mfspr r7, SPRN_PID0
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
mfspr r7,SPRN_MAS1
andis. r7,r7,MAS1_VALID@h
bne match_TLB
mfspr r7,SPRN_PID1
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
mfspr r7,SPRN_MAS1
andis. r7,r7,MAS1_VALID@h
bne match_TLB
mfspr r7, SPRN_PID2
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* Fall through, we had to match */
match_TLB:
mfspr r7,SPRN_MAS0
rlwinm r3,r7,16,28,31 /* Extract MAS0(Entry) */
mfspr r7,SPRN_MAS1 /* Insure IPROT set */
oris r7,r7,MAS1_IPROT@h
mtspr SPRN_MAS1,r7
tlbwe
/* 2. Invalidate all entries except the entry we're executing in */
mfspr r9,SPRN_TLB1CFG
andi. r9,r9,0xfff
li r6,0 /* Set Entry counter to 0 */
1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r6,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
mtspr SPRN_MAS0,r7
tlbre
mfspr r7,SPRN_MAS1
rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
cmpw r3,r6
beq skpinv /* Dont update the current execution TLB */
mtspr SPRN_MAS1,r7
tlbwe
isync
skpinv: addi r6,r6,1 /* Increment */
cmpw r6,r9 /* Are we done? */
bne 1b /* If not, repeat */
/* Invalidate TLB0 */
li r6,0x04
tlbivax 0,r6
#ifdef CONFIG_SMP
tlbsync
#endif
/* Invalidate TLB1 */
li r6,0x0c
tlbivax 0,r6
#ifdef CONFIG_SMP
tlbsync
#endif
msync
/* 3. Setup a temp mapping and jump to it */
andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
addi r5, r5, 0x1
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r3,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
mtspr SPRN_MAS0,r7
tlbre
/* Just modify the entry ID and EPN for the temp mapping */
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
xori r6,r4,1 /* Setup TMP mapping in the other Address space */
slwi r6,r6,12
oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
mtspr SPRN_MAS1,r6
mfspr r6,SPRN_MAS2
li r7,0 /* temp EPN = 0 */
rlwimi r7,r6,0,20,31
mtspr SPRN_MAS2,r7
tlbwe
xori r6,r4,1
slwi r6,r6,5 /* setup new context with other address space */
bl 1f /* Find our address */
1: mflr r9
rlwimi r7,r9,0,20,31
addi r7,r7,24
mtspr SRR0,r7
mtspr SRR1,r6
rfi
/* 4. Clear out PIDs & Search info */
li r6,0
mtspr SPRN_PID0,r6
mtspr SPRN_PID1,r6
mtspr SPRN_PID2,r6
mtspr SPRN_MAS6,r6
/* 5. Invalidate mapping we started in */
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r3,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
mtspr SPRN_MAS0,r7
tlbre
li r6,0
mtspr SPRN_MAS1,r6
tlbwe
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
#ifdef CONFIG_SMP
tlbsync
#endif
msync
/* 6. Setup KERNELBASE mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
mtspr SPRN_MAS0,r6
lis r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
mtspr SPRN_MAS1,r6
li r7,0
lis r6,KERNELBASE@h
ori r6,r6,KERNELBASE@l
rlwimi r6,r7,0,20,31
mtspr SPRN_MAS2,r6
li r7,(MAS3_SX|MAS3_SW|MAS3_SR)
mtspr SPRN_MAS3,r7
tlbwe
/* 7. Jump to KERNELBASE mapping */
li r7,0
bl 1f /* Find our address */
1: mflr r9
rlwimi r6,r9,0,20,31
addi r6,r6,24
mtspr SRR0,r6
mtspr SRR1,r7
rfi /* start execution out of TLB1[0] entry */
/* 8. Clear out the temp mapping */
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
tlbre
mtspr SPRN_MAS1,r8
tlbwe
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
#ifdef CONFIG_SMP
tlbsync
#endif
msync
/* Establish the interrupt vector offsets */
SET_IVOR(0, CriticalInput);
SET_IVOR(1, MachineCheck);
SET_IVOR(2, DataStorage);
SET_IVOR(3, InstructionStorage);
SET_IVOR(4, ExternalInput);
SET_IVOR(5, Alignment);
SET_IVOR(6, Program);
SET_IVOR(7, FloatingPointUnavailable);
SET_IVOR(8, SystemCall);
SET_IVOR(9, AuxillaryProcessorUnavailable);
SET_IVOR(10, Decrementer);
SET_IVOR(11, FixedIntervalTimer);
SET_IVOR(12, WatchdogTimer);
SET_IVOR(13, DataTLBError);
SET_IVOR(14, InstructionTLBError);
SET_IVOR(15, Debug);
SET_IVOR(32, SPEUnavailable);
SET_IVOR(33, SPEFloatingPointData);
SET_IVOR(34, SPEFloatingPointRound);
SET_IVOR(35, PerformanceMonitor);
/* Establish the interrupt vector base */
lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
mtspr SPRN_IVPR,r4
/* Setup the defaults for TLB entries */
li r2,MAS4_TSIZED(BOOKE_PAGESZ_4K)
mtspr SPRN_MAS4, r2
#if 0
/* Enable DOZE */
mfspr r2,SPRN_HID0
oris r2,r2,HID0_DOZE@h
mtspr SPRN_HID0, r2
#endif
/*
* This is where the main kernel code starts.
*/
/* ptr to current */
lis r2,init_task@h
ori r2,r2,init_task@l
/* ptr to current thread */
addi r4,r2,THREAD /* init task's THREAD */
mtspr SPRG3,r4
/* stack */
lis r1,init_thread_union@h
ori r1,r1,init_thread_union@l
li r0,0
stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
bl early_init
mfspr r3,SPRN_TLB1CFG
andi. r3,r3,0xfff
lis r4,num_tlbcam_entries@ha
stw r3,num_tlbcam_entries@l(r4)
/*
* Decide what sort of machine this is and initialize the MMU.
*/
mr r3,r31
mr r4,r30
mr r5,r29
mr r6,r28
mr r7,r27
bl machine_init
bl MMU_init
/* Setup PTE pointers for the Abatron bdiGDB */
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
lis r4, KERNELBASE@h
ori r4, r4, KERNELBASE@l
stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
stw r6, 0(r5)
/* Let's move on */
lis r4,start_kernel@h
ori r4,r4,start_kernel@l
lis r3,MSR_KERNEL@h
ori r3,r3,MSR_KERNEL@l
mtspr SRR0,r4
mtspr SRR1,r3
rfi /* change context and jump to start_kernel */
/*
* Interrupt vector entry code
*
* The Book E MMUs are always on so we don't need to handle
* interrupts in real mode as with previous PPC processors. In
* this case we handle interrupts in the kernel virtual address
* space.
*
* Interrupt vectors are dynamically placed relative to the
* interrupt prefix as determined by the address of interrupt_base.
* The interrupt vectors offsets are programmed using the labels
* for each interrupt vector entry.
*
* Interrupt vectors must be aligned on a 16 byte boundary.
* We align on a 32 byte cache line boundary for good measure.
*/
#define NORMAL_EXCEPTION_PROLOG \
mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
mtspr SPRN_SPRG1,r11; \
mtspr SPRN_SPRG4W,r1; \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
beq 1f; \
mfspr r1,SPRG3; /* if from user, start at top of */\
lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
addi r1,r1,THREAD_SIZE; \
1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
tophys(r11,r1); \
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
mfspr r10,SPRG0; \
stw r10,GPR10(r11); \
mfspr r12,SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r10,SPRG4R; \
mfspr r12,SRR0; \
stw r10,GPR1(r11); \
mfspr r9,SRR1; \
stw r10,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
/*
* Exception prolog for critical exceptions. This is a little different
* from the normal exception prolog above since a critical exception
* can potentially occur at any point during normal exception processing.
* Thus we cannot use the same SPRG registers as the normal prolog above.
* Instead we use a couple of words of memory at low physical addresses.
* This is OK since we don't support SMP on these processors. For Book E
* processors, we also have a reserved register (SPRG2) that is only used
* in critical exceptions so we can free up a GPR to use as the base for
* indirect access to the critical exception save area. This is necessary
* since the MMU is always on and the save area is offset from KERNELBASE.
*/
#define CRITICAL_EXCEPTION_PROLOG \
mtspr SPRG2,r8; /* SPRG2 only used in criticals */ \
lis r8,crit_save@ha; \
stw r10,crit_r10@l(r8); \
stw r11,crit_r11@l(r8); \
mfspr r10,SPRG0; \
stw r10,crit_sprg0@l(r8); \
mfspr r10,SPRG1; \
stw r10,crit_sprg1@l(r8); \
mfspr r10,SPRG4R; \
stw r10,crit_sprg4@l(r8); \
mfspr r10,SPRG5R; \
stw r10,crit_sprg5@l(r8); \
mfspr r10,SPRG7R; \
stw r10,crit_sprg7@l(r8); \
mfspr r10,SPRN_PID; \
stw r10,crit_pid@l(r8); \
mfspr r10,SRR0; \
stw r10,crit_srr0@l(r8); \
mfspr r10,SRR1; \
stw r10,crit_srr1@l(r8); \
mfspr r8,SPRG2; /* SPRG2 only used in criticals */ \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_CSRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
lis r11,critical_stack_top@h; \
ori r11,r11,critical_stack_top@l; \
beq 1f; \
/* COMING FROM USER MODE */ \
mfspr r11,SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
stw r12,_DEAR(r11); /* since they may have had stuff */\
mfspr r9,SPRN_ESR; /* in them at the point where the */\
stw r9,_ESR(r11); /* exception was taken */\
mfspr r12,CSRR0; \
stw r1,GPR1(r11); \
mfspr r9,CSRR1; \
stw r1,0(r11); \
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
/*
* Exception prolog for machine check exceptions. This is similar to
* the critical exception prolog, except that machine check exceptions
* have their own save area. For Book E processors, we also have a
* reserved register (SPRG6) that is only used in machine check exceptions
* so we can free up a GPR to use as the base for indirect access to the
* machine check exception save area. This is necessary since the MMU
* is always on and the save area is offset from KERNELBASE.
*/
#define MCHECK_EXCEPTION_PROLOG \
mtspr SPRG6W,r8; /* SPRG6 used in machine checks */ \
lis r8,mcheck_save@ha; \
stw r10,mcheck_r10@l(r8); \
stw r11,mcheck_r11@l(r8); \
mfspr r10,SPRG0; \
stw r10,mcheck_sprg0@l(r8); \
mfspr r10,SPRG1; \
stw r10,mcheck_sprg1@l(r8); \
mfspr r10,SPRG4R; \
stw r10,mcheck_sprg4@l(r8); \
mfspr r10,SPRG5R; \
stw r10,mcheck_sprg5@l(r8); \
mfspr r10,SPRG7R; \
stw r10,mcheck_sprg7@l(r8); \
mfspr r10,SPRN_PID; \
stw r10,mcheck_pid@l(r8); \
mfspr r10,SRR0; \
stw r10,mcheck_srr0@l(r8); \
mfspr r10,SRR1; \
stw r10,mcheck_srr1@l(r8); \
mfspr r10,CSRR0; \
stw r10,mcheck_csrr0@l(r8); \
mfspr r10,CSRR1; \
stw r10,mcheck_csrr1@l(r8); \
mfspr r8,SPRG6R; /* SPRG6 used in machine checks */ \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_MCSRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
lis r11,mcheck_stack_top@h; \
ori r11,r11,mcheck_stack_top@l; \
beq 1f; \
/* COMING FROM USER MODE */ \
mfspr r11,SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
stw r12,_DEAR(r11); /* since they may have had stuff */\
mfspr r9,SPRN_ESR; /* in them at the point where the */\
stw r9,_ESR(r11); /* exception was taken */\
mfspr r12,MCSRR0; \
stw r1,GPR1(r11); \
mfspr r9,MCSRR1; \
stw r1,0(r11); \
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
/*
* Exception vectors.
*/
#define START_EXCEPTION(label) \
.align 5; \
label:
#define FINISH_EXCEPTION(func) \
bl transfer_to_handler_full; \
.long func; \
.long ret_from_except_full
#define EXCEPTION(n, label, hdlr, xfer) \
START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
xfer(n, hdlr)
#define CRITICAL_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(label); \
CRITICAL_EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
#define MCHECK_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(label); \
MCHECK_EXCEPTION_PROLOG; \
mfspr r5,SPRN_ESR; \
stw r5,_ESR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, mcheck_transfer_to_handler, \
ret_from_mcheck_exc)
#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
li r10,trap; \
stw r10,TRAP(r11); \
lis r10,msr@h; \
ori r10,r10,msr@l; \
copyee(r10, r9); \
bl tfer; \
.long hdlr; \
.long ret
#define COPY_EE(d, s) rlwimi d,s,0,16,16
#define NOCOPY(d, s)
#define EXC_XFER_STD(n, hdlr) \
EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_LITE(n, hdlr) \
EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
ret_from_except)
#define EXC_XFER_EE(n, hdlr) \
EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_EE_LITE(n, hdlr) \
EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
ret_from_except)
interrupt_base:
/* Critical Input Interrupt */
CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)
/* Machine Check Interrupt */
MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
/*
* Check if it was a store fault, if not then bail
* because a user tried to access a kernel or
* read-protected page. Otherwise, get the
* offending address and handle it.
*/
mfspr r10, SPRN_ESR
andis. r10, r10, ESR_ST@h
beq 2f
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
lis r11, TASK_SIZE@h
ori r11, r11, TASK_SIZE@l
cmplw 0, r10, r11
bge 2f
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
lwz r11,PGDIR(r11)
4:
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r11, 0(r11) /* Get L1 entry */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
beq 2f /* Bail if no table */
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
/* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
cmpwi 0, r13, _PAGE_RW|_PAGE_USER
bne 2f /* Bail if not */
/* Update 'changed'. */
ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
stw r11, 0(r12) /* Update Linux page table */
/* MAS2 is not updated, as the entry already exists in the TLB; this
fault is taken to detect a state transition (e.g. COW -> DIRTY)
*/
lis r12, MAS3_RPN@h
ori r12, r12, _PAGE_HWEXEC | MAS3_RPN@l
and r11, r11, r12
rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
/* update search PID in MAS6, AS = 0 */
mfspr r12, SPRN_PID0
slwi r12, r12, 16
mtspr SPRN_MAS6, r12
/* find the TLB index that caused the fault. It has to be here. */
tlbsx 0, r10
mtspr SPRN_MAS3,r11
tlbwe
/* Done...restore registers and get out of here. */
mfspr r11, SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
rfi /* Force context change */
2:
/*
* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
b data_access
/* Instruction Storage Interrupt */
START_EXCEPTION(InstructionStorage)
NORMAL_EXCEPTION_PROLOG
mfspr r5,SPRN_ESR /* Grab the ESR and save it */
stw r5,_ESR(r11)
mr r4,r12 /* Pass SRR0 as arg2 */
li r5,0 /* Pass zero as arg3 */
EXC_XFER_EE_LITE(0x0400, handle_page_fault)
/* External Input Interrupt */
EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
/* Alignment Interrupt */
START_EXCEPTION(Alignment)
NORMAL_EXCEPTION_PROLOG
mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
stw r4,_DEAR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE(0x0600, AlignmentException)
/* Program Interrupt */
START_EXCEPTION(Program)
NORMAL_EXCEPTION_PROLOG
mfspr r4,SPRN_ESR /* Grab the ESR and save it */
stw r4,_ESR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0x0700, ProgramCheckException)
/* Floating Point Unavailable Interrupt */
EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
NORMAL_EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
/* Auxillary Processor Unavailable Interrupt */
EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
/* Decrementer Interrupt */
START_EXCEPTION(Decrementer)
NORMAL_EXCEPTION_PROLOG
lis r0,TSR_DIS@h /* Setup the DEC interrupt mask */
mtspr SPRN_TSR,r0 /* Clear the DEC interrupt */
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_LITE(0x0900, timer_interrupt)
/* Fixed Interval Timer Interrupt */
/* TODO: Add FIT support */
EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException)
/* Data TLB Error Interrupt */
START_EXCEPTION(DataTLBError)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
lis r11, TASK_SIZE@h
ori r11, r11, TASK_SIZE@l
cmplw 5, r10, r11
blt 5, 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
mfspr r12,SPRN_MAS1 /* Set TID to 0 */
li r13,MAS1_TID@l
andc r12,r12,r13
mtspr SPRN_MAS1,r12
b 4f
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
lwz r11,PGDIR(r11)
4:
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r11, 0(r11) /* Get L1 entry */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
beq 2f /* Bail if no table */
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
andi. r13, r11, _PAGE_PRESENT
beq 2f
ori r11, r11, _PAGE_ACCESSED
stw r11, 0(r12)
/* Jump to common tlb load */
b finish_tlb_load
2:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
b data_access
/* Instruction TLB Error Interrupt */
/*
* Nearly the same as above, except we get our
* information from different registers and bail out
* to a different point.
*/
START_EXCEPTION(InstructionTLBError)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
mfspr r10, SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
lis r11, TASK_SIZE@h
ori r11, r11, TASK_SIZE@l
cmplw 5, r10, r11
blt 5, 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
mfspr r12,SPRN_MAS1 /* Set TID to 0 */
li r13,MAS1_TID@l
andc r12,r12,r13
mtspr SPRN_MAS1,r12
b 4f
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
lwz r11,PGDIR(r11)
4:
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r11, 0(r11) /* Get L1 entry */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
beq 2f /* Bail if no table */
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
andi. r13, r11, _PAGE_PRESENT
beq 2f
ori r11, r11, _PAGE_ACCESSED
stw r11, 0(r12)
/* Jump to common TLB load point */
b finish_tlb_load
2:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
b InstructionStorage
#ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
NORMAL_EXCEPTION_PROLOG
bne load_up_spe
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
#else
EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* SPE Floating Point Round */
EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE)
/* Performance Monitor */
EXCEPTION(0x2060, PerformanceMonitor, UnknownException, EXC_XFER_EE)
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
* where an instruction that we are trying to single step causes
* an exception (eg ITLB/DTLB miss) and thus the first instruction of
* the exception handler generates a single step debug exception.
*
* If we get a debug trap on the first instruction of an exception handler,
* we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
* a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
* The exception handler was handling a non-critical interrupt, so it will
* save (and later restore) the MSR via SPRN_SRR1, which will still have
* the MSR_DE bit set.
*/
/* Debug Interrupt */
START_EXCEPTION(Debug)
CRITICAL_EXCEPTION_PROLOG
/*
* If this is a single step or branch-taken exception in an
* exception entry sequence, it was probably meant to apply to
* the code where the exception occurred (since exception entry
* doesn't turn off DE automatically). We simulate the effect
* of turning off DE on entry to an exception handler by turning
* off DE in the CSRR1 value and clearing the debug status.
*/
mfspr r10,SPRN_DBSR /* check single-step/branch taken */
andis. r10,r10,(DBSR_IC|DBSR_BT)@h
beq+ 1f
andi. r0,r9,MSR_PR /* check supervisor */
beq 2f /* branch if we need to fix it up... */
/* continue normal handling for a critical exception... */
1: mfspr r4,SPRN_DBSR
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_TEMPLATE(DebugException, 0x2002, \
(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
/* here it looks like we got an inappropriate debug exception. */
2: rlwinm r9,r9,0,~MSR_DE /* clear DE in the CSRR1 value */
mtspr SPRN_DBSR,r10 /* clear the IC/BT debug intr status */
/* restore state and get out */
lwz r10,_CCR(r11)
lwz r0,GPR0(r11)
lwz r1,GPR1(r11)
mtcrf 0x80,r10
mtspr CSRR0,r12
mtspr CSRR1,r9
lwz r9,GPR9(r11)
mtspr SPRG2,r8; /* SPRG2 only used in criticals */
lis r8,crit_save@ha;
lwz r10,crit_r10@l(r8)
lwz r11,crit_r11@l(r8)
mfspr r8,SPRG2
rfci
b .
/*
* Local functions
*/
/*
* Data TLB exceptions will bail out to this point
* if they can't resolve the lightweight TLB fault.
*/
data_access:
NORMAL_EXCEPTION_PROLOG
mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
stw r5,_ESR(r11)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
andis. r10,r5,(ESR_ILK|ESR_DLK)@h
bne 1f
EXC_XFER_EE_LITE(0x0300, handle_page_fault)
1:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x0300, CacheLockingException)
/*
* Both the instruction and data TLB miss get to this
* point to load the TLB.
* r10 - EA of fault
* r11 - TLB (info from Linux PTE)
* r12, r13 - available to use
* CR5 - results of addr < TASK_SIZE
* MAS0, MAS1 - loaded with proper value when we get here
* MAS2, MAS3 - will need additional info from Linux PTE
* Upon exit, we reload everything and RFI.
*/
finish_tlb_load:
/*
* We set execute, because we don't have the granularity to
* properly set this at the page level (Linux problem).
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
mfspr r12, SPRN_MAS2
rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
mtspr SPRN_MAS2, r12
bge 5, 1f
/* addr > TASK_SIZE */
li r10, (MAS3_UX | MAS3_UW | MAS3_UR)
andi. r13, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
andi. r12, r11, _PAGE_USER /* Test for _PAGE_USER */
iseleq r12, 0, r10
and r10, r12, r13
srwi r12, r10, 1
or r12, r12, r10 /* Copy user perms into supervisor */
b 2f
/* addr <= TASK_SIZE */
1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */
ori r12, r12, (MAS3_SX | MAS3_SR)
2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
mtspr SPRN_MAS3, r11
tlbwe
/* Done...restore registers and get out of here. */
mfspr r11, SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
rfi /* Force context change */
#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
* support. Changes to one are likely to be applicable to the
* other! */
load_up_spe:
/*
* Disable SPE for the task which had SPE previously,
* and save its SPE registers in its thread_struct.
* Enables SPE for use in the kernel on return.
* On SMP we know the SPE units are free, since we give it up every
* switch. -- Kumar
*/
mfmsr r5
oris r5,r5,MSR_SPE@h
mtmsr r5 /* enable use of SPE now */
isync
/*
* For SMP, we don't do lazy SPE switching because it just gets too
* horrendously complex, especially when a task switches from one CPU
* to another. Instead we call giveup_spe in switch_to.
*/
#ifndef CONFIG_SMP
lis r3,last_task_used_spe@ha
lwz r4,last_task_used_spe@l(r3)
cmpi 0,r4,0
beq 1f
addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
SAVE_32EVR(0,r10,r4)
evxor evr10, evr10, evr10 /* clear out evr10 */
evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
li r5,THREAD_ACC
evstddx evr10, r4, r5 /* save off accumulator */
lwz r5,PT_REGS(r4)
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r10,MSR_SPE@h
andc r4,r4,r10 /* disable SPE for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of SPE after return */
oris r9,r9,MSR_SPE@h
mfspr r5,SPRG3 /* current task's THREAD (phys) */
li r4,1
li r10,THREAD_ACC
stw r4,THREAD_USED_SPE(r5)
evlddx evr4,r10,r5
evmra evr4,evr4
REST_32EVR(0,r10,r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
stw r4,last_task_used_spe@l(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
2: REST_4GPRS(3, r11)
lwz r10,_CCR(r11)
REST_GPR(1, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
REST_GPR(10, r11)
mtspr SRR1,r9
mtspr SRR0,r12
REST_GPR(9, r11)
REST_GPR(12, r11)
lwz r11,GPR11(r11)
SYNC
rfi
/*
* SPE unavailable trap from kernel - print a message, but let
* the task use SPE in the kernel until it returns to user mode.
*/
KernelSPE:
lwz r3,_MSR(r1)
oris r3,r3,MSR_SPE@h
stw r3,_MSR(r1) /* enable use of SPE after return */
lis r3,87f@h
ori r3,r3,87f@l
mr r4,r2 /* current */
lwz r5,_NIP(r1)
bl printk
b ret_from_except
87: .string "SPE used in kernel (task=%p, pc=%x) \n"
.align 4,0
#endif /* CONFIG_SPE */
/*
* Global functions
*/
/*
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
*/
_GLOBAL(loadcam_entry)
lis r4,TLBCAM@ha
addi r4,r4,TLBCAM@l
mulli r5,r3,20
add r3,r5,r4
lwz r4,0(r3)
mtspr SPRN_MAS0,r4
lwz r4,4(r3)
mtspr SPRN_MAS1,r4
lwz r4,8(r3)
mtspr SPRN_MAS2,r4
lwz r4,12(r3)
mtspr SPRN_MAS3,r4
tlbwe
isync
blr
/*
* extern void giveup_altivec(struct task_struct *prev)
*
* The e500 core does not have an AltiVec unit.
*/
_GLOBAL(giveup_altivec)
blr
#ifdef CONFIG_SPE
/*
* extern void giveup_spe(struct task_struct *prev)
*
*/
_GLOBAL(giveup_spe)
mfmsr r5
oris r5,r5,MSR_SPE@h
SYNC
mtmsr r5 /* enable use of SPE now */
isync
cmpi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpi 0,r5,0
SAVE_32EVR(0, r4, r3)
evxor evr6, evr6, evr6 /* clear out evr6 */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_SPE@h
andc r4,r4,r3 /* disable SPE for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
lis r4,last_task_used_spe@ha
stw r5,last_task_used_spe@l(r4)
#endif /* CONFIG_SMP */
blr
#endif /* CONFIG_SPE */
/*
* extern void giveup_fpu(struct task_struct *prev)
*
* The e500 core does not have an FPU.
*/
_GLOBAL(giveup_fpu)
blr
/*
* extern void abort(void)
*
* At present, this routine just applies a system reset.
*/
_GLOBAL(abort)
li r13,0
mtspr SPRN_DBCR0,r13 /* disable all debug events */
mfmsr r13
ori r13,r13,MSR_DE@l /* Enable Debug Events */
mtmsr r13
mfspr r13,SPRN_DBCR0
lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
mtspr SPRN_DBCR0,r13
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is the second parameter.
*/
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r4, 0x4(r5)
#endif
mtspr SPRN_PID,r3
isync /* Force context change */
blr
/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
_GLOBAL(sdata)
_GLOBAL(empty_zero_page)
.space 4096
_GLOBAL(swapper_pg_dir)
.space 4096
.section .bss
/* Stack for handling critical exceptions from kernel mode */
critical_stack_bottom:
.space 4096
critical_stack_top:
.previous
/* Stack for handling machine check exceptions from kernel mode */
mcheck_stack_bottom:
.space 4096
mcheck_stack_top:
.previous
/*
* This area is used for temporarily saving registers during the
* critical and machine check exception prologs. It must always
* follow the page aligned allocations, so it starts on a page
* boundary, ensuring that all crit_save areas are in a single
* page.
*/
/* crit_save */
_GLOBAL(crit_save)
.space 4
_GLOBAL(crit_r10)
.space 4
_GLOBAL(crit_r11)
.space 4
_GLOBAL(crit_sprg0)
.space 4
_GLOBAL(crit_sprg1)
.space 4
_GLOBAL(crit_sprg4)
.space 4
_GLOBAL(crit_sprg5)
.space 4
_GLOBAL(crit_sprg7)
.space 4
_GLOBAL(crit_pid)
.space 4
_GLOBAL(crit_srr0)
.space 4
_GLOBAL(crit_srr1)
.space 4
/* mcheck_save */
_GLOBAL(mcheck_save)
.space 4
_GLOBAL(mcheck_r10)
.space 4
_GLOBAL(mcheck_r11)
.space 4
_GLOBAL(mcheck_sprg0)
.space 4
_GLOBAL(mcheck_sprg1)
.space 4
_GLOBAL(mcheck_sprg4)
.space 4
_GLOBAL(mcheck_sprg5)
.space 4
_GLOBAL(mcheck_sprg7)
.space 4
_GLOBAL(mcheck_pid)
.space 4
_GLOBAL(mcheck_srr0)
.space 4
_GLOBAL(mcheck_srr1)
.space 4
_GLOBAL(mcheck_csrr0)
.space 4
_GLOBAL(mcheck_csrr1)
.space 4
/*
* This space gets a copy of optional info passed to us by the bootstrap
* which is used to pass parameters into the kernel like root=/dev/sda1, etc.
*/
_GLOBAL(cmd_line)
.space 512
/*
* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
*/
abatron_pteptrs:
.space 8
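
The lazy SPE state handling that load_up_spe and giveup_spe above implement can be summarized in plain C. This is a conceptual sketch only, for the UP case; MSR_SPE_BIT and the two helpers are hypothetical stand-ins, not kernel code:

#define MSR_SPE_BIT	0x02000000u	/* stand-in for MSR[SPE] */

struct task_sketch {
	unsigned int msr;		/* per-task saved MSR image */
	unsigned int evr[32];		/* saved upper GPR halves */
};

static struct task_sketch *last_task_used_spe_sketch;

static void save_spe_regs(unsigned int *evr)    { (void)evr; /* evstdd loop */ }
static void restore_spe_regs(unsigned int *evr) { (void)evr; /* evldd loop */ }

/* Invoked from the SPE Unavailable trap when 'next' executes an SPE insn. */
void lazy_spe_switch(struct task_sketch *next)
{
	if (last_task_used_spe_sketch && last_task_used_spe_sketch != next) {
		save_spe_regs(last_task_used_spe_sketch->evr);
		/* Clear MSR[SPE] so the old owner traps again on next use. */
		last_task_used_spe_sketch->msr &= ~MSR_SPE_BIT;
	}
	restore_spe_regs(next->evr);
	next->msr |= MSR_SPE_BIT;	/* let 'next' run SPE without trapping */
	last_task_used_spe_sketch = next;
}
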
@@ -419,7 +419,24 @@ _GLOBAL(_tlbia)
ble 1b
isync
#else /* !(CONFIG_40x || CONFIG_44x) */
#elif defined(CONFIG_FSL_BOOKE)
/* Invalidate all entries in TLB0 */
li r3, 0x04
tlbivax 0,3
/* Invalidate all entries in TLB1 */
li r3, 0x0c
tlbivax 0,3
/* Invalidate all entries in TLB2 */
li r3, 0x14
tlbivax 0,3
/* Invalidate all entries in TLB3 */
li r3, 0x1c
tlbivax 0,3
msync
#ifdef CONFIG_SMP
tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
@@ -487,7 +504,20 @@ _GLOBAL(_tlbie)
tlbwe r3, r3, PPC44x_TLB_PAGEID
isync
10:
#else /* !(CONFIG_40x || CONFIG_44x) */
#elif defined(CONFIG_FSL_BOOKE)
rlwinm r4, r3, 0, 0, 19
ori r5, r4, 0x08 /* TLBSEL = 1 */
ori r6, r4, 0x10 /* TLBSEL = 2 */
ori r7, r4, 0x18 /* TLBSEL = 3 */
tlbivax 0, r4
tlbivax 0, r5
tlbivax 0, r6
tlbivax 0, r7
msync
#if defined(CONFIG_SMP)
tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
@@ -544,6 +574,10 @@ _GLOBAL(flush_instruction_cache)
lis r3, KERNELBASE@h
iccci 0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
mfspr r3,SPRN_L1CSR1
ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
mtspr SPRN_L1CSR1,r3
#else
mfspr r3,PVR
rlwinm r3,r3,16,16,31
@@ -1047,7 +1081,7 @@ _GLOBAL(_get_SP)
* and exceptions as if the cpu had performed the load or store.
*/
#if defined(CONFIG_4xx)
#if defined(CONFIG_4xx) || defined(CONFIG_E500)
_GLOBAL(cvt_fd)
lfs 0,0(r3)
stfd 0,0(r4)
@@ -199,6 +199,10 @@ EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(last_task_used_altivec);
EXPORT_SYMBOL(giveup_altivec);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
EXPORT_SYMBOL(last_task_used_spe);
EXPORT_SYMBOL(giveup_spe);
#endif /* CONFIG_SPE */
#ifdef CONFIG_SMP
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_hw_index);
@@ -324,7 +328,7 @@ EXPORT_SYMBOL(debugger_fault_handler);
EXPORT_SYMBOL(cpm_install_handler);
EXPORT_SYMBOL(cpm_free_handler);
#endif /* CONFIG_8xx */
#if defined(CONFIG_8xx) || defined(CONFIG_40x)
#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx)
EXPORT_SYMBOL(__res);
#endif
#if defined(CONFIG_8xx)
@@ -49,6 +49,7 @@ extern unsigned long _get_SP(void);
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
@@ -177,6 +178,34 @@ enable_kernel_altivec(void)
EXPORT_SYMBOL(enable_kernel_altivec);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
int
dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
if (regs->msr & MSR_SPE)
giveup_spe(current);
/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
return 1;
}
void
enable_kernel_spe(void)
{
WARN_ON(preemptible());
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
giveup_spe(current);
else
giveup_spe(NULL); /* just enable SPE for kernel - force */
#else
giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
#endif /* CONFIG_SPE */
void
enable_kernel_fp(void)
{
@@ -244,6 +273,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
/*
* If the previous thread used spe in the last quantum
* (thus changing spe regs) then save them.
*
* On SMP we always save/restore spe regs just to avoid the
* complexity of changing processors.
*/
if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
giveup_spe(prev);
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */
/* Avoid the trap. On smp this never happens since
@@ -251,6 +291,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
#ifdef CONFIG_SPE
/* Avoid the trap. On smp this never happens since
* we don't set last_task_used_spe
*/
if (new->thread.regs && last_task_used_spe == new)
new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
new_thread = &new->thread;
old_thread = &current->thread;
last = _switch(old_thread, new_thread);
@@ -354,6 +401,10 @@ void prepare_to_copy(struct task_struct *tsk)
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
if (regs->msr & MSR_SPE)
giveup_spe(current);
#endif /* CONFIG_SPE */
preempt_enable();
}
@@ -438,18 +489,45 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
current->thread.vrsave = 0;
current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
memset(current->thread.evr, 0, sizeof(current->thread.evr));
current->thread.acc = 0;
current->thread.spefscr = 0;
current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
| PR_FP_EXC_RES | PR_FP_EXC_INV)
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
struct pt_regs *regs = tsk->thread.regs;
if (val > PR_FP_EXC_PRECISE)
/* This is a bit hairy. If we are an SPE enabled processor
* (have embedded fp) we store the IEEE exception enable flags in
* fpexc_mode. fpexc_mode is also used for setting FP exception
* mode (async, precise, disabled) for 'Classic' FP. */
if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
tsk->thread.fpexc_mode = val &
(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#else
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
if (regs != NULL && (regs->msr & MSR_FP) != 0)
regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
| tsk->thread.fpexc_mode;
#endif
} else {
/* on a CONFIG_SPE kernel this does not hurt us. The bits that
* __pack_fe01 uses do not overlap with bits used for
* PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
* on CONFIG_SPE implementations are reserved so writing to
* them does not change anything */
if (val > PR_FP_EXC_PRECISE)
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
if (regs != NULL && (regs->msr & MSR_FP) != 0)
regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
| tsk->thread.fpexc_mode;
}
return 0;
}
@@ -457,7 +535,14 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
unsigned int val;
val = __unpack_fe01(tsk->thread.fpexc_mode);
if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
val = tsk->thread.fpexc_mode;
#else
return -EINVAL;
#endif
else
val = __unpack_fe01(tsk->thread.fpexc_mode);
return put_user(val, (unsigned int *) adr);
}
@@ -506,6 +591,10 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
if (regs->msr & MSR_SPE)
giveup_spe(current);
#endif /* CONFIG_SPE */
preempt_enable();
error = do_execve(filename, (char __user *__user *) a1,
(char __user *__user *) a2, regs);
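
From user space, the set_fpexc_mode/get_fpexc_mode changes above are reached through prctl(PR_SET_FPEXC) and prctl(PR_GET_FPEXC). A hypothetical sketch, assuming the PR_FP_EXC_* constants from <linux/prctl.h> are visible via <sys/prctl.h>:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int mode;

	/* Ask for software-enabled reporting of overflow and divide-by-zero;
	 * on a kernel without CONFIG_SPE this combination returns -EINVAL. */
	if (prctl(PR_SET_FPEXC,
		  PR_FP_EXC_SW_ENABLE | PR_FP_EXC_OVF | PR_FP_EXC_DIV) != 0)
		perror("PR_SET_FPEXC");

	/* Read the mode back; the kernel writes it through the pointer. */
	if (prctl(PR_GET_FPEXC, &mode) == 0)
		printf("fpexc mode is 0x%x\n", mode);
	return 0;
}
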
@@ -35,7 +35,7 @@
/*
* Set of msr bits that gdb can change on behalf of a process.
*/
#ifdef CONFIG_4xx
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
@@ -131,13 +131,77 @@ static inline int set_vrregs(struct task_struct *task, unsigned long *data)
}
#endif
#ifdef CONFIG_SPE
/*
* For get_evrregs/set_evrregs functions 'data' has the following layout:
*
* struct {
* u32 evr[32];
* u64 acc;
* u32 spefscr;
* }
*/
/*
* Get contents of SPE register state in task TASK.
*/
static inline int get_evrregs(unsigned long *data, struct task_struct *task)
{
int i;
if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(unsigned long)))
return -EFAULT;
/* copy SPEFSCR */
if (__put_user(task->thread.spefscr, &data[34]))
return -EFAULT;
/* copy SPE registers EVR[0] .. EVR[31] */
for (i = 0; i < 32; i++, data++)
if (__put_user(task->thread.evr[i], data))
return -EFAULT;
/* copy ACC */
if (__put_user64(task->thread.acc, (unsigned long long *)data))
return -EFAULT;
return 0;
}
/*
* Write contents of SPE register state into task TASK.
*/
static inline int set_evrregs(struct task_struct *task, unsigned long *data)
{
int i;
if (!access_ok(VERIFY_READ, data, 35 * sizeof(unsigned long)))
return -EFAULT;
/* copy SPEFSCR */
if (__get_user(task->thread.spefscr, &data[34]))
return -EFAULT;
/* copy SPE registers EVR[0] .. EVR[31] */
for (i = 0; i < 32; i++, data++)
if (__get_user(task->thread.evr[i], data))
return -EFAULT;
/* copy ACC */
if (__get_user64(task->thread.acc, (unsigned long long*)data))
return -EFAULT;
return 0;
}
#endif /* CONFIG_SPE */
static inline void
set_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_4xx
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
/* MSR.DE should already be set */
#else
@@ -152,7 +216,7 @@ clear_single_step(struct task_struct *task)
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_4xx
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
task->thread.dbcr0 = 0;
#else
regs->msr &= ~MSR_SE;
@@ -360,6 +424,23 @@ int sys_ptrace(long request, long pid, long addr, long data)
ret = set_vrregs(child, (unsigned long *)data);
break;
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
if (child->thread.regs->msr & MSR_SPE)
giveup_spe(child);
ret = get_evrregs((unsigned long *)data, child);
break;
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
/* this is to clear the MSR_SPE bit to force a reload
* of register state from memory */
if (child->thread.regs->msr & MSR_SPE)
giveup_spe(child);
ret = set_evrregs(child, (unsigned long *)data);
break;
#endif
default:
ret = ptrace_request(child, request, addr, data);
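
A debugger reaches the new get_evrregs/set_evrregs paths above through the PTRACE_GETEVRREGS and PTRACE_SETEVRREGS requests, passing a buffer with the 35-word layout documented in the comment earlier in the file. A hypothetical debugger-side sketch, assuming PTRACE_GETEVRREGS is visible from the PowerPC <asm/ptrace.h>:

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/ptrace.h>

struct evrregs_sketch {			/* matches the layout comment above */
	uint32_t evr[32];		/* upper 32 bits of the GPRs */
	uint64_t acc;			/* SPE accumulator */
	uint32_t spefscr;		/* SPE status and control register */
};

int dump_child_spe(pid_t pid)
{
	struct evrregs_sketch regs;

	/* For PTRACE_GETEVRREGS the 'addr' argument is unused and 'data'
	 * points at the buffer the kernel fills in. */
	if (ptrace(PTRACE_GETEVRREGS, pid, 0, &regs) != 0) {
		perror("PTRACE_GETEVRREGS");
		return -1;
	}
	printf("spefscr=0x%08x acc=0x%016llx evr0=0x%08x\n",
	       regs.spefscr, (unsigned long long)regs.acc, regs.evr[0]);
	return 0;
}
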
@@ -183,8 +183,8 @@ struct rt_sigframe
/*
* Save the current user registers on the user stack.
* We only save the altivec registers if the process has used
* altivec instructions at some point.
* We only save the altivec/spe registers if the process has used
* altivec/spe instructions at some point.
*/
static int
save_user_regs(struct pt_regs *regs, struct mcontext *frame, int sigret)
@@ -197,6 +197,10 @@ save_user_regs(struct pt_regs *regs, struct mcontext *frame, int sigret)
#ifdef CONFIG_ALTIVEC
if (current->thread.used_vr && (regs->msr & MSR_VEC))
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
if (current->thread.used_spe && (regs->msr & MSR_SPE))
giveup_spe(current);
#endif /* CONFIG_SPE */
preempt_enable();
@@ -229,6 +233,24 @@ save_user_regs(struct pt_regs *regs, struct mcontext *frame, int sigret)
return 1;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
/* save spe registers */
if (current->thread.used_spe) {
if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
ELF_NEVRREG * sizeof(u32)))
return 1;
/* set MSR_SPE in the saved MSR value to indicate that
frame->mc_vregs contains valid data */
if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
return 1;
}
/* else assert((regs->msr & MSR_SPE) == 0) */
/* We always copy to/from spefscr */
if (__put_user(current->thread.spefscr, (u32 *)&frame->mc_vregs + ELF_NEVRREG))
return 1;
#endif /* CONFIG_SPE */
if (sigret) {
/* Set up the sigreturn trampoline: li r0,sigret; sc */
if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -249,7 +271,7 @@ static int
restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
{
unsigned long save_r2;
#ifdef CONFIG_ALTIVEC
#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
unsigned long msr;
#endif
@@ -290,6 +312,23 @@ restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
return 1;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
/* force the process to reload the spe registers from
current->thread when it next does spe instructions */
regs->msr &= ~MSR_SPE;
if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
/* restore spe registers from the stack */
if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
sizeof(sr->mc_vregs)))
return 1;
} else if (current->thread.used_spe)
memset(&current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
/* Always get SPEFSCR back */
if (__get_user(current->thread.spefscr, (u32 *)&sr->mc_vregs + ELF_NEVRREG))
return 1;
#endif /* CONFIG_SPE */
return 0;
}
@@ -30,6 +30,7 @@
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
......@@ -171,6 +172,11 @@ static inline int check_io_access(struct pt_regs *regs)
/* On 4xx, the reason for the machine check or program exception
is in the ESR. */
#define get_reason(regs) ((regs)->dsisr)
#ifndef CONFIG_E500
#define get_mc_reason(regs) ((regs)->dsisr)
#else
#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
#endif
#define REASON_FP 0
#define REASON_ILLEGAL ESR_PIL
#define REASON_PRIVILEGED ESR_PPR
......@@ -184,6 +190,7 @@ static inline int check_io_access(struct pt_regs *regs)
/* On non-4xx, the reason for the machine check or program
exception is in the MSR. */
#define get_reason(regs) ((regs)->msr)
#define get_mc_reason(regs) ((regs)->msr)
#define REASON_FP 0x100000
#define REASON_ILLEGAL 0x80000
#define REASON_PRIVILEGED 0x40000
......@@ -196,7 +203,7 @@ static inline int check_io_access(struct pt_regs *regs)
void
MachineCheckException(struct pt_regs *regs)
{
unsigned long reason = get_reason(regs);
unsigned long reason = get_mc_reason(regs);
if (user_mode(regs)) {
regs->msr |= MSR_RI;
......@@ -256,7 +263,37 @@ MachineCheckException(struct pt_regs *regs)
/* Clear MCSR */
mtspr(SPRN_MCSR, mcsr);
}
#else /* !CONFIG_4xx */
#elif defined (CONFIG_E500)
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
printk("Machine Check Signal\n");
if (reason & MCSR_ICPERR)
printk("Instruction Cache Parity Error\n");
if (reason & MCSR_DCP_PERR)
printk("Data Cache Push Parity Error\n");
if (reason & MCSR_DCPERR)
printk("Data Cache Parity Error\n");
if (reason & MCSR_GL_CI)
printk("Guarded Load or Cache-Inhibited stwcx.\n");
if (reason & MCSR_BUS_IAERR)
printk("Bus - Instruction Address Error\n");
if (reason & MCSR_BUS_RAERR)
printk("Bus - Read Address Error\n");
if (reason & MCSR_BUS_WAERR)
printk("Bus - Write Address Error\n");
if (reason & MCSR_BUS_IBERR)
printk("Bus - Instruction Data Error\n");
if (reason & MCSR_BUS_RBERR)
printk("Bus - Read Data Bus Error\n");
if (reason & MCSR_BUS_WBERR)
printk("Bus - Read Data Bus Error\n");
if (reason & MCSR_BUS_IPERR)
printk("Bus - Instruction Parity Error\n");
if (reason & MCSR_BUS_RPERR)
printk("Bus - Read Parity Error\n");
#else /* !CONFIG_4xx && !CONFIG_E500 */
printk("Machine check in kernel mode.\n");
printk("Caused by (from SRR1=%lx): ", reason);
switch (reason & 0x601F0000) {
......@@ -682,6 +719,56 @@ AltivecAssistException(struct pt_regs *regs)
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
/* We treat cache locking instructions from the user
* as priv ops; in the future we could try to do
* something smarter.
*/
if (error_code & (ESR_DLK|ESR_ILK))
_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
return;
}
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_SPE
void
SPEFloatingPointException(struct pt_regs *regs)
{
unsigned long spefscr;
int fpexc_mode;
int code = 0;
spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;
/* Hardware does not necessarily set sticky
* underflow/overflow/invalid flags */
if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
code = FPE_FLTOVF;
spefscr |= SPEFSCR_FOVFS;
}
else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
code = FPE_FLTUND;
spefscr |= SPEFSCR_FUNFS;
}
else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
code = FPE_FLTDIV;
else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
code = FPE_FLTINV;
spefscr |= SPEFSCR_FINVS;
}
else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
code = FPE_FLTRES;
current->thread.spefscr = spefscr;
_exception(SIGFPE, regs, code, regs->nip);
return;
}
#endif
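/*
 * Illustration only, not part of this patch: a user process opts in to
 * these SPE floating-point exceptions through the existing prctl()
 * fpexc interface, which is what fills in the fpexc_mode tested above.
 * The flag names are assumed to come from <linux/prctl.h>, and the
 * target is assumed to be an e500 with SPE support.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Request precise SIGFPE delivery for divide-by-zero and invalid
	 * SPE FP operations, so the handler above reports FPE_FLTDIV /
	 * FPE_FLTINV in siginfo. */
	if (prctl(PR_SET_FPEXC,
		  PR_FP_EXC_PRECISE | PR_FP_EXC_DIV | PR_FP_EXC_INV) != 0) {
		perror("prctl(PR_SET_FPEXC)");
		return 1;
	}
	/* SPE floating-point code would follow here. */
	return 0;
}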
void __init trap_init(void)
{
......
......@@ -12,3 +12,4 @@ obj-y := fault.o init.o mem_pieces.o \
obj-$(CONFIG_PPC_STD_MMU) += hashtable.o ppc_mmu.o tlb.o
obj-$(CONFIG_40x) += 4xx_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
......@@ -99,7 +99,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
struct mm_struct *mm = current->mm;
siginfo_t info;
int code = SEGV_MAPERR;
#if defined(CONFIG_4xx)
#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
int is_write = error_code & ESR_DST;
#else
int is_write = 0;
......@@ -114,20 +114,20 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
error_code &= 0x48200000;
else
is_write = error_code & 0x02000000;
#endif /* CONFIG_4xx */
#endif /* CONFIG_4xx || CONFIG_BOOKE */
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_fault_handler && TRAP(regs) == 0x300) {
debugger_fault_handler(regs);
return 0;
}
#if !defined(CONFIG_4xx)
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
if (error_code & 0x00400000) {
/* DABR match */
if (debugger_dabr_match(regs))
return 0;
}
#endif /* !CONFIG_4xx */
#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
#endif /* CONFIG_XMON || CONFIG_KGDB */
if (in_atomic() || mm == NULL)
......@@ -200,8 +200,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
#if defined(CONFIG_4xx)
/* an exec - 4xx allows for per-page execute permission */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* an exec - 4xx/Book-E allows for per-page execute permission */
} else if (TRAP(regs) == 0x400) {
pte_t *ptep;
......@@ -214,7 +214,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
goto bad_area;
#endif
/* Since 4xx supports per-page execute permission,
/* Since 4xx/Book-E supports per-page execute permission,
* we lazily flush dcache to icache. */
ptep = NULL;
if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
......
/*
* Modifications by Kumar Gala (kumar.gala@freescale.com) to support
* E500 Book E processors.
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This file contains the routines for initializing the MMU
* on Freescale Book-E (e500) processors.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>
extern void loadcam_entry(unsigned int index);
unsigned int tlbcam_index;
unsigned int num_tlbcam_entries;
static unsigned long __cam0, __cam1, __cam2;
extern unsigned long total_lowmem;
extern unsigned long __max_low_memory;
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
struct tlbcam {
u32 MAS0;
u32 MAS1;
u32 MAS2;
u32 MAS3;
u32 MAS7;
} TLBCAM[NUM_TLBCAMS];
struct tlbcamrange {
unsigned long start;
unsigned long limit;
phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];
extern unsigned int tlbcam_index;
/*
* Return PA for this VA if it is mapped by a CAM, or 0
*/
unsigned long v_mapped_by_tlbcam(unsigned long va)
{
int b;
for (b = 0; b < tlbcam_index; ++b)
if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
return 0;
}
/*
* Return VA for a given PA or 0 if not mapped
*/
unsigned long p_mapped_by_tlbcam(unsigned long pa)
{
int b;
for (b = 0; b < tlbcam_index; ++b)
if (pa >= tlbcam_addrs[b].phys
&& pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+tlbcam_addrs[b].phys)
return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
return 0;
}
/*
* Set up one of the TLB CAM (variable-size translation) entries.
* The parameters are not checked; in particular size must be a power
* of 4 between 4k and 256M.
*/
void settlbcam(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, int flags, unsigned int pid)
{
unsigned int tsize, lz;
asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
tsize = (21 - lz) / 2;
#ifdef CONFIG_SMP
if ((flags & _PAGE_NO_CACHE) == 0)
flags |= _PAGE_COHERENT;
#endif
TLBCAM[index].MAS0 = MAS0_TLBSEL | (index << 16);
TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | ((pid << 16) & MAS1_TID);
TLBCAM[index].MAS2 = virt & PAGE_MASK;
TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
#ifndef CONFIG_KGDB /* want user access for breakpoints */
if (flags & _PAGE_USER) {
TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
}
#else
TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
#endif
tlbcam_addrs[index].start = virt;
tlbcam_addrs[index].limit = virt + size - 1;
tlbcam_addrs[index].phys = phys;
loadcam_entry(index);
}
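/*
 * Not part of the patch: a minimal host-side sketch of the cntlzw-based
 * size encoding used in settlbcam() above, with __builtin_clz() standing
 * in for the cntlzw instruction.  The expected values line up with the
 * BOOKE_PAGESZ_* constants added to asm-ppc/mmu.h later in this patch.
 */
#include <assert.h>

static unsigned int booke_tsize(unsigned int size)	/* size is a power of 4 */
{
	unsigned int lz = __builtin_clz(size);		/* leading zeros, 32-bit */
	return (21 - lz) / 2;
}

int main(void)
{
	assert(booke_tsize(0x00001000) == 1);	/* 4K   -> BOOKE_PAGESZ_4K   */
	assert(booke_tsize(0x01000000) == 7);	/* 16M  -> BOOKE_PAGESZ_16M  */
	assert(booke_tsize(0x10000000) == 9);	/* 256M -> BOOKE_PAGESZ_256M */
	return 0;
}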
void invalidate_tlbcam_entry(int index)
{
TLBCAM[index].MAS0 = MAS0_TLBSEL | (index << 16);
TLBCAM[index].MAS1 = ~MAS1_VALID;
loadcam_entry(index);
}
void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
unsigned long cam2)
{
settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
tlbcam_index++;
if (cam1) {
tlbcam_index++;
settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
}
if (cam2) {
tlbcam_index++;
settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
}
}
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
void __init MMU_init_hw(void)
{
flush_instruction_cache();
}
unsigned long __init mmu_mapin_ram(void)
{
cam_mapin_ram(__cam0, __cam1, __cam2);
return __cam0 + __cam1 + __cam2;
}
void __init
adjust_total_lowmem(void)
{
unsigned long max_low_mem = MAX_LOW_MEM;
unsigned long cam_max = 0x10000000;
unsigned long ram;
/* adjust CAM size to max_low_mem */
if (max_low_mem < cam_max)
cam_max = max_low_mem;
/* adjust lowmem size to max_low_mem */
if (max_low_mem < total_lowmem)
ram = max_low_mem;
else
ram = total_lowmem;
/* Calculate CAM values */
__cam0 = 1UL << 2 * (__ilog2(ram) / 2);
if (__cam0 > cam_max)
__cam0 = cam_max;
ram -= __cam0;
if (ram) {
__cam1 = 1UL << 2 * (__ilog2(ram) / 2);
if (__cam1 > cam_max)
__cam1 = cam_max;
ram -= __cam1;
}
if (ram) {
__cam2 = 1UL << 2 * (__ilog2(ram) / 2);
if (__cam2 > cam_max)
__cam2 = cam_max;
ram -= __cam2;
}
printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
" CAM2=%ldMb residual: %ldMb\n",
__cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
(total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
__max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
}
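/*
 * Hand-worked illustration, not part of the patch: the power-of-4 split
 * performed by adjust_total_lowmem() reproduced as host-side C, assuming
 * 192MB of lowmem and the 256MB per-entry cap used above.  The result is
 * CAM0=CAM1=CAM2=64MB with no residual.
 */
#include <stdio.h>

static unsigned long pow4_floor(unsigned long ram)
{
	int log2 = 0;

	while ((1UL << (log2 + 1)) <= ram)
		log2++;				/* __ilog2(ram) */
	return 1UL << (2 * (log2 / 2));		/* largest power of 4 <= ram */
}

int main(void)
{
	unsigned long ram = 192UL << 20, cam_max = 256UL << 20;
	unsigned long cam[3] = { 0, 0, 0 };
	int i;

	for (i = 0; i < 3 && ram; i++) {
		cam[i] = pow4_floor(ram);
		if (cam[i] > cam_max)
			cam[i] = cam_max;
		ram -= cam[i];
	}
	printf("CAM0=%luMB CAM1=%luMB CAM2=%luMB residual=%luMB\n",
	       cam[0] >> 20, cam[1] >> 20, cam[2] >> 20, ram >> 20);
	return 0;
}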
......@@ -253,6 +253,12 @@ void __init MMU_init(void)
if (__max_memory && total_memory > __max_memory)
total_memory = __max_memory;
total_lowmem = total_memory;
#ifdef CONFIG_FSL_BOOKE
/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
* entries, so we need to adjust lowmem to match the amount we can map
* in the fixed entries */
adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */
if (total_lowmem > __max_low_memory) {
total_lowmem = __max_low_memory;
#ifndef CONFIG_HIGHMEM
......
......@@ -27,6 +27,9 @@ extern int map_page(unsigned long va, phys_addr_t pa, int flags);
extern void setbat(int index, unsigned long virt, unsigned long phys,
unsigned int size, int flags);
extern void reserve_phys_mem(unsigned long start, unsigned long size);
extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, int flags, unsigned int pid);
extern void invalidate_tlbcam_entry(int index);
extern int __map_without_bats;
extern unsigned long ioremap_base;
......@@ -53,6 +56,12 @@ extern unsigned long Hash_size, Hash_mask;
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
#elif defined(CONFIG_FSL_BOOKE)
#define flush_HPTE(X, va, pg) _tlbie(va)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
#else
/* anything except 4xx or 8xx */
extern void MMU_init_hw(void);
......
......@@ -42,6 +42,10 @@ int io_bat_index;
#define HAVE_BATS 1
#endif
#if defined(CONFIG_FSL_BOOKE)
#define HAVE_TLBCAM 1
#endif
extern char etext[], _stext[];
#ifdef CONFIG_SMP
......@@ -59,6 +63,16 @@ void setbat(int index, unsigned long virt, unsigned long phys,
#define p_mapped_by_bats(x) (0UL)
#endif /* HAVE_BATS */
#ifdef HAVE_TLBCAM
extern unsigned int tlbcam_index;
extern unsigned int num_tlbcam_entries;
extern unsigned long v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
#else /* !HAVE_TLBCAM */
#define v_mapped_by_tlbcam(x) (0UL)
#define p_mapped_by_tlbcam(x) (0UL)
#endif /* HAVE_TLBCAM */
#ifdef CONFIG_44x
/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
#define PGDIR_ORDER 1
......@@ -210,6 +224,9 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
goto out;
if ((v = p_mapped_by_tlbcam(p)))
goto out;
if (mem_init_done) {
struct vm_struct *area;
area = get_vm_area(size, VM_IOREMAP);
......@@ -300,6 +317,9 @@ void __init mapin_ram(void)
/* is x a power of 2? */
#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
/* is x a power of 4? */
#define is_power_of_4(x) ((x) != 0 && (((x) & ((x) - 1)) == 0) && (ffs(x) & 1))
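/*
 * Illustrative check, not part of the patch: a power of 4 is a power of 2
 * whose single set bit sits at an even position, so the 1-based index
 * returned by ffs() is odd.  The macros below mirror the two definitions
 * above as a standalone host-side test.
 */
#include <assert.h>
#include <strings.h>	/* ffs() */

#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
#define is_power_of_4(x)	(is_power_of_2(x) && (ffs(x) & 1))

int main(void)
{
	assert(is_power_of_4(1));		/* 4^0 */
	assert(is_power_of_4(0x1000000));	/* 16M = 4^12 */
	assert(!is_power_of_4(8));		/* power of 2 but not of 4 */
	assert(!is_power_of_4(12));		/* not a power of 2 at all */
	return 0;
}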
/*
* Set up a mapping for a block of I/O.
* virt, phys, size must all be page-aligned.
......@@ -325,6 +345,18 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
}
#endif /* HAVE_BATS */
#ifdef HAVE_TLBCAM
/*
* Use a CAM for this if possible...
*/
if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
&& (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
settlbcam(tlbcam_index, virt, phys, size, flags, 0);
++tlbcam_index;
return;
}
#endif /* HAVE_TLBCAM */
/* No BATs available, put it in the page tables. */
for (i = 0; i < size; i += PAGE_SIZE)
map_page(virt + i, phys + i, flags);
......
config 85xx
bool
depends on E500
default y
config PPC_INDIRECT_PCI_BE
bool
depends on 85xx
default y
menu "Freescale 85xx options"
depends on E500
choice
prompt "Machine Type"
depends on 85xx
default MPC8540_ADS
config MPC8540_ADS
bool "MPC8540ADS"
help
This option enables support for the MPC 8540 ADS evaluation board.
endchoice
# It's often necessary to know the specific 85xx processor type.
# Fortunately, it is implied (so far) from the board type, so we
# don't need to ask more redundant questions.
config MPC8540
bool
depends on MPC8540_ADS
default y
config FSL_OCP
bool
depends on 85xx
default y
config PPC_GEN550
bool
depends on MPC8540
default y
endmenu
#
# Makefile for the PowerPC 85xx linux kernel.
#
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads_common.o mpc8540_ads.o
obj-$(CONFIG_MPC8540) += mpc8540.o
/*
* arch/ppc/platforms/85xx/mpc8540.c
*
* MPC8540 I/O descriptions
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <asm/mpc85xx.h>
#include <asm/ocp.h>
/* These should be defined in platform code */
extern struct ocp_gfar_data mpc85xx_tsec1_def;
extern struct ocp_gfar_data mpc85xx_tsec2_def;
extern struct ocp_gfar_data mpc85xx_fec_def;
extern struct ocp_mpc_i2c_data mpc85xx_i2c1_def;
/* We use offsets for paddr since we do not know at compile time
 * what CCSRBAR is; platform code should fix this up in
 * setup_arch.
 *
 * Only the first IRQ is given even if a device has
 * multiple lines associated with it.
 */
struct ocp_def core_ocp[] = {
{ .vendor = OCP_VENDOR_FREESCALE,
.function = OCP_FUNC_IIC,
.index = 0,
.paddr = MPC85xx_IIC1_OFFSET,
.irq = MPC85xx_IRQ_IIC1,
.pm = OCP_CPM_NA,
.additions = &mpc85xx_i2c1_def,
},
{ .vendor = OCP_VENDOR_FREESCALE,
.function = OCP_FUNC_16550,
.index = 0,
.paddr = MPC85xx_UART0_OFFSET,
.irq = MPC85xx_IRQ_DUART,
.pm = OCP_CPM_NA,
},
{ .vendor = OCP_VENDOR_FREESCALE,
.function = OCP_FUNC_16550,
.index = 1,
.paddr = MPC85xx_UART1_OFFSET,
.irq = MPC85xx_IRQ_DUART,
.pm = OCP_CPM_NA,
},
{ .vendor = OCP_VENDOR_FREESCALE,
.function = OCP_FUNC_GFAR,
.index = 0,
.paddr = MPC85xx_ENET1_OFFSET,
.irq = MPC85xx_IRQ_TSEC1_TX,
.pm = OCP_CPM_NA,
.additions = &mpc85xx_tsec1_def,
},
{ .vendor = OCP_VENDOR_FREESCALE,
.function = OCP_FUNC_GFAR,
.index = 1,
.paddr = MPC85xx_ENET2_OFFSET,
.irq = MPC85xx_IRQ_TSEC2_TX,
.pm = OCP_CPM_NA,
.additions = &mpc85xx_tsec2_def,
},
{ .vendor = OCP_VENDOR_FREESCALE,
.function = OCP_FUNC_GFAR,
.index = 2,
.paddr = MPC85xx_ENET3_OFFSET,
.irq = MPC85xx_IRQ_FEC,
.pm = OCP_CPM_NA,
.additions = &mpc85xx_fec_def,
},
{ .vendor = OCP_VENDOR_FREESCALE,
.function = OCP_FUNC_DMA,
.index = 0,
.paddr = MPC85xx_DMA_OFFSET,
.irq = MPC85xx_IRQ_DMA0,
.pm = OCP_CPM_NA,
},
{ .vendor = OCP_VENDOR_FREESCALE,
.function = OCP_FUNC_PERFMON,
.index = 0,
.paddr = MPC85xx_PERFMON_OFFSET,
.irq = MPC85xx_IRQ_PERFMON,
.pm = OCP_CPM_NA,
},
{ .vendor = OCP_VENDOR_INVALID
}
};
/*
* arch/ppc/platforms/85xx/mpc8540_ads.c
*
* MPC8540ADS board specific routines
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/tty.h> /* for linux/serial_core.h */
#include <linux/serial_core.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/open_pic.h>
#include <asm/bootinfo.h>
#include <asm/pci-bridge.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/immap_85xx.h>
#include <asm/kgdb.h>
#include <asm/ocp.h>
#include <mm/mmu_decl.h>
#include <syslib/ppc85xx_common.h>
#include <syslib/ppc85xx_setup.h>
struct ocp_gfar_data mpc85xx_tsec1_def = {
.interruptTransmit = MPC85xx_IRQ_TSEC1_TX,
.interruptError = MPC85xx_IRQ_TSEC1_ERROR,
.interruptReceive = MPC85xx_IRQ_TSEC1_RX,
.interruptPHY = MPC85xx_IRQ_EXT5,
.flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR
| GFAR_HAS_RMON
| GFAR_HAS_PHY_INTR | GFAR_HAS_COALESCE),
.phyid = 0,
.phyregidx = 0,
};
struct ocp_gfar_data mpc85xx_tsec2_def = {
.interruptTransmit = MPC85xx_IRQ_TSEC2_TX,
.interruptError = MPC85xx_IRQ_TSEC2_ERROR,
.interruptReceive = MPC85xx_IRQ_TSEC2_RX,
.interruptPHY = MPC85xx_IRQ_EXT5,
.flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR
| GFAR_HAS_RMON
| GFAR_HAS_PHY_INTR | GFAR_HAS_COALESCE),
.phyid = 1,
.phyregidx = 0,
};
struct ocp_gfar_data mpc85xx_fec_def = {
.interruptTransmit = MPC85xx_IRQ_FEC,
.interruptError = MPC85xx_IRQ_FEC,
.interruptReceive = MPC85xx_IRQ_FEC,
.interruptPHY = MPC85xx_IRQ_EXT5,
.flags = 0,
.phyid = 3,
.phyregidx = 0,
};
struct ocp_fs_i2c_data mpc85xx_i2c1_def = {
.flags = FS_I2C_SEPARATE_DFSRR,
};
/* ************************************************************************
*
* Setup the architecture
*
*/
static void __init
mpc8540ads_setup_arch(void)
{
struct ocp_def *def;
struct ocp_gfar_data *einfo;
bd_t *binfo = (bd_t *) __res;
unsigned int freq;
/* get the core frequency */
freq = binfo->bi_intfreq;
if (ppc_md.progress)
ppc_md.progress("mpc8540ads_setup_arch()", 0);
/* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
loops_per_jiffy = freq / HZ;
#ifdef CONFIG_PCI
/* setup PCI host bridges */
mpc85xx_setup_hose();
#endif
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
#ifdef CONFIG_SERIAL_8250
mpc85xx_early_serial_map();
#endif
#ifdef CONFIG_SERIAL_TEXT_DEBUG
/* Invalidate the entry we stole earlier; the serial ports
 * should be properly mapped by now */
invalidate_tlbcam_entry(NUM_TLBCAMS - 1);
#endif
def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0);
if (def) {
einfo = (struct ocp_gfar_data *) def->additions;
memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6);
}
def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1);
if (def) {
einfo = (struct ocp_gfar_data *) def->additions;
memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6);
}
def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 2);
if (def) {
einfo = (struct ocp_gfar_data *) def->additions;
memcpy(einfo->mac_addr, binfo->bi_enet2addr, 6);
}
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
ROOT_DEV = Root_RAM0;
else
#endif
#ifdef CONFIG_ROOT_NFS
ROOT_DEV = Root_NFS;
#else
ROOT_DEV = Root_HDA1;
#endif
ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base));
}
/* ************************************************************************ */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
/* parse_bootinfo must always be called first */
parse_bootinfo(find_bootinfo());
/*
* If we were passed in a board information, copy it into the
* residual data area.
*/
if (r3) {
memcpy((void *) __res, (void *) (r3 + KERNELBASE),
sizeof (bd_t));
}
#ifdef CONFIG_SERIAL_TEXT_DEBUG
{
bd_t *binfo = (bd_t *) __res;
/* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */
settlbcam(NUM_TLBCAMS - 1, binfo->bi_immr_base,
binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0);
}
#endif
#if defined(CONFIG_BLK_DEV_INITRD)
/*
* If the init RAM disk has been configured in, and there's a valid
* starting address for it, set it up.
*/
if (r4) {
initrd_start = r4 + KERNELBASE;
initrd_end = r5 + KERNELBASE;
}
#endif /* CONFIG_BLK_DEV_INITRD */
/* Copy the kernel command line arguments to a safe place. */
if (r6) {
*(char *) (r7 + KERNELBASE) = 0;
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc8540ads_setup_arch;
ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo;
ppc_md.init_IRQ = mpc85xx_ads_init_IRQ;
ppc_md.get_irq = openpic_get_irq;
ppc_md.restart = mpc85xx_restart;
ppc_md.power_off = mpc85xx_power_off;
ppc_md.halt = mpc85xx_halt;
ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
ppc_md.time_init = NULL;
ppc_md.set_rtc_time = NULL;
ppc_md.get_rtc_time = NULL;
ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
ppc_md.progress = gen550_progress;
#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
if (ppc_md.progress)
ppc_md.progress("mpc8540ads_init(): exit", 0);
return;
}
/*
* arch/ppc/platforms/85xx/mpc8540_ads.h
*
* MPC8540ADS board definitions
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __MACH_MPC8540ADS_H__
#define __MACH_MPC8540ADS_H__
#include <linux/config.h>
#include <linux/serial.h>
#include <linux/initrd.h>
#include <syslib/ppc85xx_setup.h>
#include <platforms/85xx/mpc85xx_ads_common.h>
#define SERIAL_PORT_DFNS \
STD_UART_OP(0) \
STD_UART_OP(1)
#endif /* __MACH_MPC8540ADS_H__ */
/*
* arch/ppc/platforms/85xx/mpc85xx_ads_common.c
*
* MPC85xx ADS board common routines
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/open_pic.h>
#include <asm/bootinfo.h>
#include <asm/pci-bridge.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/immap_85xx.h>
#include <asm/ocp.h>
#include <mm/mmu_decl.h>
#include <platforms/85xx/mpc85xx_ads_common.h>
#ifndef CONFIG_PCI
unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
#endif
extern unsigned long total_memory; /* in mm/init */
unsigned char __res[sizeof (bd_t)];
/* Internal interrupts are all Level Sensitive, and Positive Polarity */
static u_char mpc85xx_ads_openpic_initsenses[] __initdata = {
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */
0x0, /* External 0: */
#if defined(CONFIG_PCI)
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 1: PCI slot 0 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 2: PCI slot 1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 3: PCI slot 2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 4: PCI slot 3 */
#else
0x0, /* External 1: */
0x0, /* External 2: */
0x0, /* External 3: */
0x0, /* External 4: */
#endif
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */
0x0, /* External 6: */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 7: PHY */
0x0, /* External 8: */
0x0, /* External 9: */
0x0, /* External 10: */
0x0, /* External 11: */
};
/* ************************************************************************ */
int
mpc85xx_ads_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
uint memsize = total_memory;
bd_t *binfo = (bd_t *) __res;
unsigned int freq;
/* get the core frequency */
freq = binfo->bi_intfreq;
pvid = mfspr(PVR);
svid = mfspr(SVR);
seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
switch (svid & 0xffff0000) {
case SVR_8540:
seq_printf(m, "Machine\t\t: mpc8540ads\n");
break;
case SVR_8560:
seq_printf(m, "Machine\t\t: mpc8560ads\n");
break;
default:
seq_printf(m, "Machine\t\t: unknown\n");
break;
}
seq_printf(m, "bus freq\t: %u.%.6u MHz\n", freq / 1000000,
freq % 1000000);
seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
phid1 = mfspr(HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
return 0;
}
void __init
mpc85xx_ads_init_IRQ(void)
{
bd_t *binfo = (bd_t *) __res;
/* Determine the Physical Address of the OpenPIC regs */
phys_addr_t OpenPIC_PAddr =
binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
OpenPIC_InitSenses = mpc85xx_ads_openpic_initsenses;
OpenPIC_NumInitSenses = sizeof (mpc85xx_ads_openpic_initsenses);
/* Skip reserved space and internal sources */
openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
/* Map PIC IRQs 0-11 */
openpic_set_sources(32, 12, OpenPIC_Addr + 0x10000);
/* We let OpenPIC interrupts start at an offset to
 * leave space for cascading interrupts underneath.
*/
openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
return;
}
#ifdef CONFIG_PCI
/*
* interrupt routing
*/
int
mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
static char pci_irq_table[][4] =
/*
* This is a little evil, but it works around the fact
* that revA boards have IDSEL starting at 18
* while other (older) boards start at 12
*
* PCI IDSEL/INTPIN->INTLINE
* A B C D
*/
{
{PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 2 */
{PIRQD, PIRQA, PIRQB, PIRQC},
{PIRQC, PIRQD, PIRQA, PIRQB},
{PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 5 */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 12 */
{PIRQD, PIRQA, PIRQB, PIRQC},
{PIRQC, PIRQD, PIRQA, PIRQB},
{PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 15 */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 18 */
{PIRQD, PIRQA, PIRQB, PIRQC},
{PIRQC, PIRQD, PIRQA, PIRQB},
{PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 21 */
};
const long min_idsel = 2, max_idsel = 21, irqs_per_slot = 4;
return PCI_IRQ_TABLE_LOOKUP;
}
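/*
 * Sketch with assumed semantics, not the actual PCI_IRQ_TABLE_LOOKUP
 * macro: the table above is indexed by (idsel - min_idsel) and (pin - 1),
 * so e.g. IDSEL 18 / INTA on a revA board routes to PIRQA.
 */
static int pci_irq_lookup_sketch(const char table[][4], long min_idsel,
				 long max_idsel, long irqs_per_slot,
				 unsigned char idsel, unsigned char pin)
{
	if (idsel < min_idsel || idsel > max_idsel ||
	    pin < 1 || pin > irqs_per_slot)
		return -1;			/* no routing entry */
	return table[idsel - min_idsel][pin - 1];
}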
int
mpc85xx_exclude_device(u_char bus, u_char devfn)
{
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
else
return PCIBIOS_SUCCESSFUL;
}
#endif /* CONFIG_PCI */
/*
* arch/ppc/platforms/85xx/mpc85xx_ads_common.h
*
* MPC85XX ADS common board definitions
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __MACH_MPC85XX_ADS_H__
#define __MACH_MPC85XX_ADS_H__
#include <linux/config.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/ppcboot.h>
#define BOARD_CCSRBAR ((uint)0xe0000000)
#define BCSR_ADDR ((uint)0xf8000000)
#define BCSR_SIZE ((uint)(32 * 1024))
extern int mpc85xx_ads_show_cpuinfo(struct seq_file *m);
extern void mpc85xx_ads_init_IRQ(void) __init;
extern void mpc85xx_ads_map_io(void) __init;
/* PCI interrupt controller */
#define PIRQA MPC85xx_IRQ_EXT1
#define PIRQB MPC85xx_IRQ_EXT2
#define PIRQC MPC85xx_IRQ_EXT3
#define PIRQD MPC85xx_IRQ_EXT4
#define MPC85XX_PCI1_LOWER_IO 0x00000000
#define MPC85XX_PCI1_UPPER_IO 0x00ffffff
#define MPC85XX_PCI1_LOWER_MEM 0x80000000
#define MPC85XX_PCI1_UPPER_MEM 0x9fffffff
#define MPC85XX_PCI1_IO_BASE 0xe2000000
#define MPC85XX_PCI1_MEM_OFFSET 0x00000000
#define MPC85XX_PCI1_IO_SIZE 0x01000000
#endif /* __MACH_MPC85XX_ADS_H__ */
......@@ -8,6 +8,9 @@ endif
ifdef CONFIG_4xx
EXTRA_AFLAGS := -Wa,-m405
endif
ifdef CONFIG_E500
EXTRA_AFLAGS := -Wa,-me500
endif
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
......@@ -75,3 +78,7 @@ obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_MPC10X_BRIDGE) += mpc10x_common.o indirect_pci.o
obj-$(CONFIG_40x) += dcr.o
obj-$(CONFIG_BOOKE) += dcr.o
obj-$(CONFIG_85xx) += open_pic.o ppc85xx_common.o ppc85xx_setup.o
ifeq ($(CONFIG_85xx),y)
obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o
endif
......@@ -28,7 +28,7 @@
#include "open_pic_defs.h"
#ifdef CONFIG_PRPMC800
#if defined(CONFIG_PRPMC800) || defined(CONFIG_85xx)
#define OPENPIC_BIG_ENDIAN
#endif
......
/*
* arch/ppc/syslib/ppc85xx_common.c
*
* MPC85xx support routines
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/mpc85xx.h>
#include <asm/mmu.h>
#include <asm/ocp.h>
/* ************************************************************************ */
/* Return the value of CCSRBAR for the current board */
phys_addr_t
get_ccsrbar(void)
{
return BOARD_CCSRBAR;
}
/* ************************************************************************ */
/* Update the 85xx OCP tables' paddr fields */
void
mpc85xx_update_paddr_ocp(struct ocp_device *dev, void *arg)
{
phys_addr_t ccsrbar;
if (arg) {
ccsrbar = *(phys_addr_t *)arg;
dev->def->paddr += ccsrbar;
}
}
EXPORT_SYMBOL(get_ccsrbar);
/*
* arch/ppc/syslib/ppc85xx_common.h
*
* MPC85xx support routines
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __PPC_SYSLIB_PPC85XX_COMMON_H
#define __PPC_SYSLIB_PPC85XX_COMMON_H
#include <linux/config.h>
#include <linux/init.h>
#include <asm/ocp.h>
/* Provide access to ccsrbar for any modules, etc */
phys_addr_t get_ccsrbar(void);
/* Update the 85xx OCP tables' paddr fields */
void mpc85xx_update_paddr_ocp(struct ocp_device *dev, void *ccsrbar);
#endif /* __PPC_SYSLIB_PPC85XX_COMMON_H */
/*
* arch/ppc/syslib/ppc85xx_setup.c
*
* MPC85XX common board code
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/serial.h>
#include <linux/tty.h> /* for linux/serial_core.h */
#include <linux/serial_core.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/mpc85xx.h>
#include <asm/immap_85xx.h>
#include <asm/mmu.h>
#include <asm/ocp.h>
#include <asm/kgdb.h>
/* Return the amount of memory */
unsigned long __init
mpc85xx_find_end_of_memory(void)
{
bd_t *binfo;
binfo = (bd_t *) __res;
return binfo->bi_memsize;
}
/* The decrementer counts at the system (internal) clock freq divided by 8 */
void __init
mpc85xx_calibrate_decr(void)
{
bd_t *binfo = (bd_t *) __res;
unsigned int freq, divisor;
/* get the core frequency */
freq = binfo->bi_busfreq;
/* The timebase is updated every 8 bus clocks, HID0[SEL_TBCLK] = 0 */
divisor = 8;
tb_ticks_per_jiffy = freq / divisor / HZ;
tb_to_us = mulhwu_scale_factor(freq / divisor, 1000000);
/* Set the time base to zero */
mtspr(SPRN_TBWL, 0);
mtspr(SPRN_TBWU, 0);
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
/* Enable decrementer interrupt */
mtspr(SPRN_TCR, TCR_DIE);
}
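/*
 * Worked numbers, illustrative only (not taken from the patch): with a
 * hypothetical 333MHz bus clock and HZ=1000, the arithmetic above gives
 * tb_ticks_per_jiffy = 41666.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bus_freq = 333333333;	/* assumed bi_busfreq */
	unsigned int hz = 1000;			/* assumed HZ */
	unsigned int divisor = 8;		/* timebase = bus clock / 8 */

	printf("tb_ticks_per_jiffy = %u\n", bus_freq / divisor / hz);
	return 0;
}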
#ifdef CONFIG_SERIAL_8250
void __init
mpc85xx_early_serial_map(void)
{
struct uart_port serial_req;
bd_t *binfo = (bd_t *) __res;
phys_addr_t duart_paddr = binfo->bi_immr_base + MPC85xx_UART0_OFFSET;
/* Setup serial port access */
memset(&serial_req, 0, sizeof (serial_req));
serial_req.uartclk = binfo->bi_busfreq;
serial_req.line = 0;
serial_req.irq = MPC85xx_IRQ_DUART;
serial_req.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
serial_req.iotype = SERIAL_IO_MEM;
serial_req.membase = ioremap(duart_paddr, MPC85xx_UART0_SIZE);
serial_req.mapbase = duart_paddr;
serial_req.regshift = 0;
#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
gen550_init(0, &serial_req);
#endif
if (early_serial_setup(&serial_req) != 0)
printk("Early serial init of port 0 failed\n");
/* Assume early_serial_setup() doesn't modify serial_req */
duart_paddr = binfo->bi_immr_base + MPC85xx_UART1_OFFSET;
serial_req.line = 1;
serial_req.mapbase = duart_paddr;
serial_req.membase = ioremap(duart_paddr, MPC85xx_UART1_SIZE);
#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
gen550_init(1, &serial_req);
#endif
if (early_serial_setup(&serial_req) != 0)
printk("Early serial init of port 1 failed\n");
}
#endif
void
mpc85xx_restart(char *cmd)
{
local_irq_disable();
abort();
}
void
mpc85xx_power_off(void)
{
local_irq_disable();
for(;;);
}
void
mpc85xx_halt(void)
{
local_irq_disable();
for(;;);
}
#ifdef CONFIG_PCI
static void __init
mpc85xx_setup_pci1(struct pci_controller *hose)
{
volatile struct ccsr_pci *pci;
volatile struct ccsr_guts *guts;
unsigned short temps;
bd_t *binfo = (bd_t *) __res;
pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI1_OFFSET,
MPC85xx_PCI1_SIZE);
guts = ioremap(binfo->bi_immr_base + MPC85xx_GUTS_OFFSET,
MPC85xx_GUTS_SIZE);
early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps);
temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
early_write_config_word(hose, 0, 0, PCI_COMMAND, temps);
#define PORDEVSR_PCI (0x00800000) /* PCI Mode */
if (guts->pordevsr & PORDEVSR_PCI) {
early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
} else {
/* PCI-X init */
temps = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
early_write_config_word(hose, 0, 0, PCIX_COMMAND, temps);
}
/* Disable all windows (except powar0 since it's ignored) */
pci->powar1 = 0;
pci->powar2 = 0;
pci->powar3 = 0;
pci->powar4 = 0;
pci->piwar1 = 0;
pci->piwar2 = 0;
pci->piwar3 = 0;
/* Setup 512M Phys:PCI 1:1 outbound mem window @ 0x80000000 */
pci->potar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
pci->potear1 = 0x00000000;
pci->powbar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */
/* Setup 16M outbound IO window @ 0xe2000000 */
pci->potar2 = 0x00000000;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff;
pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */
/* Setup 2G inbound Memory Window @ 0 */
pci->pitar1 = 0x00000000;
pci->piwbar1 = 0x00000000;
pci->piwar1 = 0xa0f5501e; /* Enable, Prefetch, Local
Mem, Snoop R/W, 2G */
}
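/*
 * Side note with an assumed field layout, not stated by this patch: the
 * low bits of the window attribute values above (0x8004401c, 0x80088017,
 * 0xa0f5501e) appear to encode the window size as log2(size) - 1, which
 * is consistent with the 512M, 16M and 2G sizes quoted in the comments.
 */
#include <assert.h>

static unsigned int atmu_size_field(unsigned long long size)
{
	unsigned int log2 = 0;

	while ((1ULL << log2) < size)
		log2++;
	return log2 - 1;
}

int main(void)
{
	assert(atmu_size_field(512ULL << 20) == 0x1c);	/* ...401c: 512M */
	assert(atmu_size_field(16ULL << 20) == 0x17);	/* ...8017: 16M  */
	assert(atmu_size_field(2ULL << 30) == 0x1e);	/* ...501e: 2G   */
	return 0;
}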
extern int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin);
extern int mpc85xx_exclude_device(u_char bus, u_char devfn);
#ifdef CONFIG_85xx_PCI2
static void __init
mpc85xx_setup_pci2(struct pci_controller *hose)
{
volatile struct ccsr_pci *pci;
unsigned short temps;
bd_t *binfo = (bd_t *) __res;
pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI2_OFFSET,
MPC85xx_PCI2_SIZE);
early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps);
temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
early_write_config_word(hose, 0, 0, PCI_COMMAND, temps);
early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
/* Disable all windows (except powar0 since it's ignored) */
pci->powar1 = 0;
pci->powar2 = 0;
pci->powar3 = 0;
pci->powar4 = 0;
pci->piwar1 = 0;
pci->piwar2 = 0;
pci->piwar3 = 0;
/* Setup 512M Phys:PCI 1:1 outbound mem window @ 0xa0000000 */
pci->potar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
pci->potear1 = 0x00000000;
pci->powbar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */
/* Setup 16M outbound IO window @ 0xe3000000 */
pci->potar2 = 0x00000000;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */
/* Setup 2G inbound Memory Window @ 0 */
pci->pitar1 = 0x00000000;
pci->piwbar1 = 0x00000000;
pci->piwar1 = 0xa0f5501e; /* Enable, Prefetch, Local
Mem, Snoop R/W, 2G */
}
#endif /* CONFIG_85xx_PCI2 */
void __init
mpc85xx_setup_hose(void)
{
struct pci_controller *hose_a;
#ifdef CONFIG_85xx_PCI2
struct pci_controller *hose_b;
#endif
bd_t *binfo = (bd_t *) __res;
hose_a = pcibios_alloc_controller();
if (!hose_a)
return;
ppc_md.pci_swizzle = common_swizzle;
ppc_md.pci_map_irq = mpc85xx_map_irq;
hose_a->first_busno = 0;
hose_a->bus_offset = 0;
hose_a->last_busno = 0xff;
setup_indirect_pci(hose_a, binfo->bi_immr_base + PCI1_CFG_ADDR_OFFSET,
binfo->bi_immr_base + PCI1_CFG_DATA_OFFSET);
hose_a->set_cfg_type = 1;
mpc85xx_setup_pci1(hose_a);
hose_a->pci_mem_offset = MPC85XX_PCI1_MEM_OFFSET;
hose_a->mem_space.start = MPC85XX_PCI1_LOWER_MEM;
hose_a->mem_space.end = MPC85XX_PCI1_UPPER_MEM;
hose_a->io_space.start = MPC85XX_PCI1_LOWER_IO;
hose_a->io_space.end = MPC85XX_PCI1_UPPER_IO;
hose_a->io_base_phys = MPC85XX_PCI1_IO_BASE;
#ifdef CONFIG_85xx_PCI2
isa_io_base =
(unsigned long) ioremap(MPC85XX_PCI1_IO_BASE,
MPC85XX_PCI1_IO_SIZE +
MPC85XX_PCI2_IO_SIZE);
#else
isa_io_base =
(unsigned long) ioremap(MPC85XX_PCI1_IO_BASE,
MPC85XX_PCI1_IO_SIZE);
#endif
hose_a->io_base_virt = (void *) isa_io_base;
/* setup resources */
pci_init_resource(&hose_a->mem_resources[0],
MPC85XX_PCI1_LOWER_MEM,
MPC85XX_PCI1_UPPER_MEM,
IORESOURCE_MEM, "PCI1 host bridge");
pci_init_resource(&hose_a->io_resource,
MPC85XX_PCI1_LOWER_IO,
MPC85XX_PCI1_UPPER_IO,
IORESOURCE_IO, "PCI1 host bridge");
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
hose_a->last_busno = pciauto_bus_scan(hose_a, hose_a->first_busno);
#ifdef CONFIG_85xx_PCI2
hose_b = pcibios_alloc_controller();
if (!hose_b)
return;
hose_b->bus_offset = hose_a->last_busno + 1;
hose_b->first_busno = hose_a->last_busno + 1;
hose_b->last_busno = 0xff;
setup_indirect_pci(hose_b, binfo->bi_immr_base + PCI2_CFG_ADDR_OFFSET,
binfo->bi_immr_base + PCI2_CFG_DATA_OFFSET);
hose_b->set_cfg_type = 1;
mpc85xx_setup_pci2(hose_b);
hose_b->pci_mem_offset = MPC85XX_PCI2_MEM_OFFSET;
hose_b->mem_space.start = MPC85XX_PCI2_LOWER_MEM;
hose_b->mem_space.end = MPC85XX_PCI2_UPPER_MEM;
hose_b->io_space.start = MPC85XX_PCI2_LOWER_IO;
hose_b->io_space.end = MPC85XX_PCI2_UPPER_IO;
hose_b->io_base_phys = MPC85XX_PCI2_IO_BASE;
hose_b->io_base_virt = (void *) isa_io_base + MPC85XX_PCI1_IO_SIZE;
/* setup resources */
pci_init_resource(&hose_b->mem_resources[0],
MPC85XX_PCI2_LOWER_MEM,
MPC85XX_PCI2_UPPER_MEM,
IORESOURCE_MEM, "PCI2 host bridge");
pci_init_resource(&hose_b->io_resource,
MPC85XX_PCI2_LOWER_IO,
MPC85XX_PCI2_UPPER_IO,
IORESOURCE_IO, "PCI2 host bridge");
hose_b->last_busno = pciauto_bus_scan(hose_b, hose_b->first_busno);
#endif
return;
}
#endif /* CONFIG_PCI */
/*
* arch/ppc/syslib/ppc85xx_setup.h
*
* MPC85XX common board definitions
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __PPC_SYSLIB_PPC85XX_SETUP_H
#define __PPC_SYSLIB_PPC85XX_SETUP_H
#include <linux/config.h>
#include <linux/serial.h>
#include <linux/init.h>
#include <asm/ppcboot.h>
extern unsigned long mpc85xx_find_end_of_memory(void) __init;
extern void mpc85xx_calibrate_decr(void) __init;
extern void mpc85xx_early_serial_map(void) __init;
extern void mpc85xx_restart(char *cmd);
extern void mpc85xx_power_off(void);
extern void mpc85xx_halt(void);
extern void mpc85xx_setup_hose(void) __init;
/* PCI config */
#define PCI1_CFG_ADDR_OFFSET (0x8000)
#define PCI1_CFG_DATA_OFFSET (0x8004)
#define PCI2_CFG_ADDR_OFFSET (0x9000)
#define PCI2_CFG_DATA_OFFSET (0x9004)
/* Additional register for PCI-X configuration */
#define PCIX_NEXT_CAP 0x60
#define PCIX_CAP_ID 0x61
#define PCIX_COMMAND 0x62
#define PCIX_STATUS 0x64
/* Serial Config */
#define MPC85XX_0_SERIAL (CCSRBAR + 0x4500)
#define MPC85XX_1_SERIAL (CCSRBAR + 0x4600)
#ifdef CONFIG_SERIAL_MANY_PORTS
#define RS_TABLE_SIZE 64
#else
#define RS_TABLE_SIZE 2
#endif
#define BASE_BAUD 0
#define STD_UART_OP(num) \
{ 0, BASE_BAUD, num, MPC85xx_IRQ_DUART, \
(ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST), \
iomem_base: (u8 *)MPC85XX_##num##_SERIAL, \
io_type: SERIAL_IO_MEM},
/* Offset of CPM register space */
#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
#endif /* __PPC_SYSLIB_PPC85XX_SETUP_H */
......@@ -52,6 +52,7 @@
#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
#define ELF_NFPREG 33 /* includes fpscr */
#define ELF_NVRREG 33 /* includes vscr */
#define ELF_NEVRREG 34 /* includes acc (as 2) */
/*
* These are used to set parameters in the core dumps.
......
/*
* include/asm-ppc/fsl_ocp.h
*
* Definitions for the on-chip peripherals on Freescale PPC processors
*
* Maintainer: Kumar Gala (kumar.gala@freescale.com)
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifdef __KERNEL__
#ifndef __ASM_FS_OCP_H__
#define __ASM_FS_OCP_H__
/* A table of information for supporting the Gianfar Ethernet Controller
* This helps identify which enet controller we are dealing with,
* and what type of enet controller it is
*/
struct ocp_gfar_data {
uint interruptTransmit;
uint interruptError;
uint interruptReceive;
uint interruptPHY;
uint flags;
uint phyid;
uint phyregidx;
unsigned char mac_addr[6];
};
/* Flags in the flags field */
#define GFAR_HAS_COALESCE 0x20
#define GFAR_HAS_RMON 0x10
#define GFAR_HAS_MULTI_INTR 0x08
#define GFAR_FIRM_SET_MACADDR 0x04
#define GFAR_HAS_PHY_INTR 0x02 /* if not set use a timer */
#define GFAR_HAS_GIGABIT 0x01
/* Data structure for I2C support.  Just contains a couple of flags
 * to distinguish various I2C implementations */
struct ocp_fs_i2c_data {
uint flags;
};
/* Flags for I2C */
#define FS_I2C_SEPARATE_DFSRR 0x02
#define FS_I2C_32BIT 0x01
#endif /* __ASM_FS_OCP_H__ */
#endif /* __KERNEL__ */
/*
* include/asm-ppc/immap_85xx.h
*
* MPC85xx Internal Memory Map
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifdef __KERNEL__
#ifndef __ASM_IMMAP_85XX_H__
#define __ASM_IMMAP_85XX_H__
/* Eventually this should define all the IO block registers in 85xx */
/* PCI Registers */
typedef struct ccsr_pci {
uint cfg_addr; /* 0x.000 - PCI Configuration Address Register */
uint cfg_data; /* 0x.004 - PCI Configuration Data Register */
uint int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */
char res1[3060];
uint potar0; /* 0x.c00 - PCI Outbound Transaction Address Register 0 */
uint potear0; /* 0x.c04 - PCI Outbound Translation Extended Address Register 0 */
uint powbar0; /* 0x.c08 - PCI Outbound Window Base Address Register 0 */
char res2[4];
uint powar0; /* 0x.c10 - PCI Outbound Window Attributes Register 0 */
char res3[12];
uint potar1; /* 0x.c20 - PCI Outbound Transaction Address Register 1 */
uint potear1; /* 0x.c24 - PCI Outbound Translation Extended Address Register 1 */
uint powbar1; /* 0x.c28 - PCI Outbound Window Base Address Register 1 */
char res4[4];
uint powar1; /* 0x.c30 - PCI Outbound Window Attributes Register 1 */
char res5[12];
uint potar2; /* 0x.c40 - PCI Outbound Transaction Address Register 2 */
uint potear2; /* 0x.c44 - PCI Outbound Translation Extended Address Register 2 */
uint powbar2; /* 0x.c48 - PCI Outbound Window Base Address Register 2 */
char res6[4];
uint powar2; /* 0x.c50 - PCI Outbound Window Attributes Register 2 */
char res7[12];
uint potar3; /* 0x.c60 - PCI Outbound Transaction Address Register 3 */
uint potear3; /* 0x.c64 - PCI Outbound Translation Extended Address Register 3 */
uint powbar3; /* 0x.c68 - PCI Outbound Window Base Address Register 3 */
char res8[4];
uint powar3; /* 0x.c70 - PCI Outbound Window Attributes Register 3 */
char res9[12];
uint potar4; /* 0x.c80 - PCI Outbound Transaction Address Register 4 */
uint potear4; /* 0x.c84 - PCI Outbound Translation Extended Address Register 4 */
uint powbar4; /* 0x.c88 - PCI Outbound Window Base Address Register 4 */
char res10[4];
uint powar4; /* 0x.c90 - PCI Outbound Window Attributes Register 4 */
char res11[268];
uint pitar3; /* 0x.da0 - PCI Inbound Translation Address Register 3 */
char res12[4];
uint piwbar3; /* 0x.da8 - PCI Inbound Window Base Address Register 3 */
uint piwbear3; /* 0x.dac - PCI Inbound Window Base Extended Address Register 3 */
uint piwar3; /* 0x.db0 - PCI Inbound Window Attributes Register 3 */
char res13[12];
uint pitar2; /* 0x.dc0 - PCI Inbound Translation Address Register 2 */
char res14[4];
uint piwbar2; /* 0x.dc8 - PCI Inbound Window Base Address Register 2 */
uint piwbear2; /* 0x.dcc - PCI Inbound Window Base Extended Address Register 2 */
uint piwar2; /* 0x.dd0 - PCI Inbound Window Attributes Register 2 */
char res15[12];
uint pitar1; /* 0x.de0 - PCI Inbound Translation Address Register 1 */
char res16[4];
uint piwbar1; /* 0x.de8 - PCI Inbound Window Base Address Register 1 */
char res17[4];
uint piwar1; /* 0x.df0 - PCI Inbound Window Attributes Register 1 */
char res18[12];
uint err_dr; /* 0x.e00 - PCI Error Detect Register */
uint err_cap_dr; /* 0x.e04 - PCI Error Capture Disable Register */
uint err_en; /* 0x.e08 - PCI Error Enable Register */
uint err_attrib; /* 0x.e0c - PCI Error Attributes Capture Register */
uint err_addr; /* 0x.e10 - PCI Error Address Capture Register */
uint err_ext_addr; /* 0x.e14 - PCI Error Extended Address Capture Register */
uint err_dl; /* 0x.e18 - PCI Error Data Low Capture Register */
uint err_dh; /* 0x.e1c - PCI Error Data High Capture Register */
uint gas_timr; /* 0x.e20 - PCI Gasket Timer Register */
uint pci_timr; /* 0x.e24 - PCI Timer Register */
char res19[472];
} ccsr_pci_t;
/* Global Utility Registers */
typedef struct ccsr_guts {
uint porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
uint porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
uint porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
uint pordevsr; /* 0x.000c - POR I/O Device Status Register */
uint pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
char res1[12];
uint gpporcr; /* 0x.0020 - General-Purpose POR Configuration Register */
char res2[12];
uint gpiocr; /* 0x.0030 - GPIO Control Register */
char res3[12];
uint gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
char res4[12];
uint gpindr; /* 0x.0050 - General-Purpose Input Data Register */
char res5[12];
uint pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
char res6[12];
uint devdisr; /* 0x.0070 - Device Disable Control */
char res7[12];
uint powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
char res8[12];
uint mcpsumr; /* 0x.0090 - Machine Check Summary Register */
char res9[12];
uint pvr; /* 0x.00a0 - Processor Version Register */
uint svr; /* 0x.00a4 - System Version Register */
char res10[3416];
uint clkocr; /* 0x.0e00 - Clock Out Select Register */
char res11[12];
uint ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
char res12[12];
uint lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
char res13[61916];
} ccsr_guts_t;
#endif /* __ASM_IMMAP_85XX_H__ */
#endif /* __KERNEL__ */
......@@ -30,6 +30,8 @@
#include <asm/mpc8xx.h>
#elif defined(CONFIG_8260)
#include <asm/mpc8260.h>
#elif defined(CONFIG_85xx)
#include <asm/mpc85xx.h>
#elif defined(CONFIG_APUS)
#define _IO_BASE 0
#define _ISA_MEM_BASE 0
......
......@@ -379,6 +379,74 @@ typedef struct _P601_BAT {
#define PPC44x_TLB_SW 0x00000002 /* Super write */
#define PPC44x_TLB_SR 0x00000001 /* Super read */
/* Book-E defined page sizes */
#define BOOKE_PAGESZ_1K 0
#define BOOKE_PAGESZ_4K 1
#define BOOKE_PAGESZ_16K 2
#define BOOKE_PAGESZ_64K 3
#define BOOKE_PAGESZ_256K 4
#define BOOKE_PAGESZ_1M 5
#define BOOKE_PAGESZ_4M 6
#define BOOKE_PAGESZ_16M 7
#define BOOKE_PAGESZ_64M 8
#define BOOKE_PAGESZ_256M 9
#define BOOKE_PAGESZ_1GB 10
#define BOOKE_PAGESZ_4GB 11
#define BOOKE_PAGESZ_16GB 12
#define BOOKE_PAGESZ_64GB 13
#define BOOKE_PAGESZ_256GB 14
#define BOOKE_PAGESZ_1TB 15
/*
* Freescale Book-E MMU support
*/
#define MAS0_TLBSEL 0x10000000
#define MAS0_ESEL 0x000F0000
#define MAS0_NV 0x00000001
#define MAS1_VALID 0x80000000
#define MAS1_IPROT 0x40000000
#define MAS1_TID 0x03FF0000
#define MAS1_TS 0x00001000
#define MAS1_TSIZE(x) (x << 8)
#define MAS2_EPN 0xFFFFF000
#define MAS2_SHAREN 0x00000200
#define MAS2_X0 0x00000040
#define MAS2_X1 0x00000020
#define MAS2_W 0x00000010
#define MAS2_I 0x00000008
#define MAS2_M 0x00000004
#define MAS2_G 0x00000002
#define MAS2_E 0x00000001
#define MAS3_RPN 0xFFFFF000
#define MAS3_U0 0x00000200
#define MAS3_U1 0x00000100
#define MAS3_U2 0x00000080
#define MAS3_U3 0x00000040
#define MAS3_UX 0x00000020
#define MAS3_SX 0x00000010
#define MAS3_UW 0x00000008
#define MAS3_SW 0x00000004
#define MAS3_UR 0x00000002
#define MAS3_SR 0x00000001
#define MAS4_TLBSELD 0x10000000
#define MAS4_TIDDSEL 0x00030000
#define MAS4_DSHAREN 0x00001000
#define MAS4_TSIZED(x) (x << 8)
#define MAS4_X0D 0x00000040
#define MAS4_X1D 0x00000020
#define MAS4_WD 0x00000010
#define MAS4_ID 0x00000008
#define MAS4_MD 0x00000004
#define MAS4_GD 0x00000002
#define MAS4_ED 0x00000001
#define MAS6_SPID 0x00FF0000
#define MAS6_SAS 0x00000001
#endif /* _PPC_MMU_H_ */
#endif /* __KERNEL__ */
......@@ -63,6 +63,11 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#define LAST_CONTEXT 255
#define FIRST_CONTEXT 1
#elif defined(CONFIG_E500)
#define NO_CONTEXT 256
#define LAST_CONTEXT 255
#define FIRST_CONTEXT 1
#else
/* PPC 6xx, 7xx CPUs */
......
/*
* include/asm-ppc/mpc85xx.h
*
* MPC85xx definitions
*
* Maintainer: Kumar Gala <kumar.gala@freescale.com>
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifdef __KERNEL__
#ifndef __ASM_MPC85xx_H__
#define __ASM_MPC85xx_H__
#include <linux/config.h>
#include <asm/mmu.h>
#ifdef CONFIG_85xx
#ifdef CONFIG_MPC8540_ADS
#include <platforms/85xx/mpc8540_ads.h>
#endif
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
#define PCI_DRAM_OFFSET pci_dram_offset
/*
* The "residual" board information structure the boot loader passes
* into the kernel.
*/
extern unsigned char __res[];
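/*
 * Illustrative sketch (not part of this patch): platform code typically
 * reads the boot loader data by casting __res to the bd_t laid out in
 * <asm/ppcboot.h>; the helper name here is made up for the example.
 */
static inline unsigned long example_immr_base(void)
{
	bd_t *binfo = (bd_t *) __res;

	return binfo->bi_immr_base;	/* CCSR/IMMR base handed over by the boot loader */
}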
/* Internal IRQs on MPC85xx OpenPIC */
/* Not all of these exist on all MPC85xx implementations */
#ifndef MPC85xx_OPENPIC_IRQ_OFFSET
#define MPC85xx_OPENPIC_IRQ_OFFSET 64
#endif
/* The 32 internal sources */
#define MPC85xx_IRQ_L2CACHE ( 0 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_ECM ( 1 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_DDR ( 2 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_LBIU ( 3 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_DMA0 ( 4 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_DMA1 ( 5 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_DMA2 ( 6 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_DMA3 ( 7 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_PCI1 ( 8 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_PCI2 ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_RIO_ERROR ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_RIO_BELL (10 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_RIO_TX (11 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_RIO_RX (12 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_TSEC1_TX (13 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_TSEC1_RX (14 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_TSEC1_ERROR (18 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_TSEC2_TX (19 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_TSEC2_RX (20 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_TSEC2_ERROR (24 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_FEC (25 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_DUART (26 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_IIC1 (27 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_PERFMON (28 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_CPM (30 + MPC85xx_OPENPIC_IRQ_OFFSET)
/* The 12 external interrupt lines */
#define MPC85xx_IRQ_EXT0 (32 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT1 (33 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT2 (34 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT3 (35 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT4 (36 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT5 (37 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT6 (38 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT7 (39 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT8 (40 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT9 (41 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT10 (42 + MPC85xx_OPENPIC_IRQ_OFFSET)
#define MPC85xx_IRQ_EXT11 (43 + MPC85xx_OPENPIC_IRQ_OFFSET)
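/*
 * Illustrative note (not part of this patch): the internal and external
 * sources above are remapped behind MPC85xx_OPENPIC_IRQ_OFFSET, presumably
 * to leave the low numbers free for other interrupt controllers.  With the
 * default offset of 64, MPC85xx_IRQ_DUART evaluates to 26 + 64 = 90, and
 * that is the number a driver would hand to request_irq().
 */
static const unsigned int example_duart_irq = MPC85xx_IRQ_DUART;	/* = 90 by default */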
/* Offset from CCSRBAR */
#define MPC85xx_CPM_OFFSET (0x80000)
#define MPC85xx_CPM_SIZE (0x40000)
#define MPC85xx_DMA_OFFSET (0x21000)
#define MPC85xx_DMA_SIZE (0x01000)
#define MPC85xx_ENET1_OFFSET (0x24000)
#define MPC85xx_ENET1_SIZE (0x01000)
#define MPC85xx_ENET2_OFFSET (0x25000)
#define MPC85xx_ENET2_SIZE (0x01000)
#define MPC85xx_ENET3_OFFSET (0x26000)
#define MPC85xx_ENET3_SIZE (0x01000)
#define MPC85xx_GUTS_OFFSET (0xe0000)
#define MPC85xx_GUTS_SIZE (0x01000)
#define MPC85xx_IIC1_OFFSET (0x03000)
#define MPC85xx_IIC1_SIZE (0x01000)
#define MPC85xx_OPENPIC_OFFSET (0x40000)
#define MPC85xx_OPENPIC_SIZE (0x40000)
#define MPC85xx_PCI1_OFFSET (0x08000)
#define MPC85xx_PCI1_SIZE (0x01000)
#define MPC85xx_PCI2_OFFSET (0x09000)
#define MPC85xx_PCI2_SIZE (0x01000)
#define MPC85xx_PERFMON_OFFSET (0xe1000)
#define MPC85xx_PERFMON_SIZE (0x01000)
#define MPC85xx_UART0_OFFSET (0x04500)
#define MPC85xx_UART0_SIZE (0x00100)
#define MPC85xx_UART1_OFFSET (0x04600)
#define MPC85xx_UART1_SIZE (0x00100)
#define MPC85xx_CCSRBAR_SIZE (1024*1024)
/* Let modules/drivers get at CCSRBAR */
extern phys_addr_t get_ccsrbar(void);
#ifdef MODULE
#define CCSRBAR get_ccsrbar()
#else
#define CCSRBAR BOARD_CCSRBAR
#endif
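/*
 * Illustrative sketch (not part of this patch): a driver locating the first
 * DUART inside the CCSR block.  ioremap() comes from <asm/io.h>; the
 * function name is made up for the example.
 */
static inline void *example_map_uart0(void)
{
	/* CCSRBAR expands to get_ccsrbar() in modules, BOARD_CCSRBAR otherwise */
	return ioremap(CCSRBAR + MPC85xx_UART0_OFFSET, MPC85xx_UART0_SIZE);
}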
#endif /* CONFIG_85xx */
#endif /* __ASM_MPC85xx_H__ */
#endif /* __KERNEL__ */
......@@ -202,6 +202,10 @@ static DEVICE_ATTR(name##_##field, S_IRUGO, show_##name##_##field, NULL);
#include <asm/ibm_ocp.h>
#endif
#ifdef CONFIG_FSL_OCP
#include <asm/fsl_ocp.h>
#endif
#endif /* CONFIG_PPC_OCP */
#endif /* __OCP_H__ */
#endif /* __KERNEL__ */
......@@ -45,6 +45,7 @@
/* Memory devices 0x0090 - 0x009F */
#define OCP_FUNC_MAL 0x0090
#define OCP_FUNC_DMA 0x0091
/* Display 0x00A0 - 0x00AF */
......@@ -62,7 +63,7 @@
/* Network 0x0200 - 0x02FF */
#define OCP_FUNC_EMAC 0x0200
#define OCP_FUNC_ENET 0x0201 /* TSEC & FEC */
#define OCP_FUNC_GFAR 0x0201 /* TSEC & FEC */
/* Bridge devices 0xE00 - 0xEFF */
#define OCP_FUNC_OPB 0x0E00
......
......@@ -222,6 +222,43 @@ extern unsigned long ioremap_bot, ioremap_base;
/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK 0xffffffff00000000ULL
#elif defined(CONFIG_E500)
/*
MMU Assist Register 3:
32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR
- PRESENT *must* be in the bottom three bits because swap cache
entries use the top 29 bits.
- FILE *must* be in the bottom three bits because swap cache
entries use the top 29 bits.
*/
/* Definitions for e500 core */
#define _PAGE_PRESENT 0x001 /* S: PTE contains a translation */
#define _PAGE_USER 0x002 /* S: User page (maps to UR) */
#define _PAGE_FILE 0x002 /* S: when !present: nonlinear file mapping */
#define _PAGE_ACCESSED 0x004 /* S: Page referenced */
#define _PAGE_HWWRITE 0x008 /* H: Dirty & RW, set in exception */
#define _PAGE_RW 0x010 /* S: Write permission */
#define _PAGE_HWEXEC 0x020 /* H: UX permission */
#define _PAGE_ENDIAN 0x040 /* H: E bit */
#define _PAGE_GUARDED 0x080 /* H: G bit */
#define _PAGE_COHERENT 0x100 /* H: M bit */
#define _PAGE_NO_CACHE 0x200 /* H: I bit */
#define _PAGE_WRITETHRU 0x400 /* H: W bit */
#define _PAGE_DIRTY 0x800 /* S: Page dirty */
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
#define NUM_TLBCAMS (16)
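/*
 * Illustrative sketch (not part of this patch): how the software-visible
 * flags above might combine for a cached, coherent kernel read/write page.
 * The EXAMPLE_ name is made up; the real _PAGE_KERNEL composition lives
 * elsewhere in pgtable.h.
 */
#define EXAMPLE_PAGE_KERNEL_RW \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_COHERENT)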
#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT 0x0001 /* Page is valid */
......
......@@ -59,6 +59,20 @@
#define REST_16VR(n,b,base) REST_8VR(n,b,base); REST_8VR(n+8,b,base)
#define REST_32VR(n,b,base) REST_16VR(n,b,base); REST_16VR(n+16,b,base)
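/*
 * Descriptive note (not part of the original patch): SPE instructions treat
 * the GPRs as 64 bits wide, but the normal integer save code only stores the
 * low 32 bits.  SAVE_EVR uses evmergehi to copy the high word of GPR n into
 * the low word of scratch register s so a plain stw can store it at
 * thread->evr[n]; REST_EVR reverses this with lwz followed by evmergelo.
 */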
#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
#define SAVE_2EVR(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
#define SAVE_4EVR(n,s,base) SAVE_2EVR(n,s,base); SAVE_2EVR(n+2,s,base)
#define SAVE_8EVR(n,s,base) SAVE_4EVR(n,s,base); SAVE_4EVR(n+4,s,base)
#define SAVE_16EVR(n,s,base) SAVE_8EVR(n,s,base); SAVE_8EVR(n+8,s,base)
#define SAVE_32EVR(n,s,base) SAVE_16EVR(n,s,base); SAVE_16EVR(n+16,s,base)
#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
#define REST_2EVR(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
#define REST_4EVR(n,s,base) REST_2EVR(n,s,base); REST_2EVR(n+2,s,base)
#define REST_8EVR(n,s,base) REST_4EVR(n,s,base); REST_4EVR(n+4,s,base)
#define REST_16EVR(n,s,base) REST_8EVR(n,s,base); REST_8EVR(n+8,s,base)
#define REST_32EVR(n,s,base) REST_16EVR(n,s,base); REST_16EVR(n+16,s,base)
#ifdef CONFIG_PPC601_SYNC_FIX
#define SYNC \
BEGIN_FTR_SECTION \
......@@ -107,7 +121,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
bdnz 0b
#endif
#if !defined(CONFIG_44x)
#ifdef CONFIG_BOOKE
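/* Book-E cores have no untranslated (real) mode, so the low-level code always
 * runs with the kernel mapping in place; tophys/tovirt therefore degenerate
 * to a plain register copy (addis with a zero immediate). */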
#define tophys(rd,rs) \
addis rd,rs,0
#define tovirt(rd,rs) \
addis rd,rs,0
#else /* CONFIG_BOOKE */
/*
* On APUS (Amiga PowerPC cpu upgrade board), we don't know the
* physical base address of RAM at compile time.
......@@ -125,15 +146,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
.align 1; \
.long 0b; \
.previous
#else /* CONFIG_44x */
#define tophys(rd,rs) \
mr rd,rs
#define tovirt(rd,rs) \
mr rd,rs
#endif /* CONFIG_44x */
#endif /* CONFIG_BOOKE */
/*
* On 64-bit cpus, we use the rfid instruction instead of rfi, but
......@@ -289,6 +302,39 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define vr30 30
#define vr31 31
#define evr0 0
#define evr1 1
#define evr2 2
#define evr3 3
#define evr4 4
#define evr5 5
#define evr6 6
#define evr7 7
#define evr8 8
#define evr9 9
#define evr10 10
#define evr11 11
#define evr12 12
#define evr13 13
#define evr14 14
#define evr15 15
#define evr16 16
#define evr17 17
#define evr18 18
#define evr19 19
#define evr20 20
#define evr21 21
#define evr22 22
#define evr23 23
#define evr24 24
#define evr25 25
#define evr26 26
#define evr27 27
#define evr28 28
#define evr29 29
#define evr30 30
#define evr31 31
/* some stab codes */
#define N_FUN 36
#define N_RSYM 64
......
......@@ -52,7 +52,7 @@ typedef struct bd_info {
unsigned long bi_flashoffset; /* reserved area for startup monitor */
unsigned long bi_sramstart; /* start of SRAM memory */
unsigned long bi_sramsize; /* size of SRAM memory */
#if defined(CONFIG_8xx) || defined(CONFIG_8260)
#if defined(CONFIG_8xx) || defined(CONFIG_8260) || defined(CONFIG_85xx)
unsigned long bi_immr_base; /* base of IMMR register */
#endif
unsigned long bi_bootflags; /* boot / reboot flag (for LynxOS) */
......@@ -79,7 +79,7 @@ typedef struct bd_info {
#if defined(CONFIG_HYMOD)
hymod_conf_t bi_hymod_conf; /* hymod configuration information */
#endif
#if defined(CONFIG_EVB64260)
#if defined(CONFIG_EVB64260) || defined(CONFIG_85xx)
/* the board has three onboard ethernet ports */
unsigned char bi_enet1addr[6];
unsigned char bi_enet2addr[6];
......
......@@ -40,6 +40,14 @@
.globl n;\
n:
/*
* this is the minimum allowable io space due to the location
* of the io areas on prep (first one at 0x80000000) but
* as soon as I get around to remapping the io areas with the BATs
* to match the mac we can raise this. -- Cort
*/
#define TASK_SIZE (CONFIG_TASK_SIZE)
#ifndef __ASSEMBLY__
#ifdef CONFIG_PPC_MULTIPLATFORM
extern int _machine;
......@@ -79,14 +87,7 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;
/*
* this is the minimum allowable io space due to the location
* of the io areas on prep (first one at 0x80000000) but
* as soon as I get around to remapping the io areas with the BATs
* to match the mac we can raise this. -- Cort
*/
#define TASK_SIZE (CONFIG_TASK_SIZE)
extern struct task_struct *last_task_used_spe;
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
......@@ -104,7 +105,7 @@ struct thread_struct {
void *pgdir; /* root of page-table tree */
int fpexc_mode; /* floating-point exception mode */
signed long last_syscall;
#ifdef CONFIG_4xx
#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
unsigned long dbcr0; /* debug control register values */
unsigned long dbcr1;
#endif
......@@ -119,6 +120,12 @@ struct thread_struct {
unsigned long vrsave;
int used_vr; /* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
unsigned long evr[32]; /* upper 32-bits of SPE regs */
u64 acc; /* Accumulator */
unsigned long spefscr; /* SPE & eFP status */
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
};
#define ARCH_MIN_TASKALIGN 16
......
......@@ -131,4 +131,9 @@ do { \
#define PTRACE_GETVRREGS 18
#define PTRACE_SETVRREGS 19
/* Get/set the upper 32 bits of all the SPE registers, the accumulator, and
 * spefscr, in one go */
#define PTRACE_GETEVRREGS 20
#define PTRACE_SETEVRREGS 21
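/*
 * Illustrative sketch (not part of this patch): a debugger fetching the SPE
 * state in one call.  The 35-word buffer layout (evr[0..31], the 64-bit
 * accumulator, then spefscr) is an assumption based on the thread_struct
 * fields this patch adds; userspace would pull the request constant from
 * <asm/ptrace.h> and ptrace() itself from <sys/ptrace.h>.
 */
static long example_fetch_spe_state(pid_t child, unsigned long buf[35])
{
	return ptrace(PTRACE_GETEVRREGS, child, 0, buf);
}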
#endif
......@@ -294,6 +294,9 @@
#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
#ifndef SPRN_SVR
#define SPRN_SVR 0x11E /* System Version Register */
#endif
#define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */
/* these bits were defined in inverted endian sense originally, ugh, confusing */
#define THRM1_TIN (1 << 31)
......@@ -400,6 +403,7 @@
#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
#define SRR2 SPRN_SRR2 /* Save and Restore Register 2 */
#define SRR3 SPRN_SRR3 /* Save and Restore Register 3 */
#define SVR SPRN_SVR /* System Version Register */
#define ICTC SPRN_ICTC /* Instruction Cache Throttling Control Reg */
#define THRM1 SPRN_THRM1 /* Thermal Management Register 1 */
#define THRM2 SPRN_THRM2 /* Thermal Management Register 2 */
......@@ -462,6 +466,8 @@
#define PVR_7400 0x000C0000
#define PVR_7410 0x800C0000
#define PVR_7450 0x80000000
#define PVR_8540 0x80200000
#define PVR_8560 0x80200000
/*
* For the 8xx processors, all of them report the same PVR family for
* the PowerPC core. The various versions of these processors must be
......@@ -476,6 +482,12 @@
#define PVR_8245 0x80811014
#define PVR_8260 PVR_8240
/* System Version Numbers */
#define SVR_8540 0x80300000
#define SVR_8541E 0x807A0000
#define SVR_8555E 0x80790000
#define SVR_8560 0x80700000
/* Segment Registers */
#define SR0 0
#define SR1 1
......
......@@ -43,13 +43,23 @@ do { \
mtdcr(base ## _CFGADDR, base ## _ ## reg); \
mtdcr(base ## _CFGDATA, data); \
} while (0)
/* Performance Monitor Registers */
#define mfpmr(rn) ({unsigned int rval; \
asm volatile("mfpmr %0," __stringify(rn) \
: "=r" (rval)); rval;})
#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
#endif /* __ASSEMBLY__ */
/* Machine State Register (MSR) Fields */
#define MSR_UCLE (1<<26) /* User-mode cache lock enable */
#define MSR_SPE (1<<25) /* Enable SPE */
#define MSR_DWE (1<<10) /* Debug Wait Enable */
#define MSR_UBLE (1<<10) /* BTB lock enable (e500) */
#define MSR_IS MSR_IR /* Instruction Space */
#define MSR_DS MSR_DR /* Data Space */
#define MSR_PMM (1<<2) /* Performance monitor mark bit */
/* Default MSR for kernel mode. */
#if defined (CONFIG_40x)
......@@ -91,20 +101,28 @@ do { \
#define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */
#define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */
#define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */
#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
#define SPRN_IVOR32 0x210 /* Interrupt Vector Offset Register 32 */
#define SPRN_IVOR33 0x211 /* Interrupt Vector Offset Register 33 */
#define SPRN_IVOR34 0x212 /* Interrupt Vector Offset Register 34 */
#define SPRN_IVOR35 0x213 /* Interrupt Vector Offset Register 35 */
#define SPRN_MCSRR0 0x23A /* Machine Check Save and Restore Register 0 */
#define SPRN_MCSRR1 0x23B /* Machine Check Save and Restore Register 1 */
#define SPRN_MCSR 0x23C /* Machine Check Status Register */
#ifdef CONFIG_440A
#define MCSR_MCS 0x80000000 /* Machine Check Summary */
#define MCSR_IB 0x40000000 /* Instruction PLB Error */
#define MCSR_DRB 0x20000000 /* Data Read PLB Error */
#define MCSR_DWB 0x10000000 /* Data Write PLB Error */
#define MCSR_TLBP 0x08000000 /* TLB Parity Error */
#define MCSR_ICP 0x04000000 /* I-Cache Parity Error */
#define MCSR_DCSP 0x02000000 /* D-Cache Search Parity Error */
#define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */
#define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */
#endif
#define SPRN_MCAR 0x23D /* Machine Check Address Register */
#define SPRN_MAS0 0x270 /* MMU Assist Register 0 */
#define SPRN_MAS1 0x271 /* MMU Assist Register 1 */
#define SPRN_MAS2 0x272 /* MMU Assist Register 2 */
#define SPRN_MAS3 0x273 /* MMU Assist Register 3 */
#define SPRN_MAS4 0x274 /* MMU Assist Register 4 */
#define SPRN_MAS5 0x275 /* MMU Assist Register 5 */
#define SPRN_MAS6 0x276 /* MMU Assist Register 6 */
#define SPRN_PID1 0x279 /* Process ID Register 1 */
#define SPRN_PID2 0x27A /* Process ID Register 2 */
#define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */
#define SPRN_TLB1CFG 0x2B1 /* TLB 1 Config Register */
#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */
#define SPRN_MMUCR 0x3B2 /* MMU Control Register */
#define SPRN_CCR0 0x3B3 /* Core Configuration Register */
......@@ -115,9 +133,12 @@ do { \
#define SPRN_DCMP 0x3D1 /* Data TLB Compare Register */
#define SPRN_ICDBDR 0x3D3 /* Instruction Cache Debug Data Register */
#define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */
#define SPRN_L1CSR0 0x3F2 /* L1 Cache Control and Status Register 0 */
#define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */
#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
#define SPRN_SVR 0x3FF /* System Version Register */
/*
* SPRs which have conflicting definitions on true Book E versus classic,
......@@ -125,6 +146,7 @@ do { \
*/
#ifdef CONFIG_BOOKE
#define SPRN_PID 0x030 /* Process ID */
#define SPRN_PID0 SPRN_PID /* Process ID Register 0 */
#define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */
#define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */
#define SPRN_DEAR 0x03D /* Data Error Address Register */
......@@ -157,6 +179,34 @@ do { \
#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */
#endif
/* Bit definitions for the MCSR. */
#ifdef CONFIG_440A
#define MCSR_MCS 0x80000000 /* Machine Check Summary */
#define MCSR_IB 0x40000000 /* Instruction PLB Error */
#define MCSR_DRB 0x20000000 /* Data Read PLB Error */
#define MCSR_DWB 0x10000000 /* Data Write PLB Error */
#define MCSR_TLBP 0x08000000 /* TLB Parity Error */
#define MCSR_ICP 0x04000000 /* I-Cache Parity Error */
#define MCSR_DCSP 0x02000000 /* D-Cache Search Parity Error */
#define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */
#define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */
#endif
#ifdef CONFIG_E500
#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
#define MCSR_DCP_PERR 0x20000000UL /* D-Cache Push Parity Error */
#define MCSR_DCPERR 0x10000000UL /* D-Cache Parity Error */
#define MCSR_GL_CI 0x00010000UL /* Guarded Load or Cache-Inhibited stwcx. */
#define MCSR_BUS_IAERR 0x00000080UL /* Instruction Address Error */
#define MCSR_BUS_RAERR 0x00000040UL /* Read Address Error */
#define MCSR_BUS_WAERR 0x00000020UL /* Write Address Error */
#define MCSR_BUS_IBERR 0x00000010UL /* Instruction Data Error */
#define MCSR_BUS_RBERR 0x00000008UL /* Read Data Bus Error */
#define MCSR_BUS_WBERR 0x00000004UL /* Write Data Bus Error */
#define MCSR_BUS_IPERR 0x00000002UL /* Instruction Parity Error */
#define MCSR_BUS_RPERR 0x00000001UL /* Read Parity Error */
#endif
/* Bit definitions for the DBSR. */
/*
* DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
......@@ -165,11 +215,27 @@ do { \
#define DBSR_IC 0x08000000 /* Instruction Completion */
#define DBSR_BT 0x04000000 /* Branch Taken */
#define DBSR_TIE 0x01000000 /* Trap Instruction Event */
#define DBSR_IAC1 0x00800000 /* Instr Address Compare 1 Event */
#define DBSR_IAC2 0x00400000 /* Instr Address Compare 2 Event */
#define DBSR_IAC3 0x00200000 /* Instr Address Compare 3 Event */
#define DBSR_IAC4 0x00100000 /* Instr Address Compare 4 Event */
#define DBSR_DAC1R 0x00080000 /* Data Addr Compare 1 Read Event */
#define DBSR_DAC1W 0x00040000 /* Data Addr Compare 1 Write Event */
#define DBSR_DAC2R 0x00020000 /* Data Addr Compare 2 Read Event */
#define DBSR_DAC2W 0x00010000 /* Data Addr Compare 2 Write Event */
#endif
#ifdef CONFIG_40x
#define DBSR_IC 0x80000000 /* Instruction Completion */
#define DBSR_BT 0x40000000 /* Branch taken */
#define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */
#define DBSR_IAC1 0x00800000 /* Instruction Address Compare 1 Event */
#define DBSR_IAC2 0x00400000 /* Instruction Address Compare 2 Event */
#define DBSR_IAC3 0x00200000 /* Instruction Address Compare 3 Event */
#define DBSR_IAC4 0x00100000 /* Instruction Address Compare 4 Event */
#define DBSR_DAC1R 0x00080000 /* Data Address Compare 1 Read Event */
#define DBSR_DAC1W 0x00040000 /* Data Address Compare 1 Write Event */
#define DBSR_DAC2R 0x00020000 /* Data Address Compare 2 Read Event */
#define DBSR_DAC2W 0x00010000 /* Data Address Compare 2 Write Event */
#endif
/* Bit definitions related to the ESR. */
......@@ -184,6 +250,9 @@ do { \
#define ESR_DST 0x00800000 /* Storage Exception - Data miss */
#define ESR_DIZ 0x00400000 /* Storage Exception - Zone fault */
#define ESR_ST 0x00800000 /* Store Operation */
#define ESR_DLK 0x00200000 /* Data Cache Locking */
#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
#define ESR_BO 0x00020000 /* Byte Ordering */
/* Bit definitions related to the DBCR0. */
#define DBCR0_EDM 0x80000000 /* External Debug Mode */
......@@ -258,10 +327,49 @@ do { \
#define ICCR_NOCACHE 0 /* Noncacheable */
#define ICCR_CACHE 1 /* Cacheable */
/* Bit definitions for L1CSR0. */
#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
/* Bit definitions for L1CSR1. */
#define L1CSR1_ICLFR 0x00000100 /* Instr Cache Lock Bits Flash Reset */
#define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */
#define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */
/* Bit definitions for SGR. */
#define SGR_NORMAL 0 /* Speculative fetching allowed. */
#define SGR_GUARDED 1 /* Speculative fetching disallowed. */
/* Bit definitions for SPEFSCR. */
#define SPEFSCR_SOVH 0x80000000 /* Summary integer overflow high */
#define SPEFSCR_OVH 0x40000000 /* Integer overflow high */
#define SPEFSCR_FGH 0x20000000 /* Embedded FP guard bit high */
#define SPEFSCR_FXH 0x10000000 /* Embedded FP sticky bit high */
#define SPEFSCR_FINVH 0x08000000 /* Embedded FP invalid operation high */
#define SPEFSCR_FDBZH 0x04000000 /* Embedded FP div by zero high */
#define SPEFSCR_FUNFH 0x02000000 /* Embedded FP underflow high */
#define SPEFSCR_FOVFH 0x01000000 /* Embedded FP overflow high */
#define SPEFSCR_FINXS 0x00200000 /* Embedded FP inexact sticky */
#define SPEFSCR_FINVS 0x00100000 /* Embedded FP invalid op. sticky */
#define SPEFSCR_FDBZS 0x00080000 /* Embedded FP div by zero sticky */
#define SPEFSCR_FUNFS 0x00040000 /* Embedded FP underflow sticky */
#define SPEFSCR_FOVFS 0x00020000 /* Embedded FP overflow sticky */
#define SPEFSCR_MODE 0x00010000 /* Embedded FP mode */
#define SPEFSCR_SOV 0x00008000 /* Integer summary overflow */
#define SPEFSCR_OV 0x00004000 /* Integer overflow */
#define SPEFSCR_FG 0x00002000 /* Embedded FP guard bit */
#define SPEFSCR_FX 0x00001000 /* Embedded FP sticky bit */
#define SPEFSCR_FINV 0x00000800 /* Embedded FP invalid operation */
#define SPEFSCR_FDBZ 0x00000400 /* Embedded FP div by zero */
#define SPEFSCR_FUNF 0x00000200 /* Embedded FP underflow */
#define SPEFSCR_FOVF 0x00000100 /* Embedded FP overflow */
#define SPEFSCR_FINXE 0x00000040 /* Embedded FP inexact enable */
#define SPEFSCR_FINVE 0x00000020 /* Embedded FP invalid op. enable */
#define SPEFSCR_FDBZE 0x00000010 /* Embedded FP div by zero enable */
#define SPEFSCR_FUNFE 0x00000008 /* Embedded FP underflow enable */
#define SPEFSCR_FOVFE 0x00000004 /* Embedded FP overflow enable */
#define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */
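/*
 * Illustrative sketch (not part of this patch): turning on the embedded-FP
 * divide-by-zero and invalid-operation exception enables while leaving the
 * rest of SPEFSCR untouched.  mfspr()/mtspr() are the usual reg.h helpers
 * and SPRN_SPEFSCR is defined earlier in this file; the function name is
 * made up for the example.
 */
static inline void example_enable_efp_exceptions(void)
{
	unsigned int spefscr = mfspr(SPRN_SPEFSCR);

	spefscr |= SPEFSCR_FDBZE | SPEFSCR_FINVE;
	mtspr(SPRN_SPEFSCR, spefscr);
}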
/* Short-hand for various SPRs. */
#ifdef CONFIG_BOOKE
#define CSRR0 SPRN_CSRR0 /* Critical Save and Restore Register 0 */
......
......@@ -30,6 +30,8 @@
#include <platforms/spruce.h>
#elif defined(CONFIG_4xx)
#include <asm/ibm4xx.h>
#elif defined(CONFIG_85xx)
#include <asm/mpc85xx.h>
#else
/*
......
......@@ -76,6 +76,8 @@ extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
......
......@@ -24,6 +24,28 @@ extern void _tlbia(void);
#define __tlbia _tlbia
#endif
static inline void flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{ __tlbia(); }
#elif defined(CONFIG_FSL_BOOKE)
/* TODO: determine if flush_tlb_range & flush_tlb_kernel_range
* are best implemented as tlbia vs specific tlbie's */
#define __tlbia() _tlbia()
static inline void flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
static inline void flush_tlb_page(struct vm_area_struct *vma,
......
......@@ -29,6 +29,12 @@
/* Get/set floating-point exception mode (if meaningful) */
#define PR_GET_FPEXC 11
#define PR_SET_FPEXC 12
# define PR_FP_EXC_SW_ENABLE 0x80 /* Use FPEXC for FP exception enables */
# define PR_FP_EXC_DIV 0x010000 /* floating point divide by zero */
# define PR_FP_EXC_OVF 0x020000 /* floating point overflow */
# define PR_FP_EXC_UND 0x040000 /* floating point underflow */
# define PR_FP_EXC_RES 0x080000 /* floating point inexact result */
# define PR_FP_EXC_INV 0x100000 /* floating point invalid operation */
# define PR_FP_EXC_DISABLED 0 /* FP exceptions disabled */
# define PR_FP_EXC_NONRECOV 1 /* async non-recoverable exc. mode */
# define PR_FP_EXC_ASYNC 2 /* async recoverable exception mode */
......