Commit 3aa590c6 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (43 commits)
  [POWERPC] Use little-endian bit from firmware ibm,pa-features property
  [POWERPC] Make sure smp_processor_id works very early in boot
  [POWERPC] U4 DART improvements
  [POWERPC] todc: add support for Time-Of-Day-Clock
  [POWERPC] Make lparcfg.c work when both iseries and pseries are selected
  [POWERPC] Fix idr locking in init_new_context
  [POWERPC] mpc7448hpc2 (taiga) board config file
  [POWERPC] Add tsi108 pci and platform device data register function
  [POWERPC] Add general support for mpc7448hpc2 (Taiga) platform
  [POWERPC] Correct the MAX_CONTEXT definition
  powerpc: minor cleanups for mpc86xx
  [POWERPC] Make sure we select CONFIG_NEW_LEDS if ADB_PMU_LED is set
  [POWERPC] Simplify the code defining the 64-bit CPU features
  [POWERPC] powerpc: kconfig warning fix
  [POWERPC] Consolidate some of kernel/misc*.S
  [POWERPC] Remove unused function call_with_mmu_off
  [POWERPC] update asm-powerpc/time.h
  [POWERPC] Clean up it_lp_queue.h
  [POWERPC] Skip the "copy down" of the kernel if it is already at zero.
  [POWERPC] Add the use of the firmware soft-reset-nmi to kdump.
  ...
parents 4d3ce21f 339d76c5
@@ -340,7 +340,7 @@ config PPC_ISERIES
config EMBEDDED6xx
bool "Embedded 6xx/7xx/7xxx-based board"
-depends on PPC32 && BROKEN
+depends on PPC32 && (BROKEN||BROKEN_ON_SMP)
config APUS
bool "Amiga-APUS"
@@ -417,12 +417,17 @@ config PPC_CELL_NATIVE
default n
config PPC_IBM_CELL_BLADE
-bool " IBM Cell Blade"
+bool "IBM Cell Blade"
depends on PPC_MULTIPLATFORM && PPC64
select PPC_CELL_NATIVE
select PPC_RTAS
select MMIO_NVRAM
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
config UDBG_RTAS_CONSOLE
bool
default n
config XICS
depends on PPC_PSERIES
@@ -435,7 +440,8 @@ config U3_DART
default n
config MPIC
-depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE || PPC_CHRP
+depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE || PPC_CHRP \
|| MPC7448HPC2
bool
default y
@@ -561,6 +567,13 @@ config TAU_AVERAGE
/proc/cpuinfo.
If in doubt, say N here.
config PPC_TODC
depends on EMBEDDED6xx
bool "Generic Time-of-day Clock (TODC) support"
---help---
This adds support for many TODC/RTC chips.
endmenu
source arch/powerpc/platforms/embedded6xx/Kconfig
@@ -801,7 +814,6 @@ config GENERIC_ISA_DMA
config PPC_I8259
bool
default y if MPC8641_HPCN
default n
config PPC_INDIRECT_PCI
@@ -824,7 +836,8 @@ config MCA
bool
config PCI
-bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
+bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) \
|| MPC7448HPC2
default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !PPC_85xx && !PPC_86xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
default PCI_QSPAN if !4xx && !CPM2 && 8xx
...
@@ -134,12 +134,19 @@ config PPC_EARLY_DEBUG_G5
help
Select this to enable early debugging for Apple G5 machines.
-config PPC_EARLY_DEBUG_RTAS
+config PPC_EARLY_DEBUG_RTAS_PANEL
bool "RTAS Panel"
depends on PPC_RTAS
help
Select this to enable early debugging via the RTAS panel.
config PPC_EARLY_DEBUG_RTAS_CONSOLE
bool "RTAS Console"
depends on PPC_RTAS
select UDBG_RTAS_CONSOLE
help
Select this to enable early debugging via the RTAS console.
config PPC_EARLY_DEBUG_MAPLE
bool "Maple real mode"
depends on PPC_MAPLE
...
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17
-# Mon Jun 19 17:23:03 2006
+# Linux kernel version: 2.6.17-rc6
+# Thu Jun 22 15:28:36 2006
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -1063,7 +1063,8 @@ CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUGGER=y
-# CONFIG_XMON is not set
+CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
# CONFIG_PPC_EARLY_DEBUG is not set
...
@@ -50,7 +50,8 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds
-obj-y += time.o prom.o traps.o setup-common.o udbg.o
+obj-y += time.o prom.o traps.o setup-common.o \
udbg.o misc.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o
obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
...
@@ -125,7 +125,12 @@ _GLOBAL(__save_cpu_setup)
cmpwi r0,0x44
bne 2f
-1: /* Save HID0,1,4 and 5 */
+1: /* skip if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
beq 2f
/* Save HID0,1,4 and 5 */
mfspr r3,SPRN_HID0
std r3,CS_HID0(r5)
mfspr r3,SPRN_HID1
@@ -159,7 +164,12 @@ _GLOBAL(__restore_cpu_setup)
cmpwi r0,0x44
bnelr
-1: /* Before accessing memory, we make sure rm_ci is clear */
+1: /* skip if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
beqlr
/* Before accessing memory, we make sure rm_ci is clear */
li r0,0
mfspr r3,SPRN_HID4
rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
...
@@ -722,18 +722,6 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_G4,
.platform = "ppc7450",
},
{ /* 8641 */
.pvr_mask = 0xffffffff,
.pvr_value = 0x80040010,
.cpu_name = "8641",
.cpu_features = CPU_FTRS_7447A,
.cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
.cpu_setup = __setup_cpu_745x
},
{ /* 82xx (8240, 8245, 8260 are all 603e cores) */
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00810000,
...
@@ -24,9 +24,11 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/kdump.h>
#include <asm/lmb.h>
#include <asm/firmware.h>
@@ -41,6 +43,7 @@
/* This keeps a track of which one is crashing cpu. */
int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
size_t data_len)
@@ -98,34 +101,66 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
}
#ifdef CONFIG_SMP
-static atomic_t waiting_for_crash_ipi;
+static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
void crash_ipi_callback(struct pt_regs *regs)
{
int cpu = smp_processor_id();
if (cpu == crashing_cpu)
return;
if (!cpu_online(cpu))
return;
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 1);
local_irq_disable();
if (!cpu_isset(cpu, cpus_in_crash))
crash_save_this_cpu(regs, cpu);
cpu_set(cpu, cpus_in_crash);
-crash_save_this_cpu(regs, cpu);
-atomic_dec(&waiting_for_crash_ipi);
+/*
+* Entered via soft-reset - could be the kdump
* process is invoked using soft-reset or user activated
* it if some CPU did not respond to an IPI.
* For soft-reset, the secondary CPU can enter this func
* twice. 1 - using IPI, and 2. soft-reset.
* Tell the kexec CPU that entered via soft-reset and ready
* to go down.
*/
if (cpu_isset(cpu, cpus_in_sr)) {
cpu_clear(cpu, cpus_in_sr);
atomic_inc(&enter_on_soft_reset);
}
/*
* Starting the kdump boot.
* This barrier is needed to make sure that all CPUs are stopped.
* If not, soft-reset will be invoked to bring other CPUs.
*/
while (!cpu_isset(crashing_cpu, cpus_in_crash))
cpu_relax();
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 1);
kexec_smp_wait();
/* NOTREACHED */
}
-static void crash_kexec_prepare_cpus(void)
+/*
* Wait until all CPUs are entered via soft-reset.
*/
static void crash_soft_reset_check(int cpu)
{
unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
cpu_clear(cpu, cpus_in_sr);
while (atomic_read(&enter_on_soft_reset) != ncpus)
cpu_relax();
}
static void crash_kexec_prepare_cpus(int cpu)
{
unsigned int msecs;
-atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
crash_send_ipi(crash_ipi_callback);
smp_wmb();
@@ -133,14 +168,13 @@ static void crash_kexec_prepare_cpus(void)
/*
* FIXME: Until we will have the way to stop other CPUSs reliabally,
* the crash CPU will send an IPI and wait for other CPUs to
-* respond. If not, proceed the kexec boot even though we failed to
-* capture other CPU states.
+* respond.
* Delay of at least 10 seconds.
*/
-printk(KERN_ALERT "Sending IPI to other cpus...\n");
+printk(KERN_EMERG "Sending IPI to other cpus...\n");
msecs = 10000;
-while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) {
-barrier();
+while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+cpu_relax();
mdelay(1);
}
@@ -149,18 +183,71 @@ static void crash_kexec_prepare_cpus(void)
/*
* FIXME: In case if we do not get all CPUs, one possibility: ask the
* user to do soft reset such that we get all.
-* IPI handler is already set by the panic cpu initially. Therefore,
-* all cpus could invoke this handler from die() and the panic CPU
-* will call machine_kexec() directly from this handler to do
-* kexec boot.
+* Soft-reset will be used until better mechanism is implemented.
+*/
+if (cpus_weight(cpus_in_crash) < ncpus) {
+printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
ncpus - cpus_weight(cpus_in_crash));
printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
cpus_in_sr = CPU_MASK_NONE;
atomic_set(&enter_on_soft_reset, 0);
while (cpus_weight(cpus_in_crash) < ncpus)
cpu_relax();
}
/*
* Make sure all CPUs are entered via soft-reset if the kdump is
* invoked using soft-reset.
*/
-if (atomic_read(&waiting_for_crash_ipi))
-printk(KERN_ALERT "done waiting: %d cpus not responding\n",
-atomic_read(&waiting_for_crash_ipi));
+if (cpu_isset(cpu, cpus_in_sr))
+crash_soft_reset_check(cpu);
/* Leave the IPI callback set */
}
/*
* This function will be called by secondary cpus or by kexec cpu
* if soft-reset is activated to stop some CPUs.
*/
void crash_kexec_secondary(struct pt_regs *regs)
{
int cpu = smp_processor_id();
unsigned long flags;
int msecs = 5;
local_irq_save(flags);
/* Wait 5ms if the kexec CPU is not entered yet. */
while (crashing_cpu < 0) {
if (--msecs < 0) {
/*
* Either kdump image is not loaded or
* kdump process is not started - Probably xmon
* exited using 'x'(exit and recover) or
* kexec_should_crash() failed for all running tasks.
*/
cpu_clear(cpu, cpus_in_sr);
local_irq_restore(flags);
return;
}
mdelay(1);
cpu_relax();
}
if (cpu == crashing_cpu) {
/*
* Panic CPU will enter this func only via soft-reset.
* Wait until all secondary CPUs entered and
* then start kexec boot.
*/
crash_soft_reset_check(cpu);
cpu_set(crashing_cpu, cpus_in_crash);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
machine_kexec(kexec_crash_image);
/* NOTREACHED */
}
crash_ipi_callback(regs);
}
#else
-static void crash_kexec_prepare_cpus(void)
+static void crash_kexec_prepare_cpus(int cpu)
{
/*
* move the secondarys to us so that we can copy
@@ -171,6 +258,10 @@ static void crash_kexec_prepare_cpus(void)
smp_release_cpus();
}
void crash_kexec_secondary(struct pt_regs *regs)
{
cpus_in_sr = CPU_MASK_NONE;
}
#endif
void default_machine_crash_shutdown(struct pt_regs *regs)
@@ -199,14 +290,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
desc->chip->disable(irq);
}
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
/*
* Make a note of crashing cpu. Will be used in machine_kexec
* such that another IPI will not be sent.
*/
crashing_cpu = smp_processor_id();
crash_kexec_prepare_cpus();
crash_save_this_cpu(regs, crashing_cpu);
crash_kexec_prepare_cpus(crashing_cpu);
cpu_set(crashing_cpu, cpus_in_crash);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
@@ -85,34 +85,6 @@ END_FTR_SECTION(0, 1)
/* Catch branch to 0 in real mode */
trap
#ifdef CONFIG_PPC_ISERIES
/*
* At offset 0x20, there is a pointer to iSeries LPAR data.
* This is required by the hypervisor
*/
. = 0x20
.llong hvReleaseData-KERNELBASE
/*
* At offset 0x28 and 0x30 are offsets to the mschunks_map
* array (used by the iSeries LPAR debugger to do translation
* between physical addresses and absolute addresses) and
* to the pidhash table (also used by the debugger)
*/
.llong mschunks_map-KERNELBASE
.llong 0 /* pidhash-KERNELBASE SFRXXX */
/* Offset 0x38 - Pointer to start of embedded System.map */
.globl embedded_sysmap_start
embedded_sysmap_start:
.llong 0
/* Offset 0x40 - Pointer to end of embedded System.map */
.globl embedded_sysmap_end
embedded_sysmap_end:
.llong 0
#endif /* CONFIG_PPC_ISERIES */
/* Secondary processors spin on this value until it goes to 1. */
.globl __secondary_hold_spinloop
__secondary_hold_spinloop:
@@ -124,6 +96,15 @@ __secondary_hold_spinloop:
__secondary_hold_acknowledge:
.llong 0x0
#ifdef CONFIG_PPC_ISERIES
/*
* At offset 0x20, there is a pointer to iSeries LPAR data.
* This is required by the hypervisor
*/
. = 0x20
.llong hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */
. = 0x60
/*
* The following code is used on pSeries to hold secondary processors
@@ -1602,9 +1583,6 @@ _GLOBAL(__start_initialization_multiplatform)
/* Setup some critical 970 SPRs before switching MMU off */
bl .__970_cpu_preinit
/* cpu # */
li r24,0
/* Switch off MMU if not already */
LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
add r4,r4,r30
@@ -1683,6 +1661,9 @@ _STATIC(__after_prom_start)
/* i.e. where we are running */
/* the source addr */
cmpdi r4,0 /* In some cases the loader may */
beq .start_here_multiplatform /* have already put us at zero */
/* so we can skip the copy. */
LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
sub r5,r5,r27
@@ -1962,14 +1943,6 @@ _STATIC(start_here_common)
li r3,0
bl .do_cpu_ftr_fixups
LOAD_REG_IMMEDIATE(r26, boot_cpuid)
lwz r26,0(r26)
LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */
mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r24 /* for this processor. */
mtspr SPRN_SPRG3,r13
/* ptr to current */
LOAD_REG_IMMEDIATE(r4, init_task)
std r4,PACACURRENT(r13)
@@ -1995,17 +1968,6 @@ _STATIC(start_here_common)
/* Not reached */
BUG_OPCODE
/* Put the paca pointer into r13 and SPRG3 */
_GLOBAL(setup_boot_paca)
LOAD_REG_IMMEDIATE(r3, boot_cpuid)
lwz r3,0(r3)
LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */
mulli r3,r3,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r3,r4 /* for this processor. */
mtspr SPRN_SPRG3,r13
blr
/*
* We put a few things here that have to be page-aligned.
* This stuff goes at the beginning of the bss, which is page-aligned.
...
@@ -38,6 +38,7 @@
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#define DBG(...)
@@ -440,8 +441,37 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
tbl->it_largehint = tbl->it_halfpoint;
spin_lock_init(&tbl->it_lock);
#ifdef CONFIG_CRASH_DUMP
if (ppc_md.tce_get) {
unsigned long index, tceval;
unsigned long tcecount = 0;
/*
* Reserve the existing mappings left by the first kernel.
*/
for (index = 0; index < tbl->it_size; index++) {
tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
/*
* Freed TCE entry contains 0x7fffffffffffffff on JS20
*/
if (tceval && (tceval != 0x7fffffffffffffffUL)) {
__set_bit(index, tbl->it_map);
tcecount++;
}
}
if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
printk(KERN_WARNING "TCE table is full; ");
printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
KDUMP_MIN_TCE_ENTRIES);
for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
index < tbl->it_size; index++)
__clear_bit(index, tbl->it_map);
}
}
#else
/* Clear the hardware table in case firmware left allocations in it */
ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif
if (!welcomed) {
printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
...
@@ -302,6 +302,17 @@ void __init find_legacy_serial_ports(void)
of_node_put(isa);
}
/* First fill our array with tsi-bridge ports */
for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
struct device_node *tsi = of_get_parent(np);
if (tsi && !strcmp(tsi->type, "tsi-bridge")) {
index = add_legacy_soc_port(np, np);
if (index >= 0 && np == stdout)
legacy_serial_console = index;
}
of_node_put(tsi);
}
#ifdef CONFIG_PCI
/* Next, try to locate PCI ports */
for (np = NULL; (np = of_find_all_nodes(np));) {
...
@@ -45,11 +45,9 @@
static struct proc_dir_entry *proc_ppc64_lparcfg;
#define LPARCFG_BUFF_SIZE 4096
#ifdef CONFIG_PPC_ISERIES
/*
-* For iSeries legacy systems, the PPA purr function is available from the
-* emulated_time_base field in the paca.
+* Track sum of all purrs across all processors. This is used to further
+* calculate usage values by different applications
*/
static unsigned long get_purr(void)
{
@@ -57,48 +55,31 @@ static unsigned long get_purr(void)
int cpu;
for_each_possible_cpu(cpu) {
-sum_purr += lppaca[cpu].emulated_time_base;
+if (firmware_has_feature(FW_FEATURE_ISERIES))
sum_purr += lppaca[cpu].emulated_time_base;
else {
struct cpu_usage *cu;
-#ifdef PURR_DEBUG
-printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n",
-cpu, lppaca[cpu].emulated_time_base);
-#endif
+cu = &per_cpu(cpu_usage_array, cpu);
+sum_purr += cu->current_tb;
+}
}
return sum_purr;
}
-#define lparcfg_write NULL
+#ifdef CONFIG_PPC_ISERIES
/*
* Methods used to fetch LPAR data when running on an iSeries platform.
*/
-static int lparcfg_data(struct seq_file *m, void *v)
+static int iseries_lparcfg_data(struct seq_file *m, void *v)
{
-unsigned long pool_id, lp_index;
+unsigned long pool_id;
int shared, entitled_capacity, max_entitled_capacity;
int processors, max_processors;
unsigned long purr = get_purr();
seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
shared = (int)(get_lppaca()->shared_proc);
seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n",
e2a(xItExtVpdPanel.mfgID[2]),
e2a(xItExtVpdPanel.mfgID[3]),
e2a(xItExtVpdPanel.systemSerial[1]),
e2a(xItExtVpdPanel.systemSerial[2]),
e2a(xItExtVpdPanel.systemSerial[3]),
e2a(xItExtVpdPanel.systemSerial[4]),
e2a(xItExtVpdPanel.systemSerial[5]));
seq_printf(m, "system_type=%c%c%c%c\n",
e2a(xItExtVpdPanel.machineType[0]),
e2a(xItExtVpdPanel.machineType[1]),
e2a(xItExtVpdPanel.machineType[2]),
e2a(xItExtVpdPanel.machineType[3]));
lp_index = HvLpConfig_getLpIndex();
seq_printf(m, "partition_id=%d\n", (int)lp_index);
seq_printf(m, "system_active_processors=%d\n", seq_printf(m, "system_active_processors=%d\n",
(int)HvLpConfig_getSystemPhysicalProcessors()); (int)HvLpConfig_getSystemPhysicalProcessors());
...@@ -137,6 +118,14 @@ static int lparcfg_data(struct seq_file *m, void *v) ...@@ -137,6 +118,14 @@ static int lparcfg_data(struct seq_file *m, void *v)
return 0; return 0;
} }
#else /* CONFIG_PPC_ISERIES */
static int iseries_lparcfg_data(struct seq_file *m, void *v)
{
return 0;
}
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_PSERIES
@@ -213,22 +202,6 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
log_plpar_hcall_return(rc, "H_PIC");
}
/* Track sum of all purrs across all processors. This is used to further */
/* calculate usage values by different applications */
static unsigned long get_purr(void)
{
unsigned long sum_purr = 0;
int cpu;
struct cpu_usage *cu;
for_each_possible_cpu(cpu) {
cu = &per_cpu(cpu_usage_array, cpu);
sum_purr += cu->current_tb;
}
return sum_purr;
}
#define SPLPAR_CHARACTERISTICS_TOKEN 20
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
@@ -333,35 +306,13 @@ static int lparcfg_count_active_processors(void)
return count;
}
-static int lparcfg_data(struct seq_file *m, void *v)
+static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
int partition_potential_processors;
int partition_active_processors;
struct device_node *rootdn;
const char *model = "";
const char *system_id = "";
unsigned int *lp_index_ptr, lp_index = 0;
struct device_node *rtas_node;
int *lrdrp = NULL;
rootdn = find_path_device("/");
if (rootdn) {
model = get_property(rootdn, "model", NULL);
system_id = get_property(rootdn, "system-id", NULL);
lp_index_ptr = (unsigned int *)
get_property(rootdn, "ibm,partition-no", NULL);
if (lp_index_ptr)
lp_index = *lp_index_ptr;
}
seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
seq_printf(m, "serial_number=%s\n", system_id);
seq_printf(m, "system_type=%s\n", model);
seq_printf(m, "partition_id=%d\n", (int)lp_index);
rtas_node = find_path_device("/rtas");
if (rtas_node)
lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity",
@@ -549,8 +500,61 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
return retval;
}
#else /* CONFIG_PPC_PSERIES */
static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
return 0;
}
static ssize_t lparcfg_write(struct file *file, const char __user * buf,
size_t count, loff_t * off)
{
return count;
}
#endif /* CONFIG_PPC_PSERIES */
static int lparcfg_data(struct seq_file *m, void *v)
{
struct device_node *rootdn;
const char *model = "";
const char *system_id = "";
const char *tmp;
unsigned int *lp_index_ptr, lp_index = 0;
seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
rootdn = find_path_device("/");
if (rootdn) {
tmp = get_property(rootdn, "model", NULL);
if (tmp) {
model = tmp;
/* Skip "IBM," - see platforms/iseries/dt.c */
if (firmware_has_feature(FW_FEATURE_ISERIES))
model += 4;
}
tmp = get_property(rootdn, "system-id", NULL);
if (tmp) {
system_id = tmp;
/* Skip "IBM," - see platforms/iseries/dt.c */
if (firmware_has_feature(FW_FEATURE_ISERIES))
system_id += 4;
}
lp_index_ptr = (unsigned int *)
get_property(rootdn, "ibm,partition-no", NULL);
if (lp_index_ptr)
lp_index = *lp_index_ptr;
}
seq_printf(m, "serial_number=%s\n", system_id);
seq_printf(m, "system_type=%s\n", model);
seq_printf(m, "partition_id=%d\n", (int)lp_index);
if (firmware_has_feature(FW_FEATURE_ISERIES))
return iseries_lparcfg_data(m, v);
return pseries_lparcfg_data(m, v);
}
static int lparcfg_open(struct inode *inode, struct file *file)
{
return single_open(file, lparcfg_data, NULL);
@@ -569,7 +573,8 @@ int __init lparcfg_init(void)
mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
/* Allow writing if we have FW_FEATURE_SPLPAR */
-if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+if (firmware_has_feature(FW_FEATURE_SPLPAR) &&
!firmware_has_feature(FW_FEATURE_ISERIES)) {
lparcfg_fops.write = lparcfg_write;
mode |= S_IWUSR;
}
...
@@ -378,11 +378,13 @@ static void __init export_crashk_values(void)
of_node_put(node);
}
-void __init kexec_setup(void)
+static int __init kexec_setup(void)
{
export_htab_values();
export_crashk_values();
return 0;
}
__initcall(kexec_setup);
static int __init early_parse_crashk(char *p)
{
...
/*
* This file contains miscellaneous low-level functions.
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
*
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
.text
#ifdef CONFIG_PPC64
#define IN_SYNC twi 0,r5,0; isync
#define EIEIO_32
#define SYNC_64 sync
#else /* CONFIG_PPC32 */
#define IN_SYNC
#define EIEIO_32 eieio
#define SYNC_64
#endif
/*
* Returns (address we are running at) - (address we were linked at)
* for use before the text and data are mapped to KERNELBASE.
*/
_GLOBAL(reloc_offset)
mflr r0
bl 1f
1: mflr r3
LOAD_REG_IMMEDIATE(r4,1b)
subf r3,r4,r3
mtlr r0
blr
/*
* add_reloc_offset(x) returns x + reloc_offset().
*/
_GLOBAL(add_reloc_offset)
mflr r0
bl 1f
1: mflr r5
LOAD_REG_IMMEDIATE(r4,1b)
subf r5,r4,r5
add r3,r3,r5
mtlr r0
blr
/*
* I/O string operations
*
* insb(port, buf, len)
* outsb(port, buf, len)
* insw(port, buf, len)
* outsw(port, buf, len)
* insl(port, buf, len)
* outsl(port, buf, len)
* insw_ns(port, buf, len)
* outsw_ns(port, buf, len)
* insl_ns(port, buf, len)
* outsl_ns(port, buf, len)
*
* The *_ns versions don't do byte-swapping.
*/
_GLOBAL(_insb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbz r5,0(r3)
eieio
stbu r5,1(r4)
bdnz 00b
IN_SYNC
blr
_GLOBAL(_outsb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbzu r5,1(r4)
stb r5,0(r3)
EIEIO_32
bdnz 00b
SYNC_64
blr
_GLOBAL(_insw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhbrx r5,0,r3
eieio
sthu r5,2(r4)
bdnz 00b
IN_SYNC
blr
_GLOBAL(_outsw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
EIEIO_32
sthbrx r5,0,r3
bdnz 00b
SYNC_64
blr
_GLOBAL(_insl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwbrx r5,0,r3
eieio
stwu r5,4(r4)
bdnz 00b
IN_SYNC
blr
_GLOBAL(_outsl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stwbrx r5,0,r3
EIEIO_32
bdnz 00b
SYNC_64
blr
#ifdef CONFIG_PPC32
_GLOBAL(__ide_mm_insw)
#endif
_GLOBAL(_insw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhz r5,0(r3)
eieio
sthu r5,2(r4)
bdnz 00b
IN_SYNC
blr
#ifdef CONFIG_PPC32
_GLOBAL(__ide_mm_outsw)
#endif
_GLOBAL(_outsw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
sth r5,0(r3)
EIEIO_32
bdnz 00b
SYNC_64
blr
#ifdef CONFIG_PPC32
_GLOBAL(__ide_mm_insl)
#endif
_GLOBAL(_insl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwz r5,0(r3)
eieio
stwu r5,4(r4)
bdnz 00b
IN_SYNC
blr
#ifdef CONFIG_PPC32
_GLOBAL(__ide_mm_outsl)
#endif
_GLOBAL(_outsl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stw r5,0(r3)
EIEIO_32
bdnz 00b
SYNC_64
blr
@@ -60,32 +60,6 @@ _GLOBAL(mulhdu)
addze r3,r3
blr
/*
* Returns (address we're running at) - (address we were linked at)
* for use before the text and data are mapped to KERNELBASE.
*/
_GLOBAL(reloc_offset)
mflr r0
bl 1f
1: mflr r3
LOAD_REG_IMMEDIATE(r4,1b)
subf r3,r4,r3
mtlr r0
blr
/*
* add_reloc_offset(x) returns x + reloc_offset().
*/
_GLOBAL(add_reloc_offset)
mflr r0
bl 1f
1: mflr r5
LOAD_REG_IMMEDIATE(r4,1b)
subf r5,r4,r5
add r3,r3,r5
mtlr r0
blr
/*
* sub_reloc_offset(x) returns x - reloc_offset().
*/
@@ -780,136 +754,6 @@ _GLOBAL(atomic_set_mask)
bne- 10b
blr
/*
* I/O string operations
*
* insb(port, buf, len)
* outsb(port, buf, len)
* insw(port, buf, len)
* outsw(port, buf, len)
* insl(port, buf, len)
* outsl(port, buf, len)
* insw_ns(port, buf, len)
* outsw_ns(port, buf, len)
* insl_ns(port, buf, len)
* outsl_ns(port, buf, len)
*
* The *_ns versions don't do byte-swapping.
*/
_GLOBAL(_insb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbz r5,0(r3)
eieio
stbu r5,1(r4)
bdnz 00b
blr
_GLOBAL(_outsb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbzu r5,1(r4)
stb r5,0(r3)
eieio
bdnz 00b
blr
_GLOBAL(_insw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhbrx r5,0,r3
eieio
sthu r5,2(r4)
bdnz 00b
blr
_GLOBAL(_outsw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
eieio
sthbrx r5,0,r3
bdnz 00b
blr
_GLOBAL(_insl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwbrx r5,0,r3
eieio
stwu r5,4(r4)
bdnz 00b
blr
_GLOBAL(_outsl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stwbrx r5,0,r3
eieio
bdnz 00b
blr
_GLOBAL(__ide_mm_insw)
_GLOBAL(_insw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhz r5,0(r3)
eieio
sthu r5,2(r4)
bdnz 00b
blr
_GLOBAL(__ide_mm_outsw)
_GLOBAL(_outsw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
sth r5,0(r3)
eieio
bdnz 00b
blr
_GLOBAL(__ide_mm_insl)
_GLOBAL(_insl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwz r5,0(r3)
eieio
stwu r5,4(r4)
bdnz 00b
blr
_GLOBAL(__ide_mm_outsl)
_GLOBAL(_outsl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stw r5,0(r3)
eieio
bdnz 00b
blr
/*
* Extended precision shifts.
*
...
/*
* arch/powerpc/kernel/misc64.S
*
* This file contains miscellaneous low-level functions.
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -30,41 +28,10 @@
.text
/*
* Returns (address we are running at) - (address we were linked at)
* for use before the text and data are mapped to KERNELBASE.
*/
_GLOBAL(reloc_offset)
mflr r0
bl 1f
1: mflr r3
LOAD_REG_IMMEDIATE(r4,1b)
subf r3,r4,r3
mtlr r0
blr
/*
* add_reloc_offset(x) returns x + reloc_offset().
*/
_GLOBAL(add_reloc_offset)
mflr r0
bl 1f
1: mflr r5
LOAD_REG_IMMEDIATE(r4,1b)
subf r5,r4,r5
add r3,r3,r5
mtlr r0
blr
_GLOBAL(get_msr)
mfmsr r3
blr
_GLOBAL(get_dar)
mfdar r3
blr
_GLOBAL(get_srr0)
mfsrr0 r3
blr
@@ -72,10 +39,6 @@ _GLOBAL(get_srr0)
_GLOBAL(get_srr1)
mfsrr1 r3
blr
_GLOBAL(get_sp)
mr r3,r1
blr
#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
@@ -101,48 +64,6 @@ _GLOBAL(call___do_IRQ)
blr
#endif /* CONFIG_IRQSTACKS */
/*
* To be called by C code which needs to do some operations with MMU
* disabled. Note that interrupts have to be disabled by the caller
* prior to calling us. The code called _MUST_ be in the RMO of course
* and part of the linear mapping as we don't attempt to translate the
* stack pointer at all. The function is called with the stack switched
* to this CPU emergency stack
*
* prototype is void *call_with_mmu_off(void *func, void *data);
*
* the called function is expected to be of the form
*
* void *called(void *data);
*/
_GLOBAL(call_with_mmu_off)
mflr r0 /* get link, save it on stackframe */
std r0,16(r1)
mr r1,r5 /* save old stack ptr */
ld r1,PACAEMERGSP(r13) /* get emerg. stack */
subi r1,r1,STACK_FRAME_OVERHEAD
std r0,16(r1) /* save link on emerg. stack */
std r5,0(r1) /* save old stack ptr in backchain */
ld r3,0(r3) /* get to real function ptr (assume same TOC) */
bl 2f /* we need LR to return, continue at label 2 */
ld r0,16(r1) /* we return here from the call, get LR and */
ld r1,0(r1) /* .. old stack ptr */
mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
mfmsr r4
ori r4,r4,MSR_IR|MSR_DR
mtspr SPRN_SRR1,r4
rfid
2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
mr r3,r4 /* get parameter */
mfmsr r0
ori r0,r0,MSR_IR|MSR_DR
xori r0,r0,MSR_IR|MSR_DR
mtspr SPRN_SRR1,r0
rfid
.section ".toc","aw" .section ".toc","aw"
PPC64_CACHES: PPC64_CACHES:
.tc ppc64_caches[TC],ppc64_caches .tc ppc64_caches[TC],ppc64_caches
...@@ -323,144 +244,6 @@ _GLOBAL(__flush_dcache_icache) ...@@ -323,144 +244,6 @@ _GLOBAL(__flush_dcache_icache)
bdnz 1b bdnz 1b
isync isync
blr blr
/*
* I/O string operations
*
* insb(port, buf, len)
* outsb(port, buf, len)
* insw(port, buf, len)
* outsw(port, buf, len)
* insl(port, buf, len)
* outsl(port, buf, len)
* insw_ns(port, buf, len)
* outsw_ns(port, buf, len)
* insl_ns(port, buf, len)
* outsl_ns(port, buf, len)
*
* The *_ns versions don't do byte-swapping.
*/
_GLOBAL(_insb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbz r5,0(r3)
eieio
stbu r5,1(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbzu r5,1(r4)
stb r5,0(r3)
bdnz 00b
sync
blr
_GLOBAL(_insw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhbrx r5,0,r3
eieio
sthu r5,2(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
sthbrx r5,0,r3
bdnz 00b
sync
blr
_GLOBAL(_insl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwbrx r5,0,r3
eieio
stwu r5,4(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stwbrx r5,0,r3
bdnz 00b
sync
blr
/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
_GLOBAL(_insw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhz r5,0(r3)
eieio
sthu r5,2(r4)
bdnz 00b
twi 0,r5,0
isync
blr
/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
_GLOBAL(_outsw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
sth r5,0(r3)
bdnz 00b
sync
blr
_GLOBAL(_insl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwz r5,0(r3)
eieio
stwu r5,4(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stw r5,0(r3)
bdnz 00b
sync
blr
/*
* identify_cpu and calls setup_cpu
@@ -605,6 +388,7 @@ _GLOBAL(real_writeb)
blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
#ifdef CONFIG_CPU_FREQ_PMAC64
/*
* SCOM access functions for 970 (FX only for now)
*
@@ -673,6 +457,7 @@ _GLOBAL(scom970_write)
/* restore interrupts */
mtmsrd r5,1
blr
#endif /* CONFIG_CPU_FREQ_PMAC64 */
/*
...
@@ -16,7 +16,6 @@
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/lppaca.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/it_lp_reg_save.h>
#include <asm/paca.h>
...
@@ -30,6 +30,7 @@
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -952,6 +953,7 @@ static struct ibm_pa_feature {
/* put this back once we know how to test if firmware does 64k IO */
{CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
#endif
{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};
static void __init check_cpu_pa_features(unsigned long node)
@@ -1124,24 +1126,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
tce_alloc_end = *lprop;
#endif
#ifdef CONFIG_PPC_RTAS
/* To help early debugging via the front panel, we retrieve a minimal
* set of RTAS infos now if available
*/
{
u64 *basep, *entryp, *sizep;
basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
sizep = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
if (basep && entryp && sizep) {
rtas.base = *basep;
rtas.entry = *entryp;
rtas.size = *sizep;
}
}
#endif /* CONFIG_PPC_RTAS */
#ifdef CONFIG_KEXEC
lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
@@ -1326,6 +1310,11 @@ void __init early_init_devtree(void *params)
/* Setup flat device-tree pointer */
initial_boot_params = params;
#ifdef CONFIG_PPC_RTAS
/* Some machines might need RTAS info for debugging, grab it now. */
of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif
/* Retrieve various informations from the /chosen node of the
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
@@ -2148,3 +2137,27 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
}
return NULL;
}
#ifdef DEBUG
static struct debugfs_blob_wrapper flat_dt_blob;
static int __init export_flat_device_tree(void)
{
struct dentry *d;
d = debugfs_create_dir("powerpc", NULL);
if (!d)
return 1;
flat_dt_blob.data = initial_boot_params;
flat_dt_blob.size = initial_boot_params->totalsize;
d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
d, &flat_dt_blob);
if (!d)
return 1;
return 0;
}
__initcall(export_flat_device_tree);
#endif
@@ -38,16 +38,19 @@
struct rtas_t rtas = {
.lock = SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
struct rtas_suspend_me_data {
long waiting;
struct rtas_args *args;
};
EXPORT_SYMBOL(rtas);
DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL(rtas_data_buf_lock);
char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
EXPORT_SYMBOL(rtas_data_buf);
unsigned long rtas_rmo_buf;
/*
@@ -106,11 +109,71 @@ static void call_rtas_display_status_delay(char c)
}
}
-void __init udbg_init_rtas(void)
+void __init udbg_init_rtas_panel(void)
{
udbg_putc = call_rtas_display_status_delay;
}
#ifdef CONFIG_UDBG_RTAS_CONSOLE
/* If you think you're dying before early_init_dt_scan_rtas() does its
* work, you can hard code the token values for your firmware here and
* hardcode rtas.base/entry etc.
*/
static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;
static void udbg_rtascon_putc(char c)
{
int tries;
if (!rtas.base)
return;
/* Add CRs before LFs */
if (c == '\n')
udbg_rtascon_putc('\r');
/* if there is more than one character to be displayed, wait a bit */
for (tries = 0; tries < 16; tries++) {
if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
break;
udelay(1000);
}
}
static int udbg_rtascon_getc_poll(void)
{
int c;
if (!rtas.base)
return -1;
if (rtas_call(rtas_getchar_token, 0, 2, &c))
return -1;
return c;
}
static int udbg_rtascon_getc(void)
{
int c;
while ((c = udbg_rtascon_getc_poll()) == -1)
;
return c;
}
void __init udbg_init_rtas_console(void)
{
udbg_putc = udbg_rtascon_putc;
udbg_getc = udbg_rtascon_getc;
udbg_getc_poll = udbg_rtascon_getc_poll;
}
#endif /* CONFIG_UDBG_RTAS_CONSOLE */
void rtas_progress(char *s, unsigned short hex)
{
struct device_node *root;
@@ -236,6 +299,7 @@ int rtas_token(const char *service)
tokp = (int *) get_property(rtas.dev, service, NULL);
return tokp ? *tokp : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_token);
#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
@@ -328,7 +392,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
char *buff_copy = NULL;
int ret;
-if (token == RTAS_UNKNOWN_SERVICE)
+if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
/* Gotta do something different here, use global lock for now... */
@@ -369,6 +433,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
}
return ret;
}
EXPORT_SYMBOL(rtas_call);
/* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
* code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
@@ -388,6 +453,7 @@ unsigned int rtas_busy_delay_time(int status)
return ms;
}
EXPORT_SYMBOL(rtas_busy_delay_time);
/* For an RTAS busy status code, perform the hinted delay. */
unsigned int rtas_busy_delay(int status)
@@ -401,6 +467,7 @@ unsigned int rtas_busy_delay(int status)
return ms;
}
EXPORT_SYMBOL(rtas_busy_delay);
int rtas_error_rc(int rtas_rc)
{
@@ -446,6 +513,7 @@ int rtas_get_power_level(int powerdomain, int *level)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL(rtas_get_power_level);
int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
@@ -463,6 +531,7 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL(rtas_set_power_level);
int rtas_get_sensor(int sensor, int index, int *state)
{
@@ -480,6 +549,7 @@ int rtas_get_sensor(int sensor, int index, int *state)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL(rtas_get_sensor);
int rtas_set_indicator(int indicator, int index, int new_value)
{
@@ -497,6 +567,7 @@ int rtas_set_indicator(int indicator, int index, int new_value)
return rtas_error_rc(rc);
return rc;
}
EXPORT_SYMBOL(rtas_set_indicator);
void rtas_restart(char *cmd)
{
@@ -791,14 +862,34 @@ void __init rtas_initialize(void)
#endif
}
int __init early_init_dt_scan_rtas(unsigned long node,
const char *uname, int depth, void *data)
{
u32 *basep, *entryp, *sizep;
-EXPORT_SYMBOL(rtas_token);
-EXPORT_SYMBOL(rtas_call);
-EXPORT_SYMBOL(rtas_data_buf);
-EXPORT_SYMBOL(rtas_data_buf_lock);
-EXPORT_SYMBOL(rtas_busy_delay_time);
-EXPORT_SYMBOL(rtas_busy_delay);
-EXPORT_SYMBOL(rtas_get_sensor);
-EXPORT_SYMBOL(rtas_get_power_level);
-EXPORT_SYMBOL(rtas_set_power_level);
-EXPORT_SYMBOL(rtas_set_indicator);
+if (depth != 1 || strcmp(uname, "rtas") != 0)
+return 0;
+basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
+entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
+sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);
+if (basep && entryp && sizep) {
+rtas.base = *basep;
+rtas.entry = *entryp;
rtas.size = *sizep;
}
#ifdef CONFIG_UDBG_RTAS_CONSOLE
basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
if (basep)
rtas_putchar_token = *basep;
basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
if (basep)
rtas_getchar_token = *basep;
#endif
/* break now */
return 1;
}
@@ -149,6 +149,13 @@ early_param("smt-enabled", early_smt_enabled);
#define check_smt_enabled()
#endif /* CONFIG_SMP */
/* Put the paca pointer into r13 and SPRG3 */
void __init setup_paca(int cpu)
{
local_paca = &paca[cpu];
mtspr(SPRN_SPRG3, local_paca);
}
/*
* Early initialization entry point. This is called by head.S
* with MMU translation disabled. We rely on the "feature" of
@@ -170,6 +177,9 @@ early_param("smt-enabled", early_smt_enabled);
void __init early_setup(unsigned long dt_ptr)
{
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
setup_paca(0);
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
@@ -183,7 +193,7 @@ void __init early_setup(unsigned long dt_ptr)
early_init_devtree(__va(dt_ptr));
/* Now we know the logical id of our boot cpu, setup the paca. */
-setup_boot_paca();
+setup_paca(boot_cpuid);
/* Fix up paca fields required for the boot cpu */
get_paca()->cpu_start = 1;
@@ -350,19 +360,11 @@ void __init setup_system(void)
*/
unflatten_device_tree();
#ifdef CONFIG_KEXEC
kexec_setup(); /* requires unflattened device tree. */
#endif
/*
* Fill the ppc64_caches & systemcfg structures with informations
* retrieved from the device-tree. Need to be called before
* finish_device_tree() since the later requires some of the
-* informations filled up here to properly parse the interrupt
-* tree.
-* It also sets up the cache line sizes which allows to call
-* routines like flush_icache_range (used by the hash init
-* later on).
+* informations filled up here to properly parse the interrupt tree.
*/
initialize_cache_info();
...
@@ -52,9 +52,13 @@
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>
#ifdef CONFIG_PPC64 /* XXX */
#define _IO_BASE pci_io_base
#ifdef CONFIG_KEXEC
cpumask_t cpus_in_sr = CPU_MASK_NONE;
#endif
#endif
#ifdef CONFIG_DEBUGGER
@@ -97,7 +101,7 @@ static DEFINE_SPINLOCK(die_lock);
int die(const char *str, struct pt_regs *regs, long err)
{
-static int die_counter, crash_dump_start = 0;
+static int die_counter;
if (debugger(regs))
return 1;
@@ -137,21 +141,12 @@ int die(const char *str, struct pt_regs *regs, long err)
print_modules();
show_regs(regs);
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
-if (!crash_dump_start && kexec_should_crash(current)) {
-crash_dump_start = 1;
-spin_unlock_irq(&die_lock);
+if (kexec_should_crash(current) ||
+kexec_sr_activated(smp_processor_id()))
crash_kexec(regs);
-/* NOTREACHED */
+crash_kexec_secondary(regs);
}
spin_unlock_irq(&die_lock);
if (crash_dump_start)
/*
* Only for soft-reset: Other CPUs will be responded to an IPI
* sent by first kexec CPU.
*/
for(;;)
;
if (in_interrupt())
panic("Fatal exception in interrupt");
@@ -215,6 +210,10 @@ void system_reset_exception(struct pt_regs *regs)
return;
}
#ifdef CONFIG_KEXEC
cpu_set(smp_processor_id(), cpus_in_sr);
#endif
die("System Reset", regs, SIGABRT); die("System Reset", regs, SIGABRT);
/* Must die if the interrupt is not recoverable */ /* Must die if the interrupt is not recoverable */
......
...@@ -34,9 +34,12 @@ void __init udbg_early_init(void) ...@@ -34,9 +34,12 @@ void __init udbg_early_init(void)
#elif defined(CONFIG_PPC_EARLY_DEBUG_G5) #elif defined(CONFIG_PPC_EARLY_DEBUG_G5)
/* For use on Apple G5 machines */ /* For use on Apple G5 machines */
udbg_init_pmac_realmode(); udbg_init_pmac_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS) #elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL)
/* RTAS panel debug */ /* RTAS panel debug */
udbg_init_rtas(); udbg_init_rtas_panel();
#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE)
/* RTAS console debug */
udbg_init_rtas_console();
#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) #elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
/* Maple real mode debug */ /* Maple real mode debug */
udbg_init_maple_realmode(); udbg_init_maple_realmode();
......
...@@ -520,7 +520,7 @@ static inline int tlb_batching_enabled(void) ...@@ -520,7 +520,7 @@ static inline int tlb_batching_enabled(void)
} }
#endif #endif
void hpte_init_native(void) void __init hpte_init_native(void)
{ {
ppc_md.hpte_invalidate = native_hpte_invalidate; ppc_md.hpte_invalidate = native_hpte_invalidate;
ppc_md.hpte_updatepp = native_hpte_updatepp; ppc_md.hpte_updatepp = native_hpte_updatepp;
...@@ -530,5 +530,4 @@ void hpte_init_native(void) ...@@ -530,5 +530,4 @@ void hpte_init_native(void)
ppc_md.hpte_clear_all = native_hpte_clear; ppc_md.hpte_clear_all = native_hpte_clear;
if (tlb_batching_enabled()) if (tlb_batching_enabled())
ppc_md.flush_hash_range = native_flush_hash_range; ppc_md.flush_hash_range = native_flush_hash_range;
htab_finish_init();
} }
...@@ -167,34 +167,12 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, ...@@ -167,34 +167,12 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
hash = hpt_hash(va, shift); hash = hpt_hash(va, shift);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* The crap below can be cleaned once ppc_md.probe() can DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
* set up the hash callbacks, thus we can just use the
* normal insert callback here. BUG_ON(!ppc_md.hpte_insert);
*/ ret = ppc_md.hpte_insert(hpteg, va, paddr,
#ifdef CONFIG_PPC_ISERIES tmp_mode, HPTE_V_BOLTED, psize);
if (machine_is(iseries))
ret = iSeries_hpte_insert(hpteg, va,
paddr,
tmp_mode,
HPTE_V_BOLTED,
psize);
else
#endif
#ifdef CONFIG_PPC_PSERIES
if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
ret = pSeries_lpar_hpte_insert(hpteg, va,
paddr,
tmp_mode,
HPTE_V_BOLTED,
psize);
else
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
ret = native_hpte_insert(hpteg, va,
paddr,
tmp_mode, HPTE_V_BOLTED,
psize);
#endif
if (ret < 0) if (ret < 0)
break; break;
} }
...@@ -413,6 +391,41 @@ void create_section_mapping(unsigned long start, unsigned long end) ...@@ -413,6 +391,41 @@ void create_section_mapping(unsigned long start, unsigned long end)
} }
#endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* CONFIG_MEMORY_HOTPLUG */
static inline void make_bl(unsigned int *insn_addr, void *func)
{
unsigned long funcp = *((unsigned long *)func);
int offset = funcp - (unsigned long)insn_addr;
*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
flush_icache_range((unsigned long)insn_addr, 4+
(unsigned long)insn_addr);
}
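make_bl() above patches each reserved call site in the low-level hash assembly with a direct PowerPC "bl" (branch-and-link) to the handler installed in ppc_md; because 64-bit function pointers are descriptors, it dereferences func first to get the real entry address. As a rough, stand-alone illustration of the instruction encoding it builds (primary opcode 18, LK=1, 26-bit word-aligned offset), with made-up addresses and assuming the target is within the branch's +/-32MB reach:

/* Illustrative sketch only -- not part of the commit.  Shows how the
 * expression 0x48000001 | (offset & 0x03fffffc) in make_bl() forms a
 * PowerPC "bl" instruction, assuming the offset fits in 26 bits. */
#include <stdint.h>
#include <stdio.h>

static uint32_t encode_bl(uint32_t insn_addr, uint32_t target)
{
	int32_t offset = (int32_t)(target - insn_addr);

	/* primary opcode 18 (0x48000000), AA = 0, LK = 1 */
	return 0x48000001u | ((uint32_t)offset & 0x03fffffcu);
}

int main(void)
{
	/* hypothetical addresses, chosen only for the demonstration */
	printf("bl encoding: 0x%08x\n", encode_bl(0x00001000, 0x00002000));
	return 0;
}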
static void __init htab_finish_init(void)
{
extern unsigned int *htab_call_hpte_insert1;
extern unsigned int *htab_call_hpte_insert2;
extern unsigned int *htab_call_hpte_remove;
extern unsigned int *htab_call_hpte_updatepp;
#ifdef CONFIG_PPC_64K_PAGES
extern unsigned int *ht64_call_hpte_insert1;
extern unsigned int *ht64_call_hpte_insert2;
extern unsigned int *ht64_call_hpte_remove;
extern unsigned int *ht64_call_hpte_updatepp;
make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_64K_PAGES */
make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
void __init htab_initialize(void) void __init htab_initialize(void)
{ {
unsigned long table; unsigned long table;
...@@ -525,6 +538,8 @@ void __init htab_initialize(void) ...@@ -525,6 +538,8 @@ void __init htab_initialize(void)
mmu_linear_psize)); mmu_linear_psize));
} }
htab_finish_init();
DBG(" <- htab_initialize()\n"); DBG(" <- htab_initialize()\n");
} }
#undef KB #undef KB
...@@ -787,16 +802,6 @@ void flush_hash_range(unsigned long number, int local) ...@@ -787,16 +802,6 @@ void flush_hash_range(unsigned long number, int local)
} }
} }
static inline void make_bl(unsigned int *insn_addr, void *func)
{
unsigned long funcp = *((unsigned long *)func);
int offset = funcp - (unsigned long)insn_addr;
*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
flush_icache_range((unsigned long)insn_addr, 4+
(unsigned long)insn_addr);
}
/* /*
* low_hash_fault is called when the low level hash code failed * low_hash_fault is called when the low level hash code failed
* to insert a PTE due to a hypervisor error * to insert a PTE due to a hypervisor error
...@@ -815,28 +820,3 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address) ...@@ -815,28 +820,3 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
} }
bad_page_fault(regs, address, SIGBUS); bad_page_fault(regs, address, SIGBUS);
} }
void __init htab_finish_init(void)
{
extern unsigned int *htab_call_hpte_insert1;
extern unsigned int *htab_call_hpte_insert2;
extern unsigned int *htab_call_hpte_remove;
extern unsigned int *htab_call_hpte_updatepp;
#ifdef CONFIG_PPC_64K_PAGES
extern unsigned int *ht64_call_hpte_insert1;
extern unsigned int *ht64_call_hpte_insert2;
extern unsigned int *ht64_call_hpte_remove;
extern unsigned int *ht64_call_hpte_updatepp;
make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_64K_PAGES */
make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
...@@ -44,7 +44,9 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) ...@@ -44,7 +44,9 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return err; return err;
if (index > MAX_CONTEXT) { if (index > MAX_CONTEXT) {
spin_lock(&mmu_context_lock);
idr_remove(&mmu_context_idr, index); idr_remove(&mmu_context_idr, index);
spin_unlock(&mmu_context_lock);
return -ENOMEM; return -ENOMEM;
} }
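The hunk above takes mmu_context_lock around the error-path idr_remove(), matching the locking already used when the context id is allocated. Purely as a sketch of the pattern being protected -- not the literal function body, and assuming the usual idr_pre_get()/idr_get_new_above() loop of this kernel generation; the wrapper name is invented:

/* Sketch only: every access to mmu_context_idr, including the
 * error-path idr_remove(), runs under mmu_context_lock. */
static int alloc_context_id_sketch(void)
{
	int index = 0, err;

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		idr_remove(&mmu_context_idr, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}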
......
...@@ -7,6 +7,7 @@ choice ...@@ -7,6 +7,7 @@ choice
config MPC8641_HPCN config MPC8641_HPCN
bool "Freescale MPC8641 HPCN" bool "Freescale MPC8641 HPCN"
select PPC_I8259
help help
This option enables support for the MPC8641 HPCN board. This option enables support for the MPC8641 HPCN board.
...@@ -28,9 +29,4 @@ config PPC_INDIRECT_PCI_BE ...@@ -28,9 +29,4 @@ config PPC_INDIRECT_PCI_BE
depends on PPC_86xx depends on PPC_86xx
default y default y
config PPC_STD_MMU
bool
depends on PPC_86xx
default y
endmenu endmenu
...@@ -2,9 +2,6 @@ ...@@ -2,9 +2,6 @@
# Makefile for the PowerPC 86xx linux kernel. # Makefile for the PowerPC 86xx linux kernel.
# #
ifeq ($(CONFIG_PPC_86xx),y)
obj-$(CONFIG_SMP) += mpc86xx_smp.o obj-$(CONFIG_SMP) += mpc86xx_smp.o
endif
obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
obj-$(CONFIG_PCI) += pci.o mpc86xx_pcie.o obj-$(CONFIG_PCI) += pci.o mpc86xx_pcie.o
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#ifndef __MPC8641_HPCN_H__ #ifndef __MPC8641_HPCN_H__
#define __MPC8641_HPCN_H__ #define __MPC8641_HPCN_H__
#include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
/* PCI interrupt controller */ /* PCI interrupt controller */
......
...@@ -15,11 +15,13 @@ ...@@ -15,11 +15,13 @@
* mpc86xx_* files. Mostly for use by mpc86xx_setup(). * mpc86xx_* files. Mostly for use by mpc86xx_setup().
*/ */
extern int __init add_bridge(struct device_node *dev); extern int add_bridge(struct device_node *dev);
extern void __init setup_indirect_pcie(struct pci_controller *hose, extern int mpc86xx_exclude_device(u_char bus, u_char devfn);
extern void setup_indirect_pcie(struct pci_controller *hose,
u32 cfg_addr, u32 cfg_data); u32 cfg_addr, u32 cfg_data);
extern void __init setup_indirect_pcie_nomap(struct pci_controller *hose, extern void setup_indirect_pcie_nomap(struct pci_controller *hose,
void __iomem *cfg_addr, void __iomem *cfg_addr,
void __iomem *cfg_data); void __iomem *cfg_data);
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
* option) any later version. * option) any later version.
*/ */
#include <linux/config.h>
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/pci.h> #include <linux/pci.h>
...@@ -36,6 +35,7 @@ ...@@ -36,6 +35,7 @@
#include <sysdev/fsl_soc.h> #include <sysdev/fsl_soc.h>
#include "mpc86xx.h" #include "mpc86xx.h"
#include "mpc8641_hpcn.h"
#ifndef CONFIG_PCI #ifndef CONFIG_PCI
unsigned long isa_io_base = 0; unsigned long isa_io_base = 0;
...@@ -186,17 +186,130 @@ mpc86xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) ...@@ -186,17 +186,130 @@ mpc86xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
return PCI_IRQ_TABLE_LOOKUP + I8259_OFFSET; return PCI_IRQ_TABLE_LOOKUP + I8259_OFFSET;
} }
static void __devinit quirk_ali1575(struct pci_dev *dev)
{
unsigned short temp;
/*
* ALI1575 interrupts route table setup:
*
* IRQ pin IRQ#
* PIRQA ---- 3
* PIRQB ---- 4
* PIRQC ---- 5
* PIRQD ---- 6
* PIRQE ---- 9
* PIRQF ---- 10
* PIRQG ---- 11
* PIRQH ---- 12
*
* interrupts for PCI slot0 -- PIRQA / PIRQB / PIRQC / PIRQD
* PCI slot1 -- PIRQB / PIRQC / PIRQD / PIRQA
*/
pci_write_config_dword(dev, 0x48, 0xb9317542);
/* USB 1.1 OHCI controller 1, interrupt: PIRQE */
pci_write_config_byte(dev, 0x86, 0x0c);
/* USB 1.1 OHCI controller 2, interrupt: PIRQF */
pci_write_config_byte(dev, 0x87, 0x0d);
/* USB 1.1 OHCI controller 3, interrupt: PIRQH */
pci_write_config_byte(dev, 0x88, 0x0f);
/* USB 2.0 controller, interrupt: PIRQ7 */
pci_write_config_byte(dev, 0x74, 0x06);
/* Audio controller, interrupt: PIRQE */
pci_write_config_byte(dev, 0x8a, 0x0c);
/* Modem controller, interrupt: PIRQF */
pci_write_config_byte(dev, 0x8b, 0x0d);
/* HD audio controller, interrupt: PIRQG */
pci_write_config_byte(dev, 0x8c, 0x0e);
/* Serial ATA interrupt: PIRQD */
pci_write_config_byte(dev, 0x8d, 0x0b);
/* SMB interrupt: PIRQH */
pci_write_config_byte(dev, 0x8e, 0x0f);
/* PMU ACPI SCI interrupt: PIRQH */
pci_write_config_byte(dev, 0x8f, 0x0f);
/* Primary PATA IDE IRQ: 14
* Secondary PATA IDE IRQ: 15
*/
pci_write_config_byte(dev, 0x44, 0x3d);
pci_write_config_byte(dev, 0x75, 0x0f);
/* Set IRQ14 and IRQ15 to legacy IRQs */
pci_read_config_word(dev, 0x46, &temp);
temp |= 0xc000;
pci_write_config_word(dev, 0x46, temp);
/* Set i8259 interrupt trigger
* IRQ 3: Level
* IRQ 4: Level
* IRQ 5: Level
* IRQ 6: Level
* IRQ 7: Level
* IRQ 9: Level
* IRQ 10: Level
* IRQ 11: Level
* IRQ 12: Level
* IRQ 14: Edge
* IRQ 15: Edge
*/
outb(0xfa, 0x4d0);
outb(0x1e, 0x4d1);
}
int static void __devinit quirk_uli5288(struct pci_dev *dev)
mpc86xx_exclude_device(u_char bus, u_char devfn)
{ {
#if !defined(CONFIG_PCI) unsigned char c;
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND; pci_read_config_byte(dev,0x83,&c);
#endif c |= 0x80;
pci_write_config_byte(dev, 0x83, c);
pci_write_config_byte(dev, 0x09, 0x01);
pci_write_config_byte(dev, 0x0a, 0x06);
pci_read_config_byte(dev,0x83,&c);
c &= 0x7f;
pci_write_config_byte(dev, 0x83, c);
return PCIBIOS_SUCCESSFUL; pci_read_config_byte(dev,0x84,&c);
c |= 0x01;
pci_write_config_byte(dev, 0x84, c);
} }
static void __devinit quirk_uli5229(struct pci_dev *dev)
{
unsigned short temp;
pci_write_config_word(dev, 0x04, 0x0405);
pci_read_config_word(dev, 0x4a, &temp);
temp |= 0x1000;
pci_write_config_word(dev, 0x4a, temp);
}
static void __devinit early_uli5249(struct pci_dev *dev)
{
unsigned char temp;
pci_write_config_word(dev, 0x04, 0x0007);
pci_read_config_byte(dev, 0x7c, &temp);
pci_write_config_byte(dev, 0x7c, 0x80);
pci_write_config_byte(dev, 0x09, 0x01);
pci_write_config_byte(dev, 0x7c, temp);
dev->class |= 0x1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_ali1575);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249);
#endif /* CONFIG_PCI */ #endif /* CONFIG_PCI */
......
...@@ -10,7 +10,6 @@ ...@@ -10,7 +10,6 @@
* option) any later version. * option) any later version.
*/ */
#include <linux/config.h>
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -34,8 +33,8 @@ extern unsigned long __secondary_hold_acknowledge; ...@@ -34,8 +33,8 @@ extern unsigned long __secondary_hold_acknowledge;
static void __init static void __init
smp_86xx_release_core(int nr) smp_86xx_release_core(int nr)
{ {
void *mcm_vaddr; __be32 __iomem *mcm_vaddr;
unsigned long vaddr, pcr; unsigned long pcr;
if (nr < 0 || nr >= NR_CPUS) if (nr < 0 || nr >= NR_CPUS)
return; return;
...@@ -45,10 +44,9 @@ smp_86xx_release_core(int nr) ...@@ -45,10 +44,9 @@ smp_86xx_release_core(int nr)
*/ */
mcm_vaddr = ioremap(get_immrbase() + MPC86xx_MCM_OFFSET, mcm_vaddr = ioremap(get_immrbase() + MPC86xx_MCM_OFFSET,
MPC86xx_MCM_SIZE); MPC86xx_MCM_SIZE);
vaddr = (unsigned long)mcm_vaddr + MCM_PORT_CONFIG_OFFSET; pcr = in_be32(mcm_vaddr + (MCM_PORT_CONFIG_OFFSET >> 2));
pcr = in_be32((volatile unsigned *)vaddr);
pcr |= 1 << (nr + 24); pcr |= 1 << (nr + 24);
out_be32((volatile unsigned *)vaddr, pcr); out_be32(mcm_vaddr + (MCM_PORT_CONFIG_OFFSET >> 2), pcr);
} }
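The rewritten smp_86xx_release_core() now does its read-modify-write through a __be32 pointer, so the byte offset of the MCM port-configuration register is shifted right by two before being added (pointer arithmetic advances in 4-byte elements), and releasing core nr means setting bit (24 + nr). A stand-alone sketch of that arithmetic, with an invented register offset:

/* Illustrative sketch only.  Mirrors the offset and bit arithmetic of
 * the hunk above; the register block and offset are invented. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PORT_CONFIG_OFFSET 0x0010	/* made-up byte offset */

int main(void)
{
	uint32_t regs[16] = { 0 };
	volatile uint32_t *base = regs;
	/* byte offset >> 2 because each element is 4 bytes wide */
	volatile uint32_t *pcr = base + (EXAMPLE_PORT_CONFIG_OFFSET >> 2);

	*pcr |= 1u << (1 + 24);		/* release core 1: set bit 25 */
	printf("regs[4] = 0x%08x\n", (unsigned int)regs[4]);
	return 0;
}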
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
* option) any later version. * option) any later version.
*/ */
#include <linux/config.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -122,15 +121,12 @@ static void __init setup_pcie_atmu(struct pci_controller *hose, struct resource ...@@ -122,15 +121,12 @@ static void __init setup_pcie_atmu(struct pci_controller *hose, struct resource
static void __init static void __init
mpc86xx_setup_pcie(struct pci_controller *hose, u32 pcie_offset, u32 pcie_size) mpc86xx_setup_pcie(struct pci_controller *hose, u32 pcie_offset, u32 pcie_size)
{ {
volatile struct ccsr_pex *pcie;
u16 cmd; u16 cmd;
unsigned int temps; unsigned int temps;
DBG("PCIE host controller register offset 0x%08x, size 0x%08x.\n", DBG("PCIE host controller register offset 0x%08x, size 0x%08x.\n",
pcie_offset, pcie_size); pcie_offset, pcie_size);
pcie = ioremap(pcie_offset, pcie_size);
early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd); early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
| PCI_COMMAND_IO; | PCI_COMMAND_IO;
...@@ -144,6 +140,14 @@ mpc86xx_setup_pcie(struct pci_controller *hose, u32 pcie_offset, u32 pcie_size) ...@@ -144,6 +140,14 @@ mpc86xx_setup_pcie(struct pci_controller *hose, u32 pcie_offset, u32 pcie_size)
early_write_config_dword(hose, 0, 0, PCI_PRIMARY_BUS, temps); early_write_config_dword(hose, 0, 0, PCI_PRIMARY_BUS, temps);
} }
int mpc86xx_exclude_device(u_char bus, u_char devfn)
{
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
}
int __init add_bridge(struct device_node *dev) int __init add_bridge(struct device_node *dev)
{ {
int len; int len;
...@@ -198,128 +202,3 @@ int __init add_bridge(struct device_node *dev) ...@@ -198,128 +202,3 @@ int __init add_bridge(struct device_node *dev)
return 0; return 0;
} }
static void __devinit quirk_ali1575(struct pci_dev *dev)
{
unsigned short temp;
/*
* ALI1575 interrupts route table setup:
*
* IRQ pin IRQ#
* PIRQA ---- 3
* PIRQB ---- 4
* PIRQC ---- 5
* PIRQD ---- 6
* PIRQE ---- 9
* PIRQF ---- 10
* PIRQG ---- 11
* PIRQH ---- 12
*
* interrupts for PCI slot0 -- PIRQA / PIRQB / PIRQC / PIRQD
* PCI slot1 -- PIRQB / PIRQC / PIRQD / PIRQA
*/
pci_write_config_dword(dev, 0x48, 0xb9317542);
/* USB 1.1 OHCI controller 1, interrupt: PIRQE */
pci_write_config_byte(dev, 0x86, 0x0c);
/* USB 1.1 OHCI controller 2, interrupt: PIRQF */
pci_write_config_byte(dev, 0x87, 0x0d);
/* USB 1.1 OHCI controller 3, interrupt: PIRQH */
pci_write_config_byte(dev, 0x88, 0x0f);
/* USB 2.0 controller, interrupt: PIRQ7 */
pci_write_config_byte(dev, 0x74, 0x06);
/* Audio controller, interrupt: PIRQE */
pci_write_config_byte(dev, 0x8a, 0x0c);
/* Modem controller, interrupt: PIRQF */
pci_write_config_byte(dev, 0x8b, 0x0d);
/* HD audio controller, interrupt: PIRQG */
pci_write_config_byte(dev, 0x8c, 0x0e);
/* Serial ATA interrupt: PIRQD */
pci_write_config_byte(dev, 0x8d, 0x0b);
/* SMB interrupt: PIRQH */
pci_write_config_byte(dev, 0x8e, 0x0f);
/* PMU ACPI SCI interrupt: PIRQH */
pci_write_config_byte(dev, 0x8f, 0x0f);
/* Primary PATA IDE IRQ: 14
* Secondary PATA IDE IRQ: 15
*/
pci_write_config_byte(dev, 0x44, 0x3d);
pci_write_config_byte(dev, 0x75, 0x0f);
/* Set IRQ14 and IRQ15 to legacy IRQs */
pci_read_config_word(dev, 0x46, &temp);
temp |= 0xc000;
pci_write_config_word(dev, 0x46, temp);
/* Set i8259 interrupt trigger
* IRQ 3: Level
* IRQ 4: Level
* IRQ 5: Level
* IRQ 6: Level
* IRQ 7: Level
* IRQ 9: Level
* IRQ 10: Level
* IRQ 11: Level
* IRQ 12: Level
* IRQ 14: Edge
* IRQ 15: Edge
*/
outb(0xfa, 0x4d0);
outb(0x1e, 0x4d1);
}
static void __devinit quirk_uli5288(struct pci_dev *dev)
{
unsigned char c;
pci_read_config_byte(dev,0x83,&c);
c |= 0x80;
pci_write_config_byte(dev, 0x83, c);
pci_write_config_byte(dev, 0x09, 0x01);
pci_write_config_byte(dev, 0x0a, 0x06);
pci_read_config_byte(dev,0x83,&c);
c &= 0x7f;
pci_write_config_byte(dev, 0x83, c);
pci_read_config_byte(dev,0x84,&c);
c |= 0x01;
pci_write_config_byte(dev, 0x84, c);
}
static void __devinit quirk_uli5229(struct pci_dev *dev)
{
unsigned short temp;
pci_write_config_word(dev, 0x04, 0x0405);
pci_read_config_word(dev, 0x4a, &temp);
temp |= 0x1000;
pci_write_config_word(dev, 0x4a, temp);
}
static void __devinit early_uli5249(struct pci_dev *dev)
{
unsigned char temp;
pci_write_config_word(dev, 0x04, 0x0007);
pci_read_config_byte(dev, 0x7c, &temp);
pci_write_config_byte(dev, 0x7c, 0x80);
pci_write_config_byte(dev, 0x09, 0x01);
pci_write_config_byte(dev, 0x7c, temp);
dev->class |= 0x1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_ali1575);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249);
...@@ -14,3 +14,4 @@ obj-$(CONFIG_PPC_PSERIES) += pseries/ ...@@ -14,3 +14,4 @@ obj-$(CONFIG_PPC_PSERIES) += pseries/
obj-$(CONFIG_PPC_ISERIES) += iseries/ obj-$(CONFIG_PPC_ISERIES) += iseries/
obj-$(CONFIG_PPC_MAPLE) += maple/ obj-$(CONFIG_PPC_MAPLE) += maple/
obj-$(CONFIG_PPC_CELL) += cell/ obj-$(CONFIG_PPC_CELL) += cell/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
...@@ -6,6 +6,7 @@ config SPU_FS ...@@ -6,6 +6,7 @@ config SPU_FS
default m default m
depends on PPC_CELL depends on PPC_CELL
select SPU_BASE select SPU_BASE
select MEMORY_HOTPLUG
help help
The SPU file system is used to access Synergistic Processing The SPU file system is used to access Synergistic Processing
Units on machines implementing the Broadband Processor Units on machines implementing the Broadband Processor
...@@ -18,7 +19,6 @@ config SPU_BASE ...@@ -18,7 +19,6 @@ config SPU_BASE
config SPUFS_MMAP config SPUFS_MMAP
bool bool
depends on SPU_FS && SPARSEMEM depends on SPU_FS && SPARSEMEM
select MEMORY_HOTPLUG
default y default y
config CBE_RAS config CBE_RAS
......
...@@ -125,8 +125,6 @@ static void __init cell_init_early(void) ...@@ -125,8 +125,6 @@ static void __init cell_init_early(void)
{ {
DBG(" -> cell_init_early()\n"); DBG(" -> cell_init_early()\n");
hpte_init_native();
cell_init_iommu(); cell_init_iommu();
ppc64_interrupt_controller = IC_CELL_PIC; ppc64_interrupt_controller = IC_CELL_PIC;
...@@ -139,11 +137,17 @@ static int __init cell_probe(void) ...@@ -139,11 +137,17 @@ static int __init cell_probe(void)
{ {
unsigned long root = of_get_flat_dt_root(); unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "IBM,CBEA") || if (!of_flat_dt_is_compatible(root, "IBM,CBEA") &&
of_flat_dt_is_compatible(root, "IBM,CPBW-1.0")) !of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
return 1; return 0;
#ifdef CONFIG_UDBG_RTAS_CONSOLE
udbg_init_rtas_console();
#endif
hpte_init_native();
return 0; return 1;
} }
/* /*
......
...@@ -168,12 +168,12 @@ spu_irq_class_0_bottom(struct spu *spu) ...@@ -168,12 +168,12 @@ spu_irq_class_0_bottom(struct spu *spu)
stat &= mask; stat &= mask;
if (stat & 1) /* invalid MFC DMA */ if (stat & 1) /* invalid DMA alignment */
__spu_trap_invalid_dma(spu);
if (stat & 2) /* invalid DMA alignment */
__spu_trap_dma_align(spu); __spu_trap_dma_align(spu);
if (stat & 2) /* invalid MFC DMA */
__spu_trap_invalid_dma(spu);
if (stat & 4) /* error on SPU */ if (stat & 4) /* error on SPU */
__spu_trap_error(spu); __spu_trap_error(spu);
......
...@@ -204,7 +204,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -204,7 +204,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE); | _PAGE_NO_CACHE | _PAGE_GUARDED);
vma->vm_ops = &spufs_cntl_mmap_vmops; vma->vm_ops = &spufs_cntl_mmap_vmops;
return 0; return 0;
...@@ -675,7 +675,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -675,7 +675,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE); | _PAGE_NO_CACHE | _PAGE_GUARDED);
vma->vm_ops = &spufs_signal1_mmap_vmops; vma->vm_ops = &spufs_signal1_mmap_vmops;
return 0; return 0;
...@@ -762,7 +762,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -762,7 +762,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
/* FIXME: */ /* FIXME: */
vma->vm_flags |= VM_RESERVED; vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE); | _PAGE_NO_CACHE | _PAGE_GUARDED);
vma->vm_ops = &spufs_signal2_mmap_vmops; vma->vm_ops = &spufs_signal2_mmap_vmops;
return 0; return 0;
...@@ -850,7 +850,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -850,7 +850,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE); | _PAGE_NO_CACHE | _PAGE_GUARDED);
vma->vm_ops = &spufs_mss_mmap_vmops; vma->vm_ops = &spufs_mss_mmap_vmops;
return 0; return 0;
...@@ -899,7 +899,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -899,7 +899,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE); | _PAGE_NO_CACHE | _PAGE_GUARDED);
vma->vm_ops = &spufs_mfc_mmap_vmops; vma->vm_ops = &spufs_mfc_mmap_vmops;
return 0; return 0;
......
...@@ -464,7 +464,8 @@ static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu) ...@@ -464,7 +464,8 @@ static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
* Poll MFC_CNTL[Ps] until value '11' is read * Poll MFC_CNTL[Ps] until value '11' is read
* (purge complete). * (purge complete).
*/ */
POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) & POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
MFC_CNTL_PURGE_DMA_COMPLETE); MFC_CNTL_PURGE_DMA_COMPLETE);
} }
...@@ -1028,7 +1029,8 @@ static inline void wait_suspend_mfc_complete(struct spu_state *csa, ...@@ -1028,7 +1029,8 @@ static inline void wait_suspend_mfc_complete(struct spu_state *csa,
* Restore, Step 47. * Restore, Step 47.
* Poll MFC_CNTL[Ss] until 11 is returned. * Poll MFC_CNTL[Ss] until 11 is returned.
*/ */
POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) & POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE); MFC_CNTL_SUSPEND_COMPLETE);
} }
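Both POLL_WHILE_FALSE fixes above change the test from a bare bitwise AND to masking the status field first and then comparing it against the completion value. The completion code is the two-bit pattern '11', so with the old form any single set bit overlapping it would have ended the poll early. A stand-alone sketch of the difference, using stand-in values for the mask and the status:

/* Illustrative sketch only.  STATUS_MASK / STATUS_COMPLETE stand in for
 * the MFC_CNTL purge/suspend status definitions used above. */
#include <stdint.h>
#include <stdio.h>

#define STATUS_MASK	0x3ull
#define STATUS_COMPLETE	0x3ull	/* the '11' completion pattern */

int main(void)
{
	uint64_t reg = 0x1;	/* only one of the two status bits set */

	/* old form: already non-zero, so the poll would stop too early */
	printf("reg & COMPLETE           -> %d\n",
	       (reg & STATUS_COMPLETE) != 0);
	/* new form: field not yet '11', so the poll keeps waiting */
	printf("(reg & MASK) == COMPLETE -> %d\n",
	       (reg & STATUS_MASK) == STATUS_COMPLETE);
	return 0;
}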
......
...@@ -74,6 +74,16 @@ config SANDPOINT ...@@ -74,6 +74,16 @@ config SANDPOINT
Select SANDPOINT if configuring for a Motorola Sandpoint X3 Select SANDPOINT if configuring for a Motorola Sandpoint X3
(any flavor). (any flavor).
config MPC7448HPC2
bool "Freescale MPC7448HPC2(Taiga)"
select TSI108_BRIDGE
select DEFAULT_UIMAGE
select PPC_UDBG_16550
select MPIC
help
Select MPC7448HPC2 if configuring for the Freescale MPC7448HPC2 (Taiga)
platform.
config RADSTONE_PPC7D config RADSTONE_PPC7D
bool "Radstone Technology PPC7D board" bool "Radstone Technology PPC7D board"
select PPC_I8259 select PPC_I8259
...@@ -221,6 +231,11 @@ config MV64X60 ...@@ -221,6 +231,11 @@ config MV64X60
select PPC_INDIRECT_PCI select PPC_INDIRECT_PCI
default y default y
config TSI108_BRIDGE
bool
depends on MPC7448HPC2
default y
menu "Set bridge options" menu "Set bridge options"
depends on MV64X60 depends on MV64X60
......
#
# Makefile for the 6xx/7xx/7xxxx linux kernel.
#
obj-$(CONFIG_MPC7448HPC2) += mpc7448_hpc2.o
/*
* mpc7448_hpc2.c
*
* Board setup routines for the Freescale Taiga platform
*
* Author: Jacob Pan
* jacob.pan@freescale.com
* Author: Xianghua Xiao
* x.xiao@freescale.com
* Maintainer: Roy Zang <tie-fei.zang@freescale.com>
* Add Flat Device Tree support for mpc7448hpc2 board
*
* Copyright 2004-2006 Freescale Semiconductor, Inc.
*
* This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/ide.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
#include <asm/reg.h>
#include <mm/mmu_decl.h>
#include "mpc7448_hpc2.h"
#include <asm/tsi108_irq.h>
#include <asm/mpic.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif
#ifndef CONFIG_PCI
unsigned long isa_io_base = MPC7448_HPC2_ISA_IO_BASE;
unsigned long isa_mem_base = MPC7448_HPC2_ISA_MEM_BASE;
unsigned long pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET;
#endif
extern int tsi108_setup_pci(struct device_node *dev);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
extern void tsi108_pci_int_init(void);
extern int tsi108_irq_cascade(struct pt_regs *regs, void *unused);
/*
* Define all of the IRQ senses and polarities. Taken from the
* mpc7448hpc manual.
* Note: this table and the following function should ideally be
* derived from the OF Device Tree.
*/
static u_char mpc7448_hpc2_pic_initsenses[] __initdata = {
/* External on-board sources */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[0] XINT0 from FPGA */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[1] XINT1 from FPGA */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[2] PHY_INT from both GIGE */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[3] RESERVED */
/* Internal Tsi108/109 interrupt sources */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA0 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA1 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA2 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA3 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* UART0 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* UART1 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* I2C */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* GPIO */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* GIGE0 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* GIGE1 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* HLP */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* SDC */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Processor IF */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* PCI/X block */
};
int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
{
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
else
return PCIBIOS_SUCCESSFUL;
}
/*
* Find the PCI slot for a devfn in the interrupt-map of the OF tree
*/
u8 find_slot_by_devfn(unsigned int *interrupt_map, unsigned int devfn)
{
int i;
unsigned int tmp;
for (i = 0; i < 4; i++){
tmp = interrupt_map[i*4*7];
if ((tmp >> 11) == (devfn >> 3))
return i;
}
return i;
}
/*
* Scan the interrupt-map for the given PCI device and set its IRQ
*/
void mpc7448_hpc2_fixup_irq(struct pci_dev *dev)
{
struct pci_controller *hose;
struct device_node *node;
unsigned int *interrupt;
int busnr;
int len;
u8 slot;
u8 pin;
/* Lookup the hose */
busnr = dev->bus->number;
hose = pci_bus_to_hose(busnr);
if (!hose)
printk(KERN_ERR "No pci hose found\n");
/* Check it has an OF node associated */
node = (struct device_node *) hose->arch_data;
if (!node)
printk(KERN_ERR "No pci node found\n");
interrupt = (unsigned int *) get_property(node, "interrupt-map", &len);
slot = find_slot_by_devfn(interrupt, dev->devfn);
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin == 0 || pin > 4)
pin = 1;
pin--;
dev->irq = interrupt[slot*4*7 + pin*7 + 5];
DBG("TSI_PCI: dev->irq = 0x%x\n", dev->irq);
}
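find_slot_by_devfn() and mpc7448_hpc2_fixup_irq() above treat the flattened OF "interrupt-map" property as an array of 7-cell entries, four interrupt pins per slot (hence the slot*4*7 stride), with the device number in bits 15..11 of the first cell of each slot and the parent interrupt number in cell 5 of each entry. A stand-alone sketch of that indexing against a fabricated two-slot map:

/* Illustrative sketch only.  Models the interrupt-map layout assumed by
 * the board code above; every value here is invented for the example. */
#include <stdio.h>

#define CELLS_PER_ENTRY	7
#define PINS_PER_SLOT	4
#define SLOT_STRIDE	(PINS_PER_SLOT * CELLS_PER_ENTRY)

int main(void)
{
	unsigned int map[2 * SLOT_STRIDE] = { 0 };
	unsigned int devfn = (0x11 << 3) | 0;	/* device 0x11, function 0 */
	int pin = 2;				/* INTC (pin value 3, minus 1) */
	int i, slot = -1;

	/* unit-address cell of slot 1: device number in bits 15..11 */
	map[1 * SLOT_STRIDE] = 0x11 << 11;
	/* parent interrupt number for slot 1, pin INTC */
	map[1 * SLOT_STRIDE + pin * CELLS_PER_ENTRY + 5] = 23;

	/* the lookup performed by find_slot_by_devfn() */
	for (i = 0; i < 2; i++)
		if ((map[i * SLOT_STRIDE] >> 11) == (devfn >> 3))
			slot = i;
	if (slot < 0)
		return 1;

	/* the cell read by mpc7448_hpc2_fixup_irq() */
	printf("slot %d, irq %u\n", slot,
	       map[slot * SLOT_STRIDE + pin * CELLS_PER_ENTRY + 5]);
	return 0;
}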
/* temporary pci irq map fixup */
void __init mpc7448_hpc2_pcibios_fixup(void)
{
struct pci_dev *dev = NULL;
for_each_pci_dev(dev) {
mpc7448_hpc2_fixup_irq(dev);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
}
}
static void __init mpc7448_hpc2_setup_arch(void)
{
struct device_node *cpu;
struct device_node *np;
if (ppc_md.progress)
ppc_md.progress("mpc7448_hpc2_setup_arch():set_bridge", 0);
cpu = of_find_node_by_type(NULL, "cpu");
if (cpu != 0) {
unsigned int *fp;
fp = (int *)get_property(cpu, "clock-frequency", NULL);
if (fp != 0)
loops_per_jiffy = *fp / HZ;
else
loops_per_jiffy = 50000000 / HZ;
of_node_put(cpu);
}
tsi108_csr_vir_base = get_vir_csrbase();
#ifdef CONFIG_ROOT_NFS
ROOT_DEV = Root_NFS;
#else
ROOT_DEV = Root_HDA1;
#endif
#ifdef CONFIG_BLK_DEV_INITRD
ROOT_DEV = Root_RAM0;
#endif
/* setup PCI host bridge */
#ifdef CONFIG_PCI
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
tsi108_setup_pci(np);
ppc_md.pci_exclude_device = mpc7448_hpc2_exclude_device;
if (ppc_md.progress)
ppc_md.progress("tsi108: resources set", 0x100);
#endif
printk(KERN_INFO "MPC7448HPC2 (TAIGA) Platform\n");
printk(KERN_INFO
"Jointly ported by Freescale and Tundra Semiconductor\n");
printk(KERN_INFO
"Enabling L2 cache then enabling the HID0 prefetch engine.\n");
}
/*
* Interrupt setup and service. Interrupts on the mpc7448_hpc2 come
* from the four external INT pins; PCI interrupts are routed via the
* PCI interrupt control registers and generate internal IRQ23.
*
* Interrupt routing on the Taiga Board:
* TSI108:PB_INT[0] -> CPU0:INT#
* TSI108:PB_INT[1] -> CPU0:MCP#
* TSI108:PB_INT[2] -> N/C
* TSI108:PB_INT[3] -> N/C
*/
static void __init mpc7448_hpc2_init_IRQ(void)
{
struct mpic *mpic;
phys_addr_t mpic_paddr = 0;
struct device_node *tsi_pic;
tsi_pic = of_find_node_by_type(NULL, "open-pic");
if (tsi_pic) {
unsigned int size;
void *prop = get_property(tsi_pic, "reg", &size);
mpic_paddr = of_translate_address(tsi_pic, prop);
}
if (mpic_paddr == 0) {
printk("%s: No tsi108 PIC found !\n", __FUNCTION__);
return;
}
DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__,
(u32) mpic_paddr);
mpic = mpic_alloc(mpic_paddr,
MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
MPIC_SPV_EOI | MPIC_MOD_ID(MPIC_ID_TSI108),
0, /* num_sources used */
TSI108_IRQ_BASE,
0, /* num_sources used */
NR_IRQS - 4 /* XXXX */,
mpc7448_hpc2_pic_initsenses,
sizeof(mpc7448_hpc2_pic_initsenses), "Tsi108_PIC");
BUG_ON(mpic == NULL); /* XXXX */
mpic_init(mpic);
mpic_setup_cascade(IRQ_TSI108_PCI, tsi108_irq_cascade, mpic);
tsi108_pci_int_init();
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
}
void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
{
seq_printf(m, "vendor\t\t: Freescale Semiconductor\n");
seq_printf(m, "machine\t\t: MPC7448hpc2\n");
}
void mpc7448_hpc2_restart(char *cmd)
{
local_irq_disable();
/* Set exception prefix high - to the firmware */
_nmask_and_or_msr(0, MSR_IP);
for (;;) ; /* Spin until reset happens */
}
void mpc7448_hpc2_power_off(void)
{
local_irq_disable();
for (;;) ; /* No way to shut power off with software */
}
void mpc7448_hpc2_halt(void)
{
mpc7448_hpc2_power_off();
}
/*
* Called very early, device-tree isn't unflattened
*/
static int __init mpc7448_hpc2_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "mpc74xx"))
return 0;
return 1;
}
static int mpc7448_machine_check_exception(struct pt_regs *regs)
{
extern void tsi108_clear_pci_cfg_error(void);
const struct exception_table_entry *entry;
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
regs->msr |= MSR_RI;
regs->nip = entry->fixup;
return 1;
}
return 0;
}
define_machine(mpc7448_hpc2){
.name = "MPC7448 HPC2",
.probe = mpc7448_hpc2_probe,
.setup_arch = mpc7448_hpc2_setup_arch,
.init_IRQ = mpc7448_hpc2_init_IRQ,
.show_cpuinfo = mpc7448_hpc2_show_cpuinfo,
.get_irq = mpic_get_irq,
.pcibios_fixup = mpc7448_hpc2_pcibios_fixup,
.restart = mpc7448_hpc2_restart,
.calibrate_decr = generic_calibrate_decr,
.machine_check_exception= mpc7448_machine_check_exception,
.progress = udbg_progress,
};
/*
* mpc7448_hpc2.h
*
* Definitions for Freescale MPC7448_HPC2 platform
*
* Author: Jacob Pan
* jacob.pan@freescale.com
* Maintainer: Roy Zang <roy.zang@freescale.com>
*
* 2006 (c) Freescale Semiconductor, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#ifndef __PPC_PLATFORMS_MPC7448_HPC2_H
#define __PPC_PLATFORMS_MPC7448_HPC2_H
#include <asm/ppcboot.h>
/* Base Addresses for the PCI bus
*/
#define MPC7448_HPC2_PCI_MEM_OFFSET (0x00000000)
#define MPC7448_HPC2_ISA_IO_BASE (0x00000000)
#define MPC7448_HPC2_ISA_MEM_BASE (0x00000000)
#endif /* __PPC_PLATFORMS_MPC7448_HPC2_H */
...@@ -252,6 +252,7 @@ static void __init dt_model(struct iseries_flat_dt *dt) ...@@ -252,6 +252,7 @@ static void __init dt_model(struct iseries_flat_dt *dt)
{ {
char buf[16] = "IBM,"; char buf[16] = "IBM,";
/* N.B. lparcfg.c knows about the "IBM," prefixes ... */
/* "IBM," + mfgId[2:3] + systemSerial[1:5] */ /* "IBM," + mfgId[2:3] + systemSerial[1:5] */
strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2); strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2);
strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5); strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5);
...@@ -264,6 +265,7 @@ static void __init dt_model(struct iseries_flat_dt *dt) ...@@ -264,6 +265,7 @@ static void __init dt_model(struct iseries_flat_dt *dt)
dt_prop_str(dt, "model", buf); dt_prop_str(dt, "model", buf);
dt_prop_str(dt, "compatible", "IBM,iSeries"); dt_prop_str(dt, "compatible", "IBM,iSeries");
dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
} }
static void __init dt_do_vdevice(struct iseries_flat_dt *dt, static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
......
...@@ -242,13 +242,11 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va, ...@@ -242,13 +242,11 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
local_irq_restore(flags); local_irq_restore(flags);
} }
void hpte_init_iSeries(void) void __init hpte_init_iSeries(void)
{ {
ppc_md.hpte_invalidate = iSeries_hpte_invalidate; ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
ppc_md.hpte_updatepp = iSeries_hpte_updatepp; ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp; ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
ppc_md.hpte_insert = iSeries_hpte_insert; ppc_md.hpte_insert = iSeries_hpte_insert;
ppc_md.hpte_remove = iSeries_hpte_remove; ppc_md.hpte_remove = iSeries_hpte_remove;
htab_finish_init();
} }
...@@ -51,20 +51,21 @@ static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes]; ...@@ -51,20 +51,21 @@ static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
static struct HvLpEvent * get_next_hvlpevent(void) static struct HvLpEvent * get_next_hvlpevent(void)
{ {
struct HvLpEvent * event; struct HvLpEvent * event;
event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr; event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
if (hvlpevent_is_valid(event)) { if (hvlpevent_is_valid(event)) {
/* rmb() needed only for weakly consistent machines (regatta) */ /* rmb() needed only for weakly consistent machines (regatta) */
rmb(); rmb();
/* Set pointer to next potential event */ /* Set pointer to next potential event */
hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 + hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
LpEventAlign) / LpEventAlign) * LpEventAlign; IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
IT_LP_EVENT_ALIGN;
/* Wrap to beginning if no room at end */ /* Wrap to beginning if no room at end */
if (hvlpevent_queue.xSlicCurEventPtr > if (hvlpevent_queue.hq_current_event >
hvlpevent_queue.xSlicLastValidEventPtr) { hvlpevent_queue.hq_last_event) {
hvlpevent_queue.xSlicCurEventPtr = hvlpevent_queue.hq_current_event =
hvlpevent_queue.xSlicEventStackPtr; hvlpevent_queue.hq_event_stack;
} }
} else { } else {
event = NULL; event = NULL;
...@@ -82,10 +83,10 @@ int hvlpevent_is_pending(void) ...@@ -82,10 +83,10 @@ int hvlpevent_is_pending(void)
if (smp_processor_id() >= spread_lpevents) if (smp_processor_id() >= spread_lpevents)
return 0; return 0;
next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr; next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
return hvlpevent_is_valid(next_event) || return hvlpevent_is_valid(next_event) ||
hvlpevent_queue.xPlicOverflowIntPending; hvlpevent_queue.hq_overflow_pending;
} }
static void hvlpevent_clear_valid(struct HvLpEvent * event) static void hvlpevent_clear_valid(struct HvLpEvent * event)
...@@ -95,18 +96,18 @@ static void hvlpevent_clear_valid(struct HvLpEvent * event) ...@@ -95,18 +96,18 @@ static void hvlpevent_clear_valid(struct HvLpEvent * event)
* ie. on 64-byte boundaries. * ie. on 64-byte boundaries.
*/ */
struct HvLpEvent *tmp; struct HvLpEvent *tmp;
unsigned extra = ((event->xSizeMinus1 + LpEventAlign) / unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
LpEventAlign) - 1; IT_LP_EVENT_ALIGN) - 1;
switch (extra) { switch (extra) {
case 3: case 3:
tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign); tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
hvlpevent_invalidate(tmp); hvlpevent_invalidate(tmp);
case 2: case 2:
tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign); tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
hvlpevent_invalidate(tmp); hvlpevent_invalidate(tmp);
case 1: case 1:
tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign); tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
hvlpevent_invalidate(tmp); hvlpevent_invalidate(tmp);
} }
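The expressions above turn an event's xSizeMinus1 into a count of IT_LP_EVENT_ALIGN-sized blocks: (sizeMinus1 + ALIGN) / ALIGN equals ceil(size / ALIGN) when sizeMinus1 is size - 1, which is how events spanning several 64-byte slots are stepped over and invalidated. A quick stand-alone check of that identity (the 64-byte alignment is taken from the comment above and treated here as an assumption):

/* Illustrative sketch only.  Verifies the round-up identity used by the
 * LP event queue code above. */
#include <stdio.h>

#define ALIGN 64	/* stand-in for IT_LP_EVENT_ALIGN */

int main(void)
{
	unsigned int sizes[] = { 1, 63, 64, 65, 200, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int size_minus_1 = sizes[i] - 1;
		unsigned int blocks = (size_minus_1 + ALIGN) / ALIGN;

		printf("size %3u -> %u block(s) of %d bytes\n",
		       sizes[i], blocks, ALIGN);
	}
	return 0;
}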
...@@ -120,7 +121,7 @@ void process_hvlpevents(struct pt_regs *regs) ...@@ -120,7 +121,7 @@ void process_hvlpevents(struct pt_regs *regs)
struct HvLpEvent * event; struct HvLpEvent * event;
/* If we have recursed, just return */ /* If we have recursed, just return */
if (!spin_trylock(&hvlpevent_queue.lock)) if (!spin_trylock(&hvlpevent_queue.hq_lock))
return; return;
for (;;) { for (;;) {
...@@ -148,17 +149,17 @@ void process_hvlpevents(struct pt_regs *regs) ...@@ -148,17 +149,17 @@ void process_hvlpevents(struct pt_regs *regs)
printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType ); printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
hvlpevent_clear_valid(event); hvlpevent_clear_valid(event);
} else if (hvlpevent_queue.xPlicOverflowIntPending) } else if (hvlpevent_queue.hq_overflow_pending)
/* /*
* No more valid events. If overflow events are * No more valid events. If overflow events are
* pending, process them * pending, process them
*/ */
HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex); HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
else else
break; break;
} }
spin_unlock(&hvlpevent_queue.lock); spin_unlock(&hvlpevent_queue.hq_lock);
} }
static int set_spread_lpevents(char *str) static int set_spread_lpevents(char *str)
...@@ -184,20 +185,20 @@ void setup_hvlpevent_queue(void) ...@@ -184,20 +185,20 @@ void setup_hvlpevent_queue(void)
{ {
void *eventStack; void *eventStack;
spin_lock_init(&hvlpevent_queue.lock); spin_lock_init(&hvlpevent_queue.hq_lock);
/* Allocate a page for the Event Stack. */ /* Allocate a page for the Event Stack. */
eventStack = alloc_bootmem_pages(LpEventStackSize); eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
memset(eventStack, 0, LpEventStackSize); memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
/* Invoke the hypervisor to initialize the event stack */ /* Invoke the hypervisor to initialize the event stack */
HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize); HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack; hvlpevent_queue.hq_event_stack = eventStack;
hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack; hvlpevent_queue.hq_current_event = eventStack;
hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack + hvlpevent_queue.hq_last_event = (char *)eventStack +
(LpEventStackSize - LpEventMaxSize); (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
hvlpevent_queue.xIndex = 0; hvlpevent_queue.hq_index = 0;
} }
/* Register a handler for an LpEvent type */ /* Register a handler for an LpEvent type */
......
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/time.h> #include <asm/time.h>
#include <asm/lppaca.h> #include <asm/lppaca.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h> #include <asm/iseries/hv_call_xm.h>
#include "processor_vpd.h" #include "processor_vpd.h"
......
...@@ -81,8 +81,6 @@ static void iSeries_pci_final_fixup(void) { } ...@@ -81,8 +81,6 @@ static void iSeries_pci_final_fixup(void) { }
#endif #endif
extern int rd_size; /* Defined in drivers/block/rd.c */ extern int rd_size; /* Defined in drivers/block/rd.c */
extern unsigned long embedded_sysmap_start;
extern unsigned long embedded_sysmap_end;
extern unsigned long iSeries_recal_tb; extern unsigned long iSeries_recal_tb;
extern unsigned long iSeries_recal_titan; extern unsigned long iSeries_recal_titan;
...@@ -320,11 +318,6 @@ static void __init iSeries_init_early(void) ...@@ -320,11 +318,6 @@ static void __init iSeries_init_early(void)
iSeries_recal_tb = get_tb(); iSeries_recal_tb = get_tb();
iSeries_recal_titan = HvCallXm_loadTod(); iSeries_recal_titan = HvCallXm_loadTod();
/*
* Initialize the hash table management pointers
*/
hpte_init_iSeries();
/* /*
* Initialize the DMA/TCE management * Initialize the DMA/TCE management
*/ */
...@@ -563,16 +556,6 @@ static void __init iSeries_fixup_klimit(void) ...@@ -563,16 +556,6 @@ static void __init iSeries_fixup_klimit(void)
if (naca.xRamDisk) if (naca.xRamDisk)
klimit = KERNELBASE + (u64)naca.xRamDisk + klimit = KERNELBASE + (u64)naca.xRamDisk +
(naca.xRamDiskSize * HW_PAGE_SIZE); (naca.xRamDiskSize * HW_PAGE_SIZE);
else {
/*
* No ram disk was included - check and see if there
* was an embedded system map. Change klimit to take
* into account any embedded system map
*/
if (embedded_sysmap_end)
klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
0xfffffffffffff000);
}
} }
static int __init iSeries_src_init(void) static int __init iSeries_src_init(void)
...@@ -683,6 +666,8 @@ static int __init iseries_probe(void) ...@@ -683,6 +666,8 @@ static int __init iseries_probe(void)
*/ */
virt_irq_max = 255; virt_irq_max = 255;
hpte_init_iSeries();
return 1; return 1;
} }
......
...@@ -199,11 +199,6 @@ static void __init maple_init_early(void) ...@@ -199,11 +199,6 @@ static void __init maple_init_early(void)
{ {
DBG(" -> maple_init_early\n"); DBG(" -> maple_init_early\n");
/* Initialize hash table, from now on, we can take hash faults
* and call ioremap
*/
hpte_init_native();
/* Setup interrupt mapping options */ /* Setup interrupt mapping options */
ppc64_interrupt_controller = IC_OPEN_PIC; ppc64_interrupt_controller = IC_OPEN_PIC;
...@@ -272,6 +267,8 @@ static int __init maple_probe(void) ...@@ -272,6 +267,8 @@ static int __init maple_probe(void)
*/ */
alloc_dart_table(); alloc_dart_table();
hpte_init_native();
return 1; return 1;
} }
......
...@@ -600,13 +600,6 @@ pmac_halt(void) ...@@ -600,13 +600,6 @@ pmac_halt(void)
*/ */
static void __init pmac_init_early(void) static void __init pmac_init_early(void)
{ {
#ifdef CONFIG_PPC64
/* Initialize hash table, from now on, we can take hash faults
* and call ioremap
*/
hpte_init_native();
#endif
/* Enable early btext debug if requested */ /* Enable early btext debug if requested */
if (strstr(cmd_line, "btextdbg")) { if (strstr(cmd_line, "btextdbg")) {
udbg_adb_init_early(); udbg_adb_init_early();
...@@ -683,6 +676,8 @@ static int __init pmac_probe(void) ...@@ -683,6 +676,8 @@ static int __init pmac_probe(void)
* part of the cacheable linear mapping * part of the cacheable linear mapping
*/ */
alloc_dart_table(); alloc_dart_table();
hpte_init_native();
#endif #endif
#ifdef CONFIG_PPC32 #ifdef CONFIG_PPC32
......
...@@ -92,6 +92,15 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) ...@@ -92,6 +92,15 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
*(tcep++) = 0; *(tcep++) = 0;
} }
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
u64 *tcep;
index <<= TCE_PAGE_FACTOR;
tcep = ((u64 *)tbl->it_base) + index;
return *tcep;
}
static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr, long npages, unsigned long uaddr,
...@@ -235,6 +244,25 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n ...@@ -235,6 +244,25 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
} }
} }
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
u64 rc;
unsigned long tce_ret;
tcenum <<= TCE_PAGE_FACTOR;
rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
if (rc && printk_ratelimit()) {
printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%ld\n",
rc);
printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%lx\n", (u64)tcenum);
show_stack(current, (unsigned long *)__get_SP());
}
return tce_ret;
}
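The two new tce_get helpers above scale the caller's index by TCE_PAGE_FACTOR before reading the TCE, because one kernel page can cover several 4K I/O pages. A stand-alone sketch of that scaling; the factor of 4 (64K kernel pages over 4K TCE pages) is an assumption made only for the example:

/* Illustrative sketch only.  Shows the index scaling performed by the
 * tce_get_* helpers above. */
#include <stdio.h>

#define TCE_PAGE_FACTOR 4	/* assumption: 64K kernel pages, 4K TCE pages */

int main(void)
{
	long index = 3;				/* caller's kernel-page index */
	long first_tce = index << TCE_PAGE_FACTOR;
	long tces_per_page = 1L << TCE_PAGE_FACTOR;

	printf("kernel page %ld -> TCE entries %ld..%ld\n",
	       index, first_tce, first_tce + tces_per_page - 1);
	return 0;
}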
static void iommu_table_setparms(struct pci_controller *phb, static void iommu_table_setparms(struct pci_controller *phb,
struct device_node *dn, struct device_node *dn,
struct iommu_table *tbl) struct iommu_table *tbl)
...@@ -254,7 +282,10 @@ static void iommu_table_setparms(struct pci_controller *phb, ...@@ -254,7 +282,10 @@ static void iommu_table_setparms(struct pci_controller *phb,
} }
tbl->it_base = (unsigned long)__va(*basep); tbl->it_base = (unsigned long)__va(*basep);
#ifndef CONFIG_CRASH_DUMP
memset((void *)tbl->it_base, 0, *sizep); memset((void *)tbl->it_base, 0, *sizep);
#endif
tbl->it_busno = phb->bus->number; tbl->it_busno = phb->bus->number;
...@@ -560,11 +591,13 @@ void iommu_init_early_pSeries(void) ...@@ -560,11 +591,13 @@ void iommu_init_early_pSeries(void)
ppc_md.tce_build = tce_build_pSeriesLP; ppc_md.tce_build = tce_build_pSeriesLP;
ppc_md.tce_free = tce_free_pSeriesLP; ppc_md.tce_free = tce_free_pSeriesLP;
} }
ppc_md.tce_get = tce_get_pSeriesLP;
ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP; ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP;
ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP; ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP;
} else { } else {
ppc_md.tce_build = tce_build_pSeries; ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries; ppc_md.tce_free = tce_free_pSeries;
ppc_md.tce_get = tce_get_pseries;
ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries; ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries;
ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries; ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
} }
......
...@@ -513,7 +513,7 @@ void pSeries_lpar_flush_hash_range(unsigned long number, int local) ...@@ -513,7 +513,7 @@ void pSeries_lpar_flush_hash_range(unsigned long number, int local)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
} }
void hpte_init_lpar(void) void __init hpte_init_lpar(void)
{ {
ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp; ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp;
...@@ -522,6 +522,4 @@ void hpte_init_lpar(void) ...@@ -522,6 +522,4 @@ void hpte_init_lpar(void)
ppc_md.hpte_remove = pSeries_lpar_hpte_remove; ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
htab_finish_init();
} }
...@@ -322,11 +322,6 @@ static void __init pSeries_init_early(void) ...@@ -322,11 +322,6 @@ static void __init pSeries_init_early(void)
DBG(" -> pSeries_init_early()\n"); DBG(" -> pSeries_init_early()\n");
fw_feature_init(); fw_feature_init();
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
else
hpte_init_native();
if (firmware_has_feature(FW_FEATURE_LPAR)) if (firmware_has_feature(FW_FEATURE_LPAR))
find_udbg_vterm(); find_udbg_vterm();
...@@ -384,6 +379,11 @@ static int __init pSeries_probe_hypertas(unsigned long node, ...@@ -384,6 +379,11 @@ static int __init pSeries_probe_hypertas(unsigned long node,
if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL) if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
powerpc_firmware_features |= FW_FEATURE_LPAR; powerpc_firmware_features |= FW_FEATURE_LPAR;
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
else
hpte_init_native();
return 1; return 1;
} }
......
...@@ -12,3 +12,5 @@ obj-$(CONFIG_U3_DART) += dart_iommu.o ...@@ -12,3 +12,5 @@ obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_PPC_83xx) += ipic.o obj-$(CONFIG_PPC_83xx) += ipic.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o obj-$(CONFIG_FSL_SOC) += fsl_soc.o
obj-$(CONFIG_PPC_TODC) += todc.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
...@@ -47,8 +47,12 @@ ...@@ -47,8 +47,12 @@
/* U4 registers */ /* U4 registers */
#define DART_BASE_U4_BASE_MASK 0xffffff #define DART_BASE_U4_BASE_MASK 0xffffff
#define DART_BASE_U4_BASE_SHIFT 0 #define DART_BASE_U4_BASE_SHIFT 0
#define DART_CNTL_U4_FLUSHTLB 0x20000000
#define DART_CNTL_U4_ENABLE 0x80000000 #define DART_CNTL_U4_ENABLE 0x80000000
#define DART_CNTL_U4_IONE 0x40000000
#define DART_CNTL_U4_FLUSHTLB 0x20000000
#define DART_CNTL_U4_IDLE 0x10000000
#define DART_CNTL_U4_PAR_EN 0x08000000
#define DART_CNTL_U4_IONE_MASK 0x07ffffff
#define DART_SIZE_U4_SIZE_MASK 0x1fff #define DART_SIZE_U4_SIZE_MASK 0x1fff
#define DART_SIZE_U4_SIZE_SHIFT 0 #define DART_SIZE_U4_SIZE_SHIFT 0
......
...@@ -101,8 +101,8 @@ static inline void dart_tlb_invalidate_all(void) ...@@ -101,8 +101,8 @@ static inline void dart_tlb_invalidate_all(void)
if (l == (1L << limit)) { if (l == (1L << limit)) {
if (limit < 4) { if (limit < 4) {
limit++; limit++;
reg = DART_IN(DART_CNTL); reg = DART_IN(DART_CNTL);
reg &= ~inv_bit; reg &= ~inv_bit;
DART_OUT(DART_CNTL, reg); DART_OUT(DART_CNTL, reg);
goto retry; goto retry;
} else } else
...@@ -111,11 +111,39 @@ static inline void dart_tlb_invalidate_all(void) ...@@ -111,11 +111,39 @@ static inline void dart_tlb_invalidate_all(void)
} }
} }
static inline void dart_tlb_invalidate_one(unsigned long bus_rpn)
{
unsigned int reg;
unsigned int l, limit;
reg = DART_CNTL_U4_ENABLE | DART_CNTL_U4_IONE |
(bus_rpn & DART_CNTL_U4_IONE_MASK);
DART_OUT(DART_CNTL, reg);
limit = 0;
wait_more:
l = 0;
while ((DART_IN(DART_CNTL) & DART_CNTL_U4_IONE) && l < (1L << limit)) {
rmb();
l++;
}
if (l == (1L << limit)) {
if (limit < 4) {
limit++;
goto wait_more;
} else
panic("DART: TLB did not flush after waiting a long "
"time. Buggy U4 ?");
}
}
static void dart_flush(struct iommu_table *tbl) static void dart_flush(struct iommu_table *tbl)
{ {
if (dart_dirty) if (dart_dirty) {
dart_tlb_invalidate_all(); dart_tlb_invalidate_all();
dart_dirty = 0; dart_dirty = 0;
}
} }
static void dart_build(struct iommu_table *tbl, long index, static void dart_build(struct iommu_table *tbl, long index,
...@@ -124,6 +152,7 @@ static void dart_build(struct iommu_table *tbl, long index, ...@@ -124,6 +152,7 @@ static void dart_build(struct iommu_table *tbl, long index,
{ {
unsigned int *dp; unsigned int *dp;
unsigned int rpn; unsigned int rpn;
long l;
DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
...@@ -135,7 +164,8 @@ static void dart_build(struct iommu_table *tbl, long index, ...@@ -135,7 +164,8 @@ static void dart_build(struct iommu_table *tbl, long index,
/* On U3, all memory is contiguous, so we can move this /* On U3, all memory is contiguous, so we can move this
* out of the loop. * out of the loop.
*/ */
while (npages--) { l = npages;
while (l--) {
rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT; rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT;
*(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
...@@ -143,7 +173,14 @@ static void dart_build(struct iommu_table *tbl, long index, ...@@ -143,7 +173,14 @@ static void dart_build(struct iommu_table *tbl, long index,
uaddr += DART_PAGE_SIZE; uaddr += DART_PAGE_SIZE;
} }
dart_dirty = 1; if (dart_is_u4) {
rpn = index;
mb(); /* make sure all updates have reached memory */
while (npages--)
dart_tlb_invalidate_one(rpn++);
} else {
dart_dirty = 1;
}
} }
......
/*
* tsi108/109 device setup code
*
* Maintained by Roy Zang < tie-fei.zang@freescale.com >
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <asm/tsi108.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif
static phys_addr_t tsi108_csr_base = -1;
phys_addr_t get_csrbase(void)
{
struct device_node *tsi;
if (tsi108_csr_base != -1)
return tsi108_csr_base;
tsi = of_find_node_by_type(NULL, "tsi-bridge");
if (tsi) {
unsigned int size;
void *prop = get_property(tsi, "reg", &size);
tsi108_csr_base = of_translate_address(tsi, prop);
of_node_put(tsi);
};
return tsi108_csr_base;
}
u32 get_vir_csrbase(void)
{
return (u32) (ioremap(get_csrbase(), 0x10000));
}
EXPORT_SYMBOL(get_csrbase);
EXPORT_SYMBOL(get_vir_csrbase);
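/* Walk the device tree for nodes compatible with "tsi-ethernet" and
 * register a "tsi-ethernet" platform device for each one, carrying the
 * MMIO range, interrupt line, MAC address and PHY details in hw_info.
 */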
static int __init tsi108_eth_of_init(void)
{
struct device_node *np;
unsigned int i;
struct platform_device *tsi_eth_dev;
struct resource res;
int ret;
for (np = NULL, i = 0;
(np = of_find_compatible_node(np, "network", "tsi-ethernet")) != NULL;
i++) {
struct resource r[2];
struct device_node *phy;
hw_info tsi_eth_data;
unsigned int *id;
unsigned int *phy_id;
void *mac_addr;
phandle *ph;
memset(r, 0, sizeof(r));
memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
ret = of_address_to_resource(np, 0, &r[0]);
DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
__FUNCTION__,r[0].name, r[0].start, r[0].end);
if (ret)
goto err;
r[1].name = "tx";
r[1].start = np->intrs[0].line;
r[1].end = np->intrs[0].line;
r[1].flags = IORESOURCE_IRQ;
tsi_eth_dev =
platform_device_register_simple("tsi-ethernet", i, &r[0],
np->n_intrs + 1);
if (IS_ERR(tsi_eth_dev)) {
ret = PTR_ERR(tsi_eth_dev);
goto err;
}
mac_addr = get_property(np, "address", NULL);
memcpy(tsi_eth_data.mac_addr, mac_addr, 6);
ph = (phandle *) get_property(np, "phy-handle", NULL);
phy = of_find_node_by_phandle(*ph);
if (phy == NULL) {
ret = -ENODEV;
goto unreg;
}
id = (u32 *) get_property(phy, "reg", NULL);
phy_id = (u32 *) get_property(phy, "phy-id", NULL);
ret = of_address_to_resource(phy, 0, &res);
if (ret) {
of_node_put(phy);
goto unreg;
}
tsi_eth_data.regs = r[0].start;
tsi_eth_data.phyregs = res.start;
tsi_eth_data.phy = *phy_id;
tsi_eth_data.irq_num = np->intrs[0].line;
of_node_put(phy);
ret =
platform_device_add_data(tsi_eth_dev, &tsi_eth_data,
sizeof(hw_info));
if (ret)
goto unreg;
}
return 0;
unreg:
platform_device_unregister(tsi_eth_dev);
err:
return ret;
}
arch_initcall(tsi108_eth_of_init);
...@@ -774,11 +774,18 @@ config BLK_DEV_IDEDMA_PMAC ...@@ -774,11 +774,18 @@ config BLK_DEV_IDEDMA_PMAC
performance. performance.
config BLK_DEV_IDE_PMAC_BLINK config BLK_DEV_IDE_PMAC_BLINK
bool "Blink laptop LED on drive activity" bool "Blink laptop LED on drive activity (DEPRECATED)"
depends on BLK_DEV_IDE_PMAC && ADB_PMU depends on BLK_DEV_IDE_PMAC && ADB_PMU
select ADB_PMU_LED
select LEDS_TRIGGERS
select LEDS_TRIGGER_IDE_DISK
help help
This option enables the use of the sleep LED as a hard drive This option enables the use of the sleep LED as a hard drive
activity LED. activity LED.
This option is deprecated; it only selects ADB_PMU_LED and
LEDS_TRIGGER_IDE_DISK and makes the new LED class device default
to the ide-disk trigger (which should be set from userspace via
sysfs).
config BLK_DEV_IDE_SWARM config BLK_DEV_IDE_SWARM
tristate "IDE for Sibyte evaluation boards" tristate "IDE for Sibyte evaluation boards"
......
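The deprecation note above boils down to a userspace step: once ADB_PMU_LED and the LED triggers are built in, the ide-disk trigger is attached to the LED through sysfs. A minimal sketch, assuming the LED shows up under /sys/class/leds with a name such as pmu-led (the actual name is whatever the via-pmu-led driver registers):

/* Select the ide-disk trigger for a LED class device from userspace.
 * The LED name below is an assumption; list /sys/class/leds to find
 * the name the driver actually uses.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/leds/pmu-led/trigger"; /* assumed name */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("ide-disk\n", f);	/* same effect as: echo ide-disk > .../trigger */
	fclose(f);
	return 0;
}

Keeping the trigger selection in userspace is what lets the old hard-wired blink behaviour be replaced by any of the generic LED triggers.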
...@@ -420,107 +420,6 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive); ...@@ -420,107 +420,6 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
/*
* Below is the code for blinking the laptop LED along with hard
* disk activity.
*/
#ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
/* Set to 50ms minimum led-on time (also used to limit frequency
* of requests sent to the PMU
*/
#define PMU_HD_BLINK_TIME (HZ/50)
static struct adb_request pmu_blink_on, pmu_blink_off;
static spinlock_t pmu_blink_lock;
static unsigned long pmu_blink_stoptime;
static int pmu_blink_ledstate;
static struct timer_list pmu_blink_timer;
static int pmu_ide_blink_enabled;
static void
pmu_hd_blink_timeout(unsigned long data)
{
unsigned long flags;
spin_lock_irqsave(&pmu_blink_lock, flags);
/* We may have been triggered again in a racy way, check
* that we really want to switch it off
*/
if (time_after(pmu_blink_stoptime, jiffies))
goto done;
/* Previous req. not complete, try 100ms more */
if (pmu_blink_off.complete == 0)
mod_timer(&pmu_blink_timer, jiffies + PMU_HD_BLINK_TIME);
else if (pmu_blink_ledstate) {
pmu_request(&pmu_blink_off, NULL, 4, 0xee, 4, 0, 0);
pmu_blink_ledstate = 0;
}
done:
spin_unlock_irqrestore(&pmu_blink_lock, flags);
}
static void
pmu_hd_kick_blink(void *data, int rw)
{
unsigned long flags;
pmu_blink_stoptime = jiffies + PMU_HD_BLINK_TIME;
wmb();
mod_timer(&pmu_blink_timer, pmu_blink_stoptime);
/* Fast path when LED is already ON */
if (pmu_blink_ledstate == 1)
return;
spin_lock_irqsave(&pmu_blink_lock, flags);
if (pmu_blink_on.complete && !pmu_blink_ledstate) {
pmu_request(&pmu_blink_on, NULL, 4, 0xee, 4, 0, 1);
pmu_blink_ledstate = 1;
}
spin_unlock_irqrestore(&pmu_blink_lock, flags);
}
static int
pmu_hd_blink_init(void)
{
struct device_node *dt;
const char *model;
/* Currently, I only enable this feature on KeyLargo based laptops,
* older laptops may support it (at least heathrow/paddington) but
* I don't feel like loading those venerable old machines with so
* much additional interrupt & PMU activity...
*/
if (pmu_get_model() != PMU_KEYLARGO_BASED)
return 0;
dt = of_find_node_by_path("/");
if (dt == NULL)
return 0;
model = (const char *)get_property(dt, "model", NULL);
if (model == NULL)
return 0;
if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 &&
strncmp(model, "iBook", strlen("iBook")) != 0) {
of_node_put(dt);
return 0;
}
of_node_put(dt);
pmu_blink_on.complete = 1;
pmu_blink_off.complete = 1;
spin_lock_init(&pmu_blink_lock);
init_timer(&pmu_blink_timer);
pmu_blink_timer.function = pmu_hd_blink_timeout;
return 1;
}
#endif /* CONFIG_BLK_DEV_IDE_PMAC_BLINK */
/* /*
* N.B. this can't be an initfunc, because the media-bay task can * N.B. this can't be an initfunc, because the media-bay task can
* call ide_[un]register at any time. * call ide_[un]register at any time.
...@@ -1192,23 +1091,6 @@ pmac_ide_do_suspend(ide_hwif_t *hwif) ...@@ -1192,23 +1091,6 @@ pmac_ide_do_suspend(ide_hwif_t *hwif)
pmif->timings[0] = 0; pmif->timings[0] = 0;
pmif->timings[1] = 0; pmif->timings[1] = 0;
#ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
/* Note: This code will be called for every hwif, thus we'll
* try several time to stop the LED blinker timer, but that
* should be harmless
*/
if (pmu_ide_blink_enabled) {
unsigned long flags;
/* Make sure we don't hit the PMU blink */
spin_lock_irqsave(&pmu_blink_lock, flags);
if (pmu_blink_ledstate)
del_timer(&pmu_blink_timer);
pmu_blink_ledstate = 0;
spin_unlock_irqrestore(&pmu_blink_lock, flags);
}
#endif /* CONFIG_BLK_DEV_IDE_PMAC_BLINK */
disable_irq(pmif->irq); disable_irq(pmif->irq);
/* The media bay will handle itself just fine */ /* The media bay will handle itself just fine */
...@@ -1376,13 +1258,6 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) ...@@ -1376,13 +1258,6 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
hwif->selectproc = pmac_ide_selectproc; hwif->selectproc = pmac_ide_selectproc;
hwif->speedproc = pmac_ide_tune_chipset; hwif->speedproc = pmac_ide_tune_chipset;
#ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
pmu_ide_blink_enabled = pmu_hd_blink_init();
if (pmu_ide_blink_enabled)
hwif->led_act = pmu_hd_kick_blink;
#endif
printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n", printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
hwif->index, model_name[pmif->kind], pmif->aapl_bus_id, hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
pmif->mediabay ? " (mediabay)" : "", hwif->irq); pmif->mediabay ? " (mediabay)" : "", hwif->irq);
......
...@@ -78,6 +78,18 @@ config ADB_PMU ...@@ -78,6 +78,18 @@ config ADB_PMU
this device; you should do so if your machine is one of those this device; you should do so if your machine is one of those
mentioned above. mentioned above.
config ADB_PMU_LED
bool "Support for the Power/iBook front LED"
depends on ADB_PMU
select NEW_LEDS
select LEDS_CLASS
help
Support the front LED on Power/iBooks as a generic LED that can
be triggered by any of the supported triggers. To get the
behaviour of the old CONFIG_BLK_DEV_IDE_PMAC_BLINK, select this
and the ide-disk LED trigger and configure appropriately through
sysfs.
config PMAC_SMU config PMAC_SMU
bool "Support for SMU based PowerMacs" bool "Support for SMU based PowerMacs"
depends on PPC_PMAC64 depends on PPC_PMAC64
......
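The help text describes the front LED as a generic LED class device. For orientation, this is roughly the shape of a driver built on that API; the names below are illustrative placeholders, not the contents of the new via-pmu-led.c:

/* Generic sketch of a LED class driver: register a led_classdev and let
 * userspace pick a trigger such as ide-disk through sysfs.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/leds.h>

static void example_led_set(struct led_classdev *cdev,
			    enum led_brightness value)
{
	/* Drive the hardware here, e.g. queue a request to the PMU */
}

static struct led_classdev example_led = {
	.name		= "example-front-led",	/* appears in /sys/class/leds */
	.brightness_set	= example_led_set,
};

static int __init example_led_init(void)
{
	return led_classdev_register(NULL, &example_led);
}

static void __exit example_led_exit(void)
{
	led_classdev_unregister(&example_led);
}

module_init(example_led_init);
module_exit(example_led_exit);
MODULE_LICENSE("GPL");

Userspace then selects a trigger for it exactly as in the sysfs sketch earlier.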
...@@ -12,6 +12,7 @@ obj-$(CONFIG_INPUT_ADBHID) += adbhid.o ...@@ -12,6 +12,7 @@ obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
obj-$(CONFIG_ANSLCD) += ans-lcd.o obj-$(CONFIG_ANSLCD) += ans-lcd.o
obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o
obj-$(CONFIG_ADB_PMU_LED) += via-pmu-led.o
obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o
obj-$(CONFIG_ADB_CUDA) += via-cuda.o obj-$(CONFIG_ADB_CUDA) += via-cuda.o
obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o
......
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#define KDUMP_TRAMPOLINE_START 0x0100 #define KDUMP_TRAMPOLINE_START 0x0100
#define KDUMP_TRAMPOLINE_END 0x3000 #define KDUMP_TRAMPOLINE_END 0x3000
#define KDUMP_MIN_TCE_ENTRIES 2048
#else /* !CONFIG_CRASH_DUMP */ #else /* !CONFIG_CRASH_DUMP */
#define PHYSICAL_START 0x0 #define PHYSICAL_START 0x0
......
...@@ -112,9 +112,13 @@ static inline void crash_setup_regs(struct pt_regs *newregs, ...@@ -112,9 +112,13 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
#ifdef __powerpc64__ #ifdef __powerpc64__
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */ master to copy new code to 0 */
extern void __init kexec_setup(void);
extern int crashing_cpu; extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)); extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
extern cpumask_t cpus_in_sr;
static inline int kexec_sr_activated(int cpu)
{
return cpu_isset(cpu,cpus_in_sr);
}
#endif /* __powerpc64 __ */ #endif /* __powerpc64 __ */
struct kimage; struct kimage;
...@@ -124,10 +128,13 @@ extern int default_machine_kexec_prepare(struct kimage *image); ...@@ -124,10 +128,13 @@ extern int default_machine_kexec_prepare(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs); extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern void machine_kexec_simple(struct kimage *image); extern void machine_kexec_simple(struct kimage *image);
extern void crash_kexec_secondary(struct pt_regs *regs);
extern int overlaps_crashkernel(unsigned long start, unsigned long size); extern int overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void); extern void reserve_crashkernel(void);
#else /* !CONFIG_KEXEC */ #else /* !CONFIG_KEXEC */
static inline int kexec_sr_activated(int cpu) { return 0; }
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int overlaps_crashkernel(unsigned long start, unsigned long size) static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
{ {
......
...@@ -81,6 +81,8 @@ struct machdep_calls { ...@@ -81,6 +81,8 @@ struct machdep_calls {
void (*tce_free)(struct iommu_table *tbl, void (*tce_free)(struct iommu_table *tbl,
long index, long index,
long npages); long npages);
unsigned long (*tce_get)(struct iommu_table *tbl,
long index);
void (*tce_flush)(struct iommu_table *tbl); void (*tce_flush)(struct iommu_table *tbl);
void (*iommu_dev_setup)(struct pci_dev *dev); void (*iommu_dev_setup)(struct pci_dev *dev);
void (*iommu_bus_setup)(struct pci_bus *bus); void (*iommu_bus_setup)(struct pci_bus *bus);
......