Commit 8ee3e0d6 authored by Stephen Rothwell, committed by Benjamin Herrenschmidt

powerpc: Remove the main legacy iSeries platform code

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 2d4b9712
@@ -16,7 +16,6 @@ obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/
obj-$(CONFIG_PPC_86xx) += 86xx/
obj-$(CONFIG_PPC_POWERNV) += powernv/
obj-$(CONFIG_PPC_PSERIES) += pseries/
obj-$(CONFIG_PPC_ISERIES) += iseries/
obj-$(CONFIG_PPC_MAPLE) += maple/
obj-$(CONFIG_PPC_PASEMI) += pasemi/
obj-$(CONFIG_PPC_CELL) += cell/
ccflags-y := -mno-minimal-toc
obj-y += exception.o
obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \
hvcall.o proc.o htab.o iommu.o misc.o irq.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_VIOPATH) += viopath.o vio.o
obj-$(CONFIG_MODULES) += ksyms.o
/*
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
#define _PLATFORMS_ISERIES_CALL_HPT_H
/*
* This file contains the "hypervisor call" interface which is used to
* drive the hypervisor from the OS.
*/
#include <asm/iseries/hv_call_sc.h>
#include <asm/iseries/hv_types.h>
#include <asm/mmu.h>
#define HvCallHptGetHptAddress HvCallHpt + 0
#define HvCallHptGetHptPages HvCallHpt + 1
#define HvCallHptSetPp HvCallHpt + 5
#define HvCallHptSetSwBits HvCallHpt + 6
#define HvCallHptUpdate HvCallHpt + 7
#define HvCallHptInvalidateNoSyncICache HvCallHpt + 8
#define HvCallHptGet HvCallHpt + 11
#define HvCallHptFindNextValid HvCallHpt + 12
#define HvCallHptFindValid HvCallHpt + 13
#define HvCallHptAddValidate HvCallHpt + 16
#define HvCallHptInvalidateSetSwBitsGet HvCallHpt + 18
static inline u64 HvCallHpt_getHptAddress(void)
{
return HvCall0(HvCallHptGetHptAddress);
}
static inline u64 HvCallHpt_getHptPages(void)
{
return HvCall0(HvCallHptGetHptPages);
}
static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value)
{
HvCall2(HvCallHptSetPp, hpteIndex, value);
}
static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff)
{
HvCall3(HvCallHptSetSwBits, hpteIndex, bitson, bitsoff);
}
static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex)
{
HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
}
static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson,
u8 bitsoff)
{
u64 compressedStatus;
compressedStatus = HvCall4(HvCallHptInvalidateSetSwBitsGet,
hpteIndex, bitson, bitsoff, 1);
HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
return compressedStatus;
}
static inline u64 HvCallHpt_findValid(struct hash_pte *hpte, u64 vpn)
{
return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
}
static inline u64 HvCallHpt_findNextValid(struct hash_pte *hpte, u32 hpteIndex,
u8 bitson, u8 bitsoff)
{
return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
bitson, bitsoff);
}
static inline void HvCallHpt_get(struct hash_pte *hpte, u32 hpteIndex)
{
HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
}
static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit,
struct hash_pte *hpte)
{
HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
}
#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
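Each HvCallHpt_* inline above is a thin wrapper over the generic HvCallN entry points, with the hypervisor function selected by the HvCallHpt-plus-offset constants. A minimal, hedged sketch of typical use (it assumes this tree's headers and a printk-capable context; the helper name is invented, not part of the removed code):

	#include <linux/printk.h>
	#include "call_hpt.h"

	static void example_dump_hpte(u32 slot)
	{
		struct hash_pte hpte;

		HvCallHpt_get(&hpte, slot);	/* HvCall2Ret16(HvCallHpt + 11, ...) */
		pr_info("HPT: %llu pages; slot %u: v=0x%lx r=0x%lx\n",
			HvCallHpt_getHptPages(), slot, hpte.v, hpte.r);
	}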
/*
* Provides the Hypervisor PCI calls for iSeries Linux Partition.
* Copyright (C) 2001 <Wayne G Holm> <IBM Corporation>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the:
* Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330,
* Boston, MA 02111-1307 USA
*
* Change Activity:
* Created, Jan 9, 2001
*/
#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
#define _PLATFORMS_ISERIES_CALL_PCI_H
#include <asm/iseries/hv_call_sc.h>
#include <asm/iseries/hv_types.h>
/*
* DSA == Direct Select Address
* this struct must be 64 bits in total
*/
struct HvCallPci_DsaAddr {
u16 busNumber; /* PHB index? */
u8 subBusNumber; /* PCI bus number? */
u8 deviceId; /* device and function? */
u8 barNumber;
u8 reserved[3];
};
union HvDsaMap {
u64 DsaAddr;
struct HvCallPci_DsaAddr Dsa;
};
struct HvCallPci_LoadReturn {
u64 rc;
u64 value;
};
enum HvCallPci_DeviceType {
HvCallPci_NodeDevice = 1,
HvCallPci_SpDevice = 2,
HvCallPci_IopDevice = 3,
HvCallPci_BridgeDevice = 4,
HvCallPci_MultiFunctionDevice = 5,
HvCallPci_IoaDevice = 6
};
struct HvCallPci_DeviceInfo {
u32 deviceType; /* See DeviceType enum for values */
};
struct HvCallPci_BusUnitInfo {
u32 sizeReturned; /* length of data returned */
u32 deviceType; /* see DeviceType enum for values */
};
struct HvCallPci_BridgeInfo {
struct HvCallPci_BusUnitInfo busUnitInfo; /* Generic bus unit info */
u8 subBusNumber; /* Bus number of secondary bus */
u8 maxAgents; /* Max idsels on secondary bus */
u8 maxSubBusNumber; /* Max Sub Bus */
u8 logicalSlotNumber; /* Logical Slot Number for IOA */
};
/*
* Maximum BusUnitInfo buffer size. Provided for clients so
* they can allocate a buffer big enough for any type of bus
* unit. Increase as needed.
*/
enum {HvCallPci_MaxBusUnitInfoSize = 128};
struct HvCallPci_BarParms {
u64 vaddr;
u64 raddr;
u64 size;
u64 protectStart;
u64 protectEnd;
u64 relocationOffset;
u64 pciAddress;
u64 reserved[3];
};
enum HvCallPci_VpdType {
HvCallPci_BusVpd = 1,
HvCallPci_BusAdapterVpd = 2
};
#define HvCallPciConfigLoad8 HvCallPci + 0
#define HvCallPciConfigLoad16 HvCallPci + 1
#define HvCallPciConfigLoad32 HvCallPci + 2
#define HvCallPciConfigStore8 HvCallPci + 3
#define HvCallPciConfigStore16 HvCallPci + 4
#define HvCallPciConfigStore32 HvCallPci + 5
#define HvCallPciEoi HvCallPci + 16
#define HvCallPciGetBarParms HvCallPci + 18
#define HvCallPciMaskFisr HvCallPci + 20
#define HvCallPciUnmaskFisr HvCallPci + 21
#define HvCallPciSetSlotReset HvCallPci + 25
#define HvCallPciGetDeviceInfo HvCallPci + 27
#define HvCallPciGetCardVpd HvCallPci + 28
#define HvCallPciBarLoad8 HvCallPci + 40
#define HvCallPciBarLoad16 HvCallPci + 41
#define HvCallPciBarLoad32 HvCallPci + 42
#define HvCallPciBarLoad64 HvCallPci + 43
#define HvCallPciBarStore8 HvCallPci + 44
#define HvCallPciBarStore16 HvCallPci + 45
#define HvCallPciBarStore32 HvCallPci + 46
#define HvCallPciBarStore64 HvCallPci + 47
#define HvCallPciMaskInterrupts HvCallPci + 48
#define HvCallPciUnmaskInterrupts HvCallPci + 49
#define HvCallPciGetBusUnitInfo HvCallPci + 50
static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
u8 deviceId, u32 offset, u16 *value)
{
struct HvCallPci_DsaAddr dsa;
struct HvCallPci_LoadReturn retVal;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumber;
dsa.subBusNumber = subBusNumber;
dsa.deviceId = deviceId;
HvCall3Ret16(HvCallPciConfigLoad16, &retVal, *(u64 *)&dsa, offset, 0);
*value = retVal.value;
return retVal.rc;
}
static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
u8 deviceId, u32 offset, u32 *value)
{
struct HvCallPci_DsaAddr dsa;
struct HvCallPci_LoadReturn retVal;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumber;
dsa.subBusNumber = subBusNumber;
dsa.deviceId = deviceId;
HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
*value = retVal.value;
return retVal.rc;
}
static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
u8 deviceId, u32 offset, u8 value)
{
struct HvCallPci_DsaAddr dsa;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumber;
dsa.subBusNumber = subBusNumber;
dsa.deviceId = deviceId;
return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
}
static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
u8 deviceIdParm)
{
struct HvCallPci_DsaAddr dsa;
struct HvCallPci_LoadReturn retVal;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumberParm;
dsa.subBusNumber = subBusParm;
dsa.deviceId = deviceIdParm;
HvCall1Ret16(HvCallPciEoi, &retVal, *(u64*)&dsa);
return retVal.rc;
}
static inline u64 HvCallPci_getBarParms(u16 busNumberParm, u8 subBusParm,
u8 deviceIdParm, u8 barNumberParm, u64 parms, u32 sizeofParms)
{
struct HvCallPci_DsaAddr dsa;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumberParm;
dsa.subBusNumber = subBusParm;
dsa.deviceId = deviceIdParm;
dsa.barNumber = barNumberParm;
return HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms);
}
static inline u64 HvCallPci_maskFisr(u16 busNumberParm, u8 subBusParm,
u8 deviceIdParm, u64 fisrMask)
{
struct HvCallPci_DsaAddr dsa;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumberParm;
dsa.subBusNumber = subBusParm;
dsa.deviceId = deviceIdParm;
return HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask);
}
static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
u8 deviceIdParm, u64 fisrMask)
{
struct HvCallPci_DsaAddr dsa;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumberParm;
dsa.subBusNumber = subBusParm;
dsa.deviceId = deviceIdParm;
return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
}
static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
u8 deviceNumberParm, u64 parms, u32 sizeofParms)
{
struct HvCallPci_DsaAddr dsa;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumberParm;
dsa.subBusNumber = subBusParm;
dsa.deviceId = deviceNumberParm << 4;
return HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms);
}
static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm, u8 subBusParm,
u8 deviceIdParm, u64 interruptMask)
{
struct HvCallPci_DsaAddr dsa;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumberParm;
dsa.subBusNumber = subBusParm;
dsa.deviceId = deviceIdParm;
return HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask);
}
static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm, u8 subBusParm,
u8 deviceIdParm, u64 interruptMask)
{
struct HvCallPci_DsaAddr dsa;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumberParm;
dsa.subBusNumber = subBusParm;
dsa.deviceId = deviceIdParm;
return HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask);
}
static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm, u8 subBusParm,
u8 deviceIdParm, u64 parms, u32 sizeofParms)
{
struct HvCallPci_DsaAddr dsa;
*((u64*)&dsa) = 0;
dsa.busNumber = busNumberParm;
dsa.subBusNumber = subBusParm;
dsa.deviceId = deviceIdParm;
return HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms,
sizeofParms);
}
static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
u16 sizeParm)
{
u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
sizeParm, HvCallPci_BusVpd);
if (xRc == -1)
return -1;
else
return xRc & 0xFFFF;
}
#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
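Two details in this header are easy to miss: the DSA built inside each wrapper is expected to pack into a single 64-bit doubleword (hence the *(u64 *)&dsa casts), and the config-space loads return the hypervisor rc while handing the data back through a pointer. A hedged sketch of a caller, assuming the surrounding iSeries tree (the helper name is invented; offset 0 is used because it is the PCI vendor-ID register):

	#include "call_pci.h"

	/* Hypothetical helper: read a device's 16-bit PCI vendor ID. */
	static u64 example_read_vendor_id(u16 bus, u8 sub_bus, u8 dev_fn, u16 *vendor)
	{
		return HvCallPci_configLoad16(bus, sub_bus, dev_fn, 0, vendor);
	}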
/*
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ISERIES_CALL_SM_H
#define _ISERIES_CALL_SM_H
/*
* This file contains the "hypervisor call" interface which is used to
* drive the hypervisor from the OS.
*/
#include <asm/iseries/hv_call_sc.h>
#include <asm/iseries/hv_types.h>
#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
u64 indexIntoBitMap)
{
return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
}
#endif /* _ISERIES_CALL_SM_H */
/*
* Low level routines for legacy iSeries support.
*
* Extracted from head_64.S
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
* Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
* Adapted for Power Macintosh by Paul Mackerras.
* Low-level exception handlers and MMU support
* rewritten by Paul Mackerras.
* Copyright (C) 1996 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
*
* This file contains the low-level support and setup for the
* PowerPC-64 platform, including trap and interrupt dispatch.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/reg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include "exception.h"
.text
.globl system_reset_iSeries
system_reset_iSeries:
bl .relative_toc
mfspr r13,SPRN_SPRG3 /* Get alpaca address */
LOAD_REG_ADDR(r23, alpaca)
li r0,ALPACA_SIZE
sub r23,r13,r23
divdu r24,r23,r0 /* r24 has cpu number */
cmpwi 0,r24,0 /* Are we processor 0? */
bne 1f
LOAD_REG_ADDR(r13, boot_paca)
mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
mfmsr r23
ori r23,r23,MSR_RI
mtmsrd r23 /* RI on */
b .__start_initialization_iSeries /* Start up the first processor */
1: mfspr r4,SPRN_CTRLF
li r5,CTRL_RUNLATCH /* Turn off the run light */
andc r4,r4,r5
mtspr SPRN_CTRLT,r4
/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
/* In the UP case we'll yield() later, and we will not access the paca anyway */
#ifdef CONFIG_SMP
iSeries_secondary_wait_paca:
HMT_LOW
LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
ld r23,0(r23)
cmpdi 0,r23,0
bne 2f /* go on when the master is ready */
/* Keep poking the Hypervisor until we're released */
/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
lis r3,0x8002
rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
b iSeries_secondary_wait_paca
2:
HMT_MEDIUM
sync
LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */
lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */
cmpld 0,r24,r3 /* is our cpu number allocated? */
bge iSeries_secondary_yield /* no, yield forever */
/* Load our paca now that it's been allocated */
LOAD_REG_ADDR(r13, paca)
ld r13,0(r13)
mulli r0,r24,PACA_SIZE
add r13,r13,r0
mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
mfmsr r23
ori r23,r23,MSR_RI
mtmsrd r23 /* RI on */
iSeries_secondary_smp_loop:
lbz r23,PACAPROCSTART(r13) /* Test if this processor
* should start */
cmpwi 0,r23,0
bne 3f /* go on when we are told */
HMT_LOW
/* Let the Hypervisor know we are alive */
/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
lis r3,0x8002
rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
b iSeries_secondary_smp_loop /* wait for signal to start */
3:
HMT_MEDIUM
sync
LOAD_REG_ADDR(r3,current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
ldx r3,r3,r28
addi r1,r3,THREAD_SIZE
subi r1,r1,STACK_FRAME_OVERHEAD
b __secondary_start /* Loop until told to go */
#endif /* CONFIG_SMP */
iSeries_secondary_yield:
/* Yield the processor. This is required for non-SMP kernels
which are running on multi-threaded machines. */
HMT_LOW
lis r3,0x8000
rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
li r4,0 /* "yield timed" */
li r5,-1 /* "yield forever" */
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
b iSeries_secondary_yield /* If SMP not configured, secondaries
* loop forever */
/*** ISeries-LPAR interrupt handlers ***/
STD_EXCEPTION_ISERIES(machine_check, PACA_EXMC)
.globl data_access_iSeries
data_access_iSeries:
mtspr SPRN_SPRG_SCRATCH0,r13
BEGIN_FTR_SECTION
mfspr r13,SPRN_SPRG_PACA
std r9,PACA_EXSLB+EX_R9(r13)
std r10,PACA_EXSLB+EX_R10(r13)
mfspr r10,SPRN_DAR
mfspr r9,SPRN_DSISR
srdi r10,r10,60
rlwimi r10,r9,16,0x20
mfcr r9
cmpwi r10,0x2c
beq .do_stab_bolted_iSeries
ld r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXGEN+EX_R11(r13)
ld r11,PACA_EXSLB+EX_R9(r13)
std r12,PACA_EXGEN+EX_R12(r13)
mfspr r12,SPRN_SPRG_SCRATCH0
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R9(r13)
std r12,PACA_EXGEN+EX_R13(r13)
EXCEPTION_PROLOG_ISERIES_1
FTR_SECTION_ELSE
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
EXCEPTION_PROLOG_ISERIES_1
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
b data_access_common
.do_stab_bolted_iSeries:
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
mfspr r10,SPRN_SPRG_SCRATCH0
std r10,PACA_EXSLB+EX_R13(r13)
EXCEPTION_PROLOG_ISERIES_1
b .do_stab_bolted
.globl data_access_slb_iSeries
data_access_slb_iSeries:
mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
std r9,PACA_EXSLB+EX_R9(r13)
mfcr r9
#ifdef __DISABLED__
cmpdi r3,0
bge slb_miss_user_iseries
#endif
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
mfspr r10,SPRN_SPRG_SCRATCH0
std r10,PACA_EXSLB+EX_R13(r13)
ld r12,PACALPPACAPTR(r13)
ld r12,LPPACASRR1(r12)
b .slb_miss_realmode
STD_EXCEPTION_ISERIES(instruction_access, PACA_EXGEN)
.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
ld r3,PACALPPACAPTR(r13)
ld r3,LPPACASRR0(r3) /* get SRR0 value */
std r9,PACA_EXSLB+EX_R9(r13)
mfcr r9
#ifdef __DISABLED__
cmpdi r3,0
bge slb_miss_user_iseries
#endif
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
mfspr r10,SPRN_SPRG_SCRATCH0
std r10,PACA_EXSLB+EX_R13(r13)
ld r12,PACALPPACAPTR(r13)
ld r12,LPPACASRR1(r12)
b .slb_miss_realmode
#ifdef __DISABLED__
slb_miss_user_iseries:
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
mfspr r10,SPRG_SCRATCH0
ld r11,PACA_EXSLB+EX_R9(r13)
ld r12,PACA_EXSLB+EX_R3(r13)
std r10,PACA_EXGEN+EX_R13(r13)
std r11,PACA_EXGEN+EX_R9(r13)
std r12,PACA_EXGEN+EX_R3(r13)
EXCEPTION_PROLOG_ISERIES_1
b slb_miss_user_common
#endif
MASKABLE_EXCEPTION_ISERIES(hardware_interrupt)
STD_EXCEPTION_ISERIES(alignment, PACA_EXGEN)
STD_EXCEPTION_ISERIES(program_check, PACA_EXGEN)
STD_EXCEPTION_ISERIES(fp_unavailable, PACA_EXGEN)
MASKABLE_EXCEPTION_ISERIES(decrementer)
STD_EXCEPTION_ISERIES(trap_0a, PACA_EXGEN)
STD_EXCEPTION_ISERIES(trap_0b, PACA_EXGEN)
.globl system_call_iSeries
system_call_iSeries:
mr r9,r13
mfspr r13,SPRN_SPRG_PACA
EXCEPTION_PROLOG_ISERIES_1
b system_call_common
STD_EXCEPTION_ISERIES(single_step, PACA_EXGEN)
STD_EXCEPTION_ISERIES(trap_0e, PACA_EXGEN)
STD_EXCEPTION_ISERIES(performance_monitor, PACA_EXGEN)
decrementer_iSeries_masked:
/* We may not have a valid TOC pointer in here. */
li r11,1
ld r12,PACALPPACAPTR(r13)
stb r11,LPPACADECRINT(r12)
li r12,-1
clrldi r12,r12,33 /* set DEC to 0x7fffffff */
mtspr SPRN_DEC,r12
/* fall through */
hardware_interrupt_iSeries_masked:
mtcrf 0x80,r9 /* Restore regs */
ld r12,PACALPPACAPTR(r13)
ld r11,LPPACASRR0(r12)
ld r12,LPPACASRR1(r12)
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
rfid
b . /* prevent speculative execution */
_INIT_STATIC(__start_initialization_iSeries)
/* Clear out the BSS */
LOAD_REG_ADDR(r11,__bss_stop)
LOAD_REG_ADDR(r8,__bss_start)
sub r11,r11,r8 /* bss size */
addi r11,r11,7 /* round up to an even double word */
rldicl. r11,r11,61,3 /* shift right by 3 */
beq 4f
addi r8,r8,-8
li r0,0
mtctr r11 /* zero this many doublewords */
3: stdu r0,8(r8)
bdnz 3b
4:
LOAD_REG_ADDR(r1,init_thread_union)
addi r1,r1,THREAD_SIZE
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
bl .iSeries_early_setup
bl .early_setup
/* relocation is on at this point */
b .start_here_common
#ifndef _ASM_POWERPC_ISERIES_EXCEPTION_H
#define _ASM_POWERPC_ISERIES_EXCEPTION_H
/*
* Extracted from head_64.S
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
* Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
* Adapted for Power Macintosh by Paul Mackerras.
* Low-level exception handlers and MMU support
* rewritten by Paul Mackerras.
* Copyright (C) 1996 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
*
* This file contains the low-level support and setup for the
* PowerPC-64 platform, including trap and interrupt dispatch.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/exception-64s.h>
#define EXCEPTION_PROLOG_ISERIES_1 \
mfmsr r10; \
ld r12,PACALPPACAPTR(r13); \
ld r11,LPPACASRR0(r12); \
ld r12,LPPACASRR1(r12); \
ori r10,r10,MSR_RI; \
mtmsrd r10,1
#define STD_EXCEPTION_ISERIES(label, area) \
.globl label##_iSeries; \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
EXCEPTION_PROLOG_1(area, NOTEST, 0); \
EXCEPTION_PROLOG_ISERIES_1; \
b label##_common
#define MASKABLE_EXCEPTION_ISERIES(label) \
.globl label##_iSeries; \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
lbz r10,PACASOFTIRQEN(r13); \
cmpwi 0,r10,0; \
beq- label##_iSeries_masked; \
EXCEPTION_PROLOG_ISERIES_1; \
b label##_common; \
#endif /* _ASM_POWERPC_ISERIES_EXCEPTION_H */
/*
* iSeries hashtable management.
* Derived from pSeries_htab.c
*
* SMP scalability work:
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/abs_addr.h>
#include <linux/spinlock.h>
#include "call_hpt.h"
static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp;
/*
* Very primitive algorithm for picking up a lock
*/
static inline void iSeries_hlock(unsigned long slot)
{
if (slot & 0x8)
slot = ~slot;
spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}
static inline void iSeries_hunlock(unsigned long slot)
{
if (slot & 0x8)
slot = ~slot;
spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize, int ssize)
{
long slot;
struct hash_pte lhpte;
int secondary = 0;
BUG_ON(psize != MMU_PAGE_4K);
/*
* The hypervisor tries both primary and secondary.
* If we are being called to insert in the secondary,
* it means we have already tried both primary and secondary,
* so we return failure immediately.
*/
if (vflags & HPTE_V_SECONDARY)
return -1;
iSeries_hlock(hpte_group);
slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
if (unlikely(lhpte.v & HPTE_V_VALID)) {
if (vflags & HPTE_V_BOLTED) {
HvCallHpt_setSwBits(slot, 0x10, 0);
HvCallHpt_setPp(slot, PP_RWXX);
iSeries_hunlock(hpte_group);
if (slot < 0)
return 0x8 | (slot & 7);
else
return slot & 7;
}
BUG();
}
if (slot == -1) { /* No available entry found in either group */
iSeries_hunlock(hpte_group);
return -1;
}
if (slot < 0) { /* MSB set means secondary group */
vflags |= HPTE_V_SECONDARY;
secondary = 1;
slot &= 0x7fffffffffffffff;
}
lhpte.v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
/* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte);
iSeries_hunlock(hpte_group);
return (secondary << 3) | (slot & 7);
}
static unsigned long iSeries_hpte_getword0(unsigned long slot)
{
struct hash_pte hpte;
HvCallHpt_get(&hpte, slot);
return hpte.v;
}
static long iSeries_hpte_remove(unsigned long hpte_group)
{
unsigned long slot_offset;
int i;
unsigned long hpte_v;
/* Pick a random slot to start at */
slot_offset = mftb() & 0x7;
iSeries_hlock(hpte_group);
for (i = 0; i < HPTES_PER_GROUP; i++) {
hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
if (! (hpte_v & HPTE_V_BOLTED)) {
HvCallHpt_invalidateSetSwBitsGet(hpte_group +
slot_offset, 0, 0);
iSeries_hunlock(hpte_group);
return i;
}
slot_offset++;
slot_offset &= 0x7;
}
iSeries_hunlock(hpte_group);
return -1;
}
/*
* The HyperVisor expects the "flags" argument in this form:
* bits 0..59 : reserved
* bit 60 : N
* bits 61..63 : PP2,PP1,PP0
*/
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int psize, int ssize, int local)
{
struct hash_pte hpte;
unsigned long want_v;
iSeries_hlock(slot);
HvCallHpt_get(&hpte, slot);
want_v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M);
if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
/*
* Hypervisor expects bits as NPPP, which is
* different from how they are mapped in our PP.
*/
HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
iSeries_hunlock(slot);
return 0;
}
iSeries_hunlock(slot);
return -1;
}
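The bit shuffle in iSeries_hpte_updatepp() is easier to follow with concrete values: in the newpp the kernel passes down, N sits in bit 2 and PP in bits 1:0, while (per the comment above) the hypervisor wants NPPP, i.e. N one bit higher. A standalone, user-space illustration of that repacking (not kernel code):

	#include <stdio.h>

	/* Mirror of (newpp & 0x3) | ((newpp & 0x4) << 1) from iSeries_hpte_updatepp(). */
	static unsigned long pp_to_nppp(unsigned long newpp)
	{
		return (newpp & 0x3) | ((newpp & 0x4) << 1);
	}

	int main(void)
	{
		/* e.g. newpp = 0x6 (N=1, PP=0b10) becomes 0xa for the hypervisor */
		printf("0x%lx -> 0x%lx\n", 0x6UL, pp_to_nppp(0x6UL));
		return 0;
	}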
/*
* Functions used to find the PTE for a particular virtual address.
* Only used during boot when bolting pages.
*
* Input : vpn : virtual page number
* Output: PTE index within the page table of the entry
* -1 on failure
*/
static long iSeries_hpte_find(unsigned long vpn)
{
struct hash_pte hpte;
long slot;
/*
* The HvCallHpt_findValid interface is as follows:
* 0xffffffffffffffff : No entry found.
* 0x00000000xxxxxxxx : Entry found in primary group, slot x
* 0x80000000xxxxxxxx : Entry found in secondary group, slot x
*/
slot = HvCallHpt_findValid(&hpte, vpn);
if (hpte.v & HPTE_V_VALID) {
if (slot < 0) {
slot &= 0x7fffffffffffffff;
slot = -slot;
}
} else
slot = -1;
return slot;
}
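The comment above documents HvCallHpt_findValid()'s return encoding, and iSeries_hpte_find() folds it into a signed slot number, using a negative value to flag a secondary-group hit. A standalone illustration of that decoding (assumes a 64-bit long, as on ppc64; not kernel code):

	#include <stdio.h>

	/* Mirrors iSeries_hpte_find(): a negative result means secondary group. */
	static long decode_find_slot(unsigned long hv_ret, int valid)
	{
		long slot = (long)hv_ret;

		if (!valid)
			return -1;		/* no entry found */
		if (slot < 0) {			/* 0x80000000xxxxxxxx: secondary */
			slot &= 0x7fffffffffffffff;
			slot = -slot;
		}
		return slot;
	}

	int main(void)
	{
		printf("%ld\n", decode_find_slot(0x12UL, 1));			/* 18 */
		printf("%ld\n", decode_find_slot(0x8000000000000012UL, 1));	/* -18 */
		return 0;
	}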
/*
* Update the page protection bits. Intended to be used to create
* guard pages for kernel data structures on pages which are bolted
* in the HPT. Assumes pages being operated on will not be stolen.
* Does not work on large pages.
*
* No need to lock here because we should be the only user.
*/
static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
int psize, int ssize)
{
unsigned long vsid,va,vpn;
long slot;
BUG_ON(psize != MMU_PAGE_4K);
vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> HW_PAGE_SHIFT;
slot = iSeries_hpte_find(vpn);
if (slot == -1)
panic("updateboltedpp: Could not find page to bolt\n");
HvCallHpt_setPp(slot, newpp);
}
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
int psize, int ssize, int local)
{
unsigned long hpte_v;
unsigned long avpn = va >> 23;
unsigned long flags;
local_irq_save(flags);
iSeries_hlock(slot);
hpte_v = iSeries_hpte_getword0(slot);
if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
iSeries_hunlock(slot);
local_irq_restore(flags);
}
void __init hpte_init_iSeries(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(iSeries_hlocks); i++)
spin_lock_init(&iSeries_hlocks[i]);
ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
ppc_md.hpte_insert = iSeries_hpte_insert;
ppc_md.hpte_remove = iSeries_hpte_remove;
}
/*
* This file contains the code to perform calls to the
* iSeries LPAR hypervisor
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/ptrace.h> /* XXX for STACK_FRAME_OVERHEAD */
.text
/*
* Hypervisor call
*
* Invoke the iSeries hypervisor via the System Call instruction
* Parameters are passed to this routine in registers r3 - r10
*
* r3 contains the HV function to be called
* r4-r10 contain the operands to the hypervisor function
*
*/
_GLOBAL(HvCall)
_GLOBAL(HvCall0)
_GLOBAL(HvCall1)
_GLOBAL(HvCall2)
_GLOBAL(HvCall3)
_GLOBAL(HvCall4)
_GLOBAL(HvCall5)
_GLOBAL(HvCall6)
_GLOBAL(HvCall7)
mfcr r0
std r0,-8(r1)
stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
/* r0 = 0xffffffffffffffff indicates a hypervisor call */
li r0,-1
/* Invoke the hypervisor */
sc
ld r1,0(r1)
ld r0,-8(r1)
mtcrf 0xff,r0
/* return to caller, return value in r3 */
blr
_GLOBAL(HvCall0Ret16)
_GLOBAL(HvCall1Ret16)
_GLOBAL(HvCall2Ret16)
_GLOBAL(HvCall3Ret16)
_GLOBAL(HvCall4Ret16)
_GLOBAL(HvCall5Ret16)
_GLOBAL(HvCall6Ret16)
_GLOBAL(HvCall7Ret16)
mfcr r0
std r0,-8(r1)
std r31,-16(r1)
stdu r1,-(STACK_FRAME_OVERHEAD+32)(r1)
mr r31,r4
li r0,-1
mr r4,r5
mr r5,r6
mr r6,r7
mr r7,r8
mr r8,r9
mr r9,r10
sc
std r3,0(r31)
std r4,8(r31)
mr r3,r5
ld r1,0(r1)
ld r0,-8(r1)
mtcrf 0xff,r0
ld r31,-16(r1)
blr
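The *Ret16 variants handle hypervisor functions that return a 16-byte structure: the caller's result pointer (the second C argument) is parked in r31, the remaining operands are shuffled down one register, and after the sc the first two returned doublewords are stored through r31 while r5 becomes the C return value. A hedged C-level sketch, re-deriving what HvCallHpt_get() in call_hpt.h above already wraps (the helper name is invented):

	#include "call_hpt.h"

	/* Illustrative only: the raw Ret16 calling pattern. */
	static void example_read_slot_raw(u32 slot)
	{
		struct hash_pte hpte;	/* 16 bytes: hpte.v and hpte.r */

		HvCall2Ret16(HvCallHptGet, &hpte, slot, 0);
		/* hpte.v / hpte.r now hold the doublewords stored at 0(r31) / 8(r31) */
	}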
/*
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <asm/page.h>
#include <asm/abs_addr.h>
#include <asm/iseries/hv_call.h>
#include <asm/iseries/hv_call_sc.h>
#include <asm/iseries/hv_types.h>
void HvCall_writeLogBuffer(const void *buffer, u64 len)
{
struct HvLpBufferList hv_buf;
u64 left_this_page;
u64 cur = virt_to_abs(buffer);
while (len) {
hv_buf.addr = cur;
left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
if (left_this_page > len)
left_this_page = len;
hv_buf.len = left_this_page;
len -= left_this_page;
HvCall2(HvCallBaseWriteLogBuffer,
virt_to_abs(&hv_buf),
left_this_page);
cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
}
}
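HvCall_writeLogBuffer() feeds the hypervisor one HvLpBufferList entry at a time, each clipped at a HW page boundary, since the address handed over is an absolute (real) address for that chunk. Usage is just a pointer and a length; a hedged example (the helper name is invented, and the prototype is assumed to come from <asm/iseries/hv_call.h>, which this file includes):

	#include <linux/string.h>
	#include <asm/iseries/hv_call.h>

	static void example_hvlog(const char *msg)
	{
		HvCall_writeLogBuffer(msg, strlen(msg));
	}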
/*
* Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/export.h>
#include <asm/iseries/hv_lp_config.h>
#include "it_lp_naca.h"
HvLpIndex HvLpConfig_getLpIndex_outline(void)
{
return HvLpConfig_getLpIndex();
}
EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline);
HvLpIndex HvLpConfig_getLpIndex(void)
{
return itLpNaca.xLpIndex;
}
EXPORT_SYMBOL(HvLpConfig_getLpIndex);
HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
{
return itLpNaca.xPrimaryLpIndex;
}
EXPORT_SYMBOL_GPL(HvLpConfig_getPrimaryLpIndex);
/*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup:
*
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
* Copyright (C) 2006 Olof Johansson <olof@lixom.net>
*
* Dynamic DMA mapping support, iSeries-specific parts.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/hv_call_event.h>
#include <asm/iseries/iommu.h>
static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
u64 rc;
u64 tce, rpn;
while (npages--) {
rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
if (tbl->it_type == TCE_VB) {
/* Virtual Bus */
tce |= TCE_VALID|TCE_ALLIO;
if (direction != DMA_TO_DEVICE)
tce |= TCE_VB_WRITE;
} else {
/* PCI Bus */
tce |= TCE_PCI_READ; /* Read allowed */
if (direction != DMA_TO_DEVICE)
tce |= TCE_PCI_WRITE;
}
rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
if (rc)
panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
rc);
index++;
uaddr += TCE_PAGE_SIZE;
}
return 0;
}
static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
{
u64 rc;
while (npages--) {
rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
if (rc)
panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
rc);
index++;
}
}
/*
* Structure passed to HvCallXm_getTceTableParms
*/
struct iommu_table_cb {
unsigned long itc_busno; /* Bus number for this tce table */
unsigned long itc_start; /* Will be NULL for secondary */
unsigned long itc_totalsize; /* Size (in pages) of whole table */
unsigned long itc_offset; /* Index into real tce table of the
start of our section */
unsigned long itc_size; /* Size (in pages) of our section */
unsigned long itc_index; /* Index of this tce table */
unsigned short itc_maxtables; /* Max num of tables for partition */
unsigned char itc_virtbus; /* Flag to indicate virtual bus */
unsigned char itc_slotno; /* IOA Tce Slot Index */
unsigned char itc_rsvd[4];
};
/*
* Call Hv with the architected data structure to get TCE table info.
* Put the returned data into the Linux representation of the
* TCE table data.
* The Hardware Tce table comes in three flavors.
* 1. TCE table shared between Buses.
* 2. TCE table per Bus.
* 3. TCE Table per IOA.
*/
void iommu_table_getparms_iSeries(unsigned long busno,
unsigned char slotno,
unsigned char virtbus,
struct iommu_table* tbl)
{
struct iommu_table_cb *parms;
parms = kzalloc(sizeof(*parms), GFP_KERNEL);
if (parms == NULL)
panic("PCI_DMA: TCE Table Allocation failed.");
parms->itc_busno = busno;
parms->itc_slotno = slotno;
parms->itc_virtbus = virtbus;
HvCallXm_getTceTableParms(iseries_hv_addr(parms));
if (parms->itc_size == 0)
panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
/* itc_size is in pages worth of table, it_size is in # of entries */
tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
tbl->it_busno = parms->itc_busno;
tbl->it_offset = parms->itc_offset;
tbl->it_index = parms->itc_index;
tbl->it_blocksize = 1;
tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
kfree(parms);
}
#ifdef CONFIG_PCI
/*
* This function compares the known tables to find an iommu_table
* that has already been built for hardware TCEs.
*/
static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
{
struct device_node *node;
for (node = NULL; (node = of_find_all_nodes(node)); ) {
struct pci_dn *pdn = PCI_DN(node);
struct iommu_table *it;
if (pdn == NULL)
continue;
it = pdn->iommu_table;
if ((it != NULL) &&
(it->it_type == TCE_PCI) &&
(it->it_offset == tbl->it_offset) &&
(it->it_index == tbl->it_index) &&
(it->it_size == tbl->it_size)) {
of_node_put(node);
return it;
}
}
return NULL;
}
static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
{
struct iommu_table *tbl;
struct device_node *dn = pci_device_to_OF_node(pdev);
struct pci_dn *pdn = PCI_DN(dn);
const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);
BUG_ON(lsn == NULL);
tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
/* Look for existing tce table */
pdn->iommu_table = iommu_table_find(tbl);
if (pdn->iommu_table == NULL)
pdn->iommu_table = iommu_init_table(tbl, -1);
else
kfree(tbl);
set_iommu_table_base(&pdev->dev, pdn->iommu_table);
}
#else
#define pci_dma_dev_setup_iseries NULL
#endif
static struct iommu_table veth_iommu_table;
static struct iommu_table vio_iommu_table;
void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
DMA_BIT_MASK(32), flag, -1);
}
EXPORT_SYMBOL_GPL(iseries_hv_alloc);
void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
{
iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
}
EXPORT_SYMBOL_GPL(iseries_hv_free);
dma_addr_t iseries_hv_map(void *vaddr, size_t size,
enum dma_data_direction direction)
{
return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
(unsigned long)vaddr % PAGE_SIZE, size,
DMA_BIT_MASK(32), direction, NULL);
}
void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
}
void __init iommu_vio_init(void)
{
iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
veth_iommu_table.it_size /= 2;
vio_iommu_table = veth_iommu_table;
vio_iommu_table.it_offset += veth_iommu_table.it_size;
if (!iommu_init_table(&veth_iommu_table, -1))
printk("Virtual Bus VETH TCE table failed.\n");
if (!iommu_init_table(&vio_iommu_table, -1))
printk("Virtual Bus VIO TCE table failed.\n");
}
struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
{
if (strcmp(dev->type, "network") == 0)
return &veth_iommu_table;
return &vio_iommu_table;
}
void iommu_init_early_iSeries(void)
{
ppc_md.tce_build = tce_build_iSeries;
ppc_md.tce_free = tce_free_iSeries;
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
set_pci_dma_ops(&dma_iommu_ops);
}
/*
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ISERIES_IPL_PARMS_H
#define _ISERIES_IPL_PARMS_H
/*
* This struct maps the IPL Parameters DMA'd from the SP.
*
* Warning:
* This data must map in exactly 64 bytes and match the architecture for
* the IPL parms
*/
#include <asm/types.h>
struct ItIplParmsReal {
u8 xFormat; // Defines format of IplParms x00-x00
u8 xRsvd01:6; // Reserved x01-x01
u8 xAlternateSearch:1; // Alternate search indicator ...
u8 xUaSupplied:1; // UA Supplied on programmed IPL...
u8 xLsUaFormat; // Format byte for UA x02-x02
u8 xRsvd02; // Reserved x03-x03
u32 xLsUa; // LS UA x04-x07
u32 xUnusedLsLid; // First OS LID to load x08-x0B
u16 xLsBusNumber; // LS Bus Number x0C-x0D
u8 xLsCardAdr; // LS Card Address x0E-x0E
u8 xLsBoardAdr; // LS Board Address x0F-x0F
u32 xRsvd03; // Reserved x10-x13
u8 xSpcnPresent:1; // SPCN present x14-x14
u8 xCpmPresent:1; // CPM present ...
u8 xRsvd04:6; // Reserved ...
u8 xRsvd05:4; // Reserved x15-x15
u8 xKeyLock:4; // Keylock setting ...
u8 xRsvd06:6; // Reserved x16-x16
u8 xIplMode:2; // Ipl mode (A|B|C|D) ...
u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17
u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiated x18-x19
u16 xPowerOnResetIpl:1; // Indicate POR condition ...
u16 xMainStorePreserved:1; // Main Storage is preserved ...
u16 xRsvd07:13; // Reserved ...
u16 xIplSource:16; // Ipl source x1A-x1B
u8 xIplReason:8; // Reason for this IPL x1C-x1C
u8 xRsvd08; // Reserved x1D-x1D
u16 xRsvd09; // Reserved x1E-x1F
u16 xSysBoxType; // System Box Type x20-x21
u16 xSysProcType; // System Processor Type x22-x23
u32 xRsvd10; // Reserved x24-x27
u64 xRsvd11; // Reserved x28-x2F
u64 xRsvd12; // Reserved x30-x37
u64 xRsvd13; // Reserved x38-x3F
};
#endif /* _ISERIES_IPL_PARMS_H */
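The warning above says this structure must map to exactly 64 bytes (the offset comments run x00 through x3F). A hedged sketch of the compile-time guard a user of the header could add (the header file name and function are assumptions):

	#include <linux/kernel.h>	/* BUILD_BUG_ON, its home in this era's tree */
	#include "ipl_parms.h"		/* assumed name of the header above */

	static inline void it_ipl_parms_check_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct ItIplParmsReal) != 64);
	}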
#ifndef _ISERIES_IRQ_H
#define _ISERIES_IRQ_H
#ifdef CONFIG_PCI
extern void iSeries_init_IRQ(void);
extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
extern void iSeries_activate_IRQs(void);
#else
#define iSeries_init_IRQ NULL
#endif
extern unsigned int iSeries_get_irq(void);
#endif /* _ISERIES_IRQ_H */
/*
* Copyright (C) 2002 Dave Boutcher IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
#define _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
/*
* This struct maps the panel information
*
* Warning:
* This data must match the architecture for the panel information
*/
#include <asm/types.h>
struct ItExtVpdPanel {
/* Definition of the Extended Vpd On Panel Data Area */
char systemSerial[8];
char mfgID[4];
char reserved1[24];
char machineType[4];
char systemID[6];
char somUniqueCnt[4];
char serialNumberCount;
char reserved2[7];
u16 bbu3;
u16 bbu2;
u16 bbu1;
char xLocationLabel[8];
u8 xRsvd1[6];
u16 xFrameId;
u8 xRsvd2[48];
};
extern struct ItExtVpdPanel xItExtVpdPanel;
#endif /* _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H */
/*
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PLATFORMS_ISERIES_IT_LP_NACA_H
#define _PLATFORMS_ISERIES_IT_LP_NACA_H
#include <linux/types.h>
/*
* This control block contains the data that is shared between the
* hypervisor (PLIC) and the OS.
*/
struct ItLpNaca {
// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
u32 xDesc; // Eye catcher x00-x03
u16 xSize; // Size of this class x04-x05
u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
u8 xServiceLpIndex; // LP Ind of Service Focal Point x0A-x0A
u8 xLpIndex; // LP Index x0B-x0B
u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
u8 xPirEnvironMode; // Piranha or hardware x10-x10
u8 xPirConsoleMode; // Piranha console indicator x11-x11
u8 xPirDasdMode; // Piranha dasd indicator x12-x12
u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
u8 flags; // flags, see below x18-x1F
u8 xSpVpdFormat; // VPD areas are in CSP format ...
u8 xIntProcRatio; // Ratio of int procs to procs ...
u8 xRsvd1_2[5]; // Reserved ...
u16 xRsvd1_3; // Reserved x20-x21
u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
u64 xLoadAreaAddr; // ER address of load area x28-x2F
u32 xLoadAreaChunks; // Chunks for the load area x30-x33
u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
// doing an ASR switch on PASE
// system call.
u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
u8 xRsvd1_4[64]; // x40-x7F
// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
u8 xRsvd2_0[128]; // Reserved x00-x7F
// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
// NB: Padding required to keep xInterruptHdlr at x300 which is required
// for v4r4 PLIC.
u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F
u8 xRsvd3_0[384]; // Reserved 180-2FF
// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt
// handlers
u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF
};
extern struct ItLpNaca itLpNaca;
#define ITLPNACA_LPAR 0x80 /* Is LPAR installed on the system */
#define ITLPNACA_PARTITIONED 0x40 /* Is the system partitioned */
#define ITLPNACA_HWSYNCEDTBS 0x20 /* Hardware synced TBs */
#define ITLPNACA_HMTINT 0x10 /* Utilize MHT for interrupts */
#endif /* _PLATFORMS_ISERIES_IT_LP_NACA_H */
/*
* (C) 2001-2005 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/export.h>
#include <asm/hw_irq.h>
#include <asm/iseries/hv_call_sc.h>
EXPORT_SYMBOL(HvCall0);
EXPORT_SYMBOL(HvCall1);
EXPORT_SYMBOL(HvCall2);
EXPORT_SYMBOL(HvCall3);
EXPORT_SYMBOL(HvCall4);
EXPORT_SYMBOL(HvCall5);
EXPORT_SYMBOL(HvCall6);
EXPORT_SYMBOL(HvCall7);
/*
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>
#include "it_lp_naca.h"
/*
* The LpQueue is used to pass event data from the hypervisor to
* the partition. This is where I/O interrupt events are communicated.
*
* It is written to by the hypervisor so cannot end up in the BSS.
*/
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
static char *event_types[HvLpEvent_Type_NumTypes] = {
"Hypervisor",
"Machine Facilities",
"Session Manager",
"SPD I/O",
"Virtual Bus",
"PCI I/O",
"RIO I/O",
"Virtual Lan",
"Virtual I/O"
};
/* Array of LpEvent handler functions */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
static struct HvLpEvent * get_next_hvlpevent(void)
{
struct HvLpEvent * event;
event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
if (hvlpevent_is_valid(event)) {
/* rmb() needed only for weakly consistent machines (regatta) */
rmb();
/* Set pointer to next potential event */
hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
IT_LP_EVENT_ALIGN;
/* Wrap to beginning if no room at end */
if (hvlpevent_queue.hq_current_event >
hvlpevent_queue.hq_last_event) {
hvlpevent_queue.hq_current_event =
hvlpevent_queue.hq_event_stack;
}
} else {
event = NULL;
}
return event;
}
static unsigned long spread_lpevents = NR_CPUS;
int hvlpevent_is_pending(void)
{
struct HvLpEvent *next_event;
if (smp_processor_id() >= spread_lpevents)
return 0;
next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
return hvlpevent_is_valid(next_event) ||
hvlpevent_queue.hq_overflow_pending;
}
static void hvlpevent_clear_valid(struct HvLpEvent * event)
{
/* Tell the Hypervisor that we're done with this event.
* Also clear bits within this event that might look like valid bits.
* ie. on 64-byte boundaries.
*/
struct HvLpEvent *tmp;
unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
IT_LP_EVENT_ALIGN) - 1;
switch (extra) {
case 3:
tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
hvlpevent_invalidate(tmp);
case 2:
tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
hvlpevent_invalidate(tmp);
case 1:
tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
hvlpevent_invalidate(tmp);
}
mb();
hvlpevent_invalidate(event);
}
void process_hvlpevents(void)
{
struct HvLpEvent * event;
restart:
/* If we have recursed, just return */
if (!spin_trylock(&hvlpevent_queue.hq_lock))
return;
for (;;) {
event = get_next_hvlpevent();
if (event) {
/* Call appropriate handler here, passing
* a pointer to the LpEvent. The handler
* must make a copy of the LpEvent if it
* needs it in a bottom half. (perhaps for
* an ACK)
*
* Handlers are responsible for ACK processing
*
* The Hypervisor guarantees that LpEvents will
* only be delivered with types that we have
* registered for, so no type check is necessary
* here!
*/
if (event->xType < HvLpEvent_Type_NumTypes)
__get_cpu_var(hvlpevent_counts)[event->xType]++;
if (event->xType < HvLpEvent_Type_NumTypes &&
lpEventHandler[event->xType])
lpEventHandler[event->xType](event);
else {
u8 type = event->xType;
/*
* Don't printk in the spinlock as printk
* may require ack events from the HV to send
* any characters there.
*/
hvlpevent_clear_valid(event);
spin_unlock(&hvlpevent_queue.hq_lock);
printk(KERN_INFO
"Unexpected Lp Event type=%d\n", type);
goto restart;
}
hvlpevent_clear_valid(event);
} else if (hvlpevent_queue.hq_overflow_pending)
/*
* No more valid events. If overflow events are
* pending process them
*/
HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
else
break;
}
spin_unlock(&hvlpevent_queue.hq_lock);
}
static int set_spread_lpevents(char *str)
{
unsigned long val = simple_strtoul(str, NULL, 0);
/*
* The parameter is the number of processors to share in processing
* lp events.
*/
if (( val > 0) && (val <= NR_CPUS)) {
spread_lpevents = val;
printk("lpevent processing spread over %ld processors\n", val);
} else {
printk("invalid spread_lpevents %ld\n", val);
}
return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
void __init setup_hvlpevent_queue(void)
{
void *eventStack;
spin_lock_init(&hvlpevent_queue.hq_lock);
/* Allocate a page for the Event Stack. */
eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
/* Invoke the hypervisor to initialize the event stack */
HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
hvlpevent_queue.hq_event_stack = eventStack;
hvlpevent_queue.hq_current_event = eventStack;
hvlpevent_queue.hq_last_event = (char *)eventStack +
(IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
hvlpevent_queue.hq_index = 0;
}
/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
if (eventType < HvLpEvent_Type_NumTypes) {
lpEventHandler[eventType] = handler;
return 0;
}
return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);
int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
might_sleep();
if (eventType < HvLpEvent_Type_NumTypes) {
if (!lpEventHandlerPaths[eventType]) {
lpEventHandler[eventType] = NULL;
/*
* We now sleep until all other CPUs have scheduled.
* This ensures that the deletion is seen by all
* other CPUs, and that the deleted handler isn't
* still running on another CPU when we return.
*/
synchronize_sched();
return 0;
}
}
return 1;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
/*
* lpIndex is the partition index of the target partition.  It is
* needed only for VirtualIo, VirtualLan and SessionMgr; zero
* indicates that our own partition index should be used (which is
* the case for the other types).
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
if ((eventType < HvLpEvent_Type_NumTypes) &&
lpEventHandler[eventType]) {
if (lpIndex == 0)
lpIndex = itLpNaca.xLpIndex;
HvCallEvent_openLpEventPath(lpIndex, eventType);
++lpEventHandlerPaths[eventType];
return 0;
}
return 1;
}
int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
if ((eventType < HvLpEvent_Type_NumTypes) &&
lpEventHandler[eventType] &&
lpEventHandlerPaths[eventType]) {
if (lpIndex == 0)
lpIndex = itLpNaca.xLpIndex;
HvCallEvent_closeLpEventPath(lpIndex, eventType);
--lpEventHandlerPaths[eventType];
return 0;
}
return 1;
}
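Putting the pieces together, a consumer registers a handler for its event type and then opens a path to the partition it wants to talk to; this is the pattern the virtual-LAN and virtual-I/O drivers follow. A hedged sketch only (handler and init names are invented):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <asm/iseries/hv_lp_event.h>

	static void example_veth_handler(struct HvLpEvent *event)
	{
		/* called under hvlpevent_queue.hq_lock; must handle any ACKs itself */
	}

	static int __init example_open_veth_path(void)
	{
		if (HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan,
					      example_veth_handler))
			return -EBUSY;
		/* lpIndex 0 means "use our own partition index" */
		return HvLpEvent_openPath(HvLpEvent_Type_VirtualLan, 0) ? -EIO : 0;
	}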
static int proc_lpevents_show(struct seq_file *m, void *v)
{
int cpu, i;
unsigned long sum;
static unsigned long cpu_totals[NR_CPUS];
/* FIXME: do we care that there's no locking here? */
sum = 0;
for_each_online_cpu(cpu) {
cpu_totals[cpu] = 0;
for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
}
sum += cpu_totals[cpu];
}
seq_printf(m, "LpEventQueue 0\n");
seq_printf(m, " events processed:\t%lu\n", sum);
for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
sum = 0;
for_each_online_cpu(cpu) {
sum += per_cpu(hvlpevent_counts, cpu)[i];
}
seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
}
seq_printf(m, "\n events processed by processor:\n");
for_each_online_cpu(cpu) {
seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
}
return 0;
}
static int proc_lpevents_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_lpevents_show, NULL);
}
static const struct file_operations proc_lpevents_operations = {
.open = proc_lpevents_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init proc_lpevents_init(void)
{
if (!firmware_has_feature(FW_FEATURE_ISERIES))
return 0;
proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
&proc_lpevents_operations);
return 0;
}
__initcall(proc_lpevents_init);
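The counters gathered above are exposed at /proc/iSeries/lpevents (the proc_create() path), so the per-type and per-CPU totals can be read from that file on a running iSeries partition.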
/*
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ISERIES_MAIN_STORE_H
#define _ISERIES_MAIN_STORE_H
/* Main Store Vpd for Condor,iStar,sStar */
struct IoHriMainStoreSegment4 {
u8 msArea0Exists:1;
u8 msArea1Exists:1;
u8 msArea2Exists:1;
u8 msArea3Exists:1;
u8 reserved1:4;
u8 reserved2;
u8 msArea0Functional:1;
u8 msArea1Functional:1;
u8 msArea2Functional:1;
u8 msArea3Functional:1;
u8 reserved3:4;
u8 reserved4;
u32 totalMainStore;
u64 msArea0Ptr;
u64 msArea1Ptr;
u64 msArea2Ptr;
u64 msArea3Ptr;
u32 cardProductionLevel;
u32 msAdrHole;
u8 msArea0HasRiserVpd:1;
u8 msArea1HasRiserVpd:1;
u8 msArea2HasRiserVpd:1;
u8 msArea3HasRiserVpd:1;
u8 reserved5:4;
u8 reserved6;
u16 reserved7;
u8 reserved8[28];
u64 nonInterleavedBlocksStartAdr;
u64 nonInterleavedBlocksEndAdr;
};
/* Main Store VPD for Power4 */
struct __attribute((packed)) IoHriMainStoreChipInfo1 {
u32 chipMfgID;
char chipECLevel[4];
};
struct IoHriMainStoreVpdIdData {
char typeNumber[4];
char modelNumber[4];
char partNumber[12];
char serialNumber[12];
};
struct __attribute((packed)) IoHriMainStoreVpdFruData {
char fruLabel[8];
u8 numberOfSlots;
u8 pluggingType;
u16 slotMapIndex;
};
struct __attribute((packed)) IoHriMainStoreAdrRangeBlock {
void *blockStart;
void *blockEnd;
u32 blockProcChipId;
};
#define MaxAreaAdrRangeBlocks 4
struct __attribute((packed)) IoHriMainStoreArea4 {
u32 msVpdFormat;
u8 containedVpdType;
u8 reserved1;
u16 reserved2;
u64 msExists;
u64 msFunctional;
u32 memorySize;
u32 procNodeId;
u32 numAdrRangeBlocks;
struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks];
struct IoHriMainStoreChipInfo1 chipInfo0;
struct IoHriMainStoreChipInfo1 chipInfo1;
struct IoHriMainStoreChipInfo1 chipInfo2;
struct IoHriMainStoreChipInfo1 chipInfo3;
struct IoHriMainStoreChipInfo1 chipInfo4;
struct IoHriMainStoreChipInfo1 chipInfo5;
struct IoHriMainStoreChipInfo1 chipInfo6;
struct IoHriMainStoreChipInfo1 chipInfo7;
void *msRamAreaArray;
u32 msRamAreaArrayNumEntries;
u32 msRamAreaArrayEntrySize;
u32 numaDimmExists;
u32 numaDimmFunctional;
void *numaDimmArray;
u32 numaDimmArrayNumEntries;
u32 numaDimmArrayEntrySize;
struct IoHriMainStoreVpdIdData idData;
u64 powerData;
u64 cardAssemblyPartNum;
u64 chipSerialNum;
u64 reserved3;
char reserved4[16];
struct IoHriMainStoreVpdFruData fruData;
u8 vpdPortNum;
u8 reserved5;
u8 frameId;
u8 rackUnit;
char asciiKeywordVpd[256];
u32 reserved6;
};
struct IoHriMainStoreSegment5 {
u16 reserved1;
u8 reserved2;
u8 msVpdFormat;
u32 totalMainStore;
u64 maxConfiguredMsAdr;
struct IoHriMainStoreArea4 *msAreaArray;
u32 msAreaArrayNumEntries;
u32 msAreaArrayEntrySize;
u32 msAreaExists;
u32 msAreaFunctional;
u64 reserved3;
};
extern u64 xMsVpd[];
#endif /* _ISERIES_MAIN_STORE_H */
/*
* This file contains miscellaneous low-level functions.
* Copyright (C) 1995-2005 IBM Corp
*
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
.text
/* Handle pending interrupts in interrupt context */
_GLOBAL(iseries_handle_interrupts)
li r0,0x5555
sc
blr
#ifndef _PLATFORMS_ISERIES_NACA_H
#define _PLATFORMS_ISERIES_NACA_H
/*
* c 2001 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/types.h>
struct naca_struct {
/* Kernel only data - undefined for user space */
const void *xItVpdAreas; /* VPD Data 0x00 */
void *xRamDisk; /* iSeries ramdisk 0x08 */
u64 xRamDiskSize; /* In pages 0x10 */
};
extern struct naca_struct naca;
#endif /* _PLATFORMS_ISERIES_NACA_H */
/*
* Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
* Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
*
* Description:
* Architecture- / platform-specific boot-time initialization code for
* the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
* code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek
* <dan@netx4.com>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ISERIES_SETUP_H__
#define __ISERIES_SETUP_H__
extern void *iSeries_early_setup(void);
extern unsigned long iSeries_get_boot_time(void);
extern int iSeries_set_rtc_time(struct rtc_time *tm);
extern void iSeries_get_rtc_time(struct rtc_time *tm);
extern void *build_flat_dt(unsigned long phys_mem_size);
#endif /* __ISERIES_SETUP_H__ */