Commit fd402909 authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/linux-2.5_ppc64
parents 22775da0 95cba9be
......@@ -18,7 +18,8 @@ KERNELLOAD =0xc000000000000000
LINKFLAGS = -T arch/ppc64/vmlinux.lds -Bstatic \
-e $(KERNELLOAD) -Ttext $(KERNELLOAD)
CFLAGS := $(CFLAGS) -fsigned-char -msoft-float -pipe \
-Wno-uninitialized -mminimal-toc -mtraceback=full
-Wno-uninitialized -mminimal-toc -mtraceback=full \
-Wa,-mpower4 -finline-limit-2000
CPP = $(CC) -E $(CFLAGS)
......
......@@ -153,7 +153,7 @@ make_bi_recs(unsigned long addr)
rec = bi_rec_alloc(rec, 2);
rec->tag = BI_MACHTYPE;
rec->data[0] = _MACH_pSeries;
rec->data[0] = PLATFORM_PSERIES;
rec->data[1] = 1;
if ( initrd_size > 0 ) {
......
......@@ -5,7 +5,6 @@
define_bool CONFIG_UID16 n
define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
define_bool CONFIG_GENERIC_BUST_SPINLOCK n
define_bool CONFIG_GENERIC_ISA_DMA y
define_bool CONFIG_HAVE_DEC_LOCK y
......@@ -30,8 +29,6 @@ define_bool CONFIG_PREEMPT n
if [ "$CONFIG_PPC_ISERIES" = "y" ]; then
define_bool CONFIG_MSCHUNKS y
else
bool 'MsChunks Physical to Absolute address translation support' CONFIG_MSCHUNKS
fi
endmenu
......@@ -103,8 +100,12 @@ if [ "$CONFIG_SCSI" != "n" ]; then
fi
endmenu
source drivers/message/fusion/Config.in
source drivers/ieee1394/Config.in
source drivers/message/i2o/Config.in
if [ "$CONFIG_NET" = "y" ]; then
mainmenu_option next_comment
comment 'Network device support'
......@@ -181,6 +182,9 @@ if [ "$CONFIG_VIOCD" = "y" ]; then
fi
source drivers/char/Config.in
source drivers/media/Config.in
source fs/Config.in
mainmenu_option next_comment
......@@ -194,13 +198,21 @@ endmenu
source drivers/usb/Config.in
source net/bluetooth/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
bool 'Include kgdb kernel debugger' CONFIG_KGDB
bool 'Include xmon kernel debugger' CONFIG_XMON
bool 'Include PPCDBG realtime debugging' CONFIG_PPCDBG
bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
bool ' Debug memory allocations' CONFIG_DEBUG_SLAB
bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
bool ' Include xmon kernel debugger' CONFIG_XMON
if [ "$CONFIG_XMON" = "y" ]; then
bool ' Enable xmon by default' CONFIG_XMON_DEFAULT
fi
bool ' Include PPCDBG realtime debugging' CONFIG_PPCDBG
fi
endmenu
source lib/Config.in
......@@ -4,7 +4,6 @@
# CONFIG_UID16 is not set
# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_GENERIC_BUST_SPINLOCK is not set
CONFIG_GENERIC_ISA_DMA=y
CONFIG_HAVE_DEC_LOCK=y
......@@ -38,9 +37,7 @@ CONFIG_PPC64=y
CONFIG_SMP=y
CONFIG_IRQ_ALL_CPUS=y
# CONFIG_HMT is not set
# CONFIG_PPC_EEH is not set
# CONFIG_PREEMPT is not set
# CONFIG_MSCHUNKS is not set
#
# General setup
......@@ -133,6 +130,11 @@ CONFIG_IPV6=m
#
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
#
# Appletalk devices
#
# CONFIG_DEV_APPLETALK is not set
# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
# CONFIG_X25 is not set
......@@ -153,7 +155,6 @@ CONFIG_IPV6=m
# ATA/IDE/MFM/RLL support
#
# CONFIG_IDE is not set
# CONFIG_BLK_DEV_IDE_MODES is not set
# CONFIG_BLK_DEV_HD is not set
#
......@@ -177,6 +178,7 @@ CONFIG_CHR_DEV_SG=y
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_REPORT_LUNS is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
......@@ -232,11 +234,26 @@ CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_MESH is not set
# CONFIG_SCSI_MAC53C94 is not set
#
# Fusion MPT device support
#
# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support (EXPERIMENTAL)
#
# CONFIG_IEEE1394 is not set
#
# I2O device support
#
# CONFIG_I2O is not set
# CONFIG_I2O_PCI is not set
# CONFIG_I2O_BLOCK is not set
# CONFIG_I2O_LAN is not set
# CONFIG_I2O_SCSI is not set
# CONFIG_I2O_PROC is not set
#
# Network device support
#
......@@ -282,12 +299,9 @@ CONFIG_PCNET32=y
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set
# CONFIG_DE2104X is not set
# CONFIG_TULIP is not set
# CONFIG_DE4X5 is not set
# CONFIG_DGRS is not set
# CONFIG_DM9102 is not set
CONFIG_EEPRO100=y
# CONFIG_E100 is not set
# CONFIG_LNE390 is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
......@@ -306,20 +320,21 @@ CONFIG_EEPRO100=y
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_VIA_RHINE_MMIO is not set
# CONFIG_WINBOND_840 is not set
# CONFIG_NET_POCKET is not set
#
# Ethernet (1000 Mbit)
#
CONFIG_ACENIC=y
# CONFIG_ACENIC_OMIT_TIGON_I is not set
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
# CONFIG_E1000 is not set
# CONFIG_MYRI_SBUS is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_SK98LIN is not set
# CONFIG_TIGON3 is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PLIP is not set
......@@ -337,6 +352,7 @@ CONFIG_ACENIC=y
CONFIG_TR=y
CONFIG_IBMOL=y
# CONFIG_IBMLS is not set
# CONFIG_3C359 is not set
# CONFIG_TMS380TR is not set
# CONFIG_NET_FC is not set
# CONFIG_RCPCI is not set
......@@ -347,6 +363,11 @@ CONFIG_IBMOL=y
#
# CONFIG_WAN is not set
#
# "Tulip" family network device support
#
# CONFIG_NET_TULIP is not set
#
# Amateur Radio support
#
......@@ -360,7 +381,7 @@ CONFIG_IBMOL=y
#
# ISDN subsystem
#
# CONFIG_ISDN is not set
# CONFIG_ISDN_BOOL is not set
#
# Old CD-ROM drivers (not SCSI, not IDE)
......@@ -414,6 +435,25 @@ CONFIG_FONT_8x16=y
# CONFIG_FONT_SUN8x16 is not set
# CONFIG_FONT_PEARL_8x8 is not set
# CONFIG_FONT_ACORN_8x8 is not set
#
# Input device support
#
# CONFIG_INPUT is not set
# CONFIG_INPUT_KEYBDEV is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
# CONFIG_INPUT_EMU10K1 is not set
# CONFIG_GAMEPORT_PCIGAME is not set
# CONFIG_GAMEPORT_FM801 is not set
# CONFIG_GAMEPORT_CS461x is not set
# CONFIG_SERIO is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_VIOPATH=y
#
......@@ -460,6 +500,11 @@ CONFIG_PSMOUSE=y
# CONFIG_AGP is not set
# CONFIG_DRM is not set
#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
#
# File systems
#
......@@ -490,6 +535,9 @@ CONFIG_RAMFS=y
CONFIG_ISO9660_FS=y
# CONFIG_JOLIET is not set
# CONFIG_ZISOFS is not set
CONFIG_JFS_FS=y
# CONFIG_JFS_DEBUG is not set
# CONFIG_JFS_STATISTICS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_NTFS_FS is not set
......@@ -520,9 +568,11 @@ CONFIG_NFS_V3=y
# CONFIG_ROOT_NFS is not set
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_TCP is not set
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
CONFIG_SMB_FS=y
# CONFIG_SMB_NLS_DEFAULT is not set
# CONFIG_NCP_FS is not set
......@@ -569,6 +619,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
......@@ -596,119 +647,18 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_USB is not set
#
# USB Host Controller Drivers
#
# CONFIG_USB_EHCI_HCD is not set
# CONFIG_USB_OHCI_HCD is not set
# CONFIG_USB_UHCI is not set
# CONFIG_USB_UHCI_ALT is not set
# CONFIG_USB_OHCI is not set
#
# USB Device Class drivers
#
# CONFIG_USB_AUDIO is not set
# CONFIG_USB_BLUETOOTH is not set
# CONFIG_USB_STORAGE is not set
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_STORAGE_DPCM is not set
# CONFIG_USB_STORAGE_HP8200e is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_JUMPSHOT is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
#
# USB Human Interface Devices (HID)
#
# CONFIG_USB_HID is not set
# CONFIG_USB_HIDDEV is not set
# CONFIG_USB_KBD is not set
# CONFIG_USB_MOUSE is not set
# CONFIG_USB_WACOM is not set
#
# USB Imaging devices
#
# CONFIG_USB_DC2XX is not set
# CONFIG_USB_MDC800 is not set
# CONFIG_USB_SCANNER is not set
# CONFIG_USB_MICROTEK is not set
# CONFIG_USB_HPUSBSCSI is not set
#
# USB Multimedia devices
#
# CONFIG_USB_IBMCAM is not set
# CONFIG_USB_OV511 is not set
# CONFIG_USB_PWC is not set
# CONFIG_USB_SE401 is not set
# CONFIG_USB_STV680 is not set
# CONFIG_USB_VICAM is not set
# CONFIG_USB_DSBR is not set
# CONFIG_USB_DABUSB is not set
# CONFIG_USB_KONICAWC is not set
#
# USB Network adaptors
#
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_CATC is not set
# CONFIG_USB_CDCETHER is not set
# CONFIG_USB_USBNET is not set
#
# USB port drivers
#
# CONFIG_USB_USS720 is not set
#
# USB Serial Converter support
#
# CONFIG_USB_SERIAL is not set
# CONFIG_USB_SERIAL_GENERIC is not set
# CONFIG_USB_SERIAL_BELKIN is not set
# CONFIG_USB_SERIAL_WHITEHEAT is not set
# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
# CONFIG_USB_SERIAL_EMPEG is not set
# CONFIG_USB_SERIAL_FTDI_SIO is not set
# CONFIG_USB_SERIAL_VISOR is not set
# CONFIG_USB_SERIAL_IPAQ is not set
# CONFIG_USB_SERIAL_IR is not set
# CONFIG_USB_SERIAL_EDGEPORT is not set
# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
# CONFIG_USB_SERIAL_KEYSPAN is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
# CONFIG_USB_SERIAL_MCT_U232 is not set
# CONFIG_USB_SERIAL_KLSI is not set
# CONFIG_USB_SERIAL_PL2303 is not set
# CONFIG_USB_SERIAL_CYBERJACK is not set
# CONFIG_USB_SERIAL_XIRCOM is not set
# CONFIG_USB_SERIAL_OMNINET is not set
#
# USB Miscellaneous drivers
# Bluetooth support
#
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_BLUEZ is not set
#
# Kernel hacking
#
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
# CONFIG_KGDB is not set
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
# CONFIG_PPCDBG is not set
#
......
......@@ -32,8 +32,6 @@ obj-$(CONFIG_PCI) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o eeh.o
obj-y += rtasd.o nvram.o
endif
obj-$(CONFIG_KGDB) += ppc-stub.o
obj-$(CONFIG_SMP) += smp.o
obj-y += prom.o lmb.o rtas.o rtas-proc.o chrp_setup.o i8259.o
......
......@@ -56,7 +56,6 @@ show_syscalls_task:
* Handle a system call.
*/
_GLOBAL(DoSyscall)
std r0,THREAD+LAST_SYSCALL(r13)
ld r11,_CCR(r1) /* Clear SO bit in CR */
lis r10,0x1000
andc r11,r11,r10
......
......@@ -84,6 +84,8 @@ int cpu_idle(void)
lpaca = get_paca();
while (1) {
irq_stat[smp_processor_id()].idle_timestamp = jiffies;
if (lpaca->xLpPaca.xSharedProc) {
if (ItLpQueue_isLpIntPending(lpaca->lpQueuePtr))
process_iSeries_events();
......@@ -123,6 +125,7 @@ int cpu_idle(void)
long oldval;
while (1) {
irq_stat[smp_processor_id()].idle_timestamp = jiffies;
oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
if (!oldval) {
......@@ -146,3 +149,8 @@ int cpu_idle(void)
}
#endif /* CONFIG_PPC_ISERIES */
void default_idle(void)
{
barrier();
}
......@@ -3756,6 +3756,7 @@ COMPATIBLE_IOCTL(TCSETSW),
COMPATIBLE_IOCTL(TCSETSF),
COMPATIBLE_IOCTL(TIOCLINUX),
COMPATIBLE_IOCTL(TIOCSTART),
COMPATIBLE_IOCTL(TIOCSTOP),
/* Little t */
COMPATIBLE_IOCTL(TIOCGETD),
COMPATIBLE_IOCTL(TIOCSETD),
......@@ -4336,8 +4337,6 @@ COMPATIBLE_IOCTL(RNDCLEARPOOL),
COMPATIBLE_IOCTL(HCIDEVUP),
COMPATIBLE_IOCTL(HCIDEVDOWN),
COMPATIBLE_IOCTL(HCIDEVRESET),
COMPATIBLE_IOCTL(HCIRESETSTAT),
COMPATIBLE_IOCTL(HCIGETINFO),
COMPATIBLE_IOCTL(HCIGETDEVLIST),
COMPATIBLE_IOCTL(HCISETRAW),
COMPATIBLE_IOCTL(HCISETSCAN),
......
......@@ -75,22 +75,6 @@ irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned long lpEvent_count = 0;
#ifdef CONFIG_XMON
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
#ifdef CONFIG_XMON
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
extern int (*debugger_iabr_match)(struct pt_regs *regs);
extern int (*debugger_dabr_match)(struct pt_regs *regs);
extern void (*debugger_fault_handler)(struct pt_regs *regs);
#endif
/* nasty hack for shared irq's since we need to do kmalloc calls but
* can't very early in the boot when we need to do a request irq.
......@@ -410,6 +394,75 @@ handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
__cli();
}
#ifdef CONFIG_SMP
extern unsigned int irq_affinity [NR_IRQS];
typedef struct {
unsigned long cpu;
unsigned long timestamp;
} ____cacheline_aligned irq_balance_t;
static irq_balance_t irq_balance[NR_IRQS] __cacheline_aligned
= { [ 0 ... NR_IRQS-1 ] = { 1, 0 } };
#define IDLE_ENOUGH(cpu,now) \
(idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > ((HZ/100)+1)))
#define IRQ_ALLOWED(cpu,allowed_mask) \
((1 << cpu) & (allowed_mask))
static unsigned long move(unsigned long curr_cpu, unsigned long allowed_mask,
unsigned long now, int direction)
{
int search_idle = 1;
int cpu = curr_cpu;
goto inside;
do {
if (unlikely(cpu == curr_cpu))
search_idle = 0;
inside:
if (direction == 1) {
cpu++;
if (cpu >= smp_num_cpus)
cpu = 0;
} else {
cpu--;
if (cpu == -1)
cpu = smp_num_cpus-1;
}
} while (!IRQ_ALLOWED(cpu,allowed_mask) ||
(search_idle && !IDLE_ENOUGH(cpu,now)));
return cpu;
}
static inline void balance_irq(int irq)
{
irq_balance_t *entry = irq_balance + irq;
unsigned long now = jiffies;
if (unlikely(entry->timestamp != now)) {
unsigned long allowed_mask;
unsigned long random_number;
if (!irq_desc[irq].handler->set_affinity)
return;
random_number = mftb();
random_number &= 1;
allowed_mask = cpu_online_map & irq_affinity[irq];
entry->timestamp = now;
entry->cpu = move(entry->cpu, allowed_mask, now, random_number);
irq_desc[irq].handler->set_affinity(irq, 1 << entry->cpu);
}
}
#else
#define balance_irq(irq) do { } while (0)
#endif
/*
* Eventually, this should take an array of interrupts and an array size
* so it can dispatch multiple interrupts.
......@@ -421,6 +474,8 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
int cpu = smp_processor_id();
irq_desc_t *desc = irq_desc + irq;
balance_irq(irq);
kstat.irqs[cpu][irq]++;
spin_lock(&desc->lock);
ack_irq(irq);
......
......@@ -265,24 +265,6 @@ _GLOBAL(__flush_dcache_icache)
isync
blr
/*
* Copy a whole page. Assumes a 4096B page size.
*/
_GLOBAL(copy_page)
clrrdi r3,r3,12 /* Page align */
clrrdi r4,r4,12 /* Page align */
li r5,256
mtctr r5
addi r3,r3,-8
addi r4,r4,-8
1: ld r6,8(r4)
ldu r7,16(r4)
std r6,8(r3)
stdu r7,16(r3)
bdnz+ 1b
blr
/*
* I/O string operations
*
......@@ -649,7 +631,7 @@ _GLOBAL(sys_call_table32)
.llong .sys32_init_module
.llong .sys32_delete_module
.llong .sys32_get_kernel_syms /* 130 */
.llong .sys32_quotactl
.llong .sys_quotactl
.llong .sys32_getpgid
.llong .sys_fchdir
.llong .sys32_bdflush
......@@ -740,7 +722,11 @@ _GLOBAL(sys_call_table32)
.llong .sys_lremovexattr
.llong .sys_fremovexattr /* 220 */
.llong .sys_futex
.rept NR_syscalls-221
.llong .sys_ni_syscall /* reserved for tux */
.llong .sys32_sched_setaffinity
.llong .sys32_sched_getaffinity
.rept NR_syscalls-224
.llong .sys_ni_syscall
.endr
#endif
......@@ -969,6 +955,10 @@ _GLOBAL(sys_call_table)
.llong .sys_lremovexattr
.llong .sys_fremovexattr /* 220 */
.llong .sys_futex
.rept NR_syscalls-221
.llong .sys_ni_syscall /* reserved for tux */
.llong .sys_sched_setaffinity
.llong .sys_sched_getaffinity
.rept NR_syscalls-224
.llong .sys_ni_syscall
.endr
......@@ -52,7 +52,6 @@ main(void)
/* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
......
......@@ -359,7 +359,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
/* Invalidate the tlb */
if (!large && local && __is_processor(PV_POWER4)) {
_tlbiel(va, large);
_tlbiel(va);
} else {
spin_lock_irqsave(&pSeries_tlbie_lock, flags);
_tlbie(va, large);
......
......@@ -498,6 +498,8 @@ pcibios_init(void)
}
subsys_initcall(pcibios_init);
int __init
pcibios_assign_all_busses(void)
{
......
/*
* ppc-stub.c: KGDB support for the Linux kernel.
*
* adapted from arch/sparc/kernel/sparc-stub.c for the PowerPC
* some stuff borrowed from Paul Mackerras' xmon
* Copyright (C) 1998 Michael AK Tesch (tesch@cs.wisc.edu)
*
* Modifications to run under Linux
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
* This file originally came from the gdb sources, and the
* copyright notices have been retained below.
*/
/****************************************************************************
THIS SOFTWARE IS NOT COPYRIGHTED
HP offers the following for use in the public domain. HP makes no
warranty with regard to the software or its performance and the
user accepts the software "AS IS" with all faults.
HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
****************************************************************************/
/****************************************************************************
* Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $
*
* Module name: remcom.c $
* Revision: 1.34 $
* Date: 91/03/09 12:29:49 $
* Contributor: Lake Stevens Instrument Division$
*
* Description: low level support for gdb debugger. $
*
* Considerations: only works on target hardware $
*
* Written by: Glenn Engel $
* ModuleState: Experimental $
*
* NOTES: See Below $
*
* Modified for SPARC by Stu Grossman, Cygnus Support.
*
* This code has been extensively tested on the Fujitsu SPARClite demo board.
*
* To enable debugger support, two things need to happen. One, a
* call to set_debug_traps() is necessary in order to allow any breakpoints
* or error conditions to be properly intercepted and reported to gdb.
* Two, a breakpoint needs to be generated to begin communication. This
* is most easily accomplished by a call to breakpoint(). Breakpoint()
* simulates a breakpoint by executing a trap #1.
*
*************
*
* The following gdb commands are supported:
*
* command function Return value
*
* g return the value of the CPU registers hex data or ENN
* G set the value of the CPU registers OK or ENN
* qOffsets Get section offsets. Reply is Text=xxx;Data=yyy;Bss=zzz
*
* mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
* MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
*
* c Resume at current address SNN ( signal NN)
* cAA..AA Continue at address AA..AA SNN
*
* s Step one instruction SNN
* sAA..AA Step one instruction from AA..AA SNN
*
* k kill
*
* ? What was the last sigval ? SNN (signal NN)
*
* bBB..BB Set baud rate to BB..BB OK or BNN, then sets
* baud rate
*
* All commands and responses are sent with a packet which includes a
* checksum. A packet consists of
*
* $<packet info>#<checksum>.
*
* where
* <packet info> :: <characters representing the command or response>
* <checksum> :: <two hex digits computed as modulo 256 sum of <packetinfo>>
*
* When a packet is received, it is first acknowledged with either '+' or '-'.
* '+' indicates a successful transfer. '-' indicates a failed transfer.
*
* Example:
*
* Host: Reply:
* $m0,10#2a +$00010203040506070809101112131415#42
*
****************************************************************************/
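/*
 * Illustrative sketch only -- not part of the original stub or this
 * commit.  It shows the checksum rule described in the comment above
 * ($<packet info>#<checksum>, where the checksum is two hex digits of
 * the modulo-256 sum of the packet body); getpacket()/putpacket() below
 * do the real work against the debug UART.  The helper name is
 * hypothetical and the function is unused.
 */
#if 0
static unsigned char remote_checksum(const unsigned char *body)
{
	unsigned char sum = 0;	/* wraps modulo 256 by virtue of the type */

	while (*body)
		sum += *body++;
	return sum;		/* "m0,10" -> 0x2a, matching the $m0,10#2a example */
}
#endif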
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/signal.h>
#include <asm/kgdb.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
void breakinst(void);
/*
* BUFMAX defines the maximum number of characters in inbound/outbound buffers
* at least NUMREGBYTES*2 are needed for register packets
*/
#define BUFMAX 2048
static char remcomInBuffer[BUFMAX];
static char remcomOutBuffer[BUFMAX];
static int initialized = 0;
static int kgdb_active = 0;
static int kgdb_started = 0;
static u_int fault_jmp_buf[100];
static int kdebug;
static const char hexchars[]="0123456789abcdef";
/* Place where we save old trap entries for restoration - sparc*/
/* struct tt_entry kgdb_savettable[256]; */
/* typedef void (*trapfunc_t)(void); */
#if 0
/* Install an exception handler for kgdb */
static void exceptionHandler(int tnum, unsigned int *tfunc)
{
/* We are dorking with a live trap table, all irqs off */
}
#endif
int
kgdb_setjmp(long *buf)
{
asm ("mflr 0; stw 0,0(%0);"
"stw 1,4(%0); stw 2,8(%0);"
"mfcr 0; stw 0,12(%0);"
"stmw 13,16(%0)"
: : "r" (buf));
/* XXX should save fp regs as well */
return 0;
}
void
kgdb_longjmp(long *buf, int val)
{
if (val == 0)
val = 1;
asm ("lmw 13,16(%0);"
"lwz 0,12(%0); mtcrf 0x38,0;"
"lwz 0,0(%0); lwz 1,4(%0); lwz 2,8(%0);"
"mtlr 0; mr 3,%1"
: : "r" (buf), "r" (val));
}
/* Convert ch from a hex digit to an int */
static int
hex(unsigned char ch)
{
if (ch >= 'a' && ch <= 'f')
return ch-'a'+10;
if (ch >= '0' && ch <= '9')
return ch-'0';
if (ch >= 'A' && ch <= 'F')
return ch-'A'+10;
return -1;
}
/* Convert the memory pointed to by mem into hex, placing result in buf.
* Return a pointer to the last char put in buf (null), in case of mem fault,
* return 0.
*/
static unsigned char *
mem2hex(char *mem, char *buf, int count)
{
unsigned char ch;
if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
debugger_fault_handler = kgdb_fault_handler;
while (count-- > 0) {
ch = *mem++;
*buf++ = hexchars[ch >> 4];
*buf++ = hexchars[ch & 0xf];
}
} else {
/* error condition */
}
debugger_fault_handler = 0;
*buf = 0;
return buf;
}
/* convert the hex array pointed to by buf into binary to be placed in mem
* return a pointer to the character AFTER the last byte written.
*/
static char *
hex2mem(char *buf, char *mem, int count)
{
int i;
unsigned char ch;
if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
debugger_fault_handler = kgdb_fault_handler;
for (i=0; i<count; i++) {
ch = hex(*buf++) << 4;
ch |= hex(*buf++);
*mem++ = ch;
}
flush_icache_range((int)mem, (int)mem+count);
} else {
/* error condition */
}
debugger_fault_handler = 0;
return mem;
}
/*
* While we find nice hex chars, build an int.
* Return number of chars processed.
*/
static int
hexToInt(char **ptr, int *intValue)
{
int numChars = 0;
int hexValue;
*intValue = 0;
if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
debugger_fault_handler = kgdb_fault_handler;
while (**ptr) {
hexValue = hex(**ptr);
if (hexValue < 0)
break;
*intValue = (*intValue << 4) | hexValue;
numChars ++;
(*ptr)++;
}
} else {
/* error condition */
}
debugger_fault_handler = 0;
return (numChars);
}
/* scan for the sequence $<data>#<checksum> */
static void
getpacket(char *buffer)
{
unsigned char checksum;
unsigned char xmitcsum;
int i;
int count;
unsigned char ch;
do {
/* wait around for the start character, ignore all other
* characters */
while ((ch = (getDebugChar() & 0x7f)) != '$') ;
checksum = 0;
xmitcsum = -1;
count = 0;
/* now, read until a # or end of buffer is found */
while (count < BUFMAX) {
ch = getDebugChar() & 0x7f;
if (ch == '#')
break;
checksum = checksum + ch;
buffer[count] = ch;
count = count + 1;
}
if (count >= BUFMAX)
continue;
buffer[count] = 0;
if (ch == '#') {
xmitcsum = hex(getDebugChar() & 0x7f) << 4;
xmitcsum |= hex(getDebugChar() & 0x7f);
if (checksum != xmitcsum)
putDebugChar('-'); /* failed checksum */
else {
putDebugChar('+'); /* successful transfer */
/* if a sequence char is present, reply the ID */
if (buffer[2] == ':') {
putDebugChar(buffer[0]);
putDebugChar(buffer[1]);
/* remove sequence chars from buffer */
count = strlen(buffer);
for (i=3; i <= count; i++)
buffer[i-3] = buffer[i];
}
}
}
} while (checksum != xmitcsum);
}
/* send the packet in buffer. */
static void putpacket(unsigned char *buffer)
{
unsigned char checksum;
int count;
unsigned char ch, recv;
/* $<packet info>#<checksum>. */
do {
putDebugChar('$');
checksum = 0;
count = 0;
while ((ch = buffer[count])) {
putDebugChar(ch);
checksum += ch;
count += 1;
}
putDebugChar('#');
putDebugChar(hexchars[checksum >> 4]);
putDebugChar(hexchars[checksum & 0xf]);
recv = getDebugChar();
} while ((recv & 0x7f) != '+');
}
static void kgdb_flush_cache_all(void)
{
flush_instruction_cache();
}
/* Set up exception handlers for tracing and breakpoints
* [could be called kgdb_init()]
*/
void set_debug_traps(void)
{
#if 0
unsigned char c;
save_and_cli(flags);
/* In case GDB is started before us, ack any packets (presumably
* "$?#xx") sitting there.
*
* I've found this code causes more problems than it solves,
* so that's why it's commented out. GDB seems to work fine
* now starting either before or after the kernel -bwb
*/
while((c = getDebugChar()) != '$');
while((c = getDebugChar()) != '#');
c = getDebugChar(); /* eat first csum byte */
c = getDebugChar(); /* eat second csum byte */
putDebugChar('+'); /* ack it */
#endif
debugger = kgdb;
debugger_bpt = kgdb_bpt;
debugger_sstep = kgdb_sstep;
debugger_iabr_match = kgdb_iabr_match;
debugger_dabr_match = kgdb_dabr_match;
initialized = 1;
}
static void kgdb_fault_handler(struct pt_regs *regs)
{
kgdb_longjmp((long*)fault_jmp_buf, 1);
}
int kgdb_bpt(struct pt_regs *regs)
{
handle_exception(regs);
return 1;
}
int kgdb_sstep(struct pt_regs *regs)
{
handle_exception(regs);
return 1;
}
void kgdb(struct pt_regs *regs)
{
handle_exception(regs);
}
int kgdb_iabr_match(struct pt_regs *regs)
{
printk("kgdb doesn't support iabr, what?!?\n");
handle_exception(regs);
return 1;
}
int kgdb_dabr_match(struct pt_regs *regs)
{
printk("kgdb doesn't support dabr, what?!?\n");
handle_exception(regs);
return 1;
}
/* Convert the SPARC hardware trap type code to a unix signal number. */
/*
* This table contains the mapping between PowerPC hardware trap types, and
* signals, which are primarily what GDB understands.
*/
static struct hard_trap_info
{
unsigned int tt; /* Trap type code for powerpc */
unsigned char signo; /* Signal that we map this trap into */
} hard_trap_info[] = {
{ 0x200, SIGSEGV }, /* machine check */
{ 0x300, SIGSEGV }, /* address error (store) */
{ 0x400, SIGBUS }, /* instruction bus error */
{ 0x500, SIGINT }, /* interrupt */
{ 0x600, SIGBUS }, /* alignment */
{ 0x700, SIGTRAP }, /* breakpoint trap */
{ 0x800, SIGFPE }, /* fpu unavail */
{ 0x900, SIGALRM }, /* decrementer */
{ 0xa00, SIGILL }, /* reserved */
{ 0xb00, SIGILL }, /* reserved */
{ 0xc00, SIGCHLD }, /* syscall */
{ 0xd00, SIGTRAP }, /* single-step/watch */
{ 0xe00, SIGFPE }, /* fp assist */
{ 0, 0} /* Must be last */
};
static int computeSignal(unsigned int tt)
{
struct hard_trap_info *ht;
for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
if (ht->tt == tt)
return ht->signo;
return SIGHUP; /* default for things we don't know about */
}
#define PC_REGNUM 64
#define SP_REGNUM 1
/*
* This function does all command processing for interfacing to gdb.
*/
static void
handle_exception (struct pt_regs *regs)
{
int sigval;
int addr;
int length;
char *ptr;
unsigned long msr;
if (debugger_fault_handler) {
debugger_fault_handler(regs);
panic("kgdb longjump failed!\n");
}
if (kgdb_active) {
printk("interrupt while in kgdb, returning\n");
return;
}
kgdb_active = 1;
kgdb_started = 1;
#ifdef KGDB_DEBUG
printk("kgdb: entering handle_exception; trap [0x%x]\n",
(unsigned int)regs->trap);
#endif
kgdb_interruptible(0);
lock_kernel();
msr = get_msr();
set_msr(msr & ~MSR_EE); /* disable interrupts */
if (regs->nip == (unsigned long)breakinst) {
/* Skip over breakpoint trap insn */
regs->nip += 4;
}
/* reply to host that an exception has occurred */
sigval = computeSignal(regs->trap);
ptr = remcomOutBuffer;
#if 0
*ptr++ = 'S';
*ptr++ = hexchars[sigval >> 4];
*ptr++ = hexchars[sigval & 0xf];
#else
*ptr++ = 'T';
*ptr++ = hexchars[sigval >> 4];
*ptr++ = hexchars[sigval & 0xf];
*ptr++ = hexchars[PC_REGNUM >> 4];
*ptr++ = hexchars[PC_REGNUM & 0xf];
*ptr++ = ':';
ptr = mem2hex((char *)&regs->nip, ptr, 4);
*ptr++ = ';';
*ptr++ = hexchars[SP_REGNUM >> 4];
*ptr++ = hexchars[SP_REGNUM & 0xf];
*ptr++ = ':';
ptr = mem2hex(((char *)&regs) + SP_REGNUM*4, ptr, 4);
*ptr++ = ';';
#endif
*ptr++ = 0;
putpacket(remcomOutBuffer);
/* XXX We may want to add some features dealing with poking the
* XXX page tables, ... (look at sparc-stub.c for more info)
* XXX also required hacking to the gdb sources directly...
*/
while (1) {
remcomOutBuffer[0] = 0;
getpacket(remcomInBuffer);
switch (remcomInBuffer[0]) {
case '?': /* report most recent signal */
remcomOutBuffer[0] = 'S';
remcomOutBuffer[1] = hexchars[sigval >> 4];
remcomOutBuffer[2] = hexchars[sigval & 0xf];
remcomOutBuffer[3] = 0;
break;
#if 0
case 'q': /* this screws up gdb for some reason...*/
{
extern long _start, sdata, __bss_start;
ptr = &remcomInBuffer[1];
if (strncmp(ptr, "Offsets", 7) != 0)
break;
ptr = remcomOutBuffer;
sprintf(ptr, "Text=%8.8x;Data=%8.8x;Bss=%8.8x",
&_start, &sdata, &__bss_start);
break;
}
#endif
case 'd':
/* toggle debug flag */
kdebug ^= 1;
break;
case 'g': /* return the value of the CPU registers.
* some of them are non-PowerPC names :(
* they are stored in gdb like:
* struct {
* u32 gpr[32];
* f64 fpr[32];
* u32 pc, ps, cnd, lr; (ps=msr)
* u32 cnt, xer, mq;
* }
*/
{
int i;
ptr = remcomOutBuffer;
/* General Purpose Regs */
ptr = mem2hex((char *)regs, ptr, 32 * 4);
/* Floating Point Regs - FIXME */
/*ptr = mem2hex((char *), ptr, 32 * 8);*/
for(i=0; i<(32*8*2); i++) { /* 2chars/byte */
ptr[i] = '0';
}
ptr += 32*8*2;
/* pc, msr, cr, lr, ctr, xer, (mq is unused) */
ptr = mem2hex((char *)&regs->nip, ptr, 4);
ptr = mem2hex((char *)&regs->msr, ptr, 4);
ptr = mem2hex((char *)&regs->ccr, ptr, 4);
ptr = mem2hex((char *)&regs->link, ptr, 4);
ptr = mem2hex((char *)&regs->ctr, ptr, 4);
ptr = mem2hex((char *)&regs->xer, ptr, 4);
}
break;
case 'G': /* set the value of the CPU registers */
{
ptr = &remcomInBuffer[1];
/*
* If the stack pointer has moved, you should pray.
* (cause only god can help you).
*/
/* General Purpose Regs */
hex2mem(ptr, (char *)regs, 32 * 4);
/* Floating Point Regs - FIXME?? */
/*ptr = hex2mem(ptr, ??, 32 * 8);*/
ptr += 32*8*2;
/* pc, msr, cr, lr, ctr, xer, (mq is unused) */
ptr = hex2mem(ptr, (char *)&regs->nip, 4);
ptr = hex2mem(ptr, (char *)&regs->msr, 4);
ptr = hex2mem(ptr, (char *)&regs->ccr, 4);
ptr = hex2mem(ptr, (char *)&regs->link, 4);
ptr = hex2mem(ptr, (char *)&regs->ctr, 4);
ptr = hex2mem(ptr, (char *)&regs->xer, 4);
strcpy(remcomOutBuffer,"OK");
}
break;
case 'H':
/* don't do anything, yet, just acknowledge */
hexToInt(&ptr, &addr);
strcpy(remcomOutBuffer,"OK");
break;
case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
/* Try to read %x,%x. */
ptr = &remcomInBuffer[1];
if (hexToInt(&ptr, &addr)
&& *ptr++ == ','
&& hexToInt(&ptr, &length)) {
if (mem2hex((char *)addr, remcomOutBuffer,length))
break;
strcpy (remcomOutBuffer, "E03");
} else {
strcpy(remcomOutBuffer,"E01");
}
break;
case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
/* Try to read '%x,%x:'. */
ptr = &remcomInBuffer[1];
if (hexToInt(&ptr, &addr)
&& *ptr++ == ','
&& hexToInt(&ptr, &length)
&& *ptr++ == ':') {
if (hex2mem(ptr, (char *)addr, length)) {
strcpy(remcomOutBuffer, "OK");
} else {
strcpy(remcomOutBuffer, "E03");
}
flush_icache_range(addr, addr+length);
} else {
strcpy(remcomOutBuffer, "E02");
}
break;
case 'k': /* kill the program, actually just continue */
case 'c': /* cAA..AA Continue; address AA..AA optional */
/* try to read optional parameter, pc unchanged if no parm */
ptr = &remcomInBuffer[1];
if (hexToInt(&ptr, &addr)) {
regs->nip = addr;
}
/* Need to flush the instruction cache here, as we may have deposited a
* breakpoint, and the icache probably has no way of knowing that a data ref to
* some location may have changed something that is in the instruction cache.
*/
kgdb_flush_cache_all();
set_msr(msr);
kgdb_interruptible(1);
unlock_kernel();
kgdb_active = 0;
return;
case 's':
kgdb_flush_cache_all();
regs->msr |= MSR_SE;
#if 0
set_msr(msr | MSR_SE);
#endif
unlock_kernel();
kgdb_active = 0;
return;
case 'r': /* Reset (if user process..exit ???)*/
panic("kgdb reset.");
break;
} /* switch */
if (remcomOutBuffer[0] && kdebug) {
printk("remcomInBuffer: %s\n", remcomInBuffer);
printk("remcomOutBuffer: %s\n", remcomOutBuffer);
}
/* reply to the request */
putpacket(remcomOutBuffer);
} /* while(1) */
}
/* This function will generate a breakpoint exception. It is used at the
beginning of a program to sync up with a debugger and can be used
otherwise as a quick means to stop program execution and "break" into
the debugger. */
void
breakpoint(void)
{
if (!initialized) {
printk("breakpoint() called b4 kgdb init\n");
return;
}
asm(" .globl breakinst
breakinst: .long 0x7d821008
");
}
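/*
 * Illustrative usage sketch only -- not part of this commit.  As the
 * header comment above says, enabling the stub means installing the
 * trap hooks with set_debug_traps() and then forcing the first trap
 * with breakpoint() so gdb can attach.  The caller name below is
 * hypothetical; platform setup code would do something equivalent.
 */
#if 0
static void example_kgdb_bringup(void)
{
	set_debug_traps();	/* hook debugger_bpt, debugger_sstep, ... */
	breakpoint();		/* trap into the stub and wait for gdb */
}
#endif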
/* Output string in GDB O-packet format if GDB has connected. If nothing
output, returns 0 (caller must then handle output). */
int
kgdb_output_string (const char* s, unsigned int count)
{
char buffer[512];
if (!kgdb_started)
return 0;
count = (count <= (sizeof(buffer) / 2 - 2))
? count : (sizeof(buffer) / 2 - 2);
buffer[0] = 'O';
mem2hex (s, &buffer[1], count);
putpacket(buffer);
return 1;
}
......@@ -257,7 +257,7 @@ EXPORT_SYMBOL(console_drivers);
EXPORT_SYMBOL(xmon);
#endif
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
#ifdef CONFIG_DEBUG_KERNEL
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
......
......@@ -117,7 +117,6 @@ void show_regs(struct pt_regs * regs)
regs->msr&MSR_DR ? 1 : 0);
printk("TASK = %p[%d] '%s' ",
current, current->pid, current->comm);
printk("Last syscall: %ld ", current->thread.last_syscall);
printk("\nlast math %p ", last_task_used_math);
#ifdef CONFIG_SMP
......@@ -223,8 +222,6 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
memcpy(&p->thread.fpr, &current->thread.fpr, sizeof(p->thread.fpr));
p->thread.fpscr = current->thread.fpscr;
p->thread.last_syscall = -1;
return 0;
}
......
......@@ -878,7 +878,7 @@ prom_initialize_tce_table(void)
phandle node;
ihandle phb_node;
unsigned long offset = reloc_offset();
char compatible[64], path[64], type[64];
char compatible[64], path[64], type[64], model[64];
unsigned long i, table = 0;
unsigned long base, vbase, align;
unsigned int minalign, minsize;
......@@ -893,16 +893,25 @@ prom_initialize_tce_table(void)
for (node = 0; prom_next_node(&node); ) {
compatible[0] = 0;
type[0] = 0;
model[0] = 0;
call_prom(RELOC("getprop"), 4, 1, node, RELOC("compatible"),
compatible, sizeof(compatible));
call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
type, sizeof(type));
call_prom(RELOC("getprop"), 4, 1, node, RELOC("model"),
model, sizeof(model));
if ((compatible[0] == 0) ||
((strstr(compatible, RELOC("python")) == NULL) &&
(strstr(compatible, RELOC("Speedwagon")) == NULL))) {
/* Keep the old logic intact to avoid regression. */
if (compatible[0] != 0) {
if((strstr(compatible, RELOC("python")) == NULL) &&
(strstr(compatible, RELOC("Speedwagon")) == NULL))
continue;
} else if (model[0] != 0) {
if ((strstr(model, RELOC("ython")) == NULL) &&
(strstr(model, RELOC("peedwagon")) == NULL))
continue;
}
if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL)) {
continue;
}
......
......@@ -130,6 +130,14 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
/* This should be fixed properly in kernel/resource.c */
iomem_resource.end = MEM_SPACE_LIMIT;
#ifdef CONFIG_XMON_DEFAULT
debugger = xmon;
debugger_bpt = xmon_bpt;
debugger_sstep = xmon_sstep;
debugger_iabr_match = xmon_iabr_match;
debugger_dabr_match = xmon_dabr_match;
#endif
/* pSeries systems are identified in prom.c via OF. */
if ( itLpNaca.xLparInstalled == 1 )
naca->platform = PLATFORM_ISERIES_LPAR;
......@@ -507,11 +515,6 @@ void __init setup_arch(char **cmdline_p)
ppc_md.progress("setup_arch:enter", 0x3eab);
#if defined(CONFIG_KGDB)
kgdb_map_scc();
set_debug_traps();
breakpoint();
#endif
/*
* Set cache line size based on type of cpu as a default.
* Systems with OF can look in the properties on the cpu node(s)
......@@ -543,26 +546,6 @@ void __init setup_arch(char **cmdline_p)
ppc_md.progress("setup_arch: exit", 0x3eab);
}
void exception_trace(unsigned long trap)
{
unsigned long x, srr0, srr1, reg20, reg1, reg21;
asm("mflr %0" : "=r" (x) :);
asm("mfspr %0,0x1a" : "=r" (srr0) :);
asm("mfspr %0,0x1b" : "=r" (srr1) :);
asm("mr %0,1" : "=r" (reg1) :);
asm("mr %0,20" : "=r" (reg20) :);
asm("mr %0,21" : "=r" (reg21) :);
udbg_puts("\n");
udbg_puts("Took an exception : "); udbg_puthex(x); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(reg1); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(reg20); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(reg21); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(srr0); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(srr1); udbg_puts("\n");
}
int set_spread_lpevents( char * str )
{
/* The parameter is the number of processors to share in processing lp events */
......
/*
* linux/arch/ppc64/kernel/signal.c
*
*
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
......@@ -73,11 +71,6 @@ long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
{
sigset_t saveset;
PPCDBG(PPCDBG_SYS64X, "sys_sigsuspend - running - pid=%ld current=%lx comm=%s \n",
current->pid, current, current->comm);
mask &= _BLOCKABLE;
spin_lock_irq(&current->sigmask_lock);
saveset = current->blocked;
......@@ -107,10 +100,6 @@ long sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4, int
{
sigset_t saveset, newset;
PPCDBG(PPCDBG_SYS64X, "sys_rt_sigsuspend - running - pid=%ld current=%lx comm=%s \n",
current->pid, current, current->comm);
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
......@@ -136,13 +125,9 @@ long sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4, int
asmlinkage long sys_sigaltstack(const stack_t *uss, stack_t *uoss)
long sys_sigaltstack(const stack_t *uss, stack_t *uoss)
{
struct pt_regs *regs = (struct pt_regs *) &uss;
PPCDBG(PPCDBG_SYS64X, "sys_sigaltstack - running - pid=%ld current=%lx comm=%s \n",
current->pid, current, current->comm);
return do_sigaltstack(uss, uoss, regs->gpr[1]);
}
......@@ -152,11 +137,6 @@ long sys_sigaction(int sig, const struct old_sigaction *act,
struct k_sigaction new_ka, old_ka;
int ret;
PPCDBG(PPCDBG_SYS64X, "sys_sigaction - running - pid=%ld current=%lx comm=%s \n",
current->pid, current, current->comm);
if (act) {
old_sigset_t mask;
if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
......@@ -168,7 +148,7 @@ long sys_sigaction(int sig, const struct old_sigaction *act,
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));
if (!ret && oact) {
if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
......@@ -179,9 +159,6 @@ long sys_sigaction(int sig, const struct old_sigaction *act,
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
}
......@@ -189,13 +166,11 @@ long sys_sigaction(int sig, const struct old_sigaction *act,
* When we have signals to deliver, we set up on the
* user stack, going down from the original stack pointer:
* a sigregs struct
* one or more sigcontext structs
* one or more sigcontext structs with
* a gap of __SIGNAL_FRAMESIZE bytes
*
* Each of these things must be a multiple of 16 bytes in size.
*
* XXX ultimately we will have to stack up a siginfo and ucontext
* for each rt signal.
*/
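/*
 * Illustrative sketch only -- not part of this commit.  The layout
 * described above corresponds to the stack-pointer arithmetic done by
 * do_signal(), handle_signal() and setup_frame() later in this file:
 *
 *	newsp  = regs->gpr[1];				user stack pointer
 *	newsp -= sizeof(struct sigregs);		register save area
 *	newsp -= sizeof(struct sigcontext_struct);	one per signal
 *	newsp -= __SIGNAL_FRAMESIZE;			ABI back-chain gap
 *
 * with each of those sizes being a multiple of 16 bytes, as noted.
 */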
struct sigregs {
elf_gregset_t gp_regs;
......@@ -206,8 +181,6 @@ struct sigregs {
int abigap[72];
};
struct rt_sigframe
{
unsigned long _unused[2];
......@@ -236,11 +209,9 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
struct rt_sigframe *rt_sf;
struct sigcontext_struct sigctx;
struct sigregs *sr;
int ret;
elf_gregset_t saved_regs; /* an array of ELF_NGREG unsigned longs */
sigset_t set;
stack_t st;
unsigned long prevsp;
rt_sf = (struct rt_sigframe *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx))
......@@ -252,19 +223,16 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
if (regs->msr & MSR_FP)
giveup_fpu(current);
rt_sf++; /* Look at next rt_sigframe */
if (rt_sf == (struct rt_sigframe *)(sigctx.regs)) {
/* Last stacked signal - restore registers -
/* restore registers -
* sigctx is initialized to point to the
* preamble frame (where registers are stored)
* see handle_signal()
*/
sr = (struct sigregs *) sigctx.regs;
if (regs->msr & MSR_FP )
giveup_fpu(current);
if (copy_from_user(saved_regs, &sr->gp_regs,
sizeof(sr->gp_regs)))
if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
goto badframe;
saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
| (saved_regs[PT_MSR] & MSR_USERCHANGE);
......@@ -277,28 +245,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
the current task structure. */
sys_sigaltstack(&st, NULL);
ret = regs->result;
} else {
/* More signals to go */
/* Set up registers for next signal handler */
regs->gpr[1] = (unsigned long)rt_sf - __SIGNAL_FRAMESIZE;
if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx)))
goto badframe;
sr = (struct sigregs *) sigctx.regs;
regs->gpr[3] = ret = sigctx.signal;
/* Get the siginfo */
get_user(regs->gpr[4], (unsigned long *)&rt_sf->pinfo);
/* Get the ucontext */
get_user(regs->gpr[5], (unsigned long *)&rt_sf->puc);
regs->gpr[6] = (unsigned long) rt_sf;
regs->link = (unsigned long) &sr->tramp;
regs->nip = sigctx.handler;
if (get_user(prevsp, &sr->gp_regs[PT_R1])
|| put_user(prevsp, (unsigned long *) regs->gpr[1]))
goto badframe;
}
return ret;
return regs->result;
badframe:
do_exit(SIGSEGV);
......@@ -335,6 +282,7 @@ setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
goto badframe;
flush_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[2]);
current->thread.fpscr = 0; /* turn off all fp exceptions */
/* Retrieve rt_sigframe from stack and
set up registers for signal handler
......@@ -359,7 +307,6 @@ setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
regs->gpr[6] = (unsigned long) rt_sf;
regs->link = (unsigned long) frame->tramp;
return;
badframe:
......@@ -379,10 +326,8 @@ long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
{
struct sigcontext_struct *sc, sigctx;
struct sigregs *sr;
long ret;
elf_gregset_t saved_regs; /* an array of ELF_NGREG unsigned longs */
sigset_t set;
unsigned long prevsp;
sc = (struct sigcontext_struct *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
......@@ -397,15 +342,12 @@ long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
sc++; /* Look at next sigcontext */
if (sc == (struct sigcontext_struct *)(sigctx.regs)) {
/* Last stacked signal - restore registers */
sr = (struct sigregs *) sigctx.regs;
if (regs->msr & MSR_FP )
giveup_fpu(current);
if (copy_from_user(saved_regs, &sr->gp_regs,
sizeof(sr->gp_regs)))
/* restore registers */
sr = (struct sigregs *) sigctx.regs;
if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
goto badframe;
saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
| (saved_regs[PT_MSR] & MSR_USERCHANGE);
......@@ -416,24 +358,7 @@ long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
sizeof(sr->fp_regs)))
goto badframe;
ret = regs->result;
} else {
/* More signals to go */
regs->gpr[1] = (unsigned long)sc - __SIGNAL_FRAMESIZE;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
sr = (struct sigregs *) sigctx.regs;
regs->gpr[3] = ret = sigctx.signal;
regs->gpr[4] = (unsigned long) sc;
regs->link = (unsigned long) &sr->tramp;
regs->nip = sigctx.handler;
if (get_user(prevsp, &sr->gp_regs[PT_R1])
|| put_user(prevsp, (unsigned long *) regs->gpr[1]))
goto badframe;
}
return ret;
return regs->result;
badframe:
do_exit(SIGSEGV);
......@@ -474,6 +399,7 @@ setup_frame(struct pt_regs *regs, struct sigregs *frame,
goto badframe;
flush_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[2]);
current->thread.fpscr = 0; /* turn off all fp exceptions */
newsp -= __SIGNAL_FRAMESIZE;
if ( get_user(temp_ptr, &sc->handler))
......@@ -490,14 +416,9 @@ setup_frame(struct pt_regs *regs, struct sigregs *frame,
regs->gpr[4] = (unsigned long) sc;
regs->link = (unsigned long) frame->tramp;
PPCDBG(PPCDBG_SIGNAL, "setup_frame - returning - regs->gpr[1]=%lx, regs->gpr[4]=%lx, regs->link=%lx \n",
regs->gpr[1], regs->gpr[4], regs->link);
return;
badframe:
PPCDBG(PPCDBG_SIGNAL, "setup_frame - badframe in setup_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); PPCDBG_ENTER_DEBUGGER();
badframe:
#if DEBUG_SIG
printk("badframe in setup_frame, regs=%p frame=%p newsp=%lx\n",
regs, frame, newsp);
......@@ -521,8 +442,8 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
((int)regs->result == -ERESTARTSYS &&
!(ka->sa.sa_flags & SA_RESTART))))
regs->result = -EINTR;
/* Set up Signal Frame */
/* Set up Signal Frame */
if (ka->sa.sa_flags & SA_SIGINFO) {
/* Put a Real Time Context onto stack */
*newspp -= sizeof(*rt_sf);
......@@ -530,12 +451,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
if (verify_area(VERIFY_WRITE, rt_sf, sizeof(*rt_sf)))
goto badframe;
if (__put_user((unsigned long) ka->sa.sa_handler, &rt_sf->uc.uc_mcontext.handler)
|| __put_user(&rt_sf->info, &rt_sf->pinfo)
|| __put_user(&rt_sf->uc, &rt_sf->puc)
/* Put the siginfo */
|| __copy_to_user(&rt_sf->info, info, sizeof(*info))
|| copy_siginfo_to_user(&rt_sf->info, info)
/* Create the ucontext */
|| __put_user(0, &rt_sf->uc.uc_flags)
|| __put_user(0, &rt_sf->uc.uc_link)
......@@ -548,9 +468,8 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
|| __put_user((struct pt_regs *)frame, &rt_sf->uc.uc_mcontext.regs)
|| __put_user(sig, &rt_sf->uc.uc_mcontext.signal))
goto badframe;
} else {
/* Put another sigcontext on the stack */
/* Put a sigcontext on the stack */
*newspp -= sizeof(*sc);
sc = (struct sigcontext_struct *) *newspp;
if (verify_area(VERIFY_WRITE, sc, sizeof(*sc)))
......@@ -623,34 +542,24 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
newsp = frame = newsp - sizeof(struct sigregs);
/* Whee! Actually deliver the signal. */
PPCDBG(PPCDBG_SIGNAL, "do_signal - GOING TO RUN SIGNAL HANDLER - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
handle_signal(signr, &info, oldset, regs, &newsp, frame);
PPCDBG(PPCDBG_SIGNAL, "do_signal - after running signal handler - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
}
if (regs->trap == 0x0C00 /* System Call! */ &&
((int)regs->result == -ERESTARTNOHAND ||
(int)regs->result == -ERESTARTSYS ||
(int)regs->result == -ERESTARTNOINTR)) {
PPCDBG(PPCDBG_SIGNAL, "do_signal - going to back up & retry system call \n");
regs->gpr[3] = regs->orig_gpr3;
regs->nip -= 4; /* Back up & retry system call */
regs->result = 0;
}
if (newsp == frame)
{
PPCDBG(PPCDBG_SIGNAL, "do_signal - returning w/ no signal delivered \n");
return 0; /* no signals delivered */
}
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame(regs, (struct sigregs *) frame, newsp);
else
setup_frame(regs, (struct sigregs *) frame, newsp);
PPCDBG(PPCDBG_SIGNAL, "do_signal - returning a signal was delivered \n");
return 1;
}
......@@ -136,8 +136,6 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 *act, struct old
struct k_sigaction new_ka, old_ka;
int ret;
PPCDBG(PPCDBG_SYS32, "sys32_sigaction - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
if (sig < 0)
{
sig = -sig;
......@@ -153,12 +151,11 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 *act, struct old
ret |= __get_user(mask, &act->sa_mask);
if (ret)
return ret;
PPCDBG(PPCDBG_SIGNAL, "sys32_sigaction flags =%lx \n", new_ka.sa.sa_flags);
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));
if (!ret && oact)
{
......@@ -168,9 +165,6 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 *act, struct old
ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
PPCDBG(PPCDBG_SYS32, "sys32_sigaction - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
return ret;
}
......@@ -185,15 +179,11 @@ asmlinkage long sys32_sigpending(old_sigset_t32 *set)
int ret;
mm_segment_t old_fs = get_fs();
PPCDBG(PPCDBG_SYS32, "sys32_sigpending - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
set_fs (KERNEL_DS);
ret = sys_sigpending(&s);
set_fs (old_fs);
if (put_user (s, set)) return -EFAULT;
PPCDBG(PPCDBG_SYS32, "sys32_sigpending - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
return ret;
}
......@@ -213,8 +203,6 @@ asmlinkage long sys32_sigprocmask(u32 how, old_sigset_t32 *set, old_sigset_t32 *
int ret;
mm_segment_t old_fs = get_fs();
PPCDBG(PPCDBG_SYS32, "sys32_sigprocmask - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
if (set && get_user (s, set)) return -EFAULT;
set_fs (KERNEL_DS);
ret = sys_sigprocmask((int)how, set ? &s : NULL, oset ? &s : NULL);
......@@ -222,8 +210,6 @@ asmlinkage long sys32_sigprocmask(u32 how, old_sigset_t32 *set, old_sigset_t32 *
if (ret) return ret;
if (oset && put_user (s, oset)) return -EFAULT;
PPCDBG(PPCDBG_SYS32, "sys32_sigprocmask - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
return 0;
}
......@@ -253,9 +239,6 @@ long sys32_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
int ret;
elf_gregset_t32 saved_regs; /* an array of ELF_NGREG unsigned ints (32 bits) */
sigset_t set;
unsigned int prevsp;
PPCDBG(PPCDBG_SIGNAL, "sys32_sigreturn - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
sc = (struct sigcontext32_struct *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
......@@ -270,11 +253,6 @@ long sys32_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
sc++; /* Look at next sigcontext */
/* If the next sigcontext is actually the sigregs (frame) */
/* - then no more sigcontexts on the user stack */
if (sc == (struct sigcontext32_struct*)(u64)sigctx.regs)
{
/* Last stacked signal - restore registers */
sr = (struct sigregs32*)(u64)sigctx.regs;
if (regs->msr & MSR_FP )
......@@ -365,27 +343,9 @@ long sys32_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
goto badframe;
ret = regs->result;
} else {
/* More signals to go */
regs->gpr[1] = (unsigned long)sc - __SIGNAL_FRAMESIZE32;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
sr = (struct sigregs32*)(u64)sigctx.regs;
regs->gpr[3] = ret = sigctx.signal;
regs->gpr[4] = (unsigned long) sc;
regs->link = (unsigned long) &sr->tramp;
regs->nip = sigctx.handler;
if (get_user(prevsp, &sr->gp_regs[PT_R1])
|| put_user(prevsp, (unsigned int*) regs->gpr[1]))
goto badframe;
}
PPCDBG(PPCDBG_SIGNAL, "sys32_sigreturn - normal exit returning %ld - pid=%ld current=%lx comm=%s \n", ret, current->pid, current, current->comm);
return ret;
badframe:
PPCDBG(PPCDBG_SYS32NI, "sys32_sigreturn - badframe - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
do_exit(SIGSEGV);
}
......@@ -487,6 +447,7 @@ setup_frame32(struct pt_regs *regs, struct sigregs32 *frame,
flush_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[2]);
current->thread.fpscr = 0; /* turn off all fp exceptions */
newsp -= __SIGNAL_FRAMESIZE32;
if (put_user(regs->gpr[1], (u32*)(u64)newsp)
......@@ -505,8 +466,7 @@ setup_frame32(struct pt_regs *regs, struct sigregs32 *frame,
regs->link = (unsigned long) frame->tramp;
return;
badframe:
udbg_printf("setup_frame32 - badframe in setup_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); PPCDBG_ENTER_DEBUGGER();
badframe:
#if DEBUG_SIG
printk("badframe in setup_frame32, regs=%p frame=%p newsp=%lx\n",
regs, frame, newsp);
......@@ -552,7 +512,6 @@ long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
elf_gregset_t32 saved_regs; /* an array of 32 bit register values */
sigset_t signal_set;
stack_t stack;
unsigned int previous_stack;
ret = 0;
/* Adjust the inputted reg1 to point to the first rt signal frame */
......@@ -581,10 +540,6 @@ long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
/* Set to point to the next rt_sigframe - this is used to determine whether this
* is the last signal to process
*/
rt_stack_frame ++;
if (rt_stack_frame == (struct rt_sigframe_32 *)(u64)(sigctx.regs))
{
signalregs = (struct sigregs32 *) (u64)sigctx.regs;
/* If currently owning the floating point - give them up */
if (regs->msr & MSR_FP)
......@@ -669,37 +624,6 @@ long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
regs->dsisr = 0;
regs->result = (u64)(saved_regs[PT_RESULT]) & 0xFFFFFFFF;
ret = regs->result;
}
else /* more signals to go */
{
udbg_printf("hey should not occur\n");
regs->gpr[1] = (u64)rt_stack_frame - __SIGNAL_FRAMESIZE32;
if (copy_from_user(&sigctx, &rt_stack_frame->uc.uc_mcontext,sizeof(sigctx)))
{
goto badframe;
}
signalregs = (struct sigregs32 *) (u64)sigctx.regs;
/* first parm to signal handler is the signal number */
regs->gpr[3] = ret = sigctx.signal;
/* second parm is a pointer to sig info */
get_user(regs->gpr[4], &rt_stack_frame->pinfo);
/* third parm is a pointer to the ucontext */
get_user(regs->gpr[5], &rt_stack_frame->puc);
/* fourth parm is the stack frame */
regs->gpr[6] = (u64)rt_stack_frame;
/* Set up link register to return to sigreturn when the */
/* signal handler completes */
regs->link = (u64)&signalregs->tramp;
/* Set next instruction to the start fo the signal handler */
regs->nip = sigctx.handler;
/* Set the reg1 to look like a call to the signal handler */
if (get_user(previous_stack,&signalregs->gp_regs[PT_R1])
|| put_user(previous_stack, (unsigned long *)regs->gpr[1]))
{
goto badframe;
}
}
return ret;
......@@ -715,8 +639,6 @@ asmlinkage long sys32_rt_sigaction(int sig, const struct sigaction32 *act, struc
int ret;
sigset32_t set32;
PPCDBG(PPCDBG_SIGNAL, "sys32_rt_sigaction - entered - sig=%x \n", sig);
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset32_t))
return -EINVAL;
......@@ -765,8 +687,6 @@ asmlinkage long sys32_rt_sigaction(int sig, const struct sigaction32 *act, struc
ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
}
PPCDBG(PPCDBG_SIGNAL, "sys32_rt_sigaction - exiting - sig=%x \n", sig);
return ret;
}
......@@ -786,8 +706,6 @@ asmlinkage long sys32_rt_sigprocmask(u32 how, sigset32_t *set, sigset32_t *oset,
int ret;
mm_segment_t old_fs = get_fs();
PPCDBG(PPCDBG_SIGNAL, "sys32_rt_sigprocmask - entered how=%x \n", (int)how);
if (set) {
if (copy_from_user (&s32, set, sizeof(sigset32_t)))
return -EFAULT;
......@@ -855,7 +773,8 @@ siginfo64to32(siginfo_t32 *d, siginfo_t *s)
memset (d, 0, sizeof(siginfo_t32));
d->si_signo = s->si_signo;
d->si_errno = s->si_errno;
d->si_code = s->si_code;
/* XXX why don't we just implement copy_siginfo_to_user32? - Anton */
d->si_code = s->si_code & 0xffff;
if (s->si_signo >= SIGRTMIN) {
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
......@@ -1145,6 +1064,7 @@ setup_rt_frame32(struct pt_regs *regs, struct sigregs32 *frame,
flush_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[2]);
current->thread.fpscr = 0; /* turn off all fp exceptions */
/* Retrieve rt_sigframe from stack and
......@@ -1172,9 +1092,7 @@ setup_rt_frame32(struct pt_regs *regs, struct sigregs32 *frame,
return;
badframe:
udbg_printf("setup_frame32 - badframe in setup_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); PPCDBG_ENTER_DEBUGGER();
badframe:
#if DEBUG_SIG
printk("badframe in setup_frame32, regs=%p frame=%p newsp=%lx\n",
regs, frame, newsp);
......@@ -1182,7 +1100,6 @@ setup_rt_frame32(struct pt_regs *regs, struct sigregs32 *frame,
do_exit(SIGSEGV);
}
/*
* OK, we're invoking a handler
*/
......@@ -1233,7 +1150,7 @@ handle_signal32(unsigned long sig, siginfo_t *info, sigset_t *oldset,
goto badframe;
}
} else {
/* Put another sigcontext on the stack */
/* Put a sigcontext on the stack */
*newspp -= sizeof(*sc);
sc = (struct sigcontext32_struct *)(u64)*newspp;
if (verify_area(VERIFY_WRITE, sc, sizeof(*sc)))
......@@ -1259,7 +1176,6 @@ handle_signal32(unsigned long sig, siginfo_t *info, sigset_t *oldset,
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
}
return;
badframe:
......@@ -1348,17 +1264,10 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
if (signr > 0) {
ka = &current->sig->action[signr-1];
PPCDBG(PPCDBG_SIGNAL, " do signal :sigaction flags = %lx \n" ,ka->sa.sa_flags);
PPCDBG(PPCDBG_SIGNAL, " do signal :on sig stack = %lx \n" ,on_sig_stack(regs->gpr[1]));
PPCDBG(PPCDBG_SIGNAL, " do signal :reg1 = %lx \n" ,regs->gpr[1]);
PPCDBG(PPCDBG_SIGNAL, " do signal :alt stack = %lx \n" ,current->sas_ss_sp);
PPCDBG(PPCDBG_SIGNAL, " do signal :alt stack size = %lx \n" ,current->sas_ss_size);
if ( (ka->sa.sa_flags & SA_ONSTACK)
&& (! on_sig_stack(regs->gpr[1])))
{
newsp = (current->sas_ss_sp + current->sas_ss_size);
} else
else
newsp = regs->gpr[1];
newsp = frame = newsp - sizeof(struct sigregs32);
......@@ -1378,12 +1287,10 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
if (newsp == frame)
return 0; /* no signals delivered */
// Invoke correct stack setup routine
/* Invoke correct stack setup routine */
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame32(regs, (struct sigregs32*)(u64)frame, newsp);
else
setup_frame32(regs, (struct sigregs32*)(u64)frame, newsp);
return 1;
}
......@@ -31,6 +31,7 @@
/* #include <linux/openpic.h> */
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
......@@ -58,7 +59,6 @@ volatile int smp_commenced = 0;
int smp_num_cpus = 1;
int smp_tb_synchronized = 0;
spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
static int max_cpus __initdata = NR_CPUS;
......@@ -595,12 +595,13 @@ void __init smp_boot_cpus(void)
}
/*
* XXX very rough, assumes 20 bus cycles to read a cache line,
* timebase increments every 4 bus cycles, 32kB L1 data cache.
* XXX very rough. On POWER4 we optimise tlb flushes for
* tasks that only run on one cpu so we increase decay ticks.
*/
cacheflush_time = 5 * 1024;
/* XXX - Fix - Anton */
cache_decay_ticks = 0;
if (__is_processor(PV_POWER4))
cache_decay_ticks = HZ/50;
else
cache_decay_ticks = HZ/100;
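For reference, a minimal sketch of what those decay values work out to, assuming HZ=100 (the usual setting for 2.5-era ppc64 configs); the helper name below is made up for the example:

#include <stdio.h>

#define HZ 100	/* assumed value, not taken from this tree */

/* hypothetical helper mirroring the cache_decay_ticks choice above */
static unsigned long decay_ticks(int is_power4)
{
	return is_power4 ? HZ / 50 : HZ / 100;
}

int main(void)
{
	/* POWER4: 2 ticks (20ms at HZ=100); others: 1 tick (10ms) */
	printf("POWER4: %lu ticks, other: %lu ticks\n",
	       decay_ticks(1), decay_ticks(0));
	return 0;
}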
/* Probe arch for CPUs */
cpu_nr = ppc_md.smp_probe();
......
......@@ -4,12 +4,16 @@
* Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
* Copyright (c) 2001 Dave Engebretsen
*
* Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/* XXX Note: Changes for bolted region have not been merged - Anton */
#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
......@@ -18,11 +22,10 @@
#include <asm/naca.h>
#include <asm/pmc.h>
int make_ste(unsigned long stab,
unsigned long esid, unsigned long vsid);
void make_slbe(unsigned long esid, unsigned long vsid,
int large);
extern struct Naca *naca;
int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
void make_slbe(unsigned long esid, unsigned long vsid, int large);
#define cpu_has_slb() (__is_processor(PV_POWER4))
/*
* Build an entry for the base kernel segment and put it into
......@@ -36,20 +39,22 @@ void stab_initialize(unsigned long stab)
esid = GET_ESID(KERNELBASE);
vsid = get_kernel_vsid(esid << SID_SHIFT);
if (!__is_processor(PV_POWER4)) {
__asm__ __volatile__("isync; slbia; isync":::"memory");
make_ste(stab, esid, vsid);
} else {
if (cpu_has_slb()) {
/* Invalidate the entire SLB & all the ERATS */
__asm__ __volatile__("isync" : : : "memory");
#ifndef CONFIG_PPC_ISERIES
__asm__ __volatile__("slbmte %0,%0"
: : "r" (0) : "memory");
__asm__ __volatile__("isync; slbia; isync":::"memory");
make_slbe(esid, vsid, 0);
#ifdef CONFIG_PPC_ISERIES
asm volatile("isync; slbia; isync":::"memory");
#else
__asm__ __volatile__("isync; slbia; isync":::"memory");
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
make_slbe(esid, vsid, 0);
#endif
} else {
asm volatile("isync; slbia; isync":::"memory");
make_ste(stab, esid, vsid);
/* Order update */
asm volatile("sync":::"memory");
}
}
......@@ -66,21 +71,15 @@ int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
global_entry = (esid & 0x1f) << 3;
ste = (STE *)(stab | ((esid & 0x1f) << 7));
/*
* Find an empty entry, if one exists.
*/
for(group = 0; group < 2; group++) {
for(entry = 0; entry < 8; entry++, ste++) {
if(!(ste->dw0.dw0.v)) {
/* Find an empty entry, if one exists. */
for (group = 0; group < 2; group++) {
for (entry = 0; entry < 8; entry++, ste++) {
if (!(ste->dw0.dw0.v)) {
ste->dw1.dw1.vsid = vsid;
/* Order VSID update */
__asm__ __volatile__ ("eieio" : : : "memory");
ste->dw0.dw0.esid = esid;
ste->dw0.dw0.v = 1;
ste->dw0.dw0.kp = 1;
/* Order update */
__asm__ __volatile__ ("sync" : : : "memory");
asm volatile("eieio":::"memory");
ste->dw0.dw0.v = 1;
return(global_entry | entry);
}
}
......@@ -100,8 +99,8 @@ int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
PMC_SW_PROCESSOR(stab_capacity_castouts);
castout_entry = get_paca()->xStab_data.next_round_robin;
for(i = 0; i < 16; i++) {
if(castout_entry < 8) {
for (i = 0; i < 16; i++) {
if (castout_entry < 8) {
global_entry = (esid & 0x1f) << 3;
ste = (STE *)(stab | ((esid & 0x1f) << 7));
castout_ste = ste + castout_entry;
......@@ -111,12 +110,9 @@ int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
castout_ste = ste + (castout_entry - 8);
}
if((((castout_ste->dw0.dw0.esid) >> 32) == 0) ||
(((castout_ste->dw0.dw0.esid) & 0xffffffff) > 0)) {
/* Found an entry to cast out. It is either a user */
/* region, or a secondary kernel segment. */
/* Don't cast out the first kernel segment */
if (castout_ste->dw0.dw0.esid != GET_ESID(KERNELBASE))
break;
}
castout_entry = (castout_entry + 1) & 0xf;
}
......@@ -126,21 +122,21 @@ int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
/* Modify the old entry to the new value. */
/* Force previous translations to complete. DRENG */
__asm__ __volatile__ ("isync" : : : "memory" );
asm volatile("isync" : : : "memory" );
castout_ste->dw0.dw0.v = 0;
__asm__ __volatile__ ("sync" : : : "memory" ); /* Order update */
asm volatile("sync" : : : "memory" ); /* Order update */
castout_ste->dw1.dw1.vsid = vsid;
__asm__ __volatile__ ("eieio" : : : "memory" ); /* Order update */
old_esid = castout_ste->dw0.dw0.esid;
castout_ste->dw0.dw0.esid = esid;
castout_ste->dw0.dw0.v = 1;
castout_ste->dw0.dw0.kp = 1;
__asm__ __volatile__ ("slbie %0" : : "r" (old_esid << SID_SHIFT));
asm volatile("eieio" : : : "memory" ); /* Order update */
castout_ste->dw0.dw0.v = 1;
asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
/* Ensure completion of slbie */
__asm__ __volatile__ ("sync" : : : "memory" );
asm volatile("sync" : : : "memory" );
return(global_entry | (castout_entry & 0x7));
return (global_entry | (castout_entry & 0x7));
}
/*
......@@ -165,10 +161,10 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large)
/*
* Find an empty entry, if one exists.
*/
for(entry = 0; entry < naca->slb_size; entry++) {
__asm__ __volatile__("slbmfee %0,%1"
for (entry = 0; entry < naca->slb_size; entry++) {
asm volatile("slbmfee %0,%1"
: "=r" (esid_data) : "r" (entry));
if(!esid_data.data.v) {
if (!esid_data.data.v) {
/*
* Write the new SLB entry.
*/
......@@ -187,12 +183,12 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large)
/* slbie not needed as no previous mapping existed. */
/* Order update */
__asm__ __volatile__ ("isync" : : : "memory");
__asm__ __volatile__ ("slbmte %0,%1"
asm volatile("isync" : : : "memory");
asm volatile("slbmte %0,%1"
: : "r" (vsid_data),
"r" (esid_data));
/* Order update */
__asm__ __volatile__ ("isync" : : : "memory");
asm volatile("isync" : : : "memory");
return;
}
}
......@@ -228,17 +224,44 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large)
esid_data.data.v = 1;
esid_data.data.index = entry;
__asm__ __volatile__ ("isync" : : : "memory"); /* Order update */
__asm__ __volatile__ ("slbmte %0,%1"
asm volatile("isync" : : : "memory"); /* Order update */
asm volatile("slbmte %0,%1"
: : "r" (vsid_data), "r" (esid_data));
__asm__ __volatile__ ("isync" : : : "memory" ); /* Order update */
asm volatile("isync" : : : "memory" ); /* Order update */
}
static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
int kernel_segment)
{
if (cpu_has_slb()) {
#ifndef CONFIG_PPC_ISERIES
if (REGION_ID(esid << SID_SHIFT) == KERNEL_REGION_ID)
make_slbe(esid, vsid, 1);
else
#endif
make_slbe(esid, vsid, 0);
} else {
unsigned char top_entry, stab_entry, *segments;
stab_entry = make_ste(get_paca()->xStab_data.virt, esid, vsid);
PMC_SW_PROCESSOR_A(stab_entry_use, stab_entry & 0xf);
segments = get_paca()->xSegments;
top_entry = get_paca()->stab_cache_pointer;
if (!kernel_segment && top_entry < STAB_CACHE_SIZE) {
segments[top_entry] = stab_entry;
if (top_entry == STAB_CACHE_SIZE)
top_entry = 0xff;
top_entry++;
get_paca()->stab_cache_pointer = top_entry;
}
}
}
/*
* Allocate a segment table entry for the given ea.
*/
int ste_allocate ( unsigned long ea,
unsigned long trap)
int ste_allocate(unsigned long ea)
{
unsigned long vsid, esid;
int kernel_segment = 0;
......@@ -246,85 +269,140 @@ int ste_allocate ( unsigned long ea,
PMC_SW_PROCESSOR(stab_faults);
/* Check for invalid effective addresses. */
if (!IS_VALID_EA(ea)) {
if (!IS_VALID_EA(ea))
return 1;
}
/* Kernel or user address? */
if (REGION_ID(ea) >= KERNEL_REGION_ID) {
kernel_segment = 1;
vsid = get_kernel_vsid( ea );
vsid = get_kernel_vsid(ea);
} else {
struct mm_struct *mm = current->mm;
if ( mm ) {
vsid = get_vsid(mm->context, ea );
} else {
if (mm)
vsid = get_vsid(mm->context, ea);
else
return 1;
}
}
esid = GET_ESID(ea);
if (trap == 0x380 || trap == 0x480) {
#ifndef CONFIG_PPC_ISERIES
if (REGION_ID(ea) == KERNEL_REGION_ID)
make_slbe(esid, vsid, 1);
else
#endif
make_slbe(esid, vsid, 0);
} else {
unsigned char top_entry, stab_entry, *segments;
stab_entry = make_ste(get_paca()->xStab_data.virt, esid, vsid);
PMC_SW_PROCESSOR_A(stab_entry_use, stab_entry & 0xf);
segments = get_paca()->xSegments;
top_entry = segments[0];
if(!kernel_segment && top_entry < (STAB_CACHE_SIZE - 1)) {
top_entry++;
segments[top_entry] = stab_entry;
if(top_entry == STAB_CACHE_SIZE - 1) top_entry = 0xff;
segments[0] = top_entry;
}
__ste_allocate(esid, vsid, kernel_segment);
if (!cpu_has_slb()) {
/* Order update */
asm volatile("sync":::"memory");
}
return(0);
return 0;
}
unsigned long ppc64_preload_all_segments;
unsigned long ppc64_stab_preload = 1;
#define STAB_PRESSURE 0
#define USE_SLBIE_ON_STAB 0
/*
* Flush all entries from the segment table of the current processor.
* Kernel and Bolted entries are not removed as we cannot tolerate
* faults on those addresses.
* preload all 16 segments for a 32 bit process and the PC and SP segments
* for a 64 bit process.
*/
static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
{
if (ppc64_preload_all_segments && test_tsk_thread_flag(tsk, TIF_32BIT)) {
unsigned long esid, vsid;
#define STAB_PRESSURE 0
for (esid = 0; esid < 16; esid++) {
vsid = get_vsid(mm->context, esid << SID_SHIFT);
__ste_allocate(esid, vsid, 0);
}
} else {
unsigned long pc = KSTK_EIP(tsk);
unsigned long stack = KSTK_ESP(tsk);
unsigned long pc_segment = pc & ~SID_MASK;
unsigned long stack_segment = stack & ~SID_MASK;
unsigned long vsid;
if (pc) {
if (REGION_ID(pc) >= KERNEL_REGION_ID)
BUG();
vsid = get_vsid(mm->context, pc);
__ste_allocate(GET_ESID(pc), vsid, 0);
}
if (stack && (pc_segment != stack_segment)) {
if (REGION_ID(stack) >= KERNEL_REGION_ID)
BUG();
vsid = get_vsid(mm->context, stack);
__ste_allocate(GET_ESID(stack), vsid, 0);
}
}
if (!cpu_has_slb()) {
/* Order update */
asm volatile("sync" : : : "memory");
}
}
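A small standalone sketch of why a 32-bit task needs exactly 16 preloaded segments, assuming the usual ppc64 256MB segment size (SID_SHIFT of 28); the numbers are for illustration only:

#include <stdio.h>

#define SID_SHIFT 28	/* assumed 256MB segments, as on ppc64 */

int main(void)
{
	/* A 32-bit address space (4GB) covers exactly 16 segments of
	 * 256MB, so preloading every ESID is cheap for 32-bit tasks. */
	unsigned long long seg_size = 1ULL << SID_SHIFT;
	unsigned long long nsegs = (1ULL << 32) / seg_size;

	printf("%llu MB per segment, %llu segments for 4GB\n",
	       seg_size >> 20, nsegs);
	return 0;
}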
/* Flush all user entries from the segment table of the current processor. */
void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
{
STE *stab = (STE *) get_paca()->xStab_data.virt;
unsigned char *segments = get_paca()->xSegments;
unsigned long flags, i;
if (cpu_has_slb()) {
if (!STAB_PRESSURE && test_thread_flag(TIF_32BIT)) {
union {
unsigned long word0;
slb_dword0 data;
} esid_data;
unsigned long esid;
if(!__is_processor(PV_POWER4)) {
unsigned long entry;
asm volatile("isync" : : : "memory");
for (esid = 0; esid < 16; esid++) {
esid_data.word0 = 0;
esid_data.data.esid = esid;
asm volatile("slbie %0" : : "r" (esid_data));
}
asm volatile("isync" : : : "memory");
} else {
asm volatile("isync; slbia; isync":::"memory");
}
PMC_SW_PROCESSOR(stab_invalidations);
} else {
STE *stab = (STE *) get_paca()->xStab_data.virt;
STE *ste;
unsigned long flags;
/* Force previous translations to complete. DRENG */
__asm__ __volatile__ ("isync" : : : "memory");
asm volatile("isync" : : : "memory");
__save_and_cli(flags);
if(segments[0] != 0xff && !STAB_PRESSURE) {
for(i = 1; i <= segments[0]; i++) {
if (get_paca()->stab_cache_pointer != 0xff && !STAB_PRESSURE) {
int i;
unsigned char *segments = get_paca()->xSegments;
for (i = 0; i < get_paca()->stab_cache_pointer; i++) {
ste = stab + segments[i];
ste->dw0.dw0.v = 0;
PMC_SW_PROCESSOR(stab_invalidations);
}
#if USE_SLBIE_ON_STAB
asm volatile("sync":::"memory");
for (i = 0; i < get_paca()->stab_cache_pointer; i++) {
ste = stab + segments[i];
asm volatile("slbie %0" : :
"r" (ste->dw0.dw0.esid << SID_SHIFT));
}
asm volatile("sync":::"memory");
#else
asm volatile("sync; slbia; sync":::"memory");
#endif
} else {
unsigned long entry;
/* Invalidate all entries. */
ste = stab;
/* Never flush the first entry. */
ste += 1;
for(entry = 1;
for (entry = 1;
entry < (PAGE_SIZE / sizeof(STE));
entry++, ste++) {
unsigned long ea;
......@@ -334,70 +412,14 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
PMC_SW_PROCESSOR(stab_invalidations);
}
}
}
*((unsigned long *)segments) = 0;
__restore_flags(flags);
/* Invalidate the SLB. */
/* Force invals to complete. */
__asm__ __volatile__ ("sync" : : : "memory");
/* Flush the SLB. */
__asm__ __volatile__ ("slbia" : : : "memory");
/* Force flush to complete. */
__asm__ __volatile__ ("sync" : : : "memory");
} else {
/* XXX The commented out code will only work for 32 bit tasks */
#if 1
unsigned long flags;
__save_and_cli(flags);
__asm__ __volatile__("isync; slbia; isync":::"memory");
__restore_flags(flags);
#else
union {
unsigned long word0;
slb_dword0 data;
} esid_data;
unsigned long esid;
__asm__ __volatile__("isync" : : : "memory");
for (esid = 0; esid < 16; esid++) {
esid_data.word0 = 0;
esid_data.data.esid = esid;
__asm__ __volatile__("slbie %0" : : "r" (esid_data));
asm volatile("sync; slbia; sync":::"memory");
}
__asm__ __volatile__("isync" : : : "memory");
#endif
PMC_SW_PROCESSOR(stab_invalidations);
if (test_tsk_thread_flag(tsk, TIF_32BIT)) {
unsigned long esid, vsid;
for (esid = 0; esid < 16; esid++) {
vsid = get_vsid(mm->context, esid << SID_SHIFT);
make_slbe(esid, vsid, 0);
}
} else {
unsigned long pc = KSTK_EIP(tsk);
unsigned long stack = KSTK_ESP(tsk);
unsigned long pc_segment = pc & ~SID_MASK;
unsigned long stack_segment = stack & ~SID_MASK;
unsigned long vsid;
if (pc) {
if (REGION_ID(pc) >= KERNEL_REGION_ID)
BUG();
vsid = get_vsid(mm->context, pc);
make_slbe(GET_ESID(pc), vsid, 0);
get_paca()->stab_cache_pointer = 0;
__restore_flags(flags);
}
if (stack && (pc_segment != stack_segment)) {
if (REGION_ID(stack) >= KERNEL_REGION_ID)
BUG();
vsid = get_vsid(mm->context, stack);
make_slbe(GET_ESID(stack), vsid, 0);
}
}
}
if (ppc64_stab_preload)
preload_stab(tsk, mm);
}
......@@ -38,7 +38,6 @@
#include <linux/smb_fs.h>
#include <linux/smb_mount.h>
#include <linux/ncp_fs.h>
#include <linux/quota.h>
#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
......@@ -66,6 +65,7 @@
#include <asm/ppcdebug.h>
#include <asm/time.h>
#include <asm/ppc32.h>
#include <asm/mmu_context.h>
extern unsigned long wall_jiffies;
#define USEC_PER_SEC (1000000)
......@@ -518,68 +518,6 @@ struct dqblk32 {
__kernel_time_t32 dqb_itime;
};
extern asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr);
/* Note: it is necessary to treat cmd and id as unsigned ints,
* with the corresponding cast back to a signed int, to ensure that
* the 32-bit values passed in by the caller are correctly
* sign-extended to the 64-bit register representation of a signed int.
*/
asmlinkage long sys32_quotactl(u32 cmd_parm, const char *special, u32 id_parm, unsigned long addr)
{
int cmd = (int)cmd_parm;
int id = (int)id_parm;
int cmds = cmd >> SUBCMDSHIFT;
int err;
struct dqblk d;
mm_segment_t old_fs;
char *spec;
PPCDBG(PPCDBG_SYS32, "sys32_quotactl - entered - pid=%ld current=%lx comm=%s \n",
current->pid, current, current->comm);
switch (cmds) {
case Q_GETQUOTA:
break;
case Q_SETQUOTA:
case Q_SETUSE:
case Q_SETQLIM:
if (copy_from_user (&d, (struct dqblk32 *)addr,
sizeof (struct dqblk32)))
return -EFAULT;
d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime;
d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime;
break;
default:
return sys_quotactl(cmd, special,
id, (caddr_t)addr);
}
spec = getname32 (special);
err = PTR_ERR(spec);
if (IS_ERR(spec)) return err;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d);
set_fs (old_fs);
putname (spec);
if (cmds == Q_GETQUOTA) {
__kernel_time_t b = d.dqb_btime, i = d.dqb_itime;
((struct dqblk32 *)&d)->dqb_itime = i;
((struct dqblk32 *)&d)->dqb_btime = b;
if (copy_to_user ((struct dqblk32 *)addr, &d,
sizeof (struct dqblk32)))
return -EFAULT;
}
PPCDBG(PPCDBG_SYS32, "sys32_quotactl - exited - pid=%ld current=%lx comm=%s \n",
current->pid, current, current->comm);
return err;
}
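The sign-extension note at the top of this (now removed) wrapper is easier to see in isolation; a minimal user-space sketch with made-up values, not code from this tree:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A 32-bit caller passes -1; the 64-bit syscall entry sees the
	 * register zero-extended to 0xffffffff. */
	uint32_t cmd_parm = 0xffffffffu;

	/* Casting through a signed 32-bit int recovers the intended
	 * negative value before widening to 64 bits. */
	int cmd = (int)cmd_parm;
	long long with_cast = cmd;			/* -1 */
	long long without_cast = (long long)cmd_parm;	/* 4294967295 */

	printf("with cast: %lld, without: %lld\n", with_cast, without_cast);
	return 0;
}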
/* readdir & getdents */
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
......@@ -900,15 +838,6 @@ asmlinkage long sys32_select(int n, u32 *inp, u32 *outp, u32 *exp, u32 tvp_x)
return ret;
}
/*
* Due to some executables calling the wrong select we sometimes
* get wrong args. This determines how the args are being passed
* (a single pointer to all of the args) and then calls
* sys_select() with the appropriate args. -- Cort
*/
/* Note: it is necessary to treat n as an unsigned int,
* with the corresponding cast to a signed int to ensure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
......@@ -916,14 +845,9 @@ asmlinkage long sys32_select(int n, u32 *inp, u32 *outp, u32 *exp, u32 tvp_x)
*/
asmlinkage int ppc32_select(u32 n, u32* inp, u32* outp, u32* exp, u32 tvp_x)
{
if ((unsigned int)n >= 4096)
panic("ppc32_select - wrong arguments were passed in \n");
return sys32_select((int)n, inp, outp, exp, tvp_x);
}
static int cp_new_stat32(struct kstat *stat, struct stat32 *statbuf)
{
int err;
......@@ -3800,30 +3724,37 @@ static int do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs *
int retval;
int i;
bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
file = open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file))
return retval;
bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
bprm.file = file;
bprm.filename = filename;
bprm.sh_bang = 0;
bprm.loader = 0;
bprm.exec = 0;
if ((bprm.argc = count32(argv, bprm.p / sizeof(u32))) < 0) {
allow_write_access(file);
fput(file);
return bprm.argc;
}
if ((bprm.envc = count32(envp, bprm.p / sizeof(u32))) < 0) {
allow_write_access(file);
fput(file);
return bprm.argc;
}
bprm.mm = mm_alloc();
retval = -ENOMEM;
if (!bprm.mm)
goto out_file;
retval = init_new_context(current, bprm.mm);
if (retval < 0)
goto out_mm;
bprm.argc = count32(argv, bprm.p / sizeof(u32));
if ((retval = bprm.argc) < 0)
goto out_mm;
bprm.envc = count32(envp, bprm.p / sizeof(u32));
if ((retval = bprm.envc) < 0)
goto out_mm;
retval = prepare_binprm(&bprm);
if (retval < 0)
......@@ -3842,21 +3773,27 @@ static int do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs *
if (retval < 0)
goto out;
retval = search_binary_handler(&bprm, regs);
retval = search_binary_handler(&bprm,regs);
if (retval >= 0)
/* execve success */
return retval;
out:
/* Something went wrong, return the inode and free the argument pages*/
allow_write_access(bprm.file);
if (bprm.file)
fput(bprm.file);
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
struct page * page = bprm.page[i];
if (page)
__free_page(page);
}
for (i=0 ; i<MAX_ARG_PAGES ; i++)
if (bprm.page[i])
__free_page(bprm.page[i]);
out_mm:
mmdrop(bprm.mm);
out_file:
if (bprm.file) {
allow_write_access(bprm.file);
fput(bprm.file);
}
return retval;
}
......@@ -3867,11 +3804,6 @@ asmlinkage long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a
int error;
char * filename;
ifppcdebug(PPCDBG_SYS32) {
udbg_printf("sys32_execve - entered - pid=%ld, comm=%s \n", current->pid, current->comm);
//PPCDBG(PPCDBG_SYS32NI, " a0=%lx, a1=%lx, a2=%lx, a3=%lx, a4=%lx, a5=%lx, regs=%p \n", a0, a1, a2, a3, a4, a5, regs);
}
filename = getname((char *) a0);
error = PTR_ERR(filename);
if (IS_ERR(filename))
......@@ -3886,10 +3818,6 @@ asmlinkage long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a
putname(filename);
out:
ifppcdebug(PPCDBG_SYS32) {
udbg_printf("sys32_execve - exited - returning %x - pid=%ld \n", error, current->pid);
//udbg_printf("sys32_execve - at exit - regs->gpr[1]=%lx, gpr[3]=%lx, gpr[4]=%lx, gpr[5]=%lx, gpr[6]=%lx \n", regs->gpr[1], regs->gpr[3], regs->gpr[4], regs->gpr[5], regs->gpr[6]);
}
return error;
}
......@@ -4671,3 +4599,53 @@ asmlinkage long sys32_time(__kernel_time_t32* tloc)
return secs;
}
extern asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
asmlinkage int sys32_sched_setaffinity(__kernel_pid_t32 pid, unsigned int len,
u32 *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
if (get_user(kernel_mask, user_mask_ptr))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_setaffinity(pid,
/* XXX Nice api... */
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
return ret;
}
extern asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
asmlinkage int sys32_sched_getaffinity(__kernel_pid_t32 pid, unsigned int len,
u32 *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_getaffinity(pid,
/* XXX Nice api... */
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
if (ret == 0) {
if (put_user(kernel_mask, user_mask_ptr))
ret = -EFAULT;
}
return ret;
}
......@@ -38,29 +38,12 @@
#include <asm/ppcdebug.h>
extern int fix_alignment(struct pt_regs *);
extern void bad_page_fault(struct pt_regs *, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
/* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active;
#ifdef CONFIG_XMON
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
#ifdef CONFIG_XMON
void (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#ifdef CONFIG_KGDB
#ifdef CONFIG_DEBUG_KERNEL
void (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
......@@ -68,30 +51,44 @@ int (*debugger_iabr_match)(struct pt_regs *regs);
int (*debugger_dabr_match)(struct pt_regs *regs);
void (*debugger_fault_handler)(struct pt_regs *regs);
#endif
#endif
/*
* Trap & Exception support
*/
void
_exception(int signr, struct pt_regs *regs)
/* Should we panic on bad kernel exceptions or try to recover */
#undef PANIC_ON_ERROR
static spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
void die(const char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs))
{
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("Oops: %s, sig: %ld\n", str, err);
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
print_backtrace((unsigned long *)regs->gpr[1]);
panic("Exception in kernel pc %lx signal %d",regs->nip,signr);
#if defined(CONFIG_PPCDBG) && (defined(CONFIG_XMON) || defined(CONFIG_KGDB))
/* Allow us to catch SIGILLs for 64-bit app/glibc debugging. -Peter */
} else if (signr == SIGILL) {
ifppcdebug(PPCDBG_SIGNALXMON)
debugger(regs);
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
#ifdef PANIC_ON_ERROR
panic(str);
#else
do_exit(SIGSEGV);
#endif
}
static void
_exception(int signr, siginfo_t *info, struct pt_regs *regs)
{
if (!user_mode(regs)) {
if (debugger)
debugger(regs);
die("Exception in kernel mode\n", regs, signr);
}
force_sig(signr, current);
force_sig_info(signr, info, current);
}
/* Get the error information for errors coming through the
......@@ -130,9 +127,8 @@ static void FWNMI_release_errinfo(void)
void
SystemResetException(struct pt_regs *regs)
{
char *msg = "System Reset in kernel mode.\n";
udbg_printf(msg); printk(msg);
if (fwnmi_active) {
char *msg;
unsigned long *r3 = __va(regs->gpr[3]); /* for FWNMI debug */
struct rtas_error_log *errlog;
......@@ -140,17 +136,31 @@ SystemResetException(struct pt_regs *regs)
udbg_printf(msg, r3); printk(msg, r3);
errlog = FWNMI_get_errinfo(regs);
}
#if defined(CONFIG_XMON)
xmon(regs);
udbg_printf("leaving xmon...\n");
if (debugger)
debugger(regs);
#ifdef PANIC_ON_ERROR
panic("System Reset");
#else
for(;;);
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable System Reset");
#endif
/* What should we do here? We could issue a shutdown or hard reset. */
}
static int power4_handle_mce(struct pt_regs *regs)
{
return 0;
}
void
MachineCheckException(struct pt_regs *regs)
{
siginfo_t info;
if (fwnmi_active) {
struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs);
if (errhdr) {
......@@ -158,117 +168,221 @@ MachineCheckException(struct pt_regs *regs)
}
FWNMI_release_errinfo();
}
if ( !user_mode(regs) )
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (!user_mode(regs)) {
/* Attempt to recover if the interrupt is recoverable */
if (regs->msr & MSR_RI) {
if (__is_processor(PV_POWER4) &&
power4_handle_mce(regs))
return;
}
if (debugger_fault_handler) {
debugger_fault_handler(regs);
return;
}
#endif
if (debugger)
debugger(regs);
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("Machine check in kernel mode.\n");
printk("Caused by (from SRR1=%lx): ", regs->msr);
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
print_backtrace((unsigned long *)regs->gpr[1]);
panic("machine check");
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
panic("Unrecoverable Machine Check");
}
_exception(SIGSEGV, regs);
}
void
SMIException(struct pt_regs *regs)
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
{
debugger(regs);
return;
}
#endif
show_regs(regs);
print_backtrace((unsigned long *)regs->gpr[1]);
panic("System Management Interrupt");
/*
* XXX we should check RI bit on exception exit and kill the
* task if it was cleared
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)regs->nip;
_exception(SIGSEGV, &info, regs);
}
void
UnknownException(struct pt_regs *regs)
{
siginfo_t info;
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = 0;
info.si_addr = 0;
_exception(SIGTRAP, &info, regs);
}
void
InstructionBreakpointException(struct pt_regs *regs)
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_iabr_match(regs))
siginfo_t info;
if (debugger_iabr_match && debugger_iabr_match(regs))
return;
#endif
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
}
static void parse_fpe(struct pt_regs *regs)
{
siginfo_t info;
unsigned int *tmp;
unsigned int fpscr;
if (regs->msr & MSR_FP)
giveup_fpu(current);
tmp = &current->thread.fpscr;
fpscr = *tmp;
/* Invalid operation */
if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
info.si_code = FPE_FLTINV;
/* Overflow */
else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
info.si_code = FPE_FLTOVF;
/* Underflow */
else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
info.si_code = FPE_FLTUND;
/* Divide by zero */
else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
info.si_code = FPE_FLTDIV;
/* Inexact result */
else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
info.si_code = FPE_FLTRES;
else
info.si_code = 0;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void *)regs->nip;
_exception(SIGFPE, &info, regs);
}
void
ProgramCheckException(struct pt_regs *regs)
{
siginfo_t info;
if (regs->msr & 0x100000) {
/* IEEE FP exception */
_exception(SIGFPE, regs);
parse_fpe(regs);
} else if (regs->msr & 0x40000) {
/* Privileged instruction */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_PRVOPC;
info.si_addr = (void *)regs->nip;
_exception(SIGILL, &info, regs);
} else if (regs->msr & 0x20000) {
/* trap exception */
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_bpt(regs))
if (debugger_bpt && debugger_bpt(regs))
return;
#endif
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
} else {
_exception(SIGILL, regs);
/* Illegal instruction */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
info.si_addr = (void *)regs->nip;
_exception(SIGILL, &info, regs);
}
}
void
SingleStepException(struct pt_regs *regs)
{
siginfo_t info;
regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_sstep(regs))
if (debugger_sstep && debugger_sstep(regs))
return;
#endif
_exception(SIGTRAP, regs);
}
/* Dummy handler for Performance Monitor */
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_TRACE;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
}
void
PerformanceMonitorException(struct pt_regs *regs)
{
_exception(SIGTRAP, regs);
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = 0;
_exception(SIGTRAP, &info, regs);
}
void
AlignmentException(struct pt_regs *regs)
{
int fixed;
siginfo_t info;
fixed = fix_alignment(regs);
if (fixed == 1) {
ifppcdebug(PPCDBG_ALIGNFIXUP)
if (!user_mode(regs))
PPCDBG(PPCDBG_ALIGNFIXUP, "fix alignment at %lx\n", regs->nip);
PPCDBG(PPCDBG_ALIGNFIXUP, "fix alignment at %lx\n",
regs->nip);
regs->nip += 4; /* skip over emulated instruction */
return;
}
/* Operand address was bad */
if (fixed == -EFAULT) {
/* fixed == -EFAULT means the operand address was bad */
if (user_mode(regs))
force_sig(SIGSEGV, current);
else
bad_page_fault(regs, regs->dar);
if (user_mode(regs)) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = SEGV_MAPERR;
info.si_addr = (void *)regs->dar;
force_sig_info(SIGSEGV, &info, current);
} else {
/* Search exception table */
bad_page_fault(regs, regs->dar, SIGSEGV);
}
return;
}
_exception(SIGBUS, regs);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void *)regs->nip;
_exception(SIGBUS, &info, regs);
}
void __init trap_init(void)
......
......@@ -4,6 +4,7 @@
O_TARGET = lib.o
obj-y := checksum.o dec_and_lock.o string.o strcase.o
obj-y := checksum.o dec_and_lock.o string.o strcase.o copypage.o \
memcpy.o copyuser.o
include $(TOPDIR)/Rules.make
/*
* arch/ppc64/lib/copypage.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include "../kernel/ppc_asm.h"
_GLOBAL(copy_page)
std r31,-8(1)
std r30,-16(1)
std r29,-24(1)
std r28,-32(1)
std r27,-40(1)
std r26,-48(1)
std r25,-56(1)
std r24,-64(1)
std r23,-72(1)
std r22,-80(1)
std r21,-88(1)
std r20,-96(1)
li r5,4096/32 - 1
addi r3,r3,-8
li r12,5
0: addi r5,r5,-24
mtctr r12
ld r22,640(4)
ld r21,512(4)
ld r20,384(4)
ld r11,256(4)
ld r9,128(4)
ld r7,0(4)
ld r25,648(4)
ld r24,520(4)
ld r23,392(4)
ld r10,264(4)
ld r8,136(4)
ldu r6,8(4)
cmpwi r5,24
1: std r22,648(3)
std r21,520(3)
std r20,392(3)
std r11,264(3)
std r9,136(3)
std r7,8(3)
ld r28,648(4)
ld r27,520(4)
ld r26,392(4)
ld r31,264(4)
ld r30,136(4)
ld r29,8(4)
std r25,656(3)
std r24,528(3)
std r23,400(3)
std r10,272(3)
std r8,144(3)
std r6,16(3)
ld r22,656(4)
ld r21,528(4)
ld r20,400(4)
ld r11,272(4)
ld r9,144(4)
ld r7,16(4)
std r28,664(3)
std r27,536(3)
std r26,408(3)
std r31,280(3)
std r30,152(3)
stdu r29,24(3)
ld r25,664(4)
ld r24,536(4)
ld r23,408(4)
ld r10,280(4)
ld r8,152(4)
ldu r6,24(4)
bdnz 1b
std r22,648(3)
std r21,520(3)
std r20,392(3)
std r11,264(3)
std r9,136(3)
std r7,8(3)
addi r4,r4,640
addi r3,r3,648
bge 0b
mtctr r5
ld r7,0(4)
ld r8,8(4)
ldu r9,16(4)
3: ld r10,8(4)
std r7,8(3)
ld r7,16(4)
std r8,16(3)
ld r8,24(4)
std r9,24(3)
ldu r9,32(4)
stdu r10,32(3)
bdnz 3b
4: ld r10,8(4)
std r7,8(3)
std r8,16(3)
std r9,24(3)
std r10,32(3)
9: ld r20,-96(1)
ld r21,-88(1)
ld r22,-80(1)
ld r23,-72(1)
ld r24,-64(1)
ld r25,-56(1)
ld r26,-48(1)
ld r27,-40(1)
ld r28,-32(1)
ld r29,-24(1)
ld r30,-16(1)
ld r31,-8(1)
blr
/*
* arch/ppc64/lib/copyuser.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include "../kernel/ppc_asm.h"
.align 7
_GLOBAL(__copy_tofrom_user)
/* first check for a whole page copy on a page boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
or r0,r3,r4
neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
andi. r0,r0,4095
std r3,-24(r1)
crand cr0*4+2,cr0*4+2,cr6*4+2
std r4,-16(r1)
std r5,-8(r1)
dcbt 0,r4
beq .Lcopy_page
andi. r6,r6,7
mtcrf 0x01,r5
blt cr1,.Lshort_copy
bne .Ldst_unaligned
.Ldst_aligned:
andi. r0,r4,7
addi r3,r3,-16
bne .Lsrc_unaligned
srdi r7,r5,4
20: ld r9,0(r4)
addi r4,r4,-8
mtctr r7
andi. r5,r5,7
bf cr7*4+0,22f
addi r3,r3,8
addi r4,r4,8
mr r8,r9
blt cr1,72f
21: ld r9,8(r4)
70: std r8,8(r3)
22: ldu r8,16(r4)
71: stdu r9,16(r3)
bdnz 21b
72: std r8,8(r3)
beq+ 3f
addi r3,r3,16
23: ld r9,8(r4)
.Ldo_tail:
bf cr7*4+1,1f
rotldi r9,r9,32
73: stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
rotldi r9,r9,16
74: sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
rotldi r9,r9,8
75: stb r9,0(r3)
3: li r3,0
blr
.Lsrc_unaligned:
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpldi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,28f
24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
25: ld r0,8(r4)
sld r6,r9,r10
26: ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
or r7,r7,r6
blt cr6,79f
27: ld r0,8(r4)
b 2f
28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
29: ldu r9,8(r4)
sld r8,r0,r10
addi r3,r3,-8
blt cr6,5f
30: ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
31: ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
addi r3,r3,16
beq cr6,78f
1: or r7,r7,r6
32: ld r0,8(r4)
76: std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
33: ldu r9,16(r4)
or r12,r8,r12
77: stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
bdnz 1b
78: std r12,8(r3)
or r7,r7,r6
79: std r7,16(r3)
5: srd r12,r9,r11
or r12,r8,r12
80: std r12,24(r3)
bne 6f
li r3,0
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
blt cr1,.Ldo_tail
34: ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
b .Ldo_tail
.Ldst_unaligned:
mtcrf 0x01,r6 /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi r1,r5,16
bf cr7*4+3,1f
35: lbz r0,0(r4)
81: stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
36: lhzx r0,r7,r4
82: sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
37: lwzx r0,r7,r4
83: stwx r0,r7,r3
3: mtcrf 0x01,r5
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
bf cr7*4+0,1f
38: lwz r0,0(r4)
39: lwz r9,4(r4)
addi r4,r4,8
84: stw r0,0(r3)
85: stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
40: lwz r0,0(r4)
addi r4,r4,4
86: stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
41: lhz r0,0(r4)
addi r4,r4,2
87: sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
42: lbz r0,0(r4)
88: stb r0,0(r3)
4: li r3,0
blr
/*
* exception handlers follow
* we have to return the number of bytes not copied
* for an exception on a load, we set the rest of the destination to 0
*/
136:
137:
add r3,r3,r7
b 1f
130:
131:
addi r3,r3,8
120:
122:
124:
125:
126:
127:
128:
129:
133:
addi r3,r3,8
121:
132:
addi r3,r3,8
123:
134:
135:
138:
139:
140:
141:
142:
/*
* here we have had a fault on a load and r3 points to the first
* unmodified byte of the destination
*/
1: ld r6,-24(r1)
ld r4,-16(r1)
ld r5,-8(r1)
subf r6,r6,r3
add r4,r4,r6
subf r5,r6,r5 /* #bytes left to go */
/*
* first see if we can copy any more bytes before hitting another exception
*/
mtctr r5
43: lbz r0,0(r4)
addi r4,r4,1
89: stb r0,0(r3)
addi r3,r3,1
bdnz 43b
li r3,0 /* huh? all copied successfully this time? */
blr
/*
* here we have trapped again, need to clear ctr bytes starting at r3
*/
143: mfctr r5
li r0,0
mr r4,r3
mr r3,r5 /* return the number of bytes not copied */
1: andi. r9,r4,7
beq 3f
90: stb r0,0(r4)
addic. r5,r5,-1
addi r4,r4,1
bne 1b
blr
3: srdi r9,r5,3
andi. r5,r5,7
mtctr r9
91: std r0,0(r4)
addi r4,r4,8
bdnz 91b
beqlr
mtctr r5
92: stb r0,0(r4)
addi r4,r4,1
bdnz 92b
blr
/*
* exception handlers for stores: we just need to work
* out how many bytes weren't copied
*/
182:
183:
add r3,r3,r7
b 1f
180:
addi r3,r3,8
171:
177:
addi r3,r3,8
170:
172:
176:
178:
addi r3,r3,4
185:
addi r3,r3,4
173:
174:
175:
179:
181:
184:
186:
187:
188:
189:
1:
ld r6,-24(r1)
ld r5,-8(r1)
add r6,r6,r5
subf r3,r3,r6 /* #bytes not copied */
190:
191:
192:
blr /* #bytes not copied in r3 */
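As a user-space analogue of the contract these handlers implement (return the number of bytes not copied, and zero the destination tail on a load fault), here is a toy model; the names and the simulated fault are invented for the example:

#include <stdio.h>
#include <string.h>

/* Toy model only: copy until a simulated "fault", zero the rest of the
 * destination, and return the number of bytes not copied. */
static size_t toy_copy(char *dst, const char *src, size_t len, size_t fault_at)
{
	size_t done = fault_at < len ? fault_at : len;

	memcpy(dst, src, done);
	memset(dst + done, 0, len - done);	/* clear the tail */
	return len - done;			/* bytes not copied */
}

int main(void)
{
	char dst[8];
	size_t left = toy_copy(dst, "abcdefgh", 8, 5);

	printf("not copied: %zu, tail zeroed: %d\n", left, dst[7] == 0);
	return 0;
}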
.section __ex_table,"a"
.align 3
.llong 20b,120b
.llong 21b,121b
.llong 70b,170b
.llong 22b,122b
.llong 71b,171b
.llong 72b,172b
.llong 23b,123b
.llong 73b,173b
.llong 74b,174b
.llong 75b,175b
.llong 24b,124b
.llong 25b,125b
.llong 26b,126b
.llong 27b,127b
.llong 28b,128b
.llong 29b,129b
.llong 30b,130b
.llong 31b,131b
.llong 32b,132b
.llong 76b,176b
.llong 33b,133b
.llong 77b,177b
.llong 78b,178b
.llong 79b,179b
.llong 80b,180b
.llong 34b,134b
.llong 35b,135b
.llong 81b,181b
.llong 36b,136b
.llong 82b,182b
.llong 37b,137b
.llong 83b,183b
.llong 38b,138b
.llong 39b,139b
.llong 84b,184b
.llong 85b,185b
.llong 40b,140b
.llong 86b,186b
.llong 41b,141b
.llong 87b,187b
.llong 42b,142b
.llong 88b,188b
.llong 43b,143b
.llong 89b,189b
.llong 90b,190b
.llong 91b,191b
.llong 92b,192b
.text
/*
* Routine to copy a whole page of data, optimized for POWER4.
* On POWER4 it is more than 50% faster than the simple loop
* above (following the .Ldst_aligned label) but it runs slightly
* slower on POWER3.
*/
.Lcopy_page:
std r31,-32(1)
std r30,-40(1)
std r29,-48(1)
std r28,-56(1)
std r27,-64(1)
std r26,-72(1)
std r25,-80(1)
std r24,-88(1)
std r23,-96(1)
std r22,-104(1)
std r21,-112(1)
std r20,-120(1)
li r5,4096/32 - 1
addi r3,r3,-8
li r0,5
0: addi r5,r5,-24
mtctr r0
20: ld r22,640(4)
21: ld r21,512(4)
22: ld r20,384(4)
23: ld r11,256(4)
24: ld r9,128(4)
25: ld r7,0(4)
26: ld r25,648(4)
27: ld r24,520(4)
28: ld r23,392(4)
29: ld r10,264(4)
30: ld r8,136(4)
31: ldu r6,8(4)
cmpwi r5,24
1:
32: std r22,648(3)
33: std r21,520(3)
34: std r20,392(3)
35: std r11,264(3)
36: std r9,136(3)
37: std r7,8(3)
38: ld r28,648(4)
39: ld r27,520(4)
40: ld r26,392(4)
41: ld r31,264(4)
42: ld r30,136(4)
43: ld r29,8(4)
44: std r25,656(3)
45: std r24,528(3)
46: std r23,400(3)
47: std r10,272(3)
48: std r8,144(3)
49: std r6,16(3)
50: ld r22,656(4)
51: ld r21,528(4)
52: ld r20,400(4)
53: ld r11,272(4)
54: ld r9,144(4)
55: ld r7,16(4)
56: std r28,664(3)
57: std r27,536(3)
58: std r26,408(3)
59: std r31,280(3)
60: std r30,152(3)
61: stdu r29,24(3)
62: ld r25,664(4)
63: ld r24,536(4)
64: ld r23,408(4)
65: ld r10,280(4)
66: ld r8,152(4)
67: ldu r6,24(4)
bdnz 1b
68: std r22,648(3)
69: std r21,520(3)
70: std r20,392(3)
71: std r11,264(3)
72: std r9,136(3)
73: std r7,8(3)
74: addi r4,r4,640
75: addi r3,r3,648
bge 0b
mtctr r5
76: ld r7,0(4)
77: ld r8,8(4)
78: ldu r9,16(4)
3:
79: ld r10,8(4)
80: std r7,8(3)
81: ld r7,16(4)
82: std r8,16(3)
83: ld r8,24(4)
84: std r9,24(3)
85: ldu r9,32(4)
86: stdu r10,32(3)
bdnz 3b
4:
87: ld r10,8(4)
88: std r7,8(3)
89: std r8,16(3)
90: std r9,24(3)
91: std r10,32(3)
9: ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
ld r24,-88(1)
ld r25,-80(1)
ld r26,-72(1)
ld r27,-64(1)
ld r28,-56(1)
ld r29,-48(1)
ld r30,-40(1)
ld r31,-32(1)
li r3,0
blr
/*
* on an exception, reset to the beginning and jump back into the
* standard __copy_tofrom_user
*/
100: ld r3,-24(r1)
ld r4,-24(r1)
li r5,4096
b .Ldst_aligned
.section __ex_table,"a"
.align 3
.llong 20b,100b
.llong 21b,100b
.llong 22b,100b
.llong 23b,100b
.llong 24b,100b
.llong 25b,100b
.llong 26b,100b
.llong 27b,100b
.llong 28b,100b
.llong 29b,100b
.llong 30b,100b
.llong 31b,100b
.llong 32b,100b
.llong 33b,100b
.llong 34b,100b
.llong 35b,100b
.llong 36b,100b
.llong 37b,100b
.llong 38b,100b
.llong 39b,100b
.llong 40b,100b
.llong 41b,100b
.llong 42b,100b
.llong 43b,100b
.llong 44b,100b
.llong 45b,100b
.llong 46b,100b
.llong 47b,100b
.llong 48b,100b
.llong 49b,100b
.llong 50b,100b
.llong 51b,100b
.llong 52b,100b
.llong 53b,100b
.llong 54b,100b
.llong 55b,100b
.llong 56b,100b
.llong 57b,100b
.llong 58b,100b
.llong 59b,100b
.llong 60b,100b
.llong 61b,100b
.llong 62b,100b
.llong 63b,100b
.llong 64b,100b
.llong 65b,100b
.llong 66b,100b
.llong 67b,100b
.llong 68b,100b
.llong 69b,100b
.llong 70b,100b
.llong 71b,100b
.llong 72b,100b
.llong 73b,100b
.llong 74b,100b
.llong 75b,100b
.llong 76b,100b
.llong 77b,100b
.llong 78b,100b
.llong 79b,100b
.llong 80b,100b
.llong 81b,100b
.llong 82b,100b
.llong 83b,100b
.llong 84b,100b
.llong 85b,100b
.llong 86b,100b
.llong 87b,100b
.llong 88b,100b
.llong 89b,100b
.llong 90b,100b
.llong 91b,100b
/*
* arch/ppc64/lib/memcpy.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include "../kernel/ppc_asm.h"
.align 7
_GLOBAL(memcpy)
mtcrf 0x01,r5
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
andi. r6,r6,7
dcbt 0,r4
blt cr1,.Lshort_copy
bne .Ldst_unaligned
.Ldst_aligned:
andi. r0,r4,7
addi r3,r3,-16
bne .Lsrc_unaligned
srdi r7,r5,4
ld r9,0(r4)
addi r4,r4,-8
mtctr r7
andi. r5,r5,7
bf cr7*4+0,2f
addi r3,r3,8
addi r4,r4,8
mr r8,r9
blt cr1,3f
1: ld r9,8(r4)
std r8,8(r3)
2: ldu r8,16(r4)
stdu r9,16(r3)
bdnz 1b
3: std r8,8(r3)
beqlr
addi r3,r3,16
ld r9,8(r4)
.Ldo_tail:
bf cr7*4+1,1f
rotldi r9,r9,32
stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
rotldi r9,r9,16
sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
rotldi r9,r9,8
stb r9,0(r3)
3: blr
.Lsrc_unaligned:
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpdi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,0f
ld r9,0(r4) # 3+2n loads, 2+2n stores
ld r0,8(r4)
sld r6,r9,r10
ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
or r7,r7,r6
blt cr6,4f
ld r0,8(r4)
# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
b 2f
0: ld r0,0(r4) # 4+2n loads, 3+2n stores
ldu r9,8(r4)
sld r8,r0,r10
addi r3,r3,-8
blt cr6,5f
ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
addi r3,r3,16
beq cr6,3f
# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1: or r7,r7,r6
ld r0,8(r4)
std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
bdnz 1b
3: std r12,8(r3)
or r7,r7,r6
4: std r7,16(r3)
5: srd r12,r9,r11
or r12,r8,r12
std r12,24(r3)
beqlr
cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
blt cr1,.Ldo_tail
ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
b .Ldo_tail
.Ldst_unaligned:
mtcrf 0x01,r6 # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
cmpldi r1,r5,16
bf cr7*4+3,1f
lbz r0,0(r4)
stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
lhzx r0,r7,r4
sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
lwzx r0,r7,r4
stwx r0,r7,r3
3: mtcrf 0x01,r5
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
bf cr7*4+0,1f
lwz r0,0(r4)
lwz r9,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
lbz r0,0(r4)
stb r0,0(r3)
4: blr
......@@ -12,61 +12,6 @@
#include <asm/processor.h>
#include <asm/errno.h>
#define CACHE_LINE_SIZE 128
#define LG_CACHE_LINE_SIZE 7
#define MAX_COPY_PREFETCH 1
#define COPY_16_BYTES \
lwz r7,4(r4); \
lwz r8,8(r4); \
lwz r9,12(r4); \
lwzu r10,16(r4); \
stw r7,4(r6); \
stw r8,8(r6); \
stw r9,12(r6); \
stwu r10,16(r6)
#define COPY_16_BYTES_WITHEX(n) \
8 ## n ## 0: \
lwz r7,4(r4); \
8 ## n ## 1: \
lwz r8,8(r4); \
8 ## n ## 2: \
lwz r9,12(r4); \
8 ## n ## 3: \
lwzu r10,16(r4); \
8 ## n ## 4: \
stw r7,4(r6); \
8 ## n ## 5: \
stw r8,8(r6); \
8 ## n ## 6: \
stw r9,12(r6); \
8 ## n ## 7: \
stwu r10,16(r6)
#define COPY_16_BYTES_EXCODE(n) \
9 ## n ## 0: \
addi r5,r5,-(16 * n); \
b 104f; \
9 ## n ## 1: \
addi r5,r5,-(16 * n); \
b 105f; \
.section __ex_table,"a"; \
.align 3; \
.llong 8 ## n ## 0b,9 ## n ## 0b; \
.llong 8 ## n ## 1b,9 ## n ## 0b; \
.llong 8 ## n ## 2b,9 ## n ## 0b; \
.llong 8 ## n ## 3b,9 ## n ## 0b; \
.llong 8 ## n ## 4b,9 ## n ## 1b; \
.llong 8 ## n ## 5b,9 ## n ## 1b; \
.llong 8 ## n ## 6b,9 ## n ## 1b; \
.llong 8 ## n ## 7b,9 ## n ## 1b; \
.text
CACHELINE_BYTES = CACHE_LINE_SIZE
LG_CACHELINE_BYTES = LG_CACHE_LINE_SIZE
CACHELINE_MASK = (CACHE_LINE_SIZE-1)
_GLOBAL(strcpy)
addi r5,r3,-1
addi r4,r4,-1
......@@ -148,48 +93,7 @@ _GLOBAL(memset)
_GLOBAL(memmove)
cmplw 0,r3,r4
bgt .backwards_memcpy
/* fall through */
_GLOBAL(memcpy)
srwi. r7,r5,3
addi r6,r3,-4
addi r4,r4,-4
beq 2f /* if less than 8 bytes to do */
andi. r0,r6,3 /* get dest word aligned */
mtctr r7
bne 5f
1: lwz r7,4(r4)
lwzu r8,8(r4)
stw r7,4(r6)
stwu r8,8(r6)
bdnz 1b
andi. r5,r5,7
2: cmplwi 0,r5,4
blt 3f
lwzu r0,4(r4)
addi r5,r5,-4
stwu r0,4(r6)
3: cmpwi 0,r5,0
beqlr
mtctr r5
addi r4,r4,3
addi r6,r6,3
4: lbzu r0,1(r4)
stbu r0,1(r6)
bdnz 4b
blr
5: subfic r0,r0,4
mtctr r0
6: lbz r7,4(r4)
addi r4,r4,1
stb r7,4(r6)
addi r6,r6,1
bdnz 6b
subf r5,r0,r5
rlwinm. r7,r5,32-3,3,31
beq 2b
mtctr r7
b 1b
b .memcpy
_GLOBAL(backwards_memcpy)
rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
......@@ -253,195 +157,6 @@ _GLOBAL(memchr)
2: li r3,0
blr
_GLOBAL(__copy_tofrom_user)
addi r4,r4,-4
addi r6,r3,-4
neg r0,r3
andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
beq 58f
cmplw 0,r5,r0 /* is this more than total to do? */
blt 63f /* if not much to do */
andi. r8,r0,3 /* get it word-aligned first */
mtctr r8
beq+ 61f
70: lbz r9,4(r4) /* do some bytes */
71: stb r9,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 70b
61: subf r5,r0,r5
srwi. r0,r0,2
mtctr r0
beq 58f
72: lwzu r9,4(r4) /* do some words */
73: stwu r9,4(r6)
bdnz 72b
58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
clrlwi r5,r5,32-LG_CACHELINE_BYTES
li r11,4
beq 63f
/* Here we decide how far ahead to prefetch the source */
#if MAX_COPY_PREFETCH > 1
/* Heuristically, for large transfers we prefetch
MAX_COPY_PREFETCH cachelines ahead. For small transfers
we prefetch 1 cacheline ahead. */
cmpwi r0,MAX_COPY_PREFETCH
li r7,1
li r3,4
ble 111f
li r7,MAX_COPY_PREFETCH
111: mtctr r7
112: dcbt r3,r4
addi r3,r3,CACHELINE_BYTES
bdnz 112b
#else /* MAX_COPY_PREFETCH == 1 */
li r3,CACHELINE_BYTES + 4
dcbt r11,r4
#endif /* MAX_COPY_PREFETCH */
mtctr r0
53:
dcbt r3,r4
dcbz r11,r6
/* had to move these to keep extable in order */
.section __ex_table,"a"
.align 3
.llong 70b,100f
.llong 71b,101f
.llong 72b,102f
.llong 73b,103f
.llong 53b,105f
.text
/* the main body of the cacheline loop */
COPY_16_BYTES_WITHEX(0)
#if CACHE_LINE_SIZE >= 32
COPY_16_BYTES_WITHEX(1)
#if CACHE_LINE_SIZE >= 64
COPY_16_BYTES_WITHEX(2)
COPY_16_BYTES_WITHEX(3)
#if CACHE_LINE_SIZE >= 128
COPY_16_BYTES_WITHEX(4)
COPY_16_BYTES_WITHEX(5)
COPY_16_BYTES_WITHEX(6)
COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
bdnz 53b
63: srwi. r0,r5,2
mtctr r0
beq 64f
30: lwzu r0,4(r4)
31: stwu r0,4(r6)
bdnz 30b
64: andi. r0,r5,3
mtctr r0
beq+ 65f
40: lbz r0,4(r4)
41: stb r0,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 40b
65: li r3,0
blr
/* read fault, initial single-byte copy */
100: li r4,0
b 90f
/* write fault, initial single-byte copy */
101: li r4,1
90: subf r5,r8,r5
li r3,0
b 99f
/* read fault, initial word copy */
102: li r4,0
b 91f
/* write fault, initial word copy */
103: li r4,1
91: li r3,2
b 99f
/*
* this stuff handles faults in the cacheline loop and branches to either
* 104f (if in read part) or 105f (if in write part), after updating r5
*/
COPY_16_BYTES_EXCODE(0)
#if CACHE_LINE_SIZE >= 32
COPY_16_BYTES_EXCODE(1)
#if CACHE_LINE_SIZE >= 64
COPY_16_BYTES_EXCODE(2)
COPY_16_BYTES_EXCODE(3)
#if CACHE_LINE_SIZE >= 128
COPY_16_BYTES_EXCODE(4)
COPY_16_BYTES_EXCODE(5)
COPY_16_BYTES_EXCODE(6)
COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif
/* read fault in cacheline loop */
104: li r4,0
b 92f
/* fault on dcbz (effectively a write fault) */
/* or write fault in cacheline loop */
105: li r4,1
92: li r3,LG_CACHELINE_BYTES
b 99f
/* read fault in final word loop */
108: li r4,0
b 93f
/* write fault in final word loop */
109: li r4,1
93: andi. r5,r5,3
li r3,2
b 99f
/* read fault in final byte loop */
110: li r4,0
b 94f
/* write fault in final byte loop */
111: li r4,1
94: li r5,0
li r3,0
/*
* At this stage the number of bytes not copied is
* r5 + (ctr << r3), and r4 is 0 for read or 1 for write.
*/
99: mfctr r0
slw r3,r0,r3
add r3,r3,r5
cmpwi 0,r4,0
bne 120f
/* for read fault, clear out the destination: r3 bytes starting at 4(r6) */
srwi. r0,r3,2
li r9,0
mtctr r0
beq 113f
112: stwu r9,4(r6)
bdnz 112b
113: andi. r0,r3,3
mtctr r0
beq 120f
114: stb r9,4(r6)
addi r6,r6,1
bdnz 114b
120: blr
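A worked example of the "r5 + (ctr << r3)" bookkeeping described above, with illustrative register values only:

#include <stdio.h>

int main(void)
{
	/* Fault in the cacheline loop: r3 holds LG_CACHELINE_BYTES (7
	 * for a 128-byte line), ctr holds the whole cachelines still to
	 * copy, and r5 holds the leftover bytes after the loop. */
	unsigned long r5 = 20, ctr = 3, shift = 7;
	unsigned long not_copied = r5 + (ctr << shift);

	printf("bytes not copied: %lu\n", not_copied);	/* 20 + 384 = 404 */
	return 0;
}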
.section __ex_table,"a"
.align 3
.llong 30b,108b
.llong 31b,109b
.llong 40b,110b
.llong 41b,111b
.llong 112b,120b
.llong 114b,120b
.text
_GLOBAL(__clear_user)
addi r6,r3,-4
li r3,0
......
......@@ -38,16 +38,11 @@
#include <asm/ppcdebug.h>
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
extern void (*debugger)(struct pt_regs *);
extern void (*debugger_fault_handler)(struct pt_regs *);
extern int (*debugger_dabr_match)(struct pt_regs *);
#ifdef CONFIG_DEBUG_KERNEL
int debugger_kernel_faults = 1;
#endif
extern void die_if_kernel(char *, struct pt_regs *, long);
void bad_page_fault(struct pt_regs *, unsigned long);
void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
void bad_page_fault(struct pt_regs *, unsigned long, int);
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
......@@ -71,7 +66,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if (regs->trap == 0x400)
error_code &= 0x48200000;
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
#ifdef CONFIG_DEBUG_KERNEL
if (debugger_fault_handler && (regs->trap == 0x300 ||
regs->trap == 0x380)) {
debugger_fault_handler(regs);
......@@ -83,10 +78,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if (debugger_dabr_match(regs))
return;
}
#endif /* CONFIG_XMON || CONFIG_KGDB */
#endif
if (in_interrupt() || mm == NULL) {
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGSEGV);
return;
}
down_read(&mm->mmap_sem);
......@@ -159,7 +154,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
return;
}
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGSEGV);
return;
/*
......@@ -176,7 +171,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
printk("VM: killing process %s\n", current->comm);
if (user_mode(regs))
do_exit(SIGKILL);
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGKILL);
return;
do_sigbus:
......@@ -187,7 +182,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
info.si_addr = (void *)address;
force_sig_info (SIGBUS, &info, current);
if (!user_mode(regs))
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGBUS);
}
/*
......@@ -196,8 +191,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* in traps.c.
*/
void
bad_page_fault(struct pt_regs *regs, unsigned long address)
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
extern void die(const char *, struct pt_regs *, long);
unsigned long fixup;
/* Are we prepared to handle this fault? */
......@@ -207,13 +204,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address)
}
/* kernel has accessed a bad area */
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
#ifdef CONFIG_DEBUG_KERNEL
if (debugger_kernel_faults)
debugger(regs);
#endif
print_backtrace( (unsigned long *)regs->gpr[1] );
panic("kernel access of bad area pc %lx lr %lx address %lX tsk %s/%d",
regs->nip,regs->link,address,current->comm,current->pid);
die("Kernel access of bad area", regs, sig);
}
......@@ -85,14 +85,10 @@ extern struct task_struct *current_set[NR_CPUS];
void mm_init_ppc64(void);
unsigned long *pmac_find_end_of_memory(void);
extern unsigned long *find_end_of_memory(void);
extern pgd_t ioremap_dir[];
pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;
static void map_io_page(unsigned long va, unsigned long pa, int flags);
extern void die_if_kernel(char *,struct pt_regs *,long);
unsigned long klimit = (unsigned long)_end;
......@@ -246,20 +242,17 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
void
flush_tlb_mm(struct mm_struct *mm)
{
if (mm->map_count) {
struct vm_area_struct *mp;
spin_lock(&mm->page_table_lock);
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
__flush_tlb_range(mm, mp->vm_start, mp->vm_end);
} else {
/* MIKEC: It is not clear why this is needed */
/* paulus: it is needed to clear out stale HPTEs
* when an address space (represented by an mm_struct)
* is being destroyed. */
__flush_tlb_range(mm, USER_START, USER_END);
}
/* XXX are there races with checking cpu_vm_mask? - Anton */
mm->cpu_vm_mask = 0;
spin_unlock(&mm->page_table_lock);
}
/*
......@@ -399,47 +392,40 @@ __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
flush_hash_range(context, i, local);
}
void __init free_initmem(void)
void free_initmem(void)
{
unsigned long a;
unsigned long num_freed_pages = 0;
#define FREESEC(START,END,CNT) do { \
a = (unsigned long)(&START); \
for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
set_page_count(mem_map+MAP_NR(a), 1); \
free_page(a); \
CNT++; \
} \
} while (0)
FREESEC(__init_begin,__init_end,num_freed_pages);
printk ("Freeing unused kernel memory: %ldk init\n",
PGTOKB(num_freed_pages));
unsigned long addr;
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
printk ("Freeing unused kernel memory: %dk freed\n",
(&__init_end - &__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
unsigned long xstart = start;
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(mem_map + MAP_NR(start));
set_page_count(mem_map+MAP_NR(start), 1);
ClearPageReserved(virt_to_page(start));
set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
printk ("Freeing initrd memory: %ldk freed\n", (end - xstart) >> 10);
}
#endif
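
The rewritten free_initmem() and free_initrd_mem() above drop the FREESEC() macro and the mem_map + MAP_NR() indexing in favour of virt_to_page(). A minimal sketch of the shared idiom, assuming the 2.5-era page APIs used in the hunk; the helper name free_reserved_range() is made up for illustration and is not part of this commit:

#include <linux/mm.h>
#include <linux/swap.h>		/* totalram_pages */
#include <asm/page.h>

/* Sketch only: free_reserved_range() mirrors the loop now used by both
 * free_initmem() and free_initrd_mem(): clear PG_reserved, reset the
 * reference count, and hand each page back to the allocator.
 */
static unsigned long free_reserved_range(unsigned long start, unsigned long end)
{
	unsigned long addr, freed = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		ClearPageReserved(page);	/* replaces clear_bit(PG_reserved, ...) */
		set_page_count(page, 1);	/* free_page() expects a count of 1 */
		free_page(addr);
		totalram_pages++;		/* keep the global page count honest */
		freed++;
	}

	return freed;				/* number of pages released */
}
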
/*
* Do very early mm setup.
*/
void __init mm_init_ppc64(void) {
void __init mm_init_ppc64(void)
{
struct paca_struct *lpaca;
unsigned long guard_page, index;
......@@ -467,8 +453,6 @@ void __init mm_init_ppc64(void) {
ppc_md.progress("MM:exit", 0x211);
}
/*
* Initialize the bootmem system and give it all the memory we
* have available.
......@@ -582,11 +566,11 @@ void __init mem_init(void)
for (addr = (unsigned long)sysmap;
addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
addr += PAGE_SIZE)
SetPageReserved(mem_map + MAP_NR(addr));
SetPageReserved(virt_to_page(addr));
for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
addr += PAGE_SIZE) {
if (!PageReserved(mem_map + MAP_NR(addr)))
if (!PageReserved(virt_to_page(addr)))
continue;
if (addr < (ulong) etext)
codepages++;
......@@ -665,6 +649,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
* fault has been handled by updating a PTE in the linux page tables.
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
* This must always be called with the mm->page_table_lock held
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
pte_t pte)
......
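
The added comment records a locking rule: update_mmu_cache() preloads an HPTE for a Linux PTE that was just written, so it must run under mm->page_table_lock. A hedged sketch of the calling pattern (generic mm code for illustration; set_pte_and_preload() is not a function from this commit):

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative only: shows the documented locking rule, not code taken
 * from this tree.  The caller holds mm->page_table_lock across both
 * the PTE update and the HPTE preload.
 */
static void set_pte_and_preload(struct vm_area_struct *vma, unsigned long addr,
				pte_t *ptep, pte_t entry)
{
	struct mm_struct *mm = vma->vm_mm;

	spin_lock(&mm->page_table_lock);
	set_pte(ptep, entry);			/* update the Linux page table */
	update_mmu_cache(vma, addr, entry);	/* preload the matching hash table entry */
	spin_unlock(&mm->page_table_lock);
}
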
......@@ -309,7 +309,6 @@ xmon(struct pt_regs *excp)
std 29,232(%0)\n\
std 30,240(%0)\n\
std 31,248(%0)" : : "b" (&regs));
printf("xmon called\n");
/* Fetch the link reg for this stack frame.
NOTE: the prev printf fills in the lr. */
regs.nip = regs.link = ((unsigned long *)(regs.gpr[1]))[2];
......
......@@ -26,7 +26,7 @@
#define O_DIRECTORY 040000 /* must be a directory */
#define O_NOFOLLOW 0100000 /* don't follow links */
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint - currently ignored */
#define O_DIRECT 0400000 /* direct disk access hint */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
......
......@@ -26,7 +26,7 @@ typedef struct {
#endif
unsigned int __local_bh_count;
unsigned int __syscall_count;
unsigned long __unused;
unsigned long idle_timestamp;
struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;
......
......@@ -50,7 +50,7 @@ static inline void isync(void)
#define HMT_LOW "\tor 1,1,1 # low priority\n"
#define HMT_MEDIUM "\tor 2,2,2 # medium priority\n"
#define HMT_MEDIUM "\tor 3,3,3 # high priority\n"
#define HMT_HIGH "\tor 3,3,3 # high priority\n"
#else
#define HMT_low() do { } while(0)
#define HMT_medium() do { } while(0)
......
......@@ -211,18 +211,11 @@ static inline void _tlbie(unsigned long va, int large)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbiel(unsigned long va, int large)
static inline void _tlbiel(unsigned long va)
{
asm volatile("ptesync": : :"memory");
if (large) {
asm volatile("clrldi %0,%0,16\n\
tlbiel %0,1" : : "r"(va) : "memory");
} else {
asm volatile("clrldi %0,%0,16\n\
tlbiel %0,0" : : "r"(va) : "memory");
}
tlbiel %0" : : "r"(va) : "memory");
asm volatile("ptesync": : :"memory");
}
......
......@@ -85,7 +85,8 @@ struct paca_struct {
u8 xProcEnabled; /* 1=soft enabled 0x78 */
u8 xHrdIntCount; /* Count of active hardware interrupts 0x79 */
u8 prof_enabled; /* 1=iSeries profiling enabled 0x7A */
u8 resv1[5]; /* 0x7B-0x7F */
u8 stab_cache_pointer;
u8 resv1[4]; /* 0x7B-0x7F */
/*=====================================================================================
* CACHE_LINE_2 0x0080 - 0x00FF
......
......@@ -215,11 +215,12 @@ static inline int get_order(unsigned long size)
#define __a2p(x) ((void *) absolute_to_phys(x))
#define __a2v(x) ((void *) __va(absolute_to_phys(x)))
#define virt_to_page(kaddr) (mem_map+(__pa((unsigned long)kaddr) >> PAGE_SHIFT))
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(pfn) ((unsigned long)((pfn) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
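
The page.h hunk above replaces the MAP_NR()-based helpers with pfn-centric macros. A small sketch of how a kernel virtual address, its page frame number, and its struct page relate under the new definitions; check_addr() is a hypothetical helper, not part of the commit:

#include <asm/page.h>

/* Illustrative round trip between the three representations using the
 * macros added above.  Returns 1 when everything is consistent.
 */
static int check_addr(unsigned long kaddr)
{
	unsigned long pfn = __pa(kaddr) >> PAGE_SHIFT;

	if (!pfn_valid(pfn))			/* i.e. pfn < max_mapnr */
		return 0;

	/* pfn_to_page() and page_to_pfn() are inverses ... */
	if (page_to_pfn(pfn_to_page(pfn)) != pfn)
		return 0;

	/* ... and virt_to_page() is just pfn_to_page() on __pa(kaddr) */
	return virt_to_page(kaddr) == pfn_to_page(pfn) && virt_addr_valid(kaddr);
}
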
......@@ -155,7 +155,7 @@
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */
/* shift to put page number into pte */
......@@ -167,22 +167,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
* mk_pte_phys takes a physical address as input
*
* mk_pte takes a (struct page *) as input
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_phys(physpage,pgprot) \
({ \
pte_t pte; \
pte_val(pte) = (((physpage)<<(PTE_SHIFT-PAGE_SHIFT)) | pgprot_val(pgprot)); \
pte; \
})
#define mk_pte(page,pgprot) \
#define pfn_pte(pfn,pgprot) \
({ \
pte_t pte; \
pte_val(pte) = ((unsigned long)((page) - mem_map) << PTE_SHIFT) | \
pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) | \
pgprot_val(pgprot); \
pte; \
})
......@@ -195,8 +187,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
/* pte_clear moved to later in this file */
#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) (mem_map+pte_pagenr(x))
#define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = (__ba_to_bpn(ptep)))
#define pmd_none(pmd) (!pmd_val(pmd))
......
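
With mk_pte() now layered on pfn_pte() and pte_page() on pte_pfn(), building a PTE and decoding it both pass through the page frame number. A hedged sketch; make_and_check() is an illustrative helper only:

#include <asm/page.h>
#include <asm/pgtable.h>

/* Illustrative only: confirms that mk_pte() and pte_page() are inverse
 * operations routed through the pfn under the new macros.
 */
static int make_and_check(struct page *page, pgprot_t prot)
{
	pte_t pte = mk_pte(page, prot);	/* pfn_pte(page_to_pfn(page), prot) */

	return pte_pfn(pte) == page_to_pfn(page) &&	/* frame number recovered */
	       pte_page(pte) == page;			/* struct page recovered */
}
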
......@@ -97,7 +97,7 @@
#define FPSCR_VX 0x20000000 /* Invalid operation summary */
#define FPSCR_OX 0x10000000 /* Overflow exception summary */
#define FPSCR_UX 0x08000000 /* Underflow exception summary */
#define FPSCR_ZX 0x04000000 /* Zero-devide exception summary */
#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
#define FPSCR_XX 0x02000000 /* Inexact exception summary */
#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
......@@ -651,9 +651,7 @@ struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
struct pt_regs *regs; /* Pointer to saved register state */
mm_segment_t fs; /* for get_fs() validation */
signed long last_syscall;
double fpr[32]; /* Complete floating point set */
unsigned long fpscr_pad; /* fpr ... fpscr must be contiguous */
unsigned long fpscr; /* Floating point status */
};
......@@ -663,8 +661,8 @@ struct thread_struct {
INIT_SP, /* ksp */ \
(struct pt_regs *)INIT_SP - 1, /* regs */ \
KERNEL_DS, /*fs*/ \
0, /* last_syscall */ \
{0}, 0, 0 \
{0}, /* fpr */ \
0 /* fpscr */ \
}
/*
......
......@@ -55,9 +55,31 @@
#define smp_wmb() __asm__ __volatile__("": : :"memory")
#endif /* CONFIG_SMP */
#ifdef CONFIG_DEBUG_KERNEL
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
extern int (*debugger_iabr_match)(struct pt_regs *regs);
extern int (*debugger_dabr_match)(struct pt_regs *regs);
extern void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#define debugger(regs) do { } while (0)
#define debugger_bpt(regs) 0
#define debugger_sstep(regs) 0
#define debugger_iabr_match(regs) 0
#define debugger_dabr_match(regs) 0
#define debugger_fault_handler ((void (*)(struct pt_regs *))0)
#endif
#ifdef CONFIG_XMON
extern void xmon_irq(int, void *, struct pt_regs *);
extern void xmon(struct pt_regs *excp);
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
extern void print_backtrace(unsigned long *);
......
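
The system.h hunk turns the debugger entry points into function pointers when CONFIG_DEBUG_KERNEL is set, with no-op fallbacks otherwise, and declares the matching xmon handlers. A hedged sketch of how a debugger such as xmon might be wired to these hooks; install_xmon_hooks() is illustrative and not taken from this commit:

#include <linux/init.h>
#include <asm/system.h>

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_XMON)
/* Illustrative registration only; the real wiring lives elsewhere in
 * the tree.  Each hook declared in system.h is pointed at the
 * corresponding xmon handler.
 */
static void __init install_xmon_hooks(void)
{
	debugger = xmon;			/* entered on traps and faults */
	debugger_bpt = xmon_bpt;		/* breakpoint exceptions */
	debugger_sstep = xmon_sstep;		/* single-step exceptions */
	debugger_iabr_match = xmon_iabr_match;	/* instruction address breakpoint */
	debugger_dabr_match = xmon_dabr_match;	/* data address breakpoint */
}
#endif
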
......@@ -65,6 +65,8 @@ static inline struct thread_info *current_thread_info(void)
#endif /* __ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x4000000
/*
* thread information flag bit numbers
*/
......
......@@ -18,7 +18,6 @@
<< (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
extern cycles_t cacheflush_time;
static inline cycles_t get_cycles(void)
{
......
......@@ -230,6 +230,9 @@
#define __NR_lremovexattr 219
#define __NR_fremovexattr 220
#define __NR_futex 221
#define __NR_tux 222
#define __NR_sched_setaffinity 223
#define __NR_sched_getaffinity 224
#if 0
/* Remind paulus to add these into ppc32 */
......