Commit e58595ec authored by Ralf Bächle, committed by Linus Torvalds

[PATCH] mips: generic MIPS updates

Update the generic MIPS code.  Highlights are oprofile support for MIPS,
initially for the PMC-Sierra RM9000.  We are also taking a significantly more
aggressive approach to the TLB exception handlers, which are now generated at
runtime and provide up to a 20% speedup on certain microbenchmarks.
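
The idea behind the runtime generation, in rough C terms (a minimal sketch for
illustration only; the helper names and the feature flag are hypothetical, not
the actual tlbex implementation):

	/* Illustrative sketch: assemble the handler instruction by
	 * instruction into a buffer, emitting only what the boot CPU
	 * needs, then install it at the exception vector. */
	#include <stdint.h>

	static uint32_t handler[32];
	static int pos;

	static void emit(uint32_t insn)
	{
		handler[pos++] = insn;	/* one encoded MIPS instruction */
	}

	static void build_tlb_refill(int cpu_needs_hazard_nops)
	{
		emit(0x401a4000);		/* mfc0 k0, c0_badvaddr */
		if (cpu_needs_hazard_nops)
			emit(0x00000000);	/* nop, only on CPUs that need it */
		emit(0x42000018);		/* eret */
		/* ... copy handler[] over the refill vector, flush the I-cache ... */
	}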

From: Yoichi Yuasa <yuasa@hh.iij4u.or.jp>

This patch fixes restore_sigcontext on MIPS.
Signed-off-by: Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9ec0d34b
@@ -673,6 +673,8 @@ libs-$(CONFIG_MIPS64) += arch/mips/lib-64/
 core-y += arch/mips/kernel/ arch/mips/mm/ arch/mips/math-emu/
+drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/
 ifdef CONFIG_LASAT
 rom.bin rom.sw: vmlinux
 	$(call descend,arch/mips/lasat/image,$@)
@@ -744,36 +746,16 @@ define filechk_gen-asm-offset.h
 	 echo "#endif /* _ASM_OFFSET_H */" )
 endef
-define filechk_gen-asm-reg.h
-	(set -e; \
-	 echo "#ifndef _ASM_REG_H"; \
-	 echo "#define _ASM_REG_H"; \
-	 echo "/*"; \
-	 echo " * DO NOT MODIFY."; \
-	 echo " *"; \
-	 echo " * This file was generated by arch/$(ARCH)/Makefile"; \
-	 echo " *"; \
-	 echo " */"; \
-	 echo ""; \
-	 sed -ne "/^@@@/s///p"; \
-	 echo "#endif /* _ASM_REG_H */" )
-endef
-prepare: include/asm-$(ARCH)/offset.h \
-	 include/asm-$(ARCH)/reg.h
+prepare: include/asm-$(ARCH)/offset.h
 arch/$(ARCH)/kernel/offset.s: include/asm include/linux/version.h \
 	include/config/MARKER
 include/asm-$(ARCH)/offset.h: arch/$(ARCH)/kernel/offset.s
 	$(call filechk,gen-asm-offset.h)
-include/asm-$(ARCH)/reg.h: arch/$(ARCH)/kernel/reg.s
-	$(call filechk,gen-asm-reg.h)
 CLEAN_FILES += include/asm-$(ARCH)/offset.h.tmp \
 	       include/asm-$(ARCH)/offset.h \
-	       include/asm-$(ARCH)/reg.h.tmp \
-	       include/asm-$(ARCH)/reg.h \
 	       vmlinux.32 \
 	       vmlinux.64 \
 	       vmlinux.ecoff
@@ -8,6 +8,9 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 	ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
 	time.o traps.o unaligned.o
+binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
+		    irix5sys.o sysirix.o
 ifdef CONFIG_MODULES
 obj-y				+= mips_ksyms.o module.o
 obj-$(CONFIG_MIPS32)		+= module-elf32.o
@@ -35,15 +38,16 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
 obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_NO_ISA)		+= dma-no-isa.o
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
+obj-$(CONFIG_IRQ_CPU_RM9K)	+= irq-rm9000.o
 obj-$(CONFIG_IRQ_MV64340)	+= irq-mv6434x.o
 obj-$(CONFIG_MIPS32)		+= scall32-o32.o
 obj-$(CONFIG_MIPS64)		+= scall64-64.o
-obj-$(CONFIG_BINFMT_IRIX)	+= irixelf.o irixioctl.o irixsig.o sysirix.o \
-				   irixinv.o
+obj-$(CONFIG_BINFMT_IRIX)	+= binfmt_irix.o
 obj-$(CONFIG_MIPS32_COMPAT)	+= ioctl32.o linux32.o signal32.o
 obj-$(CONFIG_MIPS32_N32)	+= binfmt_elfn32.o scall64-n32.o signal_n32.o
 obj-$(CONFIG_MIPS32_O32)	+= binfmt_elfo32.o scall64-o32.o ptrace32.o
......
/*
 * Copyright (C) 2003 Ralf Baechle
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Handler for RM9000 extended interrupts.  These are a non-standard
 * feature so we handle them separately from standard interrupts.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/system.h>

static int irq_base;

static inline void unmask_rm9k_irq(unsigned int irq)
{
	set_c0_intcontrol(0x1000 << (irq - irq_base));
}

static inline void mask_rm9k_irq(unsigned int irq)
{
	clear_c0_intcontrol(0x1000 << (irq - irq_base));
}

static inline void rm9k_cpu_irq_enable(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	unmask_rm9k_irq(irq);
	local_irq_restore(flags);
}

static void rm9k_cpu_irq_disable(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	mask_rm9k_irq(irq);
	local_irq_restore(flags);
}

static unsigned int rm9k_cpu_irq_startup(unsigned int irq)
{
	rm9k_cpu_irq_enable(irq);

	return 0;
}

#define rm9k_cpu_irq_shutdown	rm9k_cpu_irq_disable

/*
 * Performance counter interrupts are global on all processors.
 */
static void local_rm9k_perfcounter_irq_startup(void *args)
{
	unsigned int irq = (unsigned int) args;

	rm9k_cpu_irq_enable(irq);
}

static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
{
	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);

	return 0;
}

static void local_rm9k_perfcounter_irq_shutdown(void *args)
{
	unsigned int irq = (unsigned int) args;
	unsigned long flags;

	local_irq_save(flags);
	mask_rm9k_irq(irq);
	local_irq_restore(flags);
}

static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
{
	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
}

/*
 * While we ack the interrupt interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for rm9k_cpu_irq_end.
 */
static void rm9k_cpu_irq_ack(unsigned int irq)
{
	mask_rm9k_irq(irq);
}

static void rm9k_cpu_irq_end(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
		unmask_rm9k_irq(irq);
}

static hw_irq_controller rm9k_irq_controller = {
	"RM9000",
	rm9k_cpu_irq_startup,
	rm9k_cpu_irq_shutdown,
	rm9k_cpu_irq_enable,
	rm9k_cpu_irq_disable,
	rm9k_cpu_irq_ack,
	rm9k_cpu_irq_end,
};

static hw_irq_controller rm9k_perfcounter_irq = {
	"RM9000",
	rm9k_perfcounter_irq_startup,
	rm9k_perfcounter_irq_shutdown,
	rm9k_cpu_irq_enable,
	rm9k_cpu_irq_disable,
	rm9k_cpu_irq_ack,
	rm9k_cpu_irq_end,
};

unsigned int rm9000_perfcount_irq;

EXPORT_SYMBOL(rm9000_perfcount_irq);

void __init rm9k_cpu_irq_init(int base)
{
	int i;

	clear_c0_intcontrol(0x0000f000);	/* Mask all */

	for (i = base; i < base + 4; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].handler = &rm9k_irq_controller;
	}

	rm9000_perfcount_irq = base + 1;
	irq_desc[rm9000_perfcount_irq].handler = &rm9k_perfcounter_irq;

	irq_base = base;
}
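
For context, a board port would typically wire this up from its arch_init_irq()
along these lines (a hypothetical sketch, not part of this patch; the IRQ base
of 8 is an assumption):

	/* Hypothetical board code: route the four RM9000 extended
	 * interrupts to IRQs 8-11; rm9k_cpu_irq_init() then publishes
	 * rm9000_perfcount_irq = base + 1 for the oprofile driver. */
	void __init arch_init_irq(void)
	{
		mips_cpu_irq_init(0);	/* standard CP0 interrupts 0-7 */
		rm9k_cpu_irq_init(8);	/* RM9000 extended interrupts 8-11 */
	}
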
@@ -125,7 +125,7 @@ void __init init_IRQ(void)
 		irq_desc[i].action = NULL;
 		irq_desc[i].depth = 1;
 		irq_desc[i].handler = &no_irq_type;
-		irq_desc[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&irq_desc[i].lock);
 	}
 	arch_init_irq();
......
@@ -99,7 +99,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat *statbuf)
 }
 asmlinkage unsigned long
-sys32_mmap2(unsigned long addr, size_t len, unsigned long prot,
+sys32_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 	unsigned long flags, unsigned long fd, unsigned long pgoff)
 {
 	struct file * file = NULL;
......
@@ -5,9 +5,11 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
  * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
  */
+#include <linux/config.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <asm/checksum.h>
 #include <asm/pgtable.h>
@@ -60,3 +62,6 @@ EXPORT_SYMBOL(__strnlen_user_asm);
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(invalid_pte_table);
+#ifdef CONFIG_GENERIC_IRQ_PROBE
+EXPORT_SYMBOL(probe_irq_mask);
+#endif
@@ -2,7 +2,7 @@
 #include <linux/spinlock.h>
 static LIST_HEAD(dbe_list);
-static spinlock_t dbe_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dbe_lock);
 /* Given an address, look for it in the module exception tables. */
 const struct exception_table_entry *search_module_dbetables(unsigned long addr)
......
@@ -113,19 +113,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 	*childregs = *regs;
 	childregs->regs[7] = 0;	/* Clear error flag */
-#ifdef CONFIG_BINFMT_IRIX
+#if defined(CONFIG_BINFMT_IRIX)
 	if (current->personality != PER_LINUX) {
 		/* Under IRIX things are a little different. */
-		childregs->regs[2] = 0;
 		childregs->regs[3] = 1;
-		regs->regs[2] = p->pid;
 		regs->regs[3] = 0;
-	} else
+	}
 #endif
-	{
 	childregs->regs[2] = 0;	/* Child gets zero as return value */
 	regs->regs[2] = p->pid;
-	}
 	if (childregs->cp0_status & ST0_CU0) {
 		childregs->regs[28] = (unsigned long) ti;
@@ -153,6 +149,36 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
 {
 	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
+	return 1;
+}
+
+void dump_regs(elf_greg_t *gp, struct pt_regs *regs)
+{
+	int i;
+
+	for (i = 0; i < EF_R0; i++)
+		gp[i] = 0;
+	gp[EF_R0] = 0;
+	for (i = 1; i <= 31; i++)
+		gp[EF_R0 + i] = regs->regs[i];
+	gp[EF_R26] = 0;
+	gp[EF_R27] = 0;
+	gp[EF_LO] = regs->lo;
+	gp[EF_HI] = regs->hi;
+	gp[EF_CP0_EPC] = regs->cp0_epc;
+	gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
+	gp[EF_CP0_STATUS] = regs->cp0_status;
+	gp[EF_CP0_CAUSE] = regs->cp0_cause;
+#ifdef EF_UNUSED0
+	gp[EF_UNUSED0] = 0;
+#endif
+}
+
+int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
+{
+	memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
 	return 1;
 }
@@ -263,7 +289,6 @@ arch_initcall(frame_info_init);
  */
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
-	extern void ret_from_fork(void);
 	struct thread_struct *t = &tsk->thread;
 	/* New born processes are a special case */
......
/*
 * offset.c: Calculate pt_regs and task_struct indices.
 *
 * Copyright (C) 1996 David S. Miller
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/ptrace.h>
#include <asm/processor.h>

#define text(t) __asm__("\n@@@" t)
#define _offset(type, member) ((unsigned long) &(((type *)NULL)->member))
#define index(string, ptr, member) \
	__asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)/sizeof(long)))
#define size(string, size) \
	__asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
#define linefeed text("")

void output_ptreg_defines(void)
{
	text("/* MIPS pt_regs indices. */");
	index("#define EF_R0 ", struct pt_regs, regs[0]);
	index("#define EF_R1 ", struct pt_regs, regs[1]);
	index("#define EF_R2 ", struct pt_regs, regs[2]);
	index("#define EF_R3 ", struct pt_regs, regs[3]);
	index("#define EF_R4 ", struct pt_regs, regs[4]);
	index("#define EF_R5 ", struct pt_regs, regs[5]);
	index("#define EF_R6 ", struct pt_regs, regs[6]);
	index("#define EF_R7 ", struct pt_regs, regs[7]);
	index("#define EF_R8 ", struct pt_regs, regs[8]);
	index("#define EF_R9 ", struct pt_regs, regs[9]);
	index("#define EF_R10 ", struct pt_regs, regs[10]);
	index("#define EF_R11 ", struct pt_regs, regs[11]);
	index("#define EF_R12 ", struct pt_regs, regs[12]);
	index("#define EF_R13 ", struct pt_regs, regs[13]);
	index("#define EF_R14 ", struct pt_regs, regs[14]);
	index("#define EF_R15 ", struct pt_regs, regs[15]);
	index("#define EF_R16 ", struct pt_regs, regs[16]);
	index("#define EF_R17 ", struct pt_regs, regs[17]);
	index("#define EF_R18 ", struct pt_regs, regs[18]);
	index("#define EF_R19 ", struct pt_regs, regs[19]);
	index("#define EF_R20 ", struct pt_regs, regs[20]);
	index("#define EF_R21 ", struct pt_regs, regs[21]);
	index("#define EF_R22 ", struct pt_regs, regs[22]);
	index("#define EF_R23 ", struct pt_regs, regs[23]);
	index("#define EF_R24 ", struct pt_regs, regs[24]);
	index("#define EF_R25 ", struct pt_regs, regs[25]);
	index("#define EF_R26 ", struct pt_regs, regs[26]);
	index("#define EF_R27 ", struct pt_regs, regs[27]);
	index("#define EF_R28 ", struct pt_regs, regs[28]);
	index("#define EF_R29 ", struct pt_regs, regs[29]);
	index("#define EF_R30 ", struct pt_regs, regs[30]);
	index("#define EF_R31 ", struct pt_regs, regs[31]);
	linefeed;
	index("#define EF_LO ", struct pt_regs, lo);
	index("#define EF_HI ", struct pt_regs, hi);
	linefeed;
	index("#define EF_EPC ", struct pt_regs, cp0_epc);
	index("#define EF_BVADDR ", struct pt_regs, cp0_badvaddr);
	index("#define EF_STATUS ", struct pt_regs, cp0_status);
	index("#define EF_CAUSE ", struct pt_regs, cp0_cause);
	linefeed;
	size("#define EF_SIZE ", struct pt_regs);
	linefeed;
}
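
The "@@@" markers survive into the generated assembly, where the sed pipeline
in filechk_gen-asm-offset.h above extracts them, so the generated
include/asm-$(ARCH)/offset.h ends up looking roughly like this (the values are
illustrative only; real ones depend on the ABI and configuration):

	#ifndef _ASM_OFFSET_H
	#define _ASM_OFFSET_H
	/* MIPS pt_regs indices. */
	#define EF_R0 6		/* index of regs[0] in longs, for example */
	#define EF_R1 7
	/* ... remaining registers, EF_LO/EF_HI, CP0 fields, EF_SIZE ... */
	#endif /* _ASM_OFFSET_H */
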
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
  * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
  */
 #include <linux/config.h>
 #include <linux/errno.h>
@@ -32,26 +33,30 @@ NESTED(handle_sys, PT_SIZE, sp)
 	lw	t1, PT_EPC(sp)		# skip syscall on return
+#if defined(CONFIG_BINFMT_IRIX)
 	sltiu	t0, v0, MAX_SYSCALL_NO + 1 # check syscall number
+#else
+	subu	v0, v0, __NR_O32_Linux	# check syscall number
+	sltiu	t0, v0, __NR_O32_Linux_syscalls + 1
+#endif
 	addiu	t1, 4			# skip to next instruction
 	sw	t1, PT_EPC(sp)
 	beqz	t0, illegal_syscall
-	/* XXX Put both in one cacheline, should save a bit. */
-	sll	t0, v0, 2
-	lw	t2, sys_call_table(t0)	# syscall routine
-	lbu	t3, sys_narg_table(v0)	# number of arguments
-	beqz	t2, illegal_syscall;
-	subu	t0, t3, 5		# 5 or more arguments?
+	sll	t0, v0, 3
+	la	t1, sys_call_table
+	addu	t1, t0
+	lw	t2, (t1)		# syscall routine
+	lw	t3, 4(t1)		# >= 0 if we need stack arguments
+	beqz	t2, illegal_syscall
 	sw	a3, PT_R26(sp)		# save a3 for syscall restarting
-	bgez	t0, stackargs
+	bgez	t3, stackargs
 stack_done:
-	sw	a3, PT_R26(sp)		# save for syscall restart
-	LONG_L	t0, TI_FLAGS($28)	# syscall tracing enabled?
+	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
 	li	t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
-	and	t0, t1, t0
+	and	t0, t1
 	bnez	t0, syscall_trace_entry	# -> yes
 	jalr	t2			# Do The Real Thing (TM)
@@ -70,9 +75,9 @@ o32_syscall_exit:
 	local_irq_disable		# make sure need_resched and
 					# signals dont change between
 					# sampling and return
-	LONG_L	a2, TI_FLAGS($28)	# current->work
+	lw	a2, TI_FLAGS($28)	# current->work
 	li	t0, _TIF_ALLWORK_MASK
-	and	t0, a2, t0
+	and	t0, a2
 	bnez	t0, o32_syscall_exit_work
 	j	restore_partial
@@ -116,49 +121,48 @@ syscall_trace_entry:
  */
 stackargs:
 	lw	t0, PT_R29(sp)		# get old user stack pointer
-	subu	t3, 4
-	sll	t1, t3, 2		# stack valid?
-	addu	t1, t0			# end address
-	or	t0, t1
-	bltz	t0, bad_stack		# -> sp is bad
-	lw	t0, PT_R29(sp)		# get old user stack pointer
-	PTR_LA	t1, 4f			# copy 1 to 3 arguments
-	sll	t3, t3, 4
-	subu	t1, t3
-	jr	t1
-	/* Ok, copy the args from the luser stack to the kernel stack */
 	/*
-	 * I know Ralf doesn't like nops but this avoids code
-	 * duplication for R3000 targets (and this is the
-	 * only place where ".set reorder" doesn't help).
-	 * Harald.
+	 * We intentionally keep the kernel stack a little below the top of
+	 * userspace so we don't have to do a slower byte accurate check here.
+	 */
+	lw	t5, TI_ADDR_LIMIT($28)
+	addu	t4, t0, 32
+	and	t5, t4
+	bltz	t5, bad_stack		# -> sp is bad
+	/* Ok, copy the args from the luser stack to the kernel stack.
+	 * t3 is the precomputed number of instruction bytes needed to
+	 * load or store arguments 6-8.
 	 */
+	la	t1, 5f			# load up to 3 arguments
+	subu	t1, t3
+1:	lw	t5, 16(t0)		# argument #5 from usp
 	.set	push
 	.set	noreorder
 	.set	nomacro
-1:	lw	t1, 24(t0)		# argument #7 from usp
-	nop
-	sw	t1, 24(sp)
-	nop
-2:	lw	t1, 20(t0)		# argument #5 from usp
-	nop
-	sw	t1, 20(sp)
-	nop
-3:	lw	t1, 16(t0)		# argument #5 from usp
-	nop
-	sw	t1, 16(sp)
-	nop
-4:	.set	pop
-	j	stack_done		# go back
+	jr	t1
+	 addiu	t1, 6f - 5f
+2:	lw	t8, 28(t0)		# argument #8 from usp
+3:	lw	t7, 24(t0)		# argument #7 from usp
+4:	lw	t6, 20(t0)		# argument #6 from usp
+5:	jr	t1
+	 sw	t5, 16(sp)		# argument #5 to ksp
+	sw	t8, 28(sp)		# argument #8 to ksp
+	sw	t7, 24(sp)		# argument #7 to ksp
+	sw	t6, 20(sp)		# argument #6 to ksp
+6:	j	stack_done		# go back
 	 nop
+	.set	pop
 	.section __ex_table,"a"
 	PTR	1b,bad_stack
 	PTR	2b,bad_stack
 	PTR	3b,bad_stack
+	PTR	4b,bad_stack
 	.previous
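
The new bounds check above trades a byte-accurate limit test for one AND and
one branch.  In C terms it does roughly this (a sketch, assuming a 32-bit
kernel where the thread's addr_limit mask marks the 0x80000000 user/kernel
boundary):

	/* usp + 32 spans stack arguments 5-8; if that range ANDed with
	 * the address-limit mask has the sign bit set, the user sp is
	 * in kernel space and the slow bad_stack path is taken. */
	static int stack_args_ok(unsigned long usp, unsigned long addr_limit)
	{
		return (long)((usp + 32) & addr_limit) >= 0;
	}
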
 /*
@@ -177,7 +181,7 @@ bad_stack:
  * The system call does not exist in this kernel
  */
 illegal_syscall:
-	li	v0, ENOSYS		# error
+	li	v0, -ENOSYS		# error
 	sw	v0, PT_R2(sp)
 	li	t0, 1			# set error flag
 	sw	t0, PT_R7(sp)
@@ -238,12 +242,12 @@ illegal_syscall:
 	sw	v0, PT_R2(sp)		# result
 	/* Success, so skip usual error handling garbage. */
-	LONG_L	a2, TI_FLAGS($28)	# syscall tracing enabled?
+	lw	a2, TI_FLAGS($28)	# syscall tracing enabled?
 	li	t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
 	and	t0, a2, t0
 	bnez	t0, 1f
-	b	o32_syscall_exit
+	j	o32_syscall_exit
 1:	SAVE_STATIC
 	move	a0, sp
@@ -269,69 +273,49 @@ bad_alignment:
 	END(sys_sysmips)
 LEAF(sys_syscall)
-	lw	t0, PT_R29(sp)		# user sp
-	sltu	v0, a0, __NR_O32_Linux + __NR_O32_Linux_syscalls + 1
-	beqz	v0, enosys
-	sll	v0, a0, 2
-	la	v1, sys_syscall
-	lw	t2, sys_call_table(v0)	# function pointer
-	lbu	t4, sys_narg_table(a0)	# number of arguments
-	li	v0, -EINVAL
-	beq	t2, v1, out		# do not recurse
-	beqz	t2, enosys		# null function pointer?
-	andi	v0, t0, 0x3		# unaligned stack pointer?
-	bnez	v0, sigsegv
-	addu	v0, t0, 16		# v0 = usp + 16
-	addu	t1, v0, 12		# 3 32-bit arguments
-	lw	v1, TI_ADDR_LIMIT($28)
-	or	v0, v0, t1
-	and	v1, v1, v0
-	bltz	v1, efault
+#if defined(CONFIG_BINFMT_IRIX)
+	sltiu	v0, a0, MAX_SYSCALL_NO + 1 # check syscall number
+#else
+	subu	t0, a0, __NR_O32_Linux	# check syscall number
+	sltiu	v0, t0, __NR_O32_Linux_syscalls + 1
+#endif
+	sll	t1, t0, 3
+	beqz	v0, einval
+	lw	t2, sys_call_table(t1)	# syscall routine
+#if defined(CONFIG_BINFMT_IRIX)
+	li	v1, 4000		# nr of sys_syscall
+#else
+	li	v1, 4000 - __NR_O32_Linux # index of sys_syscall
+#endif
+	beq	t0, v1, einval		# do not recurse
+	/* Some syscalls like execve get their arguments from struct pt_regs
+	   and claim zero arguments in the syscall table.  Thus we have to
+	   assume the worst case and shuffle around all potential arguments.
+	   If you want performance, don't use indirect syscalls. */
 	move	a0, a1			# shift argument registers
 	move	a1, a2
 	move	a2, a3
-1:	lw	a3, 16(t0)
-2:	lw	t3, 20(t0)
-3:	lw	t4, 24(t0)
-	.section __ex_table, "a"
-	.word	1b, efault
-	.word	2b, efault
-	.word	3b, efault
-	.previous
-	sw	t3, 16(sp)		# put into new stackframe
-	sw	t4, 20(sp)
-	bnez	t4, 1f			# zero arguments?
-	addu	a0, sp, 32		# then pass sp in a0
-1:
-	sw	t3, 16(sp)
-	sw	v1, 20(sp)
+	lw	a3, 16(sp)
+	lw	t4, 20(sp)
+	lw	t5, 24(sp)
+	lw	t6, 28(sp)
+	sw	t4, 16(sp)
+	sw	t5, 20(sp)
+	sw	t6, 24(sp)
+	sw	a0, PT_R4(sp)		# .. and push back a0 - a3, some
+	sw	a1, PT_R5(sp)		# syscalls expect them there
+	sw	a2, PT_R6(sp)
+	sw	a3, PT_R7(sp)
+	sw	a3, PT_R26(sp)		# update a3 for syscall restarting
 	jr	t2
 	/* Unreached */
-enosys:	li	v0, -ENOSYS
-	b	out
-sigsegv:
-	li	a0, _SIGSEGV
-	move	a1, $28
-	jal	force_sig
-	/* Fall through */
-efault:	li	v0, -EFAULT
-out:	jr	ra
+einval:	li	v0, -EINVAL
+	jr	ra
 	END(sys_syscall)
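
What this entry point serves, seen from userspace (a hypothetical illustration,
not from this patch; on o32 the indirect syscall number is 4000, and glibc's
syscall() wrapper can reach it where SYS_syscall is defined):

	/* Indirect syscall: sys_syscall invokes the syscall named by its
	 * first argument, with the remaining arguments shifted down one
	 * register -- exactly the shuffling performed above. */
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		/* equivalent to write(1, "hi\n", 3), but via sys_syscall */
		syscall(SYS_syscall, SYS_write, 1, "hi\n", 3);
		return 0;
	}
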
 	.macro	fifty ptr, nargs, from=1, to=50
@@ -349,12 +333,14 @@ out:	jr	ra
 	.endm
 	.macro	syscalltable
+#if defined(CONFIG_BINFMT_IRIX)
 	mille	sys_ni_syscall	0	/* 0 - 999 SVR4 flavour */
-#include "irix5sys.h"			/* 1000 - 1999 32-bit IRIX */
+	mille	sys_ni_syscall	0	/* 1000 - 1999 32-bit IRIX */
 	mille	sys_ni_syscall	0	/* 2000 - 2999 BSD43 flavour */
 	mille	sys_ni_syscall	0	/* 3000 - 3999 POSIX flavour */
+#endif
-	sys	sys_syscall	0	/* 4000 */
+	sys	sys_syscall	8	/* 4000 */
 	sys	sys_exit	1
 	sys	sys_fork	0
 	sys	sys_read	3
@@ -405,7 +391,7 @@ out:	jr	ra
 	sys	sys_ni_syscall	0	/* was signal(2) */
 	sys	sys_geteuid	0
 	sys	sys_getegid	0	/* 4050 */
-	sys	sys_acct	0
+	sys	sys_acct	1
 	sys	sys_umount	2
 	sys	sys_ni_syscall	0
 	sys	sys_ioctl	3
@@ -485,7 +471,7 @@ out:	jr	ra
 	sys	sys_init_module	5
 	sys	sys_delete_module 1
 	sys	sys_ni_syscall	0	/* 4130 was get_kernel_syms */
-	sys	sys_quotactl	0
+	sys	sys_quotactl	4
 	sys	sys_getpgid	1
 	sys	sys_fchdir	1
 	sys	sys_bdflush	2
@@ -506,7 +492,7 @@ out:	jr	ra
 	sys	sys_sysmips	4
 	sys	sys_ni_syscall	0	/* 4150 */
 	sys	sys_getsid	1
-	sys	sys_fdatasync	0
+	sys	sys_fdatasync	1
 	sys	sys_sysctl	1
 	sys	sys_mlock	2
 	sys	sys_munlock	2	/* 4155 */
@@ -640,19 +626,16 @@ out:	jr	ra
 	.endm
+	/* We pre-compute the number of _instruction_ bytes needed to
+	   load or store the arguments 6-8.  Negative values are ignored. */
 	.macro	sys function, nargs
 	PTR	\function
+	LONG	(\nargs << 2) - (5 << 2)
 	.endm
 	.align	3
-sys_call_table:
+	.type	sys_call_table,@object
+EXPORT(sys_call_table)
 	syscalltable
 	.size	sys_call_table, . - sys_call_table
-	.macro	sys function, nargs
-	.byte	\nargs
-	.endm
-sys_narg_table:
-	syscalltable
-	.size	sys_narg_table, . - sys_narg_table
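
The per-entry LONG encodes (nargs - 5) * 4: the number of instruction bytes the
stackargs path must execute to move arguments beyond the fifth, and a negative
value when everything fits in registers.  A quick standalone check of that
arithmetic (an illustration, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int nargs;

		/* 4 bytes (one MIPS instruction) per stack argument:
		 * nargs=5 -> 0, nargs=8 -> 12; nargs<5 -> negative, ignored */
		for (nargs = 0; nargs <= 8; nargs++)
			printf("nargs=%d -> %d bytes\n",
			       nargs, (nargs << 2) - (5 << 2));
		return 0;
	}
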
@@ -53,8 +53,10 @@ NESTED(handle_sys64, PT_SIZE, sp)
 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting
-	LONG_L	t0, TI_FLAGS($28)
-	bltz	t0, syscall_trace_entry	# syscall tracing enabled?
+	li	t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+	LONG_L	t0, TI_FLAGS($28)	# syscall tracing enabled?
+	and	t0, t1, t0
+	bnez	t0, syscall_trace_entry
 	jalr	t2			# Do The Real Thing (TM)
@@ -112,7 +114,7 @@ syscall_trace_entry:
 illegal_syscall:
 	/* This also isn't a 64-bit syscall, throw an error. */
-	li	v0, ENOSYS		# error
+	li	v0, -ENOSYS		# error
 	sd	v0, PT_R2(sp)
 	li	t0, 1			# set error flag
 	sd	t0, PT_R7(sp)
@@ -173,8 +175,8 @@ illegal_syscall:
 	sd	v0, PT_R2(sp)		# result
 	/* Success, so skip usual error handling garbage. */
-	LONG_L	a2, TI_FLAGS($28)	# syscall tracing enabled?
 	li	t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+	LONG_L	a2, TI_FLAGS($28)	# syscall tracing enabled?
 	and	t0, a2, t0
 	bnez	t0, 1f
......
@@ -6,6 +6,7 @@
  * Copyright (C) 1995 - 2000, 2001 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
  *
  * Hairy, the userspace application uses a different argument passing
  * convention than the kernel, so we have to translate things from o32
@@ -43,6 +44,8 @@ NESTED(handle_sys, PT_SIZE, sp)
 	RESTORE_ALL
 #endif
+	/* We don't want to stumble over broken sign extensions from
+	   userland. O32 does never use the upper half. */
 	sll	a0, a0, 0
 	sll	a1, a1, 0
 	sll	a2, a2, 0
@@ -68,11 +71,13 @@ NESTED(handle_sys, PT_SIZE, sp)
 1:	lw	a4, 16(t0)		# argument #5 from usp
 2:	lw	a5, 20(t0)		# argument #6 from usp
 3:	lw	a6, 24(t0)		# argument #7 from usp
+4:	lw	a7, 28(t0)		# argument #8 from usp (for indirect syscalls)
 	.section __ex_table,"a"
 	PTR	1b, bad_stack
 	PTR	2b, bad_stack
 	PTR	3b, bad_stack
+	PTR	4b, bad_stack
 	.previous
 	li	t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
@@ -91,7 +96,7 @@ NESTED(handle_sys, PT_SIZE, sp)
 	sd	v0, PT_R0(sp)		# flag for syscall restarting
 1:	sd	v0, PT_R2(sp)		# result
-FEXPORT(o32_syscall_exit)
+o32_syscall_exit:
 	local_irq_disable		# make need_resched and
 					# signals dont change between
 					# sampling and return
@@ -109,12 +114,12 @@ o32_syscall_exit_work:
 trace_a_syscall:
 	SAVE_STATIC
-	sd	a4, PT_R8(sp)
+	sd	a4, PT_R8(sp)		# Save argument registers
 	sd	a5, PT_R9(sp)
 	sd	a6, PT_R10(sp)
-	sd	a7, PT_R11(sp)
+	sd	a7, PT_R11(sp)		# For indirect syscalls
-	move	s0, t2
+	move	s0, t2			# Save syscall pointer
 	move	a0, sp
 	li	a1, 0
 	jal	do_syscall_trace
@@ -125,7 +130,8 @@ trace_a_syscall:
 	ld	a3, PT_R7(sp)
 	ld	a4, PT_R8(sp)
 	ld	a5, PT_R9(sp)
-	ld	a6, PT_R10(sp)		# For indirect syscalls
+	ld	a6, PT_R10(sp)
+	ld	a7, PT_R11(sp)		# For indirect syscalls
 	jalr	s0
 	li	t0, -EMAXERRNO - 1	# error?
@@ -162,40 +168,17 @@ not_o32_scall:
 #else
 	j	handle_sys64
 #endif
-illegal_syscall:
-	/* This also isn't a 64-bit syscall, throw an error. */
-	li	v0, ENOSYS		# error
-	sd	v0, PT_R2(sp)
-	li	t0, 1			# set error flag
-	sd	t0, PT_R7(sp)
-	j	o32_syscall_exit
 	END(handle_sys)
 LEAF(sys32_syscall)
-	ld	t0, PT_R29(sp)		# user sp
 	sltu	v0, a0, __NR_O32_Linux + __NR_O32_Linux_syscalls + 1
-	beqz	v0, enosys
+	beqz	v0, einval
 	dsll	v0, a0, 3
-	dla	v1, sys32_syscall
 	ld	t2, (sys_call_table - (__NR_O32_Linux * 8))(v0)
-	li	v0, -EINVAL
-	beq	t2, v1, out		# do not recurse
-	beqz	t2, enosys		# null function pointer?
-	andi	v0, t0, 0x3		# unaligned stack pointer?
-	bnez	v0, sigsegv
-	daddiu	v0, t0, 16		# v0 = usp + 16
-	daddu	t1, v0, 12		# 3 32-bit arguments
-	ld	v1, TI_ADDR_LIMIT($28)
-	or	v0, v0, t1
-	and	v1, v1, v0
-	bnez	v1, efault
+	li	v1, 4000		# indirect syscall number
+	beq	a0, v1, einval		# do not recurse
 	move	a0, a1			# shift argument registers
 	move	a1, a2
@@ -203,25 +186,21 @@ LEAF(sys32_syscall)
 	move	a3, a4
 	move	a4, a5
 	move	a5, a6
+	move	a6, a7
+	sd	a0, PT_R4(sp)		# ... and push back a0 - a3, some
+	sd	a1, PT_R5(sp)		# syscalls expect them there
+	sd	a2, PT_R6(sp)
+	sd	a3, PT_R7(sp)
+	sd	a3, PT_R26(sp)		# update a3 for syscall restarting
 	jr	t2
 	/* Unreached */
-enosys:	li	v0, -ENOSYS
-	b	out
-sigsegv:
-	li	a0, _SIGSEGV
-	move	a1, $28
-	jal	force_sig
-	/* Fall through */
-efault:	li	v0, -EFAULT
-out:	jr	ra
+einval:	li	v0, -EINVAL
+	jr	ra
 	END(sys32_syscall)
 	.align	3
-	.type	sys_call_table,@object;
+	.type	sys_call_table,@object
 sys_call_table:
 	PTR	sys32_syscall		/* 4000 */
 	PTR	sys_exit
......
@@ -15,7 +15,6 @@
  * indicate that some process(es) are waiting for the semaphore.
  */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -64,7 +63,7 @@ static inline int __sem_update_count(struct semaphore *sem, int incr)
 	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
 	: "r" (incr), "m" (sem->count));
 	} else {
-		static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
+		static DEFINE_SPINLOCK(semaphore_lock);
 		unsigned long flags;
 		spin_lock_irqsave(&semaphore_lock, flags);
......
@@ -281,12 +281,12 @@ static inline void bootmem_init(void)
 		initrd_reserve_bootmem = 1;
 	} else {
 		unsigned long tmp;
-		unsigned long *initrd_header;
+		u32 *initrd_header;
-		tmp = ((reserved_end + PAGE_SIZE-1) & PAGE_MASK) - 8;
+		tmp = ((reserved_end + PAGE_SIZE-1) & PAGE_MASK) - sizeof(u32) * 2;
 		if (tmp < reserved_end)
 			tmp += PAGE_SIZE;
-		initrd_header = (unsigned long *)tmp;
+		initrd_header = (u32 *)tmp;
 		if (initrd_header[0] == 0x494E5244) {
 			initrd_start = (unsigned long)&initrd_header[2];
 			initrd_end = initrd_start + initrd_header[1];
@@ -425,8 +425,10 @@ static inline void bootmem_init(void)
 	if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
 		printk("initrd extends beyond end of memory "
 		       "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
-		       sizeof(long) * 2, CPHYSADDR(initrd_end),
-		       sizeof(long) * 2, PFN_PHYS(max_low_pfn));
+		       sizeof(long) * 2,
+		       (unsigned long long)CPHYSADDR(initrd_end),
+		       sizeof(long) * 2,
+		       (unsigned long long)PFN_PHYS(max_low_pfn));
 		initrd_start = initrd_end = 0;
 		initrd_reserve_bootmem = 0;
 	}
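
The magic word 0x494E5244 is simply "INRD" in ASCII; the header is two
consecutive 32-bit words, magic then size, placed just below the page-aligned
initrd.  A small sketch of that layout (an illustration, not the kernel code;
switching the header to u32 keeps it the same size on 32-bit and 64-bit
kernels, which is the point of this hunk):

	#include <stdint.h>

	struct initrd_header {
		uint32_t magic;	/* 0x494E5244, "INRD" in ASCII */
		uint32_t size;	/* initrd size in bytes */
	};
	/* The payload starts right after the two header words, which is
	 * what &initrd_header[2] computes in the kernel's u32 view. */
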
@@ -441,10 +443,21 @@ static inline void resource_init(void)
 {
 	int i;
+#if defined(CONFIG_MIPS64) && !defined(CONFIG_BUILD_ELF64)
+	/*
+	 * The 64bit code in 32bit object format trick can't represent
+	 * 64bit wide relocations for linker script symbols.
+	 */
+	code_resource.start = CPHYSADDR(&_text);
+	code_resource.end = CPHYSADDR(&_etext) - 1;
+	data_resource.start = CPHYSADDR(&_etext);
+	data_resource.end = CPHYSADDR(&_edata) - 1;
+#else
 	code_resource.start = virt_to_phys(&_text);
 	code_resource.end = virt_to_phys(&_etext) - 1;
 	data_resource.start = virt_to_phys(&_etext);
 	data_resource.end = virt_to_phys(&_edata) - 1;
+#endif
 	/*
 	 * Request address space for all standard RAM.
......
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */

static inline int
setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
	int err = 0;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
	err |= __put_user(regs->cp0_status, &sc->sc_status);

#define save_gp_reg(i) do {						\
	err |= __put_user(regs->regs[i], &sc->sc_regs[i]);		\
} while(0)
	__put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
	save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
	save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
	save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
	save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
	save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
	save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
	save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
	save_gp_reg(31);
#undef save_gp_reg

	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	err |= __put_user(regs->cp0_cause, &sc->sc_cause);
	err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);

	err |= __put_user(!!used_math(), &sc->sc_used_math);
	if (!used_math())
		goto out;

	/*
	 * Save FPU state to signal context.  Signal handler will "inherit"
	 * current FPU state.
	 */
	preempt_disable();

	if (!is_fpu_owner()) {
		own_fpu();
		restore_fp(current);
	}
	err |= save_fp_context(sc);

	preempt_enable();

out:
	return err;
}

static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
	int err = 0;
	unsigned int used_math;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);

#define restore_gp_reg(i) do {						\
	err |= __get_user(regs->regs[i], &sc->sc_regs[i]);		\
} while(0)
	restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
	restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
	restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
	restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
	restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
	restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
	restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
	restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
	restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
	restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
	restore_gp_reg(31);
#undef restore_gp_reg

	err |= __get_user(used_math, &sc->sc_used_math);
	conditional_used_math(used_math);

	preempt_disable();

	if (used_math()) {
		/* restore fpu context if we have used it before */
		own_fpu();
		err |= restore_fp_context(sc);
	} else {
		/* signal handler may have used FPU.  Give it up. */
		lose_fpu();
	}

	preempt_enable();

	return err;
}

/*
 * Determine which stack to use..
 */
static inline void *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
	unsigned long sp, almask;

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * The FPU emulator may have its own trampoline active just
	 * above the user stack, 16 bytes before the next lowest
	 * 16-byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	/* This is the X/Open sanctioned signal stack switching. */
	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
		sp = current->sas_ss_sp + current->sas_ss_size;

	if (PLAT_TRAMPOLINE_STUFF_LINE)
		almask = ~(PLAT_TRAMPOLINE_STUFF_LINE - 1);
	else
		almask = ALMASK;

	return (void *)((sp - frame_size) & almask);
}
@@ -28,12 +28,15 @@
 #include <asm/sim.h>
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
+#include <asm/cpu-features.h>
+
+#include "signal-common.h"
 #define DEBUG_SIG	0
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-extern asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
+static int do_signal(sigset_t *oldset, struct pt_regs *regs);
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
@@ -151,53 +154,6 @@ asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
 	return do_sigaltstack(uss, uoss, usp);
 }
-asmlinkage int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
-{
-	int err = 0;
-	unsigned int used_math;
-	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-	err |= __get_user(regs->cp0_epc, &sc->sc_pc);
-	err |= __get_user(regs->hi, &sc->sc_mdhi);
-	err |= __get_user(regs->lo, &sc->sc_mdlo);
-#define restore_gp_reg(i) do {						\
-	err |= __get_user(regs->regs[i], &sc->sc_regs[i]);		\
-} while(0)
-	restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
-	restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
-	restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
-	restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
-	restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
-	restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
-	restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
-	restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
-	restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
-	restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
-	restore_gp_reg(31);
-#undef restore_gp_reg
-	err |= __get_user(used_math, &sc->sc_used_math);
-	conditional_used_math(used_math);
-	preempt_disable();
-	if (used_math()) {
-		/* restore fpu context if we have used it before */
-		own_fpu();
-		err |= restore_fp_context(sc);
-	} else {
-		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu();
-	}
-	preempt_enable();
-	return err;
-}
 #if PLAT_TRAMPOLINE_STUFF_LINE
 #define __tramp __attribute__((aligned(PLAT_TRAMPOLINE_STUFF_LINE)))
 #else
@@ -221,7 +177,9 @@ struct rt_sigframe {
 };
 #ifdef CONFIG_TRAD_SIGNALS
-asmlinkage void sys_sigreturn(struct pt_regs regs)
+save_static_function(sys_sigreturn);
+__attribute_used__ noinline static void
+_sys_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct sigframe *frame;
 	sigset_t blocked;
@@ -258,7 +216,9 @@ asmlinkage void sys_sigreturn(struct pt_regs regs)
 }
 #endif
-asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+save_static_function(sys_rt_sigreturn);
+__attribute_used__ noinline static void
+_sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct rt_sigframe *frame;
 	sigset_t set;
@@ -299,85 +259,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	force_sig(SIGSEGV, current);
 }
-inline int setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
-{
-	int err = 0;
-	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
-	err |= __put_user(regs->cp0_status, &sc->sc_status);
-#define save_gp_reg(i) do {						\
-	err |= __put_user(regs->regs[i], &sc->sc_regs[i]);		\
-} while(0)
-	__put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
-	save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
-	save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
-	save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
-	save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
-	save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
-	save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
-	save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
-	save_gp_reg(31);
-#undef save_gp_reg
-	err |= __put_user(regs->hi, &sc->sc_mdhi);
-	err |= __put_user(regs->lo, &sc->sc_mdlo);
-	err |= __put_user(regs->cp0_cause, &sc->sc_cause);
-	err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
-	err |= __put_user(!!used_math(), &sc->sc_used_math);
-	if (!used_math())
-		goto out;
-	/*
-	 * Save FPU state to signal context.  Signal handler will "inherit"
-	 * current FPU state.
-	 */
-	preempt_disable();
-	if (!is_fpu_owner()) {
-		own_fpu();
-		restore_fp(current);
-	}
-	err |= save_fp_context(sc);
-	preempt_enable();
-out:
-	return err;
-}
-/*
- * Determine which stack to use..
- */
-static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
-	size_t frame_size)
-{
-	unsigned long sp, almask;
-	/* Default to using normal stack */
-	sp = regs->regs[29];
-	/*
-	 * FPU emulator may have it's own trampoline active just
-	 * above the user stack, 16-bytes before the next lowest
-	 * 16 byte boundary.  Try to avoid trashing it.
-	 */
-	sp -= 32;
-	/* This is the X/Open sanctioned signal stack switching. */
-	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
-		sp = current->sas_ss_sp + current->sas_ss_size;
-	if (PLAT_TRAMPOLINE_STUFF_LINE)
-		almask = ~(PLAT_TRAMPOLINE_STUFF_LINE - 1);
-	else
-		almask = ALMASK;
-	return (void *)((sp - frame_size) & ~(PLAT_TRAMPOLINE_STUFF_LINE - 1));
-}
 #ifdef CONFIG_TRAD_SIGNALS
 static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
 	int signr, sigset_t *set)
@@ -396,8 +277,7 @@ static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
 	 * syscall
 	 */
 	if (PLAT_TRAMPOLINE_STUFF_LINE)
-		__builtin_memset(frame->sf_code, '0',
-				 PLAT_TRAMPOLINE_STUFF_LINE);
+		__clear_user(frame->sf_code, PLAT_TRAMPOLINE_STUFF_LINE);
 	err |= __put_user(0x24020000 + __NR_sigreturn, frame->sf_code + 0);
 	err |= __put_user(0x0000000c , frame->sf_code + 1);
 	flush_cache_sigtramp((unsigned long) frame->sf_code);
@@ -453,8 +333,7 @@ static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
 	 * syscall
 	 */
 	if (PLAT_TRAMPOLINE_STUFF_LINE)
-		__builtin_memset(frame->rs_code, '0',
-				 PLAT_TRAMPOLINE_STUFF_LINE);
+		__clear_user(frame->rs_code, PLAT_TRAMPOLINE_STUFF_LINE);
 	err |= __put_user(0x24020000 + __NR_rt_sigreturn, frame->rs_code + 0);
 	err |= __put_user(0x0000000c , frame->rs_code + 1);
 	flush_cache_sigtramp((unsigned long) frame->rs_code);
@@ -558,7 +437,7 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
 extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
 extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+static int do_signal(sigset_t *oldset, struct pt_regs *regs)
 {
 	struct k_sigaction ka;
 	siginfo_t info;
@@ -612,8 +491,6 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
 	return 0;
 }
-extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);
 /*
  * notification of userspace execution resumption
  * - triggered by current->work.notify_resume
......
@@ -37,7 +37,7 @@ typedef union sigval32 {
 	s32 sival_ptr;
 } sigval_t32;
-typedef struct compat_siginfo{
+typedef struct compat_siginfo {
 	int si_signo;
 	int si_code;
 	int si_errno;
@@ -106,7 +106,7 @@ typedef struct compat_siginfo{
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-extern asmlinkage int do_signal32(sigset_t *oldset, struct pt_regs *regs);
+extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
 /* 32-bit compatibility types */
@@ -192,6 +192,7 @@ static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t *ubuf)
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
 save_static_function(sys32_sigsuspend);
 __attribute_used__ noinline static int
 _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
@@ -333,8 +334,7 @@ asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
 	return ret;
 }
-static asmlinkage int restore_sigcontext32(struct pt_regs *regs,
-	struct sigcontext32 *sc)
+static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc)
 {
 	int err = 0;
 	__u32 used_math;
@@ -391,7 +391,7 @@ struct sigframe {
 struct rt_sigframe32 {
 	u32 rs_ass[4];		/* argument save space for o32 */
 	u32 rs_code[2];		/* signal trampoline */
-	struct compat_siginfo_t rs_info;
+	compat_siginfo_t rs_info;
 	struct ucontext32 rs_uc;
 };
@@ -442,7 +442,9 @@ int copy_siginfo_to_user32(compat_siginfo_t *to, siginfo_t *from)
 	return err;
 }
-asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+save_static_function(sys32_sigreturn);
+__attribute_used__ noinline static void
+_sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct sigframe *frame;
 	sigset_t blocked;
@@ -478,7 +480,9 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 	force_sig(SIGSEGV, current);
 }
-asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+save_static_function(sys32_rt_sigreturn);
+__attribute_used__ noinline static void
+_sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct rt_sigframe32 *frame;
 	sigset_t set;
@@ -761,7 +765,7 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
 	}
 }
-asmlinkage int do_signal32(sigset_t *oldset, struct pt_regs *regs)
+int do_signal32(sigset_t *oldset, struct pt_regs *regs)
 {
 	struct k_sigaction ka;
 	siginfo_t info;
......
@@ -35,9 +35,12 @@
 #include <asm/ucontext.h>
 #include <asm/system.h>
 #include <asm/fpu.h>
+#include <asm/cpu-features.h>
+
+#include "signal-common.h"
 /*
- * Including <asm/unistd.h would give use the 64-bit syscall numbers ...
+ * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
  */
 #define __NR_N32_rt_sigreturn		6211
 #define __NR_N32_restart_syscall	6214
@@ -59,17 +62,22 @@ struct ucontextn32 {
 	sigset_t uc_sigmask;	/* mask last for extensibility */
 };
+#if PLAT_TRAMPOLINE_STUFF_LINE
+#define __tramp __attribute__((aligned(PLAT_TRAMPOLINE_STUFF_LINE)))
+#else
+#define __tramp
+#endif
 struct rt_sigframe_n32 {
 	u32 rs_ass[4];			/* argument save space for o32 */
-	u32 rs_code[2];			/* signal trampoline */
-	struct siginfo rs_info;
+	u32 rs_code[2] __tramp;		/* signal trampoline */
+	struct siginfo rs_info __tramp;
 	struct ucontextn32 rs_uc;
 };
-extern asmlinkage int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc);
-extern int inline setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc);
-asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+save_static_function(sysn32_rt_sigreturn);
+__attribute_used__ noinline static void
+_sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct rt_sigframe_n32 *frame;
 	sigset_t set;
@@ -118,31 +126,6 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	force_sig(SIGSEGV, current);
 }
-/*
- * Determine which stack to use..
- */
-static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
-	size_t frame_size)
-{
-	unsigned long sp;
-	/* Default to using normal stack */
-	sp = regs->regs[29];
-	/*
-	 * FPU emulator may have it's own trampoline active just
-	 * above the user stack, 16-bytes before the next lowest
-	 * 16 byte boundary.  Try to avoid trashing it.
-	 */
-	sp -= 32;
-	/* This is the X/Open sanctioned signal stack switching. */
-	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
-		sp = current->sas_ss_sp + current->sas_ss_size;
-	return (void *)((sp - frame_size) & ALMASK);
-}
 void setup_rt_frame_n32(struct k_sigaction * ka,
 	struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
 {
@@ -160,6 +143,8 @@ void setup_rt_frame_n32(struct k_sigaction * ka,
 	 * li	v0, __NR_rt_sigreturn
 	 * syscall
 	 */
+	if (PLAT_TRAMPOLINE_STUFF_LINE)
+		__clear_user(frame->rs_code, PLAT_TRAMPOLINE_STUFF_LINE);
 	err |= __put_user(0x24020000 + __NR_N32_rt_sigreturn, frame->rs_code + 0);
 	err |= __put_user(0x0000000c , frame->rs_code + 1);
 	flush_cache_sigtramp((unsigned long) frame->rs_code);
......
@@ -18,7 +18,6 @@
  * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
  * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
  */
-#include <linux/config.h>
 #include <linux/cache.h>
 #include <linux/delay.h>
 #include <linux/init.h>
@@ -94,6 +93,7 @@ static void smp_tune_scheduling (void)
 }
 extern void __init calibrate_delay(void);
+extern ATTRIB_NORET void cpu_idle(void);
 /*
  * First C code run on the secondary CPUs after being started up by
@@ -123,7 +123,7 @@ asmlinkage void start_secondary(void)
 	cpu_idle();
 }
-spinlock_t smp_call_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(smp_call_lock);
 struct call_data_struct *call_data;
......
@@ -3,10 +3,11 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1995, 1996, 1997, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2001 MIPS Technologies, Inc.
  */
+#include <linux/a.out.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/mm.h>
@@ -66,11 +67,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	int do_color_align;
 	unsigned long task_size;
-#ifdef CONFIG_MIPS32
-	task_size = TASK_SIZE;
-#else
-	task_size = (current->thread.mflags & MF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
-#endif
+	task_size = STACK_TOP;
 	if (flags & MAP_FIXED) {
 		/*
@@ -116,7 +113,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 /* common code for old and new mmaps */
-static inline long
+static inline unsigned long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 	unsigned long flags, unsigned long fd, unsigned long pgoff)
 {
@@ -140,7 +137,8 @@ do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 	return error;
 }
-asmlinkage unsigned long old_mmap(unsigned long addr, size_t len, int prot,
+asmlinkage unsigned long
+old_mmap(unsigned long addr, unsigned long len, int prot,
 	int flags, int fd, off_t offset)
 {
 	unsigned long result;
@@ -155,7 +153,7 @@ asmlinkage unsigned long old_mmap(unsigned long addr, size_t len, int prot,
 	return result;
 }
-asmlinkage long
+asmlinkage unsigned long
 sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 	unsigned long flags, unsigned long fd, unsigned long pgoff)
 {
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
* Free Software Foundation; either version 2 of the License, or (at your * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version. * option) any later version.
*/ */
#include <linux/config.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -53,7 +52,7 @@ EXPORT_SYMBOL(jiffies_64); ...@@ -53,7 +52,7 @@ EXPORT_SYMBOL(jiffies_64);
*/ */
extern volatile unsigned long wall_jiffies; extern volatile unsigned long wall_jiffies;
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; DEFINE_SPINLOCK(rtc_lock);
/* /*
* By default we provide the null RTC ops * By default we provide the null RTC ops
......
...@@ -38,12 +38,9 @@ ...@@ -38,12 +38,9 @@
#include <asm/watch.h> #include <asm/watch.h>
#include <asm/types.h> #include <asm/types.h>
extern asmlinkage void handle_mod(void); extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void); extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void); extern asmlinkage void handle_tlbs(void);
extern asmlinkage void __xtlb_mod(void);
extern asmlinkage void __xtlb_tlbl(void);
extern asmlinkage void __xtlb_tlbs(void);
extern asmlinkage void handle_adel(void); extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void); extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void); extern asmlinkage void handle_ibe(void);
...@@ -82,7 +79,12 @@ void show_stack(struct task_struct *task, unsigned long *sp) ...@@ -82,7 +79,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
long stackdata; long stackdata;
int i; int i;
sp = sp ? sp : (unsigned long *) &sp; if (!sp) {
if (task && task != current)
sp = (unsigned long *) task->thread.reg29;
else
sp = (unsigned long *) &sp;
}
printk("Stack :"); printk("Stack :");
i = 0; i = 0;
...@@ -110,8 +112,12 @@ void show_trace(struct task_struct *task, unsigned long *stack) ...@@ -110,8 +112,12 @@ void show_trace(struct task_struct *task, unsigned long *stack)
const int field = 2 * sizeof(unsigned long); const int field = 2 * sizeof(unsigned long);
unsigned long addr; unsigned long addr;
if (!stack) if (!stack) {
stack = (unsigned long*)&stack; if (task && task != current)
stack = (unsigned long *) task->thread.reg29;
else
stack = (unsigned long *) &stack;
}
printk("Call Trace:"); printk("Call Trace:");
#ifdef CONFIG_KALLSYMS #ifdef CONFIG_KALLSYMS
...@@ -244,7 +250,7 @@ void show_registers(struct pt_regs *regs) ...@@ -244,7 +250,7 @@ void show_registers(struct pt_regs *regs)
printk("\n"); printk("\n");
} }
static spinlock_t die_lock = SPIN_LOCK_UNLOCKED; static DEFINE_SPINLOCK(die_lock);
NORET_TYPE void __die(const char * str, struct pt_regs * regs, NORET_TYPE void __die(const char * str, struct pt_regs * regs,
const char * file, const char * func, unsigned long line) const char * file, const char * func, unsigned long line)
...@@ -1001,16 +1007,10 @@ void __init trap_init(void) ...@@ -1001,16 +1007,10 @@ void __init trap_init(void)
if (board_be_init) if (board_be_init)
board_be_init(); board_be_init();
#ifdef CONFIG_MIPS32 set_except_vector(1, handle_tlbm);
set_except_vector(1, handle_mod);
set_except_vector(2, handle_tlbl); set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs); set_except_vector(3, handle_tlbs);
#endif
#ifdef CONFIG_MIPS64
set_except_vector(1, __xtlb_mod);
set_except_vector(2, __xtlb_tlbl);
set_except_vector(3, __xtlb_tlbs);
#endif
set_except_vector(4, handle_adel); set_except_vector(4, handle_adel);
set_except_vector(5, handle_ades); set_except_vector(5, handle_ades);
...@@ -1047,7 +1047,7 @@ void __init trap_init(void) ...@@ -1047,7 +1047,7 @@ void __init trap_init(void)
* unaligned ldc1/sdc1 exception. The handlers have not been * unaligned ldc1/sdc1 exception. The handlers have not been
* written yet. Well, anyway there is no R6000 machine on the * written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS. * current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a tripple R6k machine) * (Duh, crap, there is someone with a triple R6k machine)
*/ */
//set_except_vector(14, handle_mc); //set_except_vector(14, handle_mc);
//set_except_vector(15, handle_ndc); //set_except_vector(15, handle_ndc);
......
...@@ -156,6 +156,7 @@ SECTIONS ...@@ -156,6 +156,7 @@ SECTIONS
*(.options) *(.options)
*(.pdr) *(.pdr)
*(.reginfo) *(.reginfo)
*(.mdebug*)
} }
/* This is the MIPS specific mdebug section. */ /* This is the MIPS specific mdebug section. */
......
...@@ -148,16 +148,16 @@ void dump_list_process(struct task_struct *t, void *address) ...@@ -148,16 +148,16 @@ void dump_list_process(struct task_struct *t, void *address)
printk("tasks->mm.pgd == %08lx\n", (unsigned long) t->mm->pgd); printk("tasks->mm.pgd == %08lx\n", (unsigned long) t->mm->pgd);
page_dir = pgd_offset(t->mm, 0); page_dir = pgd_offset(t->mm, 0);
printk("page_dir == %08lx\n", (unsigned long) page_dir); printk("page_dir == %016lx\n", (unsigned long) page_dir);
pgd = pgd_offset(t->mm, addr); pgd = pgd_offset(t->mm, addr);
printk("pgd == %08lx, ", (unsigned long) pgd); printk("pgd == %016lx\n", (unsigned long) pgd);
pmd = pmd_offset(pgd, addr); pmd = pmd_offset(pgd, addr);
printk("pmd == %08lx, ", (unsigned long) pmd); printk("pmd == %016lx\n", (unsigned long) pmd);
pte = pte_offset(pmd, addr); pte = pte_offset(pmd, addr);
printk("pte == %08lx, ", (unsigned long) pte); printk("pte == %016lx\n", (unsigned long) pte);
page = *pte; page = *pte;
printk("page == %08lx\n", pte_val(page)); printk("page == %08lx\n", pte_val(page));
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* cp1emu.c: a MIPS coprocessor 1 (fpu) instruction emulator * cp1emu.c: a MIPS coprocessor 1 (fpu) instruction emulator
* *
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* *
* MIPS floating point support * MIPS floating point support
* *
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* This program is free software; you can distribute it and/or modify it * This program is free software; you can distribute it and/or modify it
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
/* /*
* MIPS floating point support * MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd. All rights reserved. * Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk * http://www.algor.co.uk
* *
* ######################################################################## * ########################################################################
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
# #
obj-y += cache.o extable.o fault.o init.o pgtable.o \ obj-y += cache.o extable.o fault.o init.o pgtable.o \
tlbex.o tlbex.o tlbex-fault.o
obj-$(CONFIG_MIPS32) += ioremap.o pgtable-32.o obj-$(CONFIG_MIPS32) += ioremap.o pgtable-32.o
obj-$(CONFIG_MIPS64) += pgtable-64.o obj-$(CONFIG_MIPS64) += pgtable-64.o
...@@ -27,40 +27,6 @@ obj-$(CONFIG_CPU_TX39XX) += c-tx39.o pg-r4k.o tlb-r3k.o ...@@ -27,40 +27,6 @@ obj-$(CONFIG_CPU_TX39XX) += c-tx39.o pg-r4k.o tlb-r3k.o
obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
#
# TLB exception handling code differs between 32-bit and 64-bit kernels.
#
ifdef CONFIG_MIPS32
obj-$(CONFIG_CPU_R3000) += tlbex32-r3k.o
obj-$(CONFIG_CPU_TX49XX) += tlbex32-r4k.o
obj-$(CONFIG_CPU_R4300) += tlbex32-r4k.o
obj-$(CONFIG_CPU_R4X00) += tlbex32-r4k.o
obj-$(CONFIG_CPU_VR41XX) += tlbex32-r4k.o
obj-$(CONFIG_CPU_R5000) += tlbex32-r4k.o
obj-$(CONFIG_CPU_NEVADA) += tlbex32-r4k.o
obj-$(CONFIG_CPU_R5432) += tlbex32-r4k.o
obj-$(CONFIG_CPU_RM7000) += tlbex32-r4k.o
obj-$(CONFIG_CPU_RM9000) += tlbex32-r4k.o
obj-$(CONFIG_CPU_R10000) += tlbex32-r4k.o
obj-$(CONFIG_CPU_MIPS32) += tlbex32-mips32.o
obj-$(CONFIG_CPU_MIPS64) += tlbex32-r4k.o
obj-$(CONFIG_CPU_SB1) += tlbex32-r4k.o
obj-$(CONFIG_CPU_TX39XX) += tlbex32-r3k.o
endif
ifdef CONFIG_MIPS64
obj-$(CONFIG_CPU_R4300) += tlb64-glue-r4k.o
obj-$(CONFIG_CPU_R4X00) += tlb64-glue-r4k.o
obj-$(CONFIG_CPU_R5000) += tlb64-glue-r4k.o
obj-$(CONFIG_CPU_NEVADA) += tlb64-glue-r4k.o
obj-$(CONFIG_CPU_R5432) += tlb64-glue-r4k.o
obj-$(CONFIG_CPU_RM7000) += tlb64-glue-r4k.o
obj-$(CONFIG_CPU_RM9000) += tlb64-glue-r4k.o
obj-$(CONFIG_CPU_R10000) += tlb64-glue-r4k.o
obj-$(CONFIG_CPU_SB1) += tlb64-glue-sb1.o
obj-$(CONFIG_CPU_MIPS64) += tlb64-glue-r4k.o
endif
obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
...@@ -68,8 +34,11 @@ obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o ...@@ -68,8 +34,11 @@ obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
# #
# Choose one DMA coherency model # Choose one DMA coherency model
# #
ifndef CONFIG_OWN_DMA
obj-$(CONFIG_DMA_COHERENT) += dma-coherent.o obj-$(CONFIG_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
endif
obj-$(CONFIG_DMA_IP27) += dma-ip27.o obj-$(CONFIG_DMA_IP27) += dma-ip27.o
obj-$(CONFIG_DMA_IP32) += dma-ip32.o
EXTRA_AFLAGS := $(CFLAGS) EXTRA_AFLAGS := $(CFLAGS)
...@@ -238,6 +238,22 @@ static inline void r4k_blast_scache_page_setup(void) ...@@ -238,6 +238,22 @@ static inline void r4k_blast_scache_page_setup(void)
r4k_blast_scache_page = blast_scache128_page; r4k_blast_scache_page = blast_scache128_page;
} }
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
static inline void r4k_blast_scache_page_indexed_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
if (sc_lsize == 16)
r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
else if (sc_lsize == 32)
r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
else if (sc_lsize == 64)
r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
else if (sc_lsize == 128)
r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (* r4k_blast_scache)(void); static void (* r4k_blast_scache)(void);
static inline void r4k_blast_scache_setup(void) static inline void r4k_blast_scache_setup(void)
...@@ -318,9 +334,6 @@ static inline void local_r4k_flush_cache_mm(void * args) ...@@ -318,9 +334,6 @@ static inline void local_r4k_flush_cache_mm(void * args)
{ {
struct mm_struct *mm = args; struct mm_struct *mm = args;
if (!cpu_has_dc_aliases)
return;
if (!cpu_context(smp_processor_id(), mm)) if (!cpu_context(smp_processor_id(), mm))
return; return;
...@@ -340,6 +353,9 @@ static inline void local_r4k_flush_cache_mm(void * args) ...@@ -340,6 +353,9 @@ static inline void local_r4k_flush_cache_mm(void * args)
static void r4k_flush_cache_mm(struct mm_struct *mm) static void r4k_flush_cache_mm(struct mm_struct *mm)
{ {
if (!cpu_has_dc_aliases)
return;
on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
} }
...@@ -359,13 +375,6 @@ static inline void local_r4k_flush_cache_page(void *args) ...@@ -359,13 +375,6 @@ static inline void local_r4k_flush_cache_page(void *args)
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
/*
 * If it owns no valid ASID yet, it cannot possibly have gotten
* this page into the cache.
*/
if (cpu_context(smp_processor_id(), mm) == 0)
return;
page &= PAGE_MASK; page &= PAGE_MASK;
pgdp = pgd_offset(mm, page); pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page); pmdp = pmd_offset(pgdp, page);
...@@ -385,8 +394,11 @@ static inline void local_r4k_flush_cache_page(void *args) ...@@ -385,8 +394,11 @@ static inline void local_r4k_flush_cache_page(void *args)
* in that case, which doesn't overly flush the cache too much. * in that case, which doesn't overly flush the cache too much.
*/ */
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) { if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
r4k_blast_dcache_page(page); r4k_blast_dcache_page(page);
if (exec && !cpu_icache_snoops_remote_store)
r4k_blast_scache_page(page);
}
if (exec) if (exec)
r4k_blast_icache_page(page); r4k_blast_icache_page(page);
...@@ -398,8 +410,11 @@ static inline void local_r4k_flush_cache_page(void *args) ...@@ -398,8 +410,11 @@ static inline void local_r4k_flush_cache_page(void *args)
* to work correctly. * to work correctly.
*/ */
page = INDEX_BASE + (page & (dcache_size - 1)); page = INDEX_BASE + (page & (dcache_size - 1));
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
r4k_blast_dcache_page_indexed(page); r4k_blast_dcache_page_indexed(page);
if (exec && !cpu_icache_snoops_remote_store)
r4k_blast_scache_page_indexed(page);
}
if (exec) { if (exec) {
if (cpu_has_vtag_icache) { if (cpu_has_vtag_icache) {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
...@@ -416,6 +431,13 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma, ...@@ -416,6 +431,13 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
{ {
struct flush_cache_page_args args; struct flush_cache_page_args args;
/*
 * If it owns no valid ASID yet, it cannot possibly have gotten
* this page into the cache.
*/
if (cpu_context(smp_processor_id(), vma->vm_mm) == 0)
return;
args.vma = vma; args.vma = vma;
args.page = page; args.page = page;
...@@ -442,14 +464,15 @@ static inline void local_r4k_flush_icache_range(void *args) ...@@ -442,14 +464,15 @@ static inline void local_r4k_flush_icache_range(void *args)
struct flush_icache_range_args *fir_args = args; struct flush_icache_range_args *fir_args = args;
unsigned long dc_lsize = current_cpu_data.dcache.linesz; unsigned long dc_lsize = current_cpu_data.dcache.linesz;
unsigned long ic_lsize = current_cpu_data.icache.linesz; unsigned long ic_lsize = current_cpu_data.icache.linesz;
unsigned long sc_lsize = current_cpu_data.scache.linesz;
unsigned long start = fir_args->start; unsigned long start = fir_args->start;
unsigned long end = fir_args->end; unsigned long end = fir_args->end;
unsigned long addr, aend; unsigned long addr, aend;
if (!cpu_has_ic_fills_f_dc) { if (!cpu_has_ic_fills_f_dc) {
if (end - start > dcache_size) if (end - start > dcache_size) {
r4k_blast_dcache(); r4k_blast_dcache();
else { } else {
addr = start & ~(dc_lsize - 1); addr = start & ~(dc_lsize - 1);
aend = (end - 1) & ~(dc_lsize - 1); aend = (end - 1) & ~(dc_lsize - 1);
...@@ -461,6 +484,23 @@ static inline void local_r4k_flush_icache_range(void *args) ...@@ -461,6 +484,23 @@ static inline void local_r4k_flush_icache_range(void *args)
addr += dc_lsize; addr += dc_lsize;
} }
} }
if (!cpu_icache_snoops_remote_store) {
if (end - start > scache_size) {
r4k_blast_scache();
} else {
addr = start & ~(sc_lsize - 1);
aend = (end - 1) & ~(sc_lsize - 1);
while (1) {
/* Hit_Writeback_Inv_SD */
protected_writeback_scache_line(addr);
if (addr == aend)
break;
addr += sc_lsize;
}
}
}
} }
if (end - start > icache_size) if (end - start > icache_size)
...@@ -527,6 +567,8 @@ static inline void local_r4k_flush_icache_page(void *args) ...@@ -527,6 +567,8 @@ static inline void local_r4k_flush_icache_page(void *args)
if (!cpu_has_ic_fills_f_dc) { if (!cpu_has_ic_fills_f_dc) {
unsigned long addr = (unsigned long) page_address(page); unsigned long addr = (unsigned long) page_address(page);
r4k_blast_dcache_page(addr); r4k_blast_dcache_page(addr);
if (!cpu_icache_snoops_remote_store)
r4k_blast_scache_page(addr);
ClearPageDcacheDirty(page); ClearPageDcacheDirty(page);
} }
...@@ -669,10 +711,13 @@ static void local_r4k_flush_cache_sigtramp(void * arg) ...@@ -669,10 +711,13 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
{ {
unsigned long ic_lsize = current_cpu_data.icache.linesz; unsigned long ic_lsize = current_cpu_data.icache.linesz;
unsigned long dc_lsize = current_cpu_data.dcache.linesz; unsigned long dc_lsize = current_cpu_data.dcache.linesz;
unsigned long sc_lsize = current_cpu_data.scache.linesz;
unsigned long addr = (unsigned long) arg; unsigned long addr = (unsigned long) arg;
R4600_HIT_CACHEOP_WAR_IMPL; R4600_HIT_CACHEOP_WAR_IMPL;
protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
if (!cpu_icache_snoops_remote_store)
protected_writeback_scache_line(addr & ~(sc_lsize - 1));
protected_flush_icache_line(addr & ~(ic_lsize - 1)); protected_flush_icache_line(addr & ~(ic_lsize - 1));
if (MIPS4K_ICACHE_REFILL_WAR) { if (MIPS4K_ICACHE_REFILL_WAR) {
__asm__ __volatile__ ( __asm__ __volatile__ (
...@@ -739,8 +784,8 @@ static inline void rm7k_erratum31(void) ...@@ -739,8 +784,8 @@ static inline void rm7k_erratum31(void)
} }
} }
static char *way_string[] = { NULL, "direct mapped", "2-way", "3-way", "4-way", static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
"5-way", "6-way", "7-way", "8-way" "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
}; };
static void __init probe_pcache(void) static void __init probe_pcache(void)
...@@ -1178,6 +1223,7 @@ void __init ld_mmu_r4xx0(void) ...@@ -1178,6 +1223,7 @@ void __init ld_mmu_r4xx0(void)
r4k_blast_icache_page_indexed_setup(); r4k_blast_icache_page_indexed_setup();
r4k_blast_icache_setup(); r4k_blast_icache_setup();
r4k_blast_scache_page_setup(); r4k_blast_scache_page_setup();
r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup(); r4k_blast_scache_setup();
/* /*
......
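A common thread in the c-r4k.c hunks above: on CPUs whose I-cache does not snoop stores that reach the L2 (cpu_icache_snoops_remote_store == 0), dirty data must be pushed past the L2 before the I-cache is invalidated. The invariant, condensed into a sketch using the file's own names (not a new function):

if (!cpu_has_ic_fills_f_dc) {
	r4k_blast_dcache_page(addr);		/* write dirty lines back to L2 */
	if (!cpu_icache_snoops_remote_store)
		r4k_blast_scache_page(addr);	/* and from L2 out to memory */
}
if (exec)
	r4k_blast_icache_page(addr);		/* refill now fetches the new code */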
...@@ -503,7 +503,7 @@ void ld_mmu_sb1(void) ...@@ -503,7 +503,7 @@ void ld_mmu_sb1(void)
/* Special cache error handler for SB1 */ /* Special cache error handler for SB1 */
memcpy((void *)(CAC_BASE + 0x100), &except_vec2_sb1, 0x80); memcpy((void *)(CAC_BASE + 0x100), &except_vec2_sb1, 0x80);
memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80); memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80);
memcpy((void *)KSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80); memcpy((void *)CKSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80);
probe_cache_sizes(); probe_cache_sizes();
......
...@@ -45,10 +45,17 @@ EXPORT_SYMBOL(_dma_cache_inv); ...@@ -45,10 +45,17 @@ EXPORT_SYMBOL(_dma_cache_inv);
#endif /* CONFIG_DMA_NONCOHERENT */ #endif /* CONFIG_DMA_NONCOHERENT */
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) /*
* We could optimize the case where the cache argument is not BCACHE but
 * that seems a very atypical use ... * that seems a very atypical use ...
*/
asmlinkage int sys_cacheflush(unsigned long addr, unsigned long int bytes,
unsigned int cache)
{ {
/* This should flush more selectively ... */ if (verify_area(VERIFY_WRITE, (void *) addr, bytes))
__flush_cache_all(); return -EFAULT;
flush_icache_range(addr, addr + bytes);
return 0; return 0;
} }
......
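From userspace the syscall above is reached through the MIPS cacheflush() wrapper; a hedged usage sketch, assuming the constants from <sys/cachectl.h> as shipped by MIPS libc ports:

#include <stdio.h>
#include <sys/cachectl.h>

/* After writing generated instructions into buf, make them visible
 * to the instruction fetch path before jumping to them. */
int publish_code(void *buf, int len)
{
	if (cacheflush(buf, len, BCACHE) < 0) {	/* BCACHE = both I- and D-cache */
		perror("cacheflush");		/* EFAULT now reported, per verify_area() */
		return -1;
	}
	return 0;
}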
...@@ -251,14 +251,14 @@ static const uint8_t parity[256] = { ...@@ -251,14 +251,14 @@ static const uint8_t parity[256] = {
/* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */ /* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */
static const uint64_t mask_72_64[8] = { static const uint64_t mask_72_64[8] = {
0x0738C808099264FFL, 0x0738C808099264FFULL,
0x38C808099264FF07L, 0x38C808099264FF07ULL,
0xC808099264FF0738L, 0xC808099264FF0738ULL,
0x08099264FF0738C8L, 0x08099264FF0738C8ULL,
0x099264FF0738C808L, 0x099264FF0738C808ULL,
0x9264FF0738C80809L, 0x9264FF0738C80809ULL,
0x64FF0738C8080992L, 0x64FF0738C8080992ULL,
0xFF0738C808099264L 0xFF0738C808099264ULL
}; };
/* Calculate the parity on a range of bits */ /* Calculate the parity on a range of bits */
...@@ -330,9 +330,9 @@ static uint32_t extract_ic(unsigned short addr, int data) ...@@ -330,9 +330,9 @@ static uint32_t extract_ic(unsigned short addr, int data)
((lru >> 4) & 0x3), ((lru >> 4) & 0x3),
((lru >> 6) & 0x3)); ((lru >> 6) & 0x3));
} }
va = (taglo & 0xC0000FFFFFFFE000) | addr; va = (taglo & 0xC0000FFFFFFFE000ULL) | addr;
if ((taglo & (1 << 31)) && (((taglo >> 62) & 0x3) == 3)) if ((taglo & (1 << 31)) && (((taglo >> 62) & 0x3) == 3))
va |= 0x3FFFF00000000000; va |= 0x3FFFF00000000000ULL;
valid = ((taghi >> 29) & 1); valid = ((taghi >> 29) & 1);
if (valid) { if (valid) {
tlo_tmp = taglo & 0xfff3ff; tlo_tmp = taglo & 0xfff3ff;
...@@ -473,7 +473,7 @@ static uint32_t extract_dc(unsigned short addr, int data) ...@@ -473,7 +473,7 @@ static uint32_t extract_dc(unsigned short addr, int data)
: "r" ((way << 13) | addr)); : "r" ((way << 13) | addr));
taglo = ((unsigned long long)taglohi << 32) | taglolo; taglo = ((unsigned long long)taglohi << 32) | taglolo;
pa = (taglo & 0xFFFFFFE000) | addr; pa = (taglo & 0xFFFFFFE000ULL) | addr;
if (way == 0) { if (way == 0) {
lru = (taghi >> 14) & 0xff; lru = (taghi >> 14) & 0xff;
prom_printf("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n", prom_printf("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
......
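The L to ULL conversions in this file matter on 32-bit builds, where a plain long is 32 bits and these constants would be truncated or, at best, accepted with a compiler warning. A minimal illustration, using a value from the table above:

/* On a 32-bit target: */
unsigned long long bad  = 0x0738C808099264FFL;	 /* doesn't fit in long; GCC warns,
						    older compilers may truncate */
unsigned long long good = 0x0738C808099264FFULL; /* always a 64-bit unsigned constant */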
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ */
#include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/asm.h> #include <asm/asm.h>
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
* Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
* Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
* IP32 changes by Ilya.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/ip32/crime.h>
/*
* Warning on the terminology - Linux calls an uncached area coherent;
* MIPS terminology calls memory areas with hardware maintained coherency
* coherent.
*/
/*
 * A few notes.
* 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
* 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for native-endian)
* 3. All other devices see memory as one big chunk at 0x40000000
* 4. Non-PCI devices will pass NULL as struct device*
* Thus we translate differently, depending on device.
*/
#define RAM_OFFSET_MASK 0x3fffffff
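The notes above describe the whole translation scheme; condensed into two hypothetical helpers (illustrative names only -- the functions below open-code the same arithmetic):

static inline dma_addr_t ip32_phys_to_dma(struct device *dev, unsigned long pa)
{
	unsigned long a = pa & RAM_OFFSET_MASK;	/* fold the high chunk back to 0 */

	if (dev == NULL)			/* non-PCI master: window at 0x40000000 */
		a += CRIME_HI_MEM_BASE;
	return (dma_addr_t) a;
}

static inline unsigned long ip32_dma_to_virt(dma_addr_t h)
{
	unsigned long a = h & RAM_OFFSET_MASK;
	unsigned long addr = a + PAGE_OFFSET;

	if (a >= 256*1024*1024)			/* address lies in the second RAM chunk */
		addr += CRIME_HI_MEM_BASE;
	return addr;
}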
void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, int gfp)
{
void *ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *) __get_free_pages(gfp, get_order(size));
if (ret != NULL) {
unsigned long addr = virt_to_phys(ret) & RAM_OFFSET_MASK;
memset(ret, 0, size);
if (dev == NULL)
addr += CRIME_HI_MEM_BASE;
*dma_handle = addr;
}
return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, int gfp)
{
void *ret;
ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
if (ret) {
dma_cache_wback_inv((unsigned long) ret, size);
ret = UNCAC_ADDR(ret);
}
return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
unsigned long addr = (unsigned long) vaddr;
addr = CAC_ADDR(addr);
free_pages(addr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
static inline void __dma_sync(unsigned long addr, size_t size,
enum dma_data_direction direction)
{
switch (direction) {
case DMA_TO_DEVICE:
dma_cache_wback(addr, size);
break;
case DMA_FROM_DEVICE:
dma_cache_inv(addr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(addr, size);
break;
default:
BUG();
}
}
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
unsigned long addr = (unsigned long) ptr;
switch (direction) {
case DMA_TO_DEVICE:
dma_cache_wback(addr, size);
break;
case DMA_FROM_DEVICE:
dma_cache_inv(addr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(addr, size);
break;
default:
BUG();
}
addr = virt_to_phys(ptr) & RAM_OFFSET_MASK;
if (dev == NULL)
addr += CRIME_HI_MEM_BASE;
return (dma_addr_t)addr;
}
EXPORT_SYMBOL(dma_map_single);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
switch (direction) {
case DMA_TO_DEVICE:
break;
case DMA_FROM_DEVICE:
break;
case DMA_BIDIRECTIONAL:
break;
default:
BUG();
}
}
EXPORT_SYMBOL(dma_unmap_single);
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
int i;
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sg++) {
unsigned long addr;
addr = (unsigned long) page_address(sg->page) + sg->offset;
if (addr)
__dma_sync(addr, sg->length, direction);
addr = __pa(addr) & RAM_OFFSET_MASK;
if (dev == NULL)
addr += CRIME_HI_MEM_BASE;
sg->dma_address = (dma_addr_t)addr;
}
return nents;
}
EXPORT_SYMBOL(dma_map_sg);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction)
{
unsigned long addr;
BUG_ON(direction == DMA_NONE);
addr = (unsigned long) page_address(page) + offset;
dma_cache_wback_inv(addr, size);
addr = __pa(addr) & RAM_OFFSET_MASK;
if (dev == NULL)
addr += CRIME_HI_MEM_BASE;
return (dma_addr_t)addr;
}
EXPORT_SYMBOL(dma_map_page);
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
if (direction != DMA_TO_DEVICE) {
unsigned long addr;
dma_address &= RAM_OFFSET_MASK;
addr = dma_address + PAGE_OFFSET;
if (dma_address >= 256*1024*1024)
addr += CRIME_HI_MEM_BASE;
dma_cache_wback_inv(addr, size);
}
}
EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
unsigned long addr;
int i;
BUG_ON(direction == DMA_NONE);
if (direction == DMA_TO_DEVICE)
return;
for (i = 0; i < nhwentries; i++, sg++) {
addr = (unsigned long) page_address(sg->page);
if (!addr)
continue;
dma_cache_wback_inv(addr + sg->offset, sg->length);
}
}
EXPORT_SYMBOL(dma_unmap_sg);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
unsigned long addr;
BUG_ON(direction == DMA_NONE);
dma_handle &= RAM_OFFSET_MASK;
addr = dma_handle + PAGE_OFFSET;
if (dma_handle >= 256*1024*1024)
addr += CRIME_HI_MEM_BASE;
__dma_sync(addr, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
unsigned long addr;
BUG_ON(direction == DMA_NONE);
dma_handle &= RAM_OFFSET_MASK;
addr = dma_handle + PAGE_OFFSET;
if (dma_handle >= 256*1024*1024)
addr += CRIME_HI_MEM_BASE;
__dma_sync(addr, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size, enum dma_data_direction direction)
{
unsigned long addr;
BUG_ON(direction == DMA_NONE);
dma_handle &= RAM_OFFSET_MASK;
addr = dma_handle + offset + PAGE_OFFSET;
if (dma_handle >= 256*1024*1024)
addr += CRIME_HI_MEM_BASE;
__dma_sync(addr, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size, enum dma_data_direction direction)
{
unsigned long addr;
BUG_ON(direction == DMA_NONE);
dma_handle &= RAM_OFFSET_MASK;
addr = dma_handle + offset + PAGE_OFFSET;
if (dma_handle >= 256*1024*1024)
addr += CRIME_HI_MEM_BASE;
__dma_sync(addr, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
int i;
BUG_ON(direction == DMA_NONE);
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++)
__dma_sync((unsigned long)page_address(sg->page),
sg->length, direction);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
int i;
BUG_ON(direction == DMA_NONE);
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++)
__dma_sync((unsigned long)page_address(sg->page),
sg->length, direction);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(dma_addr_t dma_addr)
{
return 0;
}
EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
/*
* we fall back to GFP_DMA when the mask isn't all 1s,
* so we can't guarantee allocations that must be
* within a tighter range than GFP_DMA.
*/
if (mask < 0x00ffffff)
return 0;
return 1;
}
EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(dma_addr_t dma_addr)
{
return 1;
}
EXPORT_SYMBOL(dma_is_consistent);
void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
if (direction == DMA_NONE)
return;
dma_cache_wback_inv((unsigned long)vaddr, size);
}
EXPORT_SYMBOL(dma_cache_sync);
...@@ -61,7 +61,7 @@ unsigned long setup_zero_pages(void) ...@@ -61,7 +61,7 @@ unsigned long setup_zero_pages(void)
else else
order = 0; order = 0;
empty_zero_page = __get_free_pages(GFP_KERNEL, order); empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page) if (!empty_zero_page)
panic("Oh boy, that early out of memory?"); panic("Oh boy, that early out of memory?");
...@@ -74,7 +74,6 @@ unsigned long setup_zero_pages(void) ...@@ -74,7 +74,6 @@ unsigned long setup_zero_pages(void)
size = PAGE_SIZE << order; size = PAGE_SIZE << order;
zero_page_mask = (size - 1) & PAGE_MASK; zero_page_mask = (size - 1) & PAGE_MASK;
memset((void *)empty_zero_page, 0, size);
return 1UL << order; return 1UL << order;
} }
......
...@@ -3,9 +3,8 @@ ...@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
*/ */
#include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -57,12 +56,6 @@ void copy_page(void *to, void *from) __attribute__((alias("copy_page_array"))); ...@@ -57,12 +56,6 @@ void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(copy_page);
/*
* An address fits into a single register so it's safe to use 64-bit registers
 * if we have 64-bit addresses.
*/
#define cpu_has_64bit_registers cpu_has_64bit_addresses
/* /*
* This is suboptimal for 32-bit kernels; we assume that R10000 is only used * This is suboptimal for 32-bit kernels; we assume that R10000 is only used
* with 64-bit kernels. The prefetch offsets have been experimentally tuned * with 64-bit kernels. The prefetch offsets have been experimentally tuned
...@@ -145,7 +138,7 @@ static inline void __build_load_reg(int reg) ...@@ -145,7 +138,7 @@ static inline void __build_load_reg(int reg)
union mips_instruction mi; union mips_instruction mi;
unsigned int width; unsigned int width;
if (cpu_has_64bit_registers) { if (cpu_has_64bit_gp_regs) {
mi.i_format.opcode = ld_op; mi.i_format.opcode = ld_op;
width = 8; width = 8;
} else { } else {
...@@ -266,7 +259,7 @@ static inline void build_addiu_a2_a0(unsigned long offset) ...@@ -266,7 +259,7 @@ static inline void build_addiu_a2_a0(unsigned long offset)
BUG_ON(offset > 0x7fff); BUG_ON(offset > 0x7fff);
mi.i_format.opcode = cpu_has_64bit_addresses ? daddiu_op : addiu_op; mi.i_format.opcode = cpu_has_64bit_gp_regs ? daddiu_op : addiu_op;
mi.i_format.rs = 4; /* $a0 */ mi.i_format.rs = 4; /* $a0 */
mi.i_format.rt = 6; /* $a2 */ mi.i_format.rt = 6; /* $a2 */
mi.i_format.simmediate = offset; mi.i_format.simmediate = offset;
...@@ -280,7 +273,7 @@ static inline void build_addiu_a1(unsigned long offset) ...@@ -280,7 +273,7 @@ static inline void build_addiu_a1(unsigned long offset)
BUG_ON(offset > 0x7fff); BUG_ON(offset > 0x7fff);
mi.i_format.opcode = cpu_has_64bit_addresses ? daddiu_op : addiu_op; mi.i_format.opcode = cpu_has_64bit_gp_regs ? daddiu_op : addiu_op;
mi.i_format.rs = 5; /* $a1 */ mi.i_format.rs = 5; /* $a1 */
mi.i_format.rt = 5; /* $a1 */ mi.i_format.rt = 5; /* $a1 */
mi.i_format.simmediate = offset; mi.i_format.simmediate = offset;
...@@ -296,7 +289,7 @@ static inline void build_addiu_a0(unsigned long offset) ...@@ -296,7 +289,7 @@ static inline void build_addiu_a0(unsigned long offset)
BUG_ON(offset > 0x7fff); BUG_ON(offset > 0x7fff);
mi.i_format.opcode = cpu_has_64bit_addresses ? daddiu_op : addiu_op; mi.i_format.opcode = cpu_has_64bit_gp_regs ? daddiu_op : addiu_op;
mi.i_format.rs = 4; /* $a0 */ mi.i_format.rs = 4; /* $a0 */
mi.i_format.rt = 4; /* $a0 */ mi.i_format.rt = 4; /* $a0 */
mi.i_format.simmediate = offset; mi.i_format.simmediate = offset;
......
...@@ -71,8 +71,8 @@ void __init pagetable_init(void) ...@@ -71,8 +71,8 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */ /* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir); pgd_init((unsigned long)swapper_pg_dir);
pgd_init((unsigned long)swapper_pg_dir + pgd_init((unsigned long)swapper_pg_dir
sizeof(pgd_t ) * USER_PTRS_PER_PGD); + sizeof(pgd_t) * USER_PTRS_PER_PGD);
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
pgd_base = swapper_pg_dir; pgd_base = swapper_pg_dir;
......
...@@ -55,5 +55,4 @@ void __init pagetable_init(void) ...@@ -55,5 +55,4 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */ /* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir); pgd_init((unsigned long)swapper_pg_dir);
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
memset((void *)invalid_pte_table, 0, sizeof(pte_t) * PTRS_PER_PTE);
} }
#include <linux/config.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/swap.h> #include <linux/swap.h>
......
...@@ -96,13 +96,13 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size) ...@@ -96,13 +96,13 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
} }
/* /*
* This function is executed in the uncached segment KSEG1. * This function is executed in the uncached segment CKSEG1.
* It must not touch the stack, because the stack pointer still points * It must not touch the stack, because the stack pointer still points
* into KSEG0. * into CKSEG0.
* *
* Three options: * Three options:
* - Write it in assembly and guarantee that we don't use the stack. * - Write it in assembly and guarantee that we don't use the stack.
* - Disable caching for KSEG0 before calling it. * - Disable caching for CKSEG0 before calling it.
* - Pray that GCC doesn't randomly start using the stack. * - Pray that GCC doesn't randomly start using the stack.
* *
* This being Linux, we obviously take the least sane of those options - * This being Linux, we obviously take the least sane of those options -
......
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
* Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com) * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
*/ */
#include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ */
#include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/bootinfo.h> #include <asm/bootinfo.h>
...@@ -25,7 +24,7 @@ ...@@ -25,7 +24,7 @@
extern void build_tlb_refill_handler(void); extern void build_tlb_refill_handler(void);
#define UNIQUE_ENTRYHI(idx) (KSEG0 + ((idx) << (PAGE_SHIFT + 1))) #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
/* Dump the current entry* and pagemask registers */ /* Dump the current entry* and pagemask registers */
static inline void dump_cur_tlb_regs(void) static inline void dump_cur_tlb_regs(void)
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#include <linux/init.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/war.h>
.macro __BUILD_cli
CLI
.endm
.macro __BUILD_sti
STI
.endm
.macro __BUILD_kmode
KMODE
.endm
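/*
 * tlb_handler builds one complete fault stub: save the full register
 * frame, fetch the faulting address from CP0_BADVADDR, set the
 * requested interrupt state (cli/sti/kmode), then call
 * do_page_fault(regs, write, address) and leave through
 * ret_from_exception.
 */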
.macro tlb_handler name interruptible writebit
NESTED(__\name, PT_SIZE, sp)
SAVE_ALL
dmfc0 a2, CP0_BADVADDR
__BUILD_\interruptible
li a1, \writebit
sd a2, PT_BVADDR(sp)
move a0, sp
jal do_page_fault
j ret_from_exception
END(__\name)
.endm
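/*
 * The same stub wrapped in the BCM1250 M3 erratum check: if the
 * virtual page numbers in CP0_BADVADDR and CP0_ENTRYHI disagree, the
 * exception is a spurious one raised by the erratum and the call to
 * do_page_fault is skipped.
 */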
.macro tlb_handler_m3 name interruptible writebit
NESTED(__\name, PT_SIZE, sp)
dmfc0 k0, CP0_BADVADDR
dmfc0 k1, CP0_ENTRYHI
xor k0, k1
dsrl k0, k0, PAGE_SHIFT + 1
bnez k0, 1f
SAVE_ALL
dmfc0 a2, CP0_BADVADDR
__BUILD_\interruptible
li a1, \writebit
sd a2, PT_BVADDR(sp)
move a0, sp
jal do_page_fault
1:
j ret_from_exception
END(__\name)
.endm
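/*
 * xtlb_mod:  TLB modified  (write = 1)
 * xtlb_tlbl: TLB load miss (write = 0)
 * xtlb_tlbs: TLB store miss (write = 1)
 * Only the load path needs the BCM1250 M3 workaround.
 */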
tlb_handler xtlb_mod kmode 1
#if BCM1250_M3_WAR
tlb_handler_m3 xtlb_tlbl kmode 0
#else
tlb_handler xtlb_tlbl kmode 0
#endif
tlb_handler xtlb_tlbs kmode 1
...@@ -6,36 +6,23 @@ ...@@ -6,36 +6,23 @@
* Copyright (C) 1999 Ralf Baechle * Copyright (C) 1999 Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 1999 Silicon Graphics, Inc.
*/ */
#include <linux/init.h>
#include <asm/mipsregs.h> #include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/regdef.h> #include <asm/regdef.h>
#include <asm/stackframe.h> #include <asm/stackframe.h>
.macro __BUILD_cli .macro tlb_do_page_fault, write
CLI NESTED(tlb_do_page_fault_\write, PT_SIZE, sp)
.endm
.macro __BUILD_sti
STI
.endm
.macro __BUILD_kmode
KMODE
.endm
.macro tlb_handler name interruptible writebit
NESTED(__\name, PT_SIZE, sp)
SAVE_ALL SAVE_ALL
dmfc0 a2, CP0_BADVADDR MFC0 a2, CP0_BADVADDR
__BUILD_\interruptible KMODE
li a1, \writebit
sd a2, PT_BVADDR(sp)
move a0, sp move a0, sp
REG_S a2, PT_BVADDR(sp)
li a1, \write
jal do_page_fault jal do_page_fault
j ret_from_exception j ret_from_exception
END(__\name) END(tlb_do_page_fault_\write)
.endm .endm
tlb_handler xtlb_mod kmode 1 tlb_do_page_fault 0
tlb_handler xtlb_tlbl kmode 0 tlb_do_page_fault 1
tlb_handler xtlb_tlbs kmode 1
This diff is collapsed.
/*
* TLB exception handling code for MIPS32 CPUs.
*
* Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
*
* Multi-cpu abstraction and reworking:
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*
* Pete Popov, ppopov@pacbell.net
* Added 36 bit phys address support.
* Copyright (C) 2002 MontaVista Software, Inc.
*/
#include <linux/init.h>
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#define TLB_OPTIMIZE /* If you are paranoid, disable this. */
#ifdef CONFIG_64BIT_PHYS_ADDR
/* We really only support 36 bit physical addresses on MIPS32 */
#define PTE_L lw
#define PTE_S sw
#define PTE_SRL srl
#define P_MTC0 mtc0
#define PTE_HALF 4 /* pte_high contains pre-shifted, ready to go entry */
#define PTE_SIZE 8
#define PTEP_INDX_MSK 0xff0
#define PTE_INDX_MSK 0xff8
#define PTE_INDX_SHIFT 9
#define CONVERT_PTE(pte)
#define PTE_MAKEWRITE_HIGH(pte, ptr) \
lw pte, PTE_HALF(ptr); \
ori pte, (_PAGE_VALID | _PAGE_DIRTY); \
sw pte, PTE_HALF(ptr); \
lw pte, 0(ptr);
#define PTE_MAKEVALID_HIGH(pte, ptr) \
lw pte, PTE_HALF(ptr); \
ori pte, pte, _PAGE_VALID; \
sw pte, PTE_HALF(ptr); \
lw pte, 0(ptr);
#else
#define PTE_L lw
#define PTE_S sw
#define PTE_SRL srl
#define P_MTC0 mtc0
#define PTE_HALF 0
#define PTE_SIZE 4
#define PTEP_INDX_MSK 0xff8
#define PTE_INDX_MSK 0xffc
#define PTE_INDX_SHIFT 10
#define CONVERT_PTE(pte) srl pte, pte, 6
#define PTE_MAKEWRITE_HIGH(pte, ptr)
#define PTE_MAKEVALID_HIGH(pte, ptr)
#endif /* CONFIG_64BIT_PHYS_ADDR */
#ifdef CONFIG_64BIT_PHYS_ADDR
#define GET_PTE_OFF(reg)
#else
#define GET_PTE_OFF(reg) srl reg, reg, 1
#endif
/*
* ABUSE of CPP macros 101.
*
* After this macro runs, the pte faulted on is
* in register PTE, a ptr into the table in which
* the pte belongs is in PTR.
*/
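/*
 * On SMP the CPU number is kept in the upper bits of CP0_CONTEXT, so
 * shifting it down yields a word index into the per-CPU pgd_current[]
 * array; the UP variant simply loads the lone pgd_current pointer.
 */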
#ifdef CONFIG_SMP
#define GET_PGD(scratch, ptr) \
mfc0 ptr, CP0_CONTEXT; \
la scratch, pgd_current;\
srl ptr, 23; \
sll ptr, 2; \
addu ptr, scratch, ptr; \
lw ptr, (ptr);
#else
#define GET_PGD(scratch, ptr) \
lw ptr, pgd_current;
#endif
#define LOAD_PTE(pte, ptr) \
GET_PGD(pte, ptr) \
mfc0 pte, CP0_BADVADDR; \
srl pte, pte, _PGDIR_SHIFT; \
sll pte, pte, 2; \
addu ptr, ptr, pte; \
mfc0 pte, CP0_BADVADDR; \
lw ptr, (ptr); \
srl pte, pte, PTE_INDX_SHIFT; \
and pte, pte, PTE_INDX_MSK; \
addu ptr, ptr, pte; \
PTE_L pte, (ptr);
/* This places the even/odd pte pair in the page
* table at PTR into ENTRYLO0 and ENTRYLO1 using
* TMP as a scratch register.
*/
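/* The ori/xori pair below rounds PTR down to the even PTE of the
 * pair, so the two loads fetch the even and odd entries in order. */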
#define PTE_RELOAD(ptr, tmp) \
ori ptr, ptr, PTE_SIZE; \
xori ptr, ptr, PTE_SIZE; \
PTE_L tmp, (PTE_HALF+PTE_SIZE)(ptr); \
CONVERT_PTE(tmp); \
P_MTC0 tmp, CP0_ENTRYLO1; \
PTE_L ptr, PTE_HALF(ptr); \
CONVERT_PTE(ptr); \
P_MTC0 ptr, CP0_ENTRYLO0;
#define DO_FAULT(write) \
.set noat; \
SAVE_ALL; \
mfc0 a2, CP0_BADVADDR; \
KMODE; \
.set at; \
move a0, sp; \
jal do_page_fault; \
li a1, write; \
j ret_from_exception; \
nop; \
.set noat;
/* Check if the PTE is present; if not, jump to LABEL.
 * PTR points to the page table where this PTE is located;
 * when the macro is done executing, PTE will have been
 * reloaded with its original value.
 */
#define PTE_PRESENT(pte, ptr, label) \
andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
bnez pte, label; \
PTE_L pte, (ptr);
/* Make PTE valid, store the result at PTR. */
#define PTE_MAKEVALID(pte, ptr) \
ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
PTE_S pte, (ptr);
/* Check if the PTE can be written to; if not, branch to LABEL.
 * In either case, reload PTE from PTR when done.
 */
#define PTE_WRITABLE(pte, ptr, label) \
andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
bnez pte, label; \
PTE_L pte, (ptr);
/* Make PTE writable, update software status bits as well,
* then store at PTR.
*/
#define PTE_MAKEWRITE(pte, ptr) \
ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
_PAGE_VALID | _PAGE_DIRTY); \
PTE_S pte, (ptr);
.set noreorder
.align 5
NESTED(handle_tlbl, PT_SIZE, sp)
.set noat
invalid_tlbl:
#ifdef TLB_OPTIMIZE
/* Test present bit in entry. */
LOAD_PTE(k0, k1)
tlbp
PTE_PRESENT(k0, k1, nopage_tlbl)
PTE_MAKEVALID_HIGH(k0, k1)
PTE_MAKEVALID(k0, k1)
PTE_RELOAD(k1, k0)
nop
b 1f
tlbwi
1:
nop
.set mips3
eret
.set mips0
#endif
nopage_tlbl:
DO_FAULT(0)
END(handle_tlbl)
.align 5
NESTED(handle_tlbs, PT_SIZE, sp)
.set noat
#ifdef TLB_OPTIMIZE
.set mips3
LOAD_PTE(k0, k1)
tlbp # find faulting entry
PTE_WRITABLE(k0, k1, nopage_tlbs)
PTE_MAKEWRITE(k0, k1)
PTE_MAKEWRITE_HIGH(k0, k1)
PTE_RELOAD(k1, k0)
nop
b 1f
tlbwi
1:
nop
.set mips3
eret
.set mips0
#endif
nopage_tlbs:
DO_FAULT(1)
END(handle_tlbs)
.align 5
NESTED(handle_mod, PT_SIZE, sp)
.set noat
#ifdef TLB_OPTIMIZE
.set mips3
LOAD_PTE(k0, k1)
tlbp # find faulting entry
andi k0, k0, _PAGE_WRITE
beqz k0, nowrite_mod
PTE_L k0, (k1)
/* Present and writable bits set, set accessed and dirty bits. */
PTE_MAKEWRITE(k0, k1)
PTE_MAKEWRITE_HIGH(k0, k1)
/* Now reload the entry into the tlb. */
PTE_RELOAD(k1, k0)
nop
b 1f
tlbwi
1:
nop
.set mips3
eret
.set mips0
#endif
nowrite_mod:
DO_FAULT(1)
END(handle_mod)
/*
* TLB exception handling code for R2000/R3000.
*
* Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
*
* Multi-CPU abstraction reworking:
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
* Further modifications to make this work:
* Copyright (c) 1998 Harald Koerfgen
* Copyright (c) 1998, 1999 Gleb Raiko & Vladimir Roganov
* Copyright (c) 2001 Ralf Baechle
* Copyright (c) 2001 MIPS Technologies, Inc.
*/
#include <linux/init.h>
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#define TLB_OPTIMIZE /* If you are paranoid, disable this. */
/* ABUSE of CPP macros 101. */
/* After this macro runs, the pte faulted on is
* in register PTE, a ptr into the table in which
* the pte belongs is in PTR.
*/
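/* The R3000 Context register presents BadVPN pre-shifted as a byte
 * offset, so the "andi 0xffc" below extracts the PTE offset within
 * the page-table page directly. */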
#define LOAD_PTE(pte, ptr) \
mfc0 pte, CP0_BADVADDR; \
lw ptr, pgd_current; \
srl pte, pte, 22; \
sll pte, pte, 2; \
addu ptr, ptr, pte; \
mfc0 pte, CP0_CONTEXT; \
lw ptr, (ptr); \
andi pte, pte, 0xffc; \
addu ptr, ptr, pte; \
lw pte, (ptr); \
nop;
/* This loads the pte at PTR into ENTRYLO0.  The R3000
 * has a single ENTRYLO register, so there is no even/odd
 * pair to set up and no scratch register is needed.
 */
#define PTE_RELOAD(ptr) \
lw ptr, (ptr) ; \
nop ; \
mtc0 ptr, CP0_ENTRYLO0; \
nop;
#define DO_FAULT(write) \
.set noat; \
.set macro; \
SAVE_ALL; \
mfc0 a2, CP0_BADVADDR; \
KMODE; \
.set at; \
move a0, sp; \
jal do_page_fault; \
li a1, write; \
j ret_from_exception; \
nop; \
.set noat; \
.set nomacro;
/* Check if the PTE is present; if not, jump to LABEL.
 * PTR points to the page table where this PTE is located;
 * when the macro is done executing, PTE will have been
 * reloaded with its original value.
 */
#define PTE_PRESENT(pte, ptr, label) \
andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
bnez pte, label; \
.set push; \
.set reorder; \
lw pte, (ptr); \
.set pop;
/* Make PTE valid, store the result at PTR. */
#define PTE_MAKEVALID(pte, ptr) \
ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
sw pte, (ptr);
/* Check if the PTE can be written to; if not, branch to LABEL.
 * In either case, reload PTE from PTR when done.
 */
#define PTE_WRITABLE(pte, ptr, label) \
andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
bnez pte, label; \
.set push; \
.set reorder; \
lw pte, (ptr); \
.set pop;
/* Make PTE writable, update software status bits as well,
* then store at PTR.
*/
#define PTE_MAKEWRITE(pte, ptr) \
ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
_PAGE_VALID | _PAGE_DIRTY); \
sw pte, (ptr);
/*
 * The index register may have the probe fail bit set:
 * accesses to kseg2 trap here directly, i.e. without going
 * through the refill handler, so the entry may not be in the TLB.
 */
#define TLB_WRITE(reg) \
mfc0 reg, CP0_INDEX; \
nop; \
bltz reg, 1f; \
nop; \
tlbwi; \
j 2f; \
nop; \
1: tlbwr; \
2:
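/* The rfe below executes in the delay slot of the jr, popping the
 * R3000 KU/IE status stack as control returns to the EPC. */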
#define RET(reg) \
mfc0 reg, CP0_EPC; \
nop; \
jr reg; \
rfe
.set noreorder
.align 5
NESTED(handle_tlbl, PT_SIZE, sp)
.set noat
#ifdef TLB_OPTIMIZE
/* Test present bit in entry. */
LOAD_PTE(k0, k1)
tlbp
PTE_PRESENT(k0, k1, nopage_tlbl)
PTE_MAKEVALID(k0, k1)
PTE_RELOAD(k1)
TLB_WRITE(k0)
RET(k0)
nopage_tlbl:
#endif
DO_FAULT(0)
END(handle_tlbl)
NESTED(handle_tlbs, PT_SIZE, sp)
.set noat
#ifdef TLB_OPTIMIZE
LOAD_PTE(k0, k1)
tlbp # find faulting entry
PTE_WRITABLE(k0, k1, nopage_tlbs)
PTE_MAKEWRITE(k0, k1)
PTE_RELOAD(k1)
TLB_WRITE(k0)
RET(k0)
nopage_tlbs:
#endif
DO_FAULT(1)
END(handle_tlbs)
.align 5
NESTED(handle_mod, PT_SIZE, sp)
.set noat
#ifdef TLB_OPTIMIZE
LOAD_PTE(k0, k1)
tlbp # find faulting entry
andi k0, k0, _PAGE_WRITE
beqz k0, nowrite_mod
.set push
.set reorder
lw k0, (k1)
.set pop
/* Present and writable bits set, set accessed and dirty bits. */
PTE_MAKEWRITE(k0, k1)
/* Now reload the entry into the tlb. */
PTE_RELOAD(k1)
tlbwi
RET(k0)
#endif
nowrite_mod:
DO_FAULT(1)
END(handle_mod)
menu "Profiling support"
depends on EXPERIMENTAL
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
Say Y here to enable the extended profiling support mechanisms used
by profilers such as OProfile.
config OPROFILE
tristate "OProfile system profiling (EXPERIMENTAL)"
depends on PROFILING
help
OProfile is a profiling system capable of profiling the
whole system, including the kernel, kernel modules, libraries,
and applications.
If unsure, say N.
endmenu
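For reference, a .config fragment that switches these options on, building OProfile as a module (a sketch; the symbol names are exactly those declared above):

CONFIG_EXPERIMENTAL=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=m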
EXTRA_CFLAGS := -Werror
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
oprofilefs.o oprofile_stats.o \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) common.o
oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_RM9000) += op_model_rm9000.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 by Ralf Baechle
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/cpu-info.h>
#include "op_impl.h"
extern struct op_mips_model op_model_mipsxx __attribute__((weak));
extern struct op_mips_model op_model_rm9000 __attribute__((weak));
static struct op_mips_model *model;
static struct op_counter_config ctr[20];
static int op_mips_setup(void)
{
/* Pre-compute the values to stuff in the hardware registers. */
model->reg_setup(ctr);
/* Configure the registers on all cpus. */
on_each_cpu(model->cpu_setup, NULL, 0, 1);
return 0;
}
static int op_mips_create_files(struct super_block * sb, struct dentry * root)
{
int i;
for (i = 0; i < model->num_counters; ++i) {
struct dentry *dir;
char buf[3];
snprintf(buf, sizeof buf, "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
/* Dummies. */
oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
oprofilefs_create_ulong(sb, dir, "exl", &ctr[i].exl);
oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
}
return 0;
}
static int op_mips_start(void)
{
on_each_cpu(model->cpu_start, NULL, 0, 1);
return 0;
}
static void op_mips_stop(void)
{
/* Disable performance monitoring for all counters. */
on_each_cpu(model->cpu_stop, NULL, 0, 1);
}
void __init oprofile_arch_init(struct oprofile_operations *ops)
{
struct op_mips_model *lmodel = NULL;
switch (current_cpu_data.cputype) {
case CPU_24K:
lmodel = &op_model_mipsxx;
break;
case CPU_RM9000:
lmodel = &op_model_rm9000;
break;
}
if (!lmodel)
return;
if (lmodel->init())
return;
model = lmodel;
ops->create_files = op_mips_create_files;
ops->setup = op_mips_setup;
ops->start = op_mips_start;
ops->stop = op_mips_stop;
ops->cpu_type = lmodel->cpu_type;
printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
lmodel->cpu_type);
}
void oprofile_arch_exit(void)
{
/* oprofile_arch_init may have bailed out without picking a model */
if (model)
model->exit();
}
/**
* @file arch/mips/oprofile/op_impl.h
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author Richard Henderson <rth@twiddle.net>
*/
#ifndef OP_IMPL_H
#define OP_IMPL_H 1
/* Per-counter configuration as set via oprofilefs. */
struct op_counter_config {
unsigned long enabled;
unsigned long event;
unsigned long count;
/* Dummies because I am too lazy to hack the userspace tools. */
unsigned long kernel;
unsigned long user;
unsigned long exl;
unsigned long unit_mask;
};
/* Per-architecture configury and hooks. */
struct op_mips_model {
void (*reg_setup) (struct op_counter_config *);
void (*cpu_setup) (void * dummy);
int (*init)(void);
void (*exit)(void);
void (*cpu_start)(void *args);
void (*cpu_stop)(void *args);
char *cpu_type;
unsigned char num_counters;
};
#endif
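A hedged sketch of a backend filling in this interface; every name below (op_model_mycpu, the "mips/mycpu" string, the two-counter assumption, the empty hook bodies) is hypothetical, standing in for what a real model file such as op_model_rm9000.c provides:

#include "op_impl.h"

static struct op_counter_config *my_ctr;

static void my_reg_setup(struct op_counter_config *ctr)
{
	my_ctr = ctr;	/* precompute control-register images here */
}

static void my_cpu_setup(void *dummy)
{
	/* write the precomputed values into this CPU's counters */
}

static int my_init(void)
{
	return 0;	/* claim the counter interrupt; 0 on success */
}

static void my_exit(void)
{
	/* release whatever my_init claimed */
}

static void my_cpu_start(void *args)
{
	/* set the count-enable bits */
}

static void my_cpu_stop(void *args)
{
	/* clear the count-enable bits */
}

struct op_mips_model op_model_mycpu = {
	.reg_setup	= my_reg_setup,
	.cpu_setup	= my_cpu_setup,
	.init		= my_init,
	.exit		= my_exit,
	.cpu_start	= my_cpu_start,
	.cpu_stop	= my_cpu_stop,
	.cpu_type	= "mips/mycpu",
	.num_counters	= 2,
};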
...@@ -59,7 +59,7 @@ pcibios_align_resource(void *data, struct resource *res, ...@@ -59,7 +59,7 @@ pcibios_align_resource(void *data, struct resource *res,
if (res->flags & IORESOURCE_IO) { if (res->flags & IORESOURCE_IO) {
/* Make sure we start at our min on all hoses */ /* Make sure we start at our min on all hoses */
if (start - hose->io_resource->start < PCIBIOS_MIN_IO) if (start < PCIBIOS_MIN_IO + hose->io_resource->start)
start = PCIBIOS_MIN_IO + hose->io_resource->start; start = PCIBIOS_MIN_IO + hose->io_resource->start;
/* /*
...@@ -69,7 +69,7 @@ pcibios_align_resource(void *data, struct resource *res, ...@@ -69,7 +69,7 @@ pcibios_align_resource(void *data, struct resource *res,
start = (start + 0x3ff) & ~0x3ff; start = (start + 0x3ff) & ~0x3ff;
} else if (res->flags & IORESOURCE_MEM) { } else if (res->flags & IORESOURCE_MEM) {
/* Make sure we start at our min on all hoses */ /* Make sure we start at our min on all hoses */
if (start - hose->mem_resource->start < PCIBIOS_MIN_MEM) if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start)
start = PCIBIOS_MIN_MEM + hose->mem_resource->start; start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
} }
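The rewritten comparisons avoid unsigned wraparound: if start sits below the hose base, the old subtraction wraps to a huge unsigned value and the minimum is never enforced. With illustrative values:

	unsigned long start = 0x100, base = 0x1000, min = 0x4000;
	/* old: start - base wraps to 0xfffff100, which is not < min    */
	/* new: start < min + base holds, so start is correctly clamped */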
...@@ -294,6 +294,8 @@ pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, ...@@ -294,6 +294,8 @@ pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
#ifdef CONFIG_HOTPLUG #ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus); EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
#endif #endif
char *pcibios_setup(char *str) char *pcibios_setup(char *str)
......
...@@ -126,6 +126,7 @@ ...@@ -126,6 +126,7 @@
|| defined (CONFIG_CPU_R4X00) \ || defined (CONFIG_CPU_R4X00) \
|| defined (CONFIG_CPU_R5000) \ || defined (CONFIG_CPU_R5000) \
|| defined (CONFIG_CPU_NEVADA) \ || defined (CONFIG_CPU_NEVADA) \
|| defined (CONFIG_CPU_TX49XX) \
|| defined (CONFIG_CPU_MIPS64) || defined (CONFIG_CPU_MIPS64)
#define KUSIZE 0x0000010000000000 /* 2^^40 */ #define KUSIZE 0x0000010000000000 /* 2^^40 */
#define KUSIZE_64 0x0000010000000000 /* 2^^40 */ #define KUSIZE_64 0x0000010000000000 /* 2^^40 */
......
...@@ -195,6 +195,7 @@ ...@@ -195,6 +195,7 @@
#define MACH_CASIO_E55 5 /* CASIO CASSIOPEIA E-10/15/55/65 */ #define MACH_CASIO_E55 5 /* CASIO CASSIOPEIA E-10/15/55/65 */
#define MACH_TANBAC_TB0226 6 /* TANBAC TB0226 (Mbase) */ #define MACH_TANBAC_TB0226 6 /* TANBAC TB0226 (Mbase) */
#define MACH_TANBAC_TB0229 7 /* TANBAC TB0229 (VR4131DIMM) */ #define MACH_TANBAC_TB0229 7 /* TANBAC TB0229 (VR4131DIMM) */
#define MACH_NEC_CMBVR4133 8 /* CMB VR4133 Board */
#define MACH_GROUP_HP_LJ 20 /* Hewlett Packard LaserJet */ #define MACH_GROUP_HP_LJ 20 /* Hewlett Packard LaserJet */
#define MACH_HP_LASERJET 1 #define MACH_HP_LASERJET 1
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#define BRK_STACKOVERFLOW 9 /* For Ada stackchecking */ #define BRK_STACKOVERFLOW 9 /* For Ada stackchecking */
#define BRK_NORLD 10 /* No rld found - not used by Linux/MIPS */ #define BRK_NORLD 10 /* No rld found - not used by Linux/MIPS */
#define _BRK_THREADBP 11 /* For threads, user bp (used by debuggers) */ #define _BRK_THREADBP 11 /* For threads, user bp (used by debuggers) */
#define BRK_MULOVF 1023 /* Multiply overflow */
#define BRK_BUG 512 /* Used by BUG() */ #define BRK_BUG 512 /* Used by BUG() */
#define BRK_MULOVF 1023 /* Multiply overflow */
#endif /* __ASM_BREAK_H */ #endif /* __ASM_BREAK_H */