Commit bbc4fd12 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (49 commits)
  microblaze: Add KGDB support
  microblaze: Support brki rX, 0x18 for user application debugging
  microblaze: Remove nop after MSRCLR/SET, MTS, MFS instructions
  microblaze: Simplify syscall routine
  microblaze: Move PT_MODE saving to delay slot
  microblaze: Fix _interrupt function
  microblaze: Fix _user_exception function
  microblaze: Put together addik instructions
  microblaze: Use delay slot in syscall macros
  microblaze: Save kernel mode in delay slot
  microblaze: Do not mix register saving and mode setting
  microblaze: Move SAVE_STATE upward
  microblaze: entry.S: Macro optimization
  microblaze: Optimize hw exception routine
  microblaze: Implement clear_ums macro and fix SAVE_STATE macro
  microblaze: Remove additional setup for kernel_mode
  microblaze: Optimize SAVE_STATE macro
  microblaze: Remove additional loading
  microblaze: Completely remove working with R11 register
  microblaze: Do not setup BIP in _debug_exception
  ...
parents 673b864f 2d5973cb
......@@ -14,6 +14,7 @@ config MICROBLAZE
select USB_ARCH_HAS_EHCI
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_OPROFILE
select HAVE_ARCH_KGDB
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select TRACING_SUPPORT
......@@ -223,6 +224,36 @@ config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000"
choice
prompt "Page size"
default MICROBLAZE_4K_PAGES
depends on ADVANCED_OPTIONS && !MMU
help
Select the kernel logical page size. Increasing the page size
will reduce software overhead at each page boundary, allow
hardware prefetch mechanisms to be more effective, and allow
larger DMA transfers, increasing IO efficiency and reducing
overhead. However, memory utilization will increase.
For example, each cached file will use a multiple of the
page size to hold its contents, and the space between the
end of the file and the end of the last page is wasted.
If unsure, choose 4K_PAGES.
config MICROBLAZE_4K_PAGES
bool "4k page size"
config MICROBLAZE_8K_PAGES
bool "8k page size"
config MICROBLAZE_16K_PAGES
bool "16k page size"
config MICROBLAZE_32K_PAGES
bool "32k page size"
endchoice
endmenu
source "mm/Kconfig"
......
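To make the memory-waste tradeoff in the help text above concrete, here is a hedged, standalone C sketch (illustration only, not part of the patch; the file size is a made-up example) that computes how many bytes the final, partially filled page-cache page wastes for each selectable page size.

#include <stdio.h>

/* Bytes wasted in the final page-cache page of a file of size `sz`
 * when the page size is `page` (a power of two). */
static unsigned long tail_waste(unsigned long sz, unsigned long page)
{
	unsigned long rem = sz & (page - 1);
	return rem ? page - rem : 0;
}

int main(void)
{
	const unsigned long pages[] = { 4096, 8192, 16384, 32768 };
	const unsigned long file_sz = 70000;	/* hypothetical cached file */
	unsigned int i;

	for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
		printf("page %5luB -> %5luB wasted\n",
		       pages[i], tail_waste(file_sz, pages[i]));
	return 0;
}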
......@@ -10,6 +10,7 @@ source "lib/Kconfig.debug"
config EARLY_PRINTK
bool "Early printk function for kernel"
depends on SERIAL_UARTLITE_CONSOLE
default n
help
This option turns on/off early printk messages to console.
......
......@@ -35,7 +35,8 @@ quiet_cmd_cp = CP $< $@$2
cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
quiet_cmd_strip = STRIP $@
cmd_strip = $(STRIP) -K _start -K _end -K __log_buf -K _fdt_start vmlinux -o $@
cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
-K _fdt_start vmlinux -o $@
quiet_cmd_uimage = UIMAGE $@.ub
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \
......
......@@ -17,6 +17,7 @@
/* Somebody depends on this; sigh... */
#include <linux/mm.h>
#include <linux/io.h>
/* Look at Documentation/cachetlb.txt */
......@@ -60,7 +61,6 @@ void microblaze_cache_init(void);
#define invalidate_icache() mbc->iin();
#define invalidate_icache_range(start, end) mbc->iinr(start, end);
#define flush_icache_user_range(vma, pg, adr, len) flush_icache();
#define flush_icache_page(vma, pg) do { } while (0)
......@@ -72,9 +72,15 @@ void microblaze_cache_init(void);
#define flush_dcache() mbc->dfl();
#define flush_dcache_range(start, end) mbc->dflr(start, end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
/* D-cache aliasing problem can't happen - cache is between MMU and ram */
#define flush_dcache_page(page) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/* MS: We have to implement it because of rootfs-jffs2 issue on WB */
#define flush_dcache_page(page) \
do { \
unsigned long addr = (unsigned long) page_address(page); /* virtual */ \
addr = (u32)virt_to_phys((void *)addr); \
flush_dcache_range((unsigned) (addr), (unsigned) (addr) + PAGE_SIZE); \
} while (0);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
......@@ -97,8 +103,10 @@ void microblaze_cache_init(void);
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
u32 addr = virt_to_phys(dst); \
invalidate_icache_range((unsigned) (addr), (unsigned) (addr) + (len));\
memcpy((dst), (src), (len)); \
flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
flush_dcache_range((unsigned) (addr), (unsigned) (addr) + (len));\
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
......
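For readability, a hedged C restatement of the cache-maintenance ordering the reworked copy_to_user_page above follows on a write-back cache. patch_user_text() is an illustrative helper name, not kernel API; virt_to_phys(), invalidate_icache_range(), flush_dcache_range() and memcpy() are the operations the macro already uses.

#include <linux/io.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Illustrative sketch only: work on the physical alias, drop stale
 * I-cache lines first, write the data, then push it out of the D-cache. */
static inline void patch_user_text(void *dst, const void *src, size_t len)
{
	unsigned long addr = (unsigned long)virt_to_phys(dst);

	invalidate_icache_range(addr, addr + len);	/* old instructions gone */
	memcpy(dst, src, len);				/* write via kernel mapping */
	flush_dcache_range(addr, addr + len);		/* make the write visible */
}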
......@@ -79,12 +79,6 @@ static inline int dma_supported(struct device *dev, u64 mask)
return ops->dma_supported(dev, mask);
}
#ifdef CONFIG_PCI
/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK
#endif
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
......
......@@ -77,7 +77,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2MSB
#endif
#define ELF_EXEC_PAGESIZE 4096
#define ELF_EXEC_PAGESIZE PAGE_SIZE
#define ELF_CORE_COPY_REGS(_dest, _regs) \
......
......@@ -14,6 +14,11 @@
#define _ASM_MICROBLAZE_EXCEPTIONS_H
#ifdef __KERNEL__
#ifndef CONFIG_MMU
#define EX_HANDLER_STACK_SIZ (4*19)
#endif
#ifndef __ASSEMBLY__
/* Macros to enable and disable HW exceptions in the MSR */
......@@ -64,22 +69,6 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
void die(const char *str, struct pt_regs *fp, long err);
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);
#if defined(CONFIG_KGDB)
void (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
int (*debugger_iabr_match)(struct pt_regs *regs);
int (*debugger_dabr_match)(struct pt_regs *regs);
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#define debugger(regs) do { } while (0)
#define debugger_bpt(regs) 0
#define debugger_sstep(regs) 0
#define debugger_iabr_match(regs) 0
#define debugger_dabr_match(regs) 0
#define debugger_fault_handler ((void (*)(struct pt_regs *))0)
#endif
#endif /*__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_EXCEPTIONS_H */
#ifdef __KERNEL__
#ifndef __MICROBLAZE_KGDB_H__
#define __MICROBLAZE_KGDB_H__
#ifndef __ASSEMBLY__
#define CACHE_FLUSH_IS_SAFE 1
#define BUFMAX 2048
/*
* 32 32-bit general purpose registers (r0-r31)
* 6 32-bit special registers (pc, msr, ear, esr, fsr, btr)
* 12 32-bit PVR
* 7 32-bit MMU Regs (redr, rpid, rzpr, rtlbx, rtlbsx, rtlblo, rtlbhi)
* ------
* 57 registers
*/
#define NUMREGBYTES (57 * 4)
#define BREAK_INSTR_SIZE 4
static inline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__("brki r16, 0x18;");
}
#endif /* __ASSEMBLY__ */
#endif /* __MICROBLAZE_KGDB_H__ */
#endif /* __KERNEL__ */
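As a sanity check on the register layout documented above, a hedged, standalone C11 sketch (not kernel code; the MB_GDB_* names are made up for illustration) confirming the 57-register count behind NUMREGBYTES.

/* 32 GPRs + 6 special regs + 12 PVR words + 7 MMU regs = 57 registers. */
#define MB_GDB_NGPR	32
#define MB_GDB_NSPECIAL	6	/* pc, msr, ear, esr, fsr, btr */
#define MB_GDB_NPVR	12
#define MB_GDB_NMMU	7	/* redr, rpid, rzpr, rtlbx, rtlbsx, rtlblo, rtlbhi */
#define MB_NUMREGBYTES	((MB_GDB_NGPR + MB_GDB_NSPECIAL + MB_GDB_NPVR + MB_GDB_NMMU) * 4)

_Static_assert(MB_NUMREGBYTES == 57 * 4, "GDB register layout mismatch");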
......@@ -23,8 +23,16 @@
#ifdef __KERNEL__
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT (12)
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#if defined(CONFIG_MICROBLAZE_32K_PAGES)
#define PAGE_SHIFT 15
#elif defined(CONFIG_MICROBLAZE_16K_PAGES)
#define PAGE_SHIFT 14
#elif defined(CONFIG_MICROBLAZE_8K_PAGES)
#define PAGE_SHIFT 13
#else
#define PAGE_SHIFT 12
#endif
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_KERNEL_BASE_ADDR))
......
......@@ -16,7 +16,7 @@
#define PVR_MSR_BIT 0x400
struct pvr_s {
unsigned pvr[16];
unsigned pvr[12];
};
/* The following taken from Xilinx's standalone BSP pvr.h */
......
......@@ -28,8 +28,6 @@ void disable_early_printk(void);
void heartbeat(void);
void setup_heartbeat(void);
unsigned long long sched_clock(void);
# ifdef CONFIG_MMU
extern void mmu_reset(void);
extern void early_console_reg_tlb_alloc(unsigned int addr);
......
......@@ -45,7 +45,6 @@ extern struct task_struct *_switch_to(struct thread_info *prev,
#define smp_rmb() rmb()
#define smp_wmb() wmb()
void show_trace(struct task_struct *task, unsigned long *stack);
void __bad_xchg(volatile void *ptr, int size);
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
......
......@@ -359,7 +359,7 @@ extern long __user_bad(void);
__copy_tofrom_user((__force void __user *)(to), \
(void __user *)(from), (n))
#define __copy_from_user_inatomic(to, from, n) \
copy_from_user((to), (from), (n))
__copy_from_user((to), (from), (n))
static inline long copy_from_user(void *to,
const void __user *from, unsigned long n)
......@@ -373,7 +373,7 @@ static inline long copy_from_user(void *to,
#define __copy_to_user(to, from, n) \
__copy_tofrom_user((void __user *)(to), \
(__force const void __user *)(from), (n))
#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n))
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
......
/*
* Backtrace support for Microblaze
*
* Copyright (C) 2010 Digital Design Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __MICROBLAZE_UNWIND_H
#define __MICROBLAZE_UNWIND_H
struct stack_trace;
struct trap_handler_info {
unsigned long start_addr;
unsigned long end_addr;
const char *trap_name;
};
extern struct trap_handler_info microblaze_trap_handlers;
extern const char _hw_exception_handler;
extern const char ex_handler_unhandled;
void microblaze_unwind(struct task_struct *task, struct stack_trace *trace);
#endif /* __MICROBLAZE_UNWIND_H */
......@@ -17,7 +17,7 @@ extra-y := head.o vmlinux.lds
obj-y += dma.o exceptions.o \
hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
of_platform.o process.o prom.o prom_parse.o ptrace.o \
setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
obj-y += cpu/
......@@ -28,5 +28,6 @@ obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
obj-$(CONFIG_MMU) += misc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-y += entry$(MMU).o
......@@ -126,6 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpuinfo.pvr_user1,
cpuinfo.pvr_user2);
count += seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
return 0;
}
......
......@@ -588,3 +588,31 @@ sys_rt_sigsuspend_wrapper:
#include "syscall_table.S"
syscall_table_size=(.-sys_call_table)
type_SYSCALL:
.ascii "SYSCALL\0"
type_IRQ:
.ascii "IRQ\0"
type_IRQ_PREEMPT:
.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
.ascii " SYSCALL (PREEMPTED)\0"
/*
* Trap decoding for stack unwinder
* Tuples are (start addr, end addr, string)
* If the return address lies in [start addr, end addr],
* the unwinder displays 'string'
*/
.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
/* Exact matches come first */
.word ret_to_user ; .word ret_to_user ; .word type_SYSCALL
.word ret_from_intr; .word ret_from_intr ; .word type_IRQ
/* Fuzzy matches go here */
.word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
.word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
/* End of table */
.word 0 ; .word 0 ; .word 0
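A hedged C sketch of how a stack unwinder could consult the tuple table above. struct trap_handler_info and microblaze_trap_handlers come from <asm/unwind.h> in this series; lookup_trap_name() is an illustrative helper, not an existing kernel function.

#include <asm/unwind.h>

/* Illustrative only: walk the (start, end, name) tuples emitted above
 * until the all-zero terminator, returning the matching trap name. */
static const char *lookup_trap_name(unsigned long return_addr)
{
	const struct trap_handler_info *h = &microblaze_trap_handlers;

	for (; h->start_addr; h++)
		if (return_addr >= h->start_addr && return_addr <= h->end_addr)
			return h->trap_name;	/* e.g. "SYSCALL", "IRQ" */
	return NULL;
}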
......@@ -48,128 +48,107 @@
*/
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
.macro clear_bip
msrclr r11, MSR_BIP
nop
msrclr r0, MSR_BIP
.endm
.macro set_bip
msrset r11, MSR_BIP
nop
msrset r0, MSR_BIP
.endm
.macro clear_eip
msrclr r11, MSR_EIP
nop
msrclr r0, MSR_EIP
.endm
.macro set_ee
msrset r11, MSR_EE
nop
msrset r0, MSR_EE
.endm
.macro disable_irq
msrclr r11, MSR_IE
nop
msrclr r0, MSR_IE
.endm
.macro enable_irq
msrset r11, MSR_IE
nop
msrset r0, MSR_IE
.endm
.macro set_ums
msrset r11, MSR_UMS
nop
msrclr r11, MSR_VMS
nop
msrset r0, MSR_UMS
msrclr r0, MSR_VMS
.endm
.macro set_vms
msrclr r11, MSR_UMS
nop
msrset r11, MSR_VMS
nop
msrclr r0, MSR_UMS
msrset r0, MSR_VMS
.endm
.macro clear_ums
msrclr r0, MSR_UMS
.endm
.macro clear_vms_ums
msrclr r11, MSR_VMS
nop
msrclr r11, MSR_UMS
nop
msrclr r0, MSR_VMS | MSR_UMS
.endm
#else
.macro clear_bip
mfs r11, rmsr
nop
andi r11, r11, ~MSR_BIP
mts rmsr, r11
nop
.endm
.macro set_bip
mfs r11, rmsr
nop
ori r11, r11, MSR_BIP
mts rmsr, r11
nop
.endm
.macro clear_eip
mfs r11, rmsr
nop
andi r11, r11, ~MSR_EIP
mts rmsr, r11
nop
.endm
.macro set_ee
mfs r11, rmsr
nop
ori r11, r11, MSR_EE
mts rmsr, r11
nop
.endm
.macro disable_irq
mfs r11, rmsr
nop
andi r11, r11, ~MSR_IE
mts rmsr, r11
nop
.endm
.macro enable_irq
mfs r11, rmsr
nop
ori r11, r11, MSR_IE
mts rmsr, r11
nop
.endm
.macro set_ums
mfs r11, rmsr
nop
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
nop
.endm
.macro set_vms
mfs r11, rmsr
nop
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
nop
.endm
.macro clear_ums
mfs r11, rmsr
andni r11, r11, MSR_UMS
mts rmsr,r11
.endm
.macro clear_vms_ums
mfs r11, rmsr
nop
andni r11, r11, (MSR_VMS|MSR_UMS)
mts rmsr,r11
nop
.endm
#endif
......@@ -182,16 +161,20 @@
#define VM_ON \
set_ums; \
rted r0, 2f; \
2: nop;
nop; \
2:
/* turn off virtual protected mode save and user mode save*/
#define VM_OFF \
clear_vms_ums; \
rted r0, TOPHYS(1f); \
1: nop;
nop; \
1:
#define SAVE_REGS \
swi r2, r1, PTO+PT_R2; /* Save SDA */ \
swi r3, r1, PTO+PT_R3; \
swi r4, r1, PTO+PT_R4; \
swi r5, r1, PTO+PT_R5; \
swi r6, r1, PTO+PT_R6; \
swi r7, r1, PTO+PT_R7; \
......@@ -218,14 +201,14 @@
swi r30, r1, PTO+PT_R30; \
swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
mfs r11, rmsr; /* save MSR */ \
nop; \
swi r11, r1, PTO+PT_MSR;
#define RESTORE_REGS \
lwi r11, r1, PTO+PT_MSR; \
mts rmsr , r11; \
nop; \
lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
lwi r3, r1, PTO+PT_R3; \
lwi r4, r1, PTO+PT_R4; \
lwi r5, r1, PTO+PT_R5; \
lwi r6, r1, PTO+PT_R6; \
lwi r7, r1, PTO+PT_R7; \
......@@ -252,6 +235,39 @@
lwi r30, r1, PTO+PT_R30; \
lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
#define SAVE_STATE \
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
/* See if already in kernel mode.*/ \
mfs r1, rmsr; \
andi r1, r1, MSR_UMS; \
bnei r1, 1f; \
/* Kernel-mode state save. */ \
/* Reload kernel stack-ptr. */ \
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
/* FIXME: I can add these two lines to one */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -STATE_SAVE_SIZE; */ \
addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
SAVE_REGS \
brid 2f; \
swi r1, r1, PTO+PT_MODE; \
1: /* User-mode state save. */ \
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
tophys(r1,r1); \
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
/* MS these three instructions can be added to one */ \
/* addik r1, r1, THREAD_SIZE; */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -STATE_SAVE_SIZE; */ \
addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
SAVE_REGS \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \
/* MS: I am clearing UMS even in case when I come from kernel space */ \
clear_ums; \
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
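The single addik in SAVE_STATE above folds the tophys() conversion and the stack-frame allocation that the FIXME comments spell out. A hedged, standalone C sketch of that arithmetic follows; CONFIG_KERNEL_START, CONFIG_KERNEL_BASE_ADDR and STATE_SAVE_SIZE are replaced by made-up example values.

#include <assert.h>

#define KERNEL_START	0xc0000000UL	/* virtual kernel base (example) */
#define KERNEL_BASE	0x40000000UL	/* physical kernel base (example) */
#define STATE_SAVE	152UL		/* pt_regs frame size (example) */

static unsigned long tophys(unsigned long vaddr)
{
	return vaddr - KERNEL_START + KERNEL_BASE;
}

int main(void)
{
	unsigned long r1 = 0xc0123450UL;	/* some virtual kernel SP */

	/* Two-step form from the FIXME comments ... */
	unsigned long two_step = tophys(r1) - STATE_SAVE;
	/* ... equals one addik with the folded constant. */
	unsigned long folded = r1 + (KERNEL_BASE - KERNEL_START - STATE_SAVE);

	assert(two_step == folded);
	return 0;
}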
.text
/*
......@@ -267,45 +283,23 @@
* are masked. This is nice, means we don't have to CLI before state save
*/
C_ENTRY(_user_exception):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
addi r14, r14, 4 /* return address is 4 byte after call */
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
beqi r11, 1f; /* Jump ahead if coming from user */
/* Kernel-mode state save. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
tophys(r1,r11);
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
SAVE_REGS
addi r11, r0, 1; /* Was in kernel-mode. */
swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
brid 2f;
nop; /* Fill delay slot */
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
/* User-mode state save. */
1:
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
addik r1, r1, THREAD_SIZE;
tophys(r1,r1);
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
/* MS these three instructions can be added to one */
/* addik r1, r1, THREAD_SIZE; */
/* tophys(r1,r1); */
/* addik r1, r1, -STATE_SAVE_SIZE; */
addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
SAVE_REGS
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
clear_ums;
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r12, r1, PTO+PT_R0;
tovirt(r1,r1)
......@@ -316,10 +310,8 @@ C_ENTRY(_user_exception):
* register should point to the location where
* the called function should return. [note that MAKE_SYS_CALL uses label 1] */
# Step into virtual mode.
set_vms;
addik r11, r0, 3f
rtid r11, 0
/* Step into virtual mode */
rtbd r0, 3f
nop
3:
lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
......@@ -363,24 +355,17 @@ C_ENTRY(_user_exception):
# Find and jump into the syscall handler.
lwi r12, r12, sys_call_table
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_trap-8
addi r15, r0, ret_from_trap-8
bra r12
/* The syscall number is invalid, return an error. */
5:
rtsd r15, 8; /* looks like a normal subroutine return */
addi r3, r0, -ENOSYS;
rtsd r15,8; /* looks like a normal subroutine return */
or r0, r0, r0
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
bnei r11, 2f;
swi r3, r1, PTO + PT_R3
swi r4, r1, PTO + PT_R4
......@@ -413,32 +398,19 @@ C_ENTRY(ret_from_trap):
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addi r7, r0, 1; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1:
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
1: set_bip; /* Ints masked for state restore */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
TRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
......@@ -450,12 +422,11 @@ TRAP_return: /* Make global symbol for debugging */
C_ENTRY(sys_fork_wrapper):
addi r5, r0, SIGCHLD /* Arg 0: flags */
lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
la r7, r1, PTO /* Arg 2: parent context */
addik r7, r1, PTO /* Arg 2: parent context */
add r8, r0, r0 /* Arg 3: (unused) */
add r9, r0, r0; /* Arg 4: (unused) */
add r10, r0, r0; /* Arg 5: (unused) */
brid do_fork /* Do real work (tail-call) */
nop;
add r10, r0, r0; /* Arg 5: (unused) */
/* This is the initial entry point for a new child thread, with an appropriate
stack in place that makes it look like the child is in the middle of an
......@@ -466,35 +437,31 @@ C_ENTRY(ret_from_fork):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
add r3, r5, r0; /* switch_thread returns the prev task */
/* ( in the delay slot ) */
add r3, r0, r0; /* Child's fork call should return 0. */
brid ret_from_trap; /* Do normal trap return */
nop;
add r3, r0, r0; /* Child's fork call should return 0. */
C_ENTRY(sys_vfork):
brid microblaze_vfork /* Do real work (tail-call) */
la r5, r1, PTO
addik r5, r1, PTO
C_ENTRY(sys_clone):
bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
lwi r6, r1, PTO+PT_R1; /* If so, use parent's stack ptr */
1: la r7, r1, PTO; /* Arg 2: parent context */
lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
1: addik r7, r1, PTO; /* Arg 2: parent context */
add r8, r0, r0; /* Arg 3: (unused) */
add r9, r0, r0; /* Arg 4: (unused) */
add r10, r0, r0; /* Arg 5: (unused) */
brid do_fork /* Do real work (tail-call) */
nop;
add r10, r0, r0; /* Arg 5: (unused) */
C_ENTRY(sys_execve):
la r8, r1, PTO; /* add user context as 4th arg */
brid microblaze_execve; /* Do real work (tail-call).*/
nop;
addik r8, r1, PTO; /* add user context as 4th arg */
C_ENTRY(sys_rt_sigreturn_wrapper):
swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
swi r4, r1, PTO+PT_R4;
la r5, r1, PTO; /* add user context as 1st arg */
brlid r15, sys_rt_sigreturn /* Do real work */
nop;
addik r5, r1, PTO; /* add user context as 1st arg */
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
bri ret_from_trap /* fall through will not work here due to align */
......@@ -503,83 +470,23 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
/*
* HW EXCEPTION routine start
*/
#define SAVE_STATE \
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
set_bip; /*equalize initial state for all possible entries*/\
clear_eip; \
enable_irq; \
set_ee; \
/* See if already in kernel mode.*/ \
lwi r11, r0, TOPHYS(PER_CPU(KM)); \
beqi r11, 1f; /* Jump ahead if coming from user */\
/* Kernel-mode state save. */ \
/* Reload kernel stack-ptr. */ \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
tophys(r1,r11); \
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
/* store return registers separately because \
* this macro is used for other exceptions */ \
swi r3, r1, PTO + PT_R3; \
swi r4, r1, PTO + PT_R4; \
SAVE_REGS \
/* PC, before IRQ/trap - this is one instruction above */ \
swi r17, r1, PTO+PT_PC; \
\
addi r11, r0, 1; /* Was in kernel-mode. */ \
swi r11, r1, PTO+PT_MODE; \
brid 2f; \
nop; /* Fill delay slot */ \
1: /* User-mode state save. */ \
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
tophys(r1,r1); \
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
tophys(r1,r1); \
\
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
/* store return registers separately because this macro \
* is used for other exceptions */ \
swi r3, r1, PTO + PT_R3; \
swi r4, r1, PTO + PT_R4; \
SAVE_REGS \
/* PC, before IRQ/trap - this is one instruction above FIXME*/ \
swi r17, r1, PTO+PT_PC; \
\
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
addi r11, r0, 1; \
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
/* Save away the syscall number. */ \
swi r0, r1, PTO+PT_R0; \
tovirt(r1,r1)
C_ENTRY(full_exception_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
/* adjust exception address for privileged instruction
* for finding where is it */
addik r17, r17, -4
SAVE_STATE /* Save registers */
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PTO+PT_PC;
tovirt(r1,r1)
/* FIXME this can be store directly in PT_ESR reg.
* I tested it but there is a fault */
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc - 8
la r5, r1, PTO /* parameter struct pt_regs * regs */
addik r15, r0, ret_from_exc - 8
mfs r6, resr
nop
mfs r7, rfsr; /* save FSR */
nop
mts rfsr, r0; /* Clear sticky fsr */
nop
la r12, r0, full_exception
set_vms;
rtbd r12, 0;
nop;
rted r0, full_exception
addik r5, r1, PTO /* parameter struct pt_regs * regs */
/*
* Unaligned data trap.
......@@ -592,19 +499,27 @@ C_ENTRY(full_exception_trap):
* The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
*/
C_ENTRY(unaligned_data_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
/* MS: I have to save the r11 value and then restore it because
* set_bip, clear_eip, set_ee use r11 as a temp register if MSR
* instructions are not used. We don't need to do this if MSR instructions
* are used, since they use r0 instead of r11.
* I am using ENTRY_SP, which should primarily be used only for stack
* pointer saving. */
swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
set_bip; /* equalize initial state for all possible entries */
clear_eip;
set_ee;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PTO+PT_PC;
tovirt(r1,r1)
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
addik r15, r0, ret_from_exc-8
mfs r3, resr /* ESR */
nop
mfs r4, rear /* EAR */
nop
la r7, r1, PTO /* parameter struct pt_regs * regs */
la r12, r0, _unaligned_data_exception
set_vms;
rtbd r12, 0; /* interrupts enabled */
nop;
rtbd r0, _unaligned_data_exception
addik r7, r1, PTO /* parameter struct pt_regs * regs */
/*
* Page fault traps.
......@@ -625,38 +540,32 @@ C_ENTRY(unaligned_data_trap):
*/
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PTO+PT_PC;
tovirt(r1,r1)
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
la r5, r1, PTO /* parameter struct pt_regs * regs */
addik r15, r0, ret_from_exc-8
mfs r6, rear /* parameter unsigned long address */
nop
mfs r7, resr /* parameter unsigned long error_code */
nop
la r12, r0, do_page_fault
set_vms;
rtbd r12, 0; /* interrupts enabled */
nop;
rted r0, do_page_fault
addik r5, r1, PTO /* parameter struct pt_regs * regs */
C_ENTRY(page_fault_instr_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PTO+PT_PC;
tovirt(r1,r1)
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
la r5, r1, PTO /* parameter struct pt_regs * regs */
addik r15, r0, ret_from_exc-8
mfs r6, rear /* parameter unsigned long address */
nop
ori r7, r0, 0 /* parameter unsigned long error_code */
la r12, r0, do_page_fault
set_vms;
rtbd r12, 0; /* interrupts enabled */
nop;
rted r0, do_page_fault
addik r5, r1, PTO /* parameter struct pt_regs * regs */
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f; /* See if returning to kernel mode, */
/* ... if so, skip resched &c. */
......@@ -687,32 +596,27 @@ C_ENTRY(ret_from_exc):
* traps), but signal handlers may want to examine or change the
* complete register state. Here we save anything not saved by
* the normal entry sequence, so that it may be safely restored
* (in a possibly modified form) after do_signal returns.
* store return registers separately because this macro is used
* for other exceptions */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
* (in a possibly modified form) after do_signal returns. */
addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
1: set_bip; /* Ints masked for state restore */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: VM_OFF;
2: set_bip; /* Ints masked for state restore */
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
......@@ -736,36 +640,23 @@ C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
/* MS: See if already in kernel mode. */
lwi r11, r0, TOPHYS(PER_CPU(KM));
beqi r11, 1f; /* MS: Jump ahead if coming from user */
mfs r1, rmsr
nop
andi r1, r1, MSR_UMS
bnei r1, 1f
/* Kernel-mode state save. */
or r11, r1, r0
tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
swi r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
/* save registers */
/* MS: Make room on the stack -> activation record */
addik r1, r1, -STATE_SAVE_SIZE;
/* MS: store return registers separately because
* this macro is used for other exceptions */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
SAVE_REGS
/* MS: store mode */
addi r11, r0, 1; /* MS: Was in kernel-mode. */
swi r11, r1, PTO + PT_MODE; /* MS: and save it */
brid 2f;
nop; /* MS: Fill delay slot */
swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REG */
lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
/* MS: get the saved current */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tophys(r1,r1);
......@@ -774,27 +665,18 @@ C_ENTRY(_interrupt):
tophys(r1,r1);
/* save registers */
addik r1, r1, -STATE_SAVE_SIZE;
swi r3, r1, PTO+PT_R3;
swi r4, r1, PTO+PT_R4;
SAVE_REGS
/* calculate mode */
swi r0, r1, PTO + PT_MODE;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1;
/* setup kernel mode to KM */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM));
clear_ums;
2:
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
swi r0, r1, PTO + PT_R0;
tovirt(r1,r1)
la r5, r1, PTO;
set_vms;
la r11, r0, do_IRQ;
la r15, r0, irq_call;
irq_call:rtbd r11, 0;
nop;
addik r15, r0, irq_call;
irq_call:rtbd r0, do_IRQ;
addik r5, r1, PTO;
/* MS: we are in virtual mode */
ret_from_irq:
......@@ -815,7 +697,7 @@ ret_from_irq:
beqid r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
addi r7, r0, 0; /* Arg 3: int in_syscall */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
......@@ -823,12 +705,9 @@ ret_from_irq:
no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;
......@@ -857,8 +736,6 @@ restore:
#endif
VM_OFF /* MS: turn off MMU */
tophys(r1,r1)
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
tovirt(r1,r1);
......@@ -868,86 +745,91 @@ IRQ_return: /* MS: Make global symbol for debugging */
nop
/*
* `Debug' trap
* We enter dbtrap in "BIP" (breakpoint) mode.
* So we exit the breakpoint mode with an 'rtbd' and proceed with the
* original dbtrap.
* however, wait to save state first
* Debug trap for KGDB. Enter _debug_exception via brki r16, 0x18
* and call the handling function with the saved pt_regs
*/
C_ENTRY(_debug_exception):
/* BIP bit is set on entry, no interrupts can occur */
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
set_bip; /*equalize initial state for all possible entries*/
clear_eip;
enable_irq;
lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
beqi r11, 1f; /* Jump ahead if coming from user */
/* Kernel-mode state save. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
tophys(r1,r11);
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
mfs r1, rmsr
nop
andi r1, r1, MSR_UMS
bnei r1, 1f
/* MS: Kernel-mode state save - kgdb */
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
/* BIP bit is set on entry, no interrupts can occur */
addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
SAVE_REGS;
/* save all regs to pt_reg structure */
swi r0, r1, PTO+PT_R0; /* R0 must be saved too */
swi r14, r1, PTO+PT_R14 /* rewrite saved R14 value */
swi r16, r1, PTO+PT_R16
swi r16, r1, PTO+PT_PC; /* PC and r16 are the same */
swi r17, r1, PTO+PT_R17
/* save special purpose registers to pt_regs */
mfs r11, rear;
swi r11, r1, PTO+PT_EAR;
mfs r11, resr;
swi r11, r1, PTO+PT_ESR;
mfs r11, rfsr;
swi r11, r1, PTO+PT_FSR;
/* the stack pointer is a physical address and has been decreased
* by STATE_SAVE_SIZE, but we need to store the correct R1 value */
addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE;
swi r11, r1, PTO+PT_R1
/* MS: r31 - current pointer isn't changed */
tovirt(r1,r1)
#ifdef CONFIG_KGDB
addi r5, r1, PTO /* pass pt_reg address as the first arg */
la r15, r0, dbtrap_call; /* return address */
rtbd r0, microblaze_kgdb_break
nop;
#endif
/* MS: Place handler for brki from kernel space if KGDB is OFF.
* It is very unlikely that another brki instruction is called. */
bri 0
addi r11, r0, 1; /* Was in kernel-mode. */
swi r11, r1, PTO + PT_MODE;
brid 2f;
nop; /* Fill delay slot */
1: /* User-mode state save. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
/* MS: User-mode state save - gdb */
1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
tophys(r1,r1);
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
SAVE_REGS;
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
swi r17, r1, PTO+PT_R17;
swi r16, r1, PTO+PT_R16;
swi r16, r1, PTO+PT_PC; /* Save LP */
swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r0, r1, PTO+PT_R0;
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tovirt(r1,r1)
addi r5, r0, SIGTRAP /* send the trap signal */
add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
addk r7, r0, r0 /* 3rd param zero */
set_vms;
la r11, r0, send_sig;
la r15, r0, dbtrap_call;
dbtrap_call: rtbd r11, 0;
nop;
addik r5, r1, PTO;
addik r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
rtbd r0, sw_exception
nop
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
/* MS: The first instruction for the second part of the gdb/kgdb */
set_bip; /* Ints masked for state restore */
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f;
/* MS: Return to user space - gdb */
/* Get current task ptr into r11 */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
/* Call the scheduler before returning from a syscall/trap. */
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
/* XXX Is PT_DTRACE handling needed here? */
/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
/* Maybe handle a signal */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
......@@ -955,54 +837,40 @@ dbtrap_call: rtbd r11, 0;
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
/* Handle a signal return; Pending signals should be in r18. */
/* Not all registers are saved by the normal trap/interrupt entry
points (for instance, call-saved registers (because the normal
C-compiler calling sequence in the kernel makes sure they're
preserved), and call-clobbered registers in the case of
traps), but signal handlers may want to examine or change the
complete register state. Here we save anything not saved by
the normal entry sequence, so that it may be safely restored
(in a possibly modified form) after do_signal returns. */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
1: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
/* MS: Restore all regs */
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;
/* Restore user stack pointer. */
bri 6f;
lwi r17, r1, PTO+PT_R17;
lwi r16, r1, PTO+PT_R16;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
rtbd r16, 0; /* MS: Instructions to return from a debug trap */
nop;
/* Return to kernel state. */
/* MS: Return to kernel state - kgdb */
2: VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
/* MS: Restore all regs */
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r14, r1, PTO+PT_R14;
lwi r16, r1, PTO+PT_PC;
lwi r17, r1, PTO+PT_R17;
addik r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */
tovirt(r1,r1);
6:
DBTRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
rtbd r16, 0; /* MS: Instructions to return from a debug trap */
nop;
ENTRY(_switch_to)
/* prepare return value */
addk r3, r0, CURRENT_TASK
......@@ -1037,16 +905,12 @@ ENTRY(_switch_to)
swi r30, r11, CC_R30
/* special purpose registers */
mfs r12, rmsr
nop
swi r12, r11, CC_MSR
mfs r12, rear
nop
swi r12, r11, CC_EAR
mfs r12, resr
nop
swi r12, r11, CC_ESR
mfs r12, rfsr
nop
swi r12, r11, CC_FSR
/* update r31, the current-give me pointer to task which will be next */
......@@ -1085,10 +949,8 @@ ENTRY(_switch_to)
/* special purpose registers */
lwi r12, r11, CC_FSR
mts rfsr, r12
nop
lwi r12, r11, CC_MSR
mts rmsr, r12
nop
rtsd r15, 8
nop
......@@ -1096,15 +958,6 @@ ENTRY(_switch_to)
ENTRY(_reset)
brai 0x70; /* Jump back to FS-boot */
ENTRY(_break)
mfs r5, rmsr
nop
swi r5, r0, 0x250 + TOPHYS(r0_ram)
mfs r5, resr
nop
swi r5, r0, 0x254 + TOPHYS(r0_ram)
bri 0
/* These are compiled and loaded into high memory, then
* copied into place in mach_early_setup */
.section .init.ivt, "ax"
......@@ -1116,14 +969,38 @@ ENTRY(_break)
nop
brai TOPHYS(_user_exception); /* syscall handler */
brai TOPHYS(_interrupt); /* Interrupt handler */
brai TOPHYS(_break); /* nmi trap handler */
brai TOPHYS(_debug_exception); /* debug trap handler */
brai TOPHYS(_hw_exception_handler); /* HW exception handler */
.org 0x60
brai TOPHYS(_debug_exception); /* debug trap handler*/
.section .rodata,"a"
#include "syscall_table.S"
syscall_table_size=(.-sys_call_table)
type_SYSCALL:
.ascii "SYSCALL\0"
type_IRQ:
.ascii "IRQ\0"
type_IRQ_PREEMPT:
.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
.ascii " SYSCALL (PREEMPTED)\0"
/*
* Trap decoding for stack unwinder
* Tuples are (start addr, end addr, string)
* If the return address lies in [start addr, end addr],
* the unwinder displays 'string'
*/
.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
/* Exact matches come first */
.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
.word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
/* Fuzzy matches go here */
.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
.word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
/* End of table */
.word 0 ; .word 0 ; .word 0
......@@ -48,12 +48,17 @@ void die(const char *str, struct pt_regs *fp, long err)
do_exit(err);
}
/* for user application debugging */
void sw_exception(struct pt_regs *regs)
{
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;
if (kernel_mode(regs)) {
debugger(regs);
die("Exception in kernel mode", regs, signr);
}
info.si_signo = signr;
......@@ -143,7 +148,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
#ifdef CONFIG_MMU
case MICROBLAZE_PRIVILEGED_EXCEPTION:
pr_debug(KERN_WARNING "Privileged exception\n");
/* "brk r0,r0" - used as debug breakpoint */
/* "brk r0,r0" - used as debug breakpoint - old toolchain */
if (get_user(code, (unsigned long *)regs->pc) == 0
&& code == 0x980c0000) {
_exception(SIGTRAP, regs, TRAP_BRKPT, addr);
......
......@@ -43,10 +43,10 @@
.global empty_zero_page
.align 12
empty_zero_page:
.space 4096
.space PAGE_SIZE
.global swapper_pg_dir
swapper_pg_dir:
.space 4096
.space PAGE_SIZE
#endif /* CONFIG_MMU */
......
......@@ -78,9 +78,6 @@
#include <asm/asm-offsets.h>
/* Helpful Macros */
#ifndef CONFIG_MMU
#define EX_HANDLER_STACK_SIZ (4*19)
#endif
#define NUM_TO_REG(num) r ## num
#ifdef CONFIG_MMU
......@@ -988,6 +985,7 @@ ex_unaligned_fixup:
.end _unaligned_data_exception
#endif /* CONFIG_MMU */
.global ex_handler_unhandled
ex_handler_unhandled:
/* FIXME add handle function for unhandled exception - dump register */
bri 0
......
......@@ -37,6 +37,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
{
unsigned int irq;
struct pt_regs *old_regs = set_irq_regs(regs);
trace_hardirqs_off();
irq_enter();
irq = get_irq(regs);
......@@ -53,6 +54,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
irq_exit();
set_irq_regs(old_regs);
trace_hardirqs_on();
}
int show_interrupts(struct seq_file *p, void *v)
......
/*
* Microblaze KGDB support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/pvr.h>
#define GDB_REG 0
#define GDB_PC 32
#define GDB_MSR 33
#define GDB_EAR 34
#define GDB_ESR 35
#define GDB_FSR 36
#define GDB_BTR 37
#define GDB_PVR 38
#define GDB_REDR 50
#define GDB_RPID 51
#define GDB_RZPR 52
#define GDB_RTLBX 53
#define GDB_RTLBSX 54 /* mfs can't read it */
#define GDB_RTLBLO 55
#define GDB_RTLBHI 56
/* keep pvr separately because it is unchangeable */
struct pvr_s pvr;
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
int i;
unsigned long *pt_regb = (unsigned long *)regs;
int temp;
/* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
gdb_regs[i] = pt_regb[i];
/* Branch target register can't be changed */
__asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : );
gdb_regs[GDB_BTR] = temp;
/* pvr part - we have 12 pvr regs */
for (i = 0; i < sizeof(struct pvr_s)/4; i++)
gdb_regs[GDB_PVR + i] = pvr.pvr[i];
/* read special registers - can't be changed */
__asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : );
gdb_regs[GDB_REDR] = temp;
__asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : );
gdb_regs[GDB_RPID] = temp;
__asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : );
gdb_regs[GDB_RZPR] = temp;
__asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : );
gdb_regs[GDB_RTLBX] = temp;
__asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : );
gdb_regs[GDB_RTLBLO] = temp;
__asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : );
gdb_regs[GDB_RTLBHI] = temp;
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
int i;
unsigned long *pt_regb = (unsigned long *)regs;
/* pt_regs and gdb_regs have the same 37 values.
* The rest of gdb_regs are unused and can't be changed.
* r0 register value can't be changed too. */
for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++)
pt_regb[i] = gdb_regs[i];
}
void microblaze_kgdb_break(struct pt_regs *regs)
{
if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
return;
/* Jump over the first arch_kgdb_breakpoint, which is a barrier to
* get kgdb working. The same solution is used on powerpc */
if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
regs->pc += BREAK_INSTR_SIZE;
}
/* untested */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int i;
unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
/* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
gdb_regs[i] = pt_regb[i];
/* pvr part - we have 12 pvr regs */
for (i = 0; i < sizeof(struct pvr_s)/4; i++)
gdb_regs[GDB_PVR + i] = pvr.pvr[i];
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->pc = ip;
}
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *regs)
{
char *ptr;
unsigned long address;
int cpu = smp_processor_id();
switch (remcom_in_buffer[0]) {
case 'c':
/* handle the optional parameter */
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &address))
regs->pc = address;
return 0;
}
return -1; /* this means that we do not want to exit from the handler */
}
int kgdb_arch_init(void)
{
get_pvr(&pvr); /* Fill PVR structure */
return 0;
}
void kgdb_arch_exit(void)
{
/* Nothing to do */
}
/*
* Global data
*/
struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */
};
......@@ -76,7 +76,7 @@ early_console_reg_tlb_alloc:
* the UARTs nice and early. We use a 4k real==virtual mapping.
*/
ori r4, r0, MICROBLAZE_TLB_SIZE - 1
mts rtlbx, r4 /* TLB slot 2 */
mts rtlbx, r4 /* TLB slot 63 */
or r4,r5,r0
andi r4,r4,0xfffff000
......
......@@ -76,8 +76,11 @@ __setup("hlt", hlt_setup);
void default_idle(void)
{
if (likely(hlt_counter)) {
while (!need_resched())
local_irq_disable();
stop_critical_timings();
cpu_relax();
start_critical_timings();
local_irq_enable();
} else {
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
......
......@@ -38,6 +38,8 @@
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
/* Returns the address where the register at REG_OFFS in P is stashed away. */
static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
......@@ -101,8 +103,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
if (request == PTRACE_PEEKUSR)
val = *reg_addr;
else
else {
#if 1
*reg_addr = data;
#else
/* MS potential problem on WB system
* Be aware that reg_addr is virtual address
* virt_to_phys conversion is necessary.
* This could be sensible solution.
*/
u32 paddr = virt_to_phys((u32)reg_addr);
invalidate_icache_range(paddr, paddr + 4);
*reg_addr = data;
flush_dcache_range(paddr, paddr + 4);
#endif
}
} else
rval = -EIO;
......
......@@ -14,52 +14,18 @@
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <asm/unwind.h>
/* FIXME initial support */
void save_stack_trace(struct stack_trace *trace)
{
unsigned long *sp;
unsigned long addr;
asm("addik %0, r1, 0" : "=r" (sp));
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = addr;
if (trace->nr_entries >= trace->max_entries)
break;
}
}
/* Exclude our helper functions from the trace*/
trace->skip += 2;
microblaze_unwind(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned int *sp;
unsigned long addr;
struct thread_info *ti = task_thread_info(tsk);
if (tsk == current)
asm("addik %0, r1, 0" : "=r" (sp));
else
sp = (unsigned int *)ti->cpu_context.r1;
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = addr;
if (trace->nr_entries >= trace->max_entries)
break;
}
}
microblaze_unwind(tsk, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
......@@ -28,6 +28,7 @@
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <linux/cnt32_to_63.h>
#ifdef CONFIG_SELFMOD_TIMER
#include <asm/selfmod.h>
......@@ -135,7 +136,7 @@ static void microblaze_timer_set_mode(enum clock_event_mode mode,
static struct clock_event_device clockevent_microblaze_timer = {
.name = "microblaze_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.shift = 24,
.shift = 8,
.rating = 300,
.set_next_event = microblaze_timer_set_next_event,
.set_mode = microblaze_timer_set_mode,
......@@ -195,7 +196,7 @@ static cycle_t microblaze_cc_read(const struct cyclecounter *cc)
static struct cyclecounter microblaze_cc = {
.read = microblaze_cc_read,
.mask = CLOCKSOURCE_MASK(32),
.shift = 24,
.shift = 8,
};
int __init init_microblaze_timecounter(void)
......@@ -213,7 +214,7 @@ static struct clocksource clocksource_microblaze = {
.rating = 300,
.read = microblaze_read,
.mask = CLOCKSOURCE_MASK(32),
.shift = 24, /* I can shift it */
.shift = 8, /* I can shift it */
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
......@@ -235,6 +236,12 @@ static int __init microblaze_clocksource_init(void)
return 0;
}
/*
* We have to protect accesses before timer initialization
* and return 0 from the sched_clock function below.
*/
static int timer_initialized;
void __init time_init(void)
{
u32 irq, i = 0;
......@@ -289,4 +296,15 @@ void __init time_init(void)
#endif
microblaze_clocksource_init();
microblaze_clockevent_init();
timer_initialized = 1;
}
unsigned long long notrace sched_clock(void)
{
if (timer_initialized) {
struct clocksource *cs = &clocksource_microblaze;
cycle_t cyc = cnt32_to_63(cs->read(NULL));
return clocksource_cyc2ns(cyc, cs->mult, cs->shift);
}
return 0;
}
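A hedged, standalone sketch of the clocksource_cyc2ns() conversion the new sched_clock() above relies on, ns = (cyc * mult) >> shift; the 100 MHz frequency and the mult value are made-up example numbers, not the actual timer configuration.

#include <stdint.h>
#include <stdio.h>

/* ns = (cycles * mult) >> shift, as clocksource_cyc2ns() computes it. */
static uint64_t cyc2ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* Example: a 100 MHz timer, 10 ns per tick, with shift = 8:
	 * mult = 10 << 8 = 2560. */
	uint32_t shift = 8;
	uint32_t mult = 10u << shift;

	printf("%llu ns\n", (unsigned long long)cyc2ns(100000000ULL, mult, shift));
	/* prints 1000000000 ns, i.e. one second worth of 100 MHz cycles */
	return 0;
}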
......@@ -16,13 +16,14 @@
#include <asm/exceptions.h>
#include <asm/system.h>
#include <asm/unwind.h>
void trap_init(void)
{
__enable_hw_exceptions();
}
static unsigned long kstack_depth_to_print = 24;
static unsigned long kstack_depth_to_print; /* 0 == entire stack */
static int __init kstack_setup(char *s)
{
......@@ -30,31 +31,47 @@ static int __init kstack_setup(char *s)
}
__setup("kstack=", kstack_setup);
void show_trace(struct task_struct *task, unsigned long *stack)
void show_stack(struct task_struct *task, unsigned long *sp)
{
unsigned long addr;
unsigned long words_to_show;
u32 fp = (u32) sp;
if (fp == 0) {
if (task) {
fp = ((struct thread_info *)
(task->stack))->cpu_context.r1;
} else {
/* Pick up caller of dump_stack() */
fp = (u32)&sp - 8;
}
}
if (!stack)
stack = (unsigned long *)&stack;
words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2;
if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print))
words_to_show = kstack_depth_to_print;
pr_info("Kernel Stack:\n");
printk(KERN_NOTICE "Call Trace: ");
#ifdef CONFIG_KALLSYMS
printk(KERN_NOTICE "\n");
#endif
while (!kstack_end(stack)) {
addr = *stack++;
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
* memory, it *may* be the address of a calling
* routine; if so, print it so that someone tracing
* down the cause of the crash will be able to figure
* out the call path that was taken.
* Make the first line an 'odd' size if necessary to get
* remaining lines to start at an address multiple of 0x10
*/
if (kernel_text_address(addr))
print_ip_sym(addr);
if (fp & 0xF) {
unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2;
if (line1_words < words_to_show) {
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32,
4, (void *)fp, line1_words << 2, 0);
fp += line1_words << 2;
words_to_show -= line1_words;
}
}
printk(KERN_NOTICE "\n");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
words_to_show << 2, 0);
printk(KERN_INFO "\n\n");
pr_info("Call Trace:\n");
microblaze_unwind(task, NULL);
pr_info("\n");
if (!task)
task = current;
......@@ -62,34 +79,6 @@ void show_trace(struct task_struct *task, unsigned long *stack)
debug_show_held_locks(task);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
unsigned long *stack;
int i;
if (sp == NULL) {
if (task)
sp = (unsigned long *) ((struct thread_info *)
(task->stack))->cpu_context.r1;
else
sp = (unsigned long *)&sp;
}
stack = sp;
printk(KERN_INFO "\nStack:\n ");
for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(sp))
break;
if (i && ((i % 8) == 0))
printk("\n ");
printk("%08lx ", *sp++);
}
printk("\n");
show_trace(task, stack);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
......
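A hedged, standalone restatement of the first-line alignment arithmetic in the new show_stack() above: when the dump start address is not 16-byte aligned, (0x10 - (fp & 0xF)) >> 2 words are printed first so the remaining hex-dump rows begin on 0x10 boundaries. The frame-pointer values are made-up examples.

#include <stdio.h>

/* Words (4 bytes each) to print on the first line; 4 means a full
 * 16-byte row, i.e. fp was already aligned. */
static unsigned long first_line_words(unsigned long fp)
{
	return (fp & 0xF) ? (0x10 - (fp & 0xF)) >> 2 : 4;
}

int main(void)
{
	unsigned long fps[] = { 0xc5ff3f00, 0xc5ff3f04, 0xc5ff3f08, 0xc5ff3f0c };
	unsigned int i;

	for (i = 0; i < sizeof(fps) / sizeof(fps[0]); i++)
		printf("fp=0x%08lx -> %lu words on first line\n",
		       fps[i], first_line_words(fps[i]));
	return 0;
}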
/*
* Backtrace support for Microblaze
*
* Copyright (C) 2010 Digital Design Corporation
*
* Based on arch/sh/kernel/cpu/sh5/unwind.c code which is:
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
/* #define DEBUG 1 */
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/sections.h>
#include <asm/exceptions.h>
#include <asm/unwind.h>
struct stack_trace;
/*
* On Microblaze, finding the previous stack frame is a little tricky.
* At this writing (3/2010), Microblaze does not support CONFIG_FRAME_POINTERS,
* and even if it did, gcc (4.1.2) does not store the frame pointer at
* a consistent offset within each frame. To determine frame size, it is
* necessary to search for the assembly instruction that creates or reclaims
* the frame and extract the size from it.
*
* Microblaze stores the stack pointer in r1, and creates a frame via
*
* addik r1, r1, -FRAME_SIZE
*
* The frame is reclaimed via
*
* addik r1, r1, FRAME_SIZE
*
* Frame creation occurs at or near the top of a function.
* Depending on the compiler, reclaim may occur at the end, or before
* a mid-function return.
*
* A stack frame is usually not created in a leaf function.
*
*/
/**
* get_frame_size - Extract the stack adjustment from an
* "addik r1, r1, adjust" instruction
* @instr : Microblaze instruction
*
* Return - Number of stack bytes the instruction reserves or reclaims
*/
inline long get_frame_size(unsigned long instr)
{
return abs((s16)(instr & 0xFFFF));
}
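A hedged worked example of the decode above: 0x3021FFE0 is one encoding of "addik r1, r1, -32" (it matches the 0x30210000 opcode mask tested in find_frame_creation() below), so the extracted frame size is 32 bytes. Standalone C, mirroring get_frame_size() rather than reusing it.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

static long frame_size(unsigned long instr)
{
	return labs((int16_t)(instr & 0xFFFF));	/* sign-extend the imm16 */
}

int main(void)
{
	unsigned long instr = 0x3021FFE0UL;	/* addik r1, r1, -32 */

	if ((instr & 0xFFFF0000UL) == 0x30210000UL)
		printf("frame size: %ld bytes\n", frame_size(instr)); /* 32 */
	return 0;
}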
/**
* find_frame_creation - Search backward to find the instruction that creates
* the stack frame (hopefully, for the same function the
* initial PC is in).
* @pc : Program counter at which to begin the search
*
* Return - PC at which stack frame creation occurs
* NULL if this cannot be found, i.e. a leaf function
*/
static unsigned long *find_frame_creation(unsigned long *pc)
{
int i;
/* NOTE: Distance to search is arbitrary
* 250 works well for most things,
* 750 picks up things like tcp_recvmsg(),
* 1000 needed for fat_fill_super()
*/
for (i = 0; i < 1000; i++, pc--) {
unsigned long instr;
s16 frame_size;
if (!kernel_text_address((unsigned long) pc))
return NULL;
instr = *pc;
/* addik r1, r1, foo ? */
if ((instr & 0xFFFF0000) != 0x30210000)
continue; /* No */
frame_size = get_frame_size(instr);
if ((frame_size < 8) || (frame_size & 3)) {
pr_debug(" Invalid frame size %d at 0x%p\n",
frame_size, pc);
return NULL;
}
pr_debug(" Found frame creation at 0x%p, size %d\n", pc,
frame_size);
return pc;
}
return NULL;
}
/**
* lookup_prev_stack_frame - Find the stack frame of the previous function.
* @fp : Frame (stack) pointer for current function
* @pc : Program counter within current function
* @leaf_return : r15 value within current function. If the current function
* is a leaf, this is the caller's return address.
* @pprev_fp : On exit, set to frame (stack) pointer for previous function
* @pprev_pc : On exit, set to current function caller's return address
*
* Return - 0 on success, -EINVAL if the previous frame cannot be found
*/
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
unsigned long leaf_return,
unsigned long *pprev_fp,
unsigned long *pprev_pc)
{
unsigned long *prologue = NULL;
/* _switch_to is a special leaf function */
if (pc != (unsigned long) &_switch_to)
prologue = find_frame_creation((unsigned long *)pc);
if (prologue) {
long frame_size = get_frame_size(*prologue);
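		/*
		 * Layout assumed here (per the MicroBlaze stack convention):
		 * the callee saves its return address (r15) at offset 0 of its
		 * own frame, so the caller's return address is read from 0(fp)
		 * and the caller's stack pointer is fp plus this frame's size.
		 */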
*pprev_fp = fp + frame_size;
*pprev_pc = *(unsigned long *)fp;
} else {
if (!leaf_return)
return -EINVAL;
*pprev_pc = leaf_return;
*pprev_fp = fp;
}
/* NOTE: don't check kernel_text_address here, to allow display
* of userland return address
*/
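	/* A plausible return address is non-zero and 32-bit aligned;
	 * anything else means the unwind has gone astray.
	 */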
return (!*pprev_pc || (*pprev_pc & 3)) ? -EINVAL : 0;
}
static void microblaze_unwind_inner(struct task_struct *task,
unsigned long pc, unsigned long fp,
unsigned long leaf_return,
struct stack_trace *trace);
/**
 * unwind_trap - Unwind through a system trap that stored the previous state
 * on the stack.
*/
#ifdef CONFIG_MMU
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
unsigned long fp, struct stack_trace *trace)
{
/* To be implemented */
}
#else
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
unsigned long fp, struct stack_trace *trace)
{
const struct pt_regs *regs = (const struct pt_regs *) fp;
microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace);
}
#endif
/**
* microblaze_unwind_inner - Unwind the stack from the specified point
* @task : Task whose stack we are to unwind (may be NULL)
* @pc : Program counter from which we start unwinding
* @fp : Frame (stack) pointer from which we start unwinding
* @leaf_return : Value of r15 at pc. If the function is a leaf, this is
* the caller's return address.
* @trace : Where to store stack backtrace (PC values).
* NULL == print backtrace to kernel log
*/
void microblaze_unwind_inner(struct task_struct *task,
unsigned long pc, unsigned long fp,
unsigned long leaf_return,
struct stack_trace *trace)
{
int ofs = 0;
pr_debug(" Unwinding with PC=%p, FP=%p\n", (void *)pc, (void *)fp);
if (!pc || !fp || (pc & 3) || (fp & 3)) {
pr_debug(" Invalid state for unwind, aborting\n");
return;
}
for (; pc != 0;) {
unsigned long next_fp, next_pc = 0;
unsigned long return_to = pc + 2 * sizeof(unsigned long);
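		/*
		 * Descriptive note: a saved MicroBlaze link value (r15) points
		 * at the branch-and-link instruction itself, and returns use
		 * "rtsd r15, 8" to skip the branch plus its delay slot, so
		 * return_to is where execution would actually resume.
		 */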
const struct trap_handler_info *handler =
&microblaze_trap_handlers;
/* Is previous function the HW exception handler? */
if ((return_to >= (unsigned long)&_hw_exception_handler)
			&& (return_to < (unsigned long)&ex_handler_unhandled)) {
/*
* HW exception handler doesn't save all registers,
* so we open-code a special case of unwind_trap()
*/
#ifndef CONFIG_MMU
const struct pt_regs *regs =
(const struct pt_regs *) fp;
#endif
pr_info("HW EXCEPTION\n");
#ifndef CONFIG_MMU
microblaze_unwind_inner(task, regs->r17 - 4,
fp + EX_HANDLER_STACK_SIZ,
regs->r15, trace);
#endif
return;
}
/* Is previous function a trap handler? */
for (; handler->start_addr; ++handler) {
if ((return_to >= handler->start_addr)
&& (return_to <= handler->end_addr)) {
if (!trace)
pr_info("%s\n", handler->trap_name);
unwind_trap(task, pc, fp, trace);
return;
}
}
pc -= ofs;
if (trace) {
#ifdef CONFIG_STACKTRACE
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = pc;
if (trace->nr_entries >= trace->max_entries)
break;
#endif
} else {
/* Have we reached userland? */
if (unlikely(pc == task_pt_regs(task)->pc)) {
pr_info("[<%p>] PID %lu [%s]\n",
(void *) pc,
(unsigned long) task->pid,
task->comm);
break;
} else
print_ip_sym(pc);
}
/* Stop when we reach anything not part of the kernel */
if (!kernel_text_address(pc))
break;
if (lookup_prev_stack_frame(fp, pc, leaf_return, &next_fp,
&next_pc) == 0) {
ofs = sizeof(unsigned long);
pc = next_pc & ~3;
fp = next_fp;
leaf_return = 0;
} else {
pr_debug(" Failed to find previous stack frame\n");
break;
}
pr_debug(" Next PC=%p, next FP=%p\n",
(void *)next_pc, (void *)next_fp);
}
}
/**
* microblaze_unwind - Stack unwinder for Microblaze (external entry point)
* @task : Task whose stack we are to unwind (NULL == current)
* @trace : Where to store stack backtrace (PC values).
* NULL == print backtrace to kernel log
*/
void microblaze_unwind(struct task_struct *task, struct stack_trace *trace)
{
if (task) {
if (task == current) {
const struct pt_regs *regs = task_pt_regs(task);
microblaze_unwind_inner(task, regs->pc, regs->r1,
regs->r15, trace);
} else {
struct thread_info *thread_info =
(struct thread_info *)(task->stack);
const struct cpu_context *cpu_context =
&thread_info->cpu_context;
microblaze_unwind_inner(task,
(unsigned long) &_switch_to,
cpu_context->r1,
cpu_context->r15, trace);
}
} else {
unsigned long pc, fp;
__asm__ __volatile__ ("or %0, r1, r0" : "=r" (fp));
__asm__ __volatile__ (
"brlid %0, 0f;"
"nop;"
"0:"
: "=r" (pc)
);
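		/*
		 * Descriptive note: "or %0, r1, r0" copies the live stack
		 * pointer, and "brlid %0, 0f" stores the address of the brlid
		 * itself in %0 while branching to the local label, so pc points
		 * into this function.
		 */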
/* Since we are not a leaf function, use leaf_return = 0 */
microblaze_unwind_inner(current, pc, fp, 0, trace);
}
}
......@@ -10,7 +10,7 @@
OUTPUT_FORMAT("elf32-microblaze", "elf32-microblaze", "elf32-microblaze")
OUTPUT_ARCH(microblaze)
ENTRY(_start)
ENTRY(microblaze_start)
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
......@@ -20,7 +20,7 @@ jiffies = jiffies_64 + 4;
SECTIONS {
. = CONFIG_KERNEL_START;
_start = CONFIG_KERNEL_BASE_ADDR;
microblaze_start = CONFIG_KERNEL_BASE_ADDR;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = . ;
_stext = . ;
......@@ -55,7 +55,7 @@ SECTIONS {
*/
.sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
_ssrw = .;
. = ALIGN(4096); /* page aligned when MMU used - origin 0x8 */
. = ALIGN(PAGE_SIZE); /* page aligned when MMU used */
*(.sdata2)
. = ALIGN(8);
_essrw = .;
......@@ -70,7 +70,7 @@ SECTIONS {
/* Reserve some low RAM for r0 based memory references */
. = ALIGN(0x4) ;
r0_ram = . ;
. = . + 4096; /* a page should be enough */
. = . + PAGE_SIZE; /* a page should be enough */
/* Under the microblaze ABI, .sdata and .sbss must be contiguous */
. = ALIGN(8);
......@@ -120,7 +120,7 @@ SECTIONS {
__init_end_before_initramfs = .;
.init.ramfs ALIGN(4096) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
.init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
......@@ -132,11 +132,11 @@ SECTIONS {
* so that __init_end == __bss_start. This will make image.elf
* consistent with the image.bin
*/
/* . = ALIGN(4096); */
/* . = ALIGN(PAGE_SIZE); */
}
__init_end = .;
.bss ALIGN (4096) : AT(ADDR(.bss) - LOAD_OFFSET) {
.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
/* page aligned when MMU used */
__bss_start = . ;
*(.bss*)
......@@ -145,7 +145,7 @@ SECTIONS {
__bss_stop = . ;
_ebss = . ;
}
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
_end = .;
DISCARDS
......
......@@ -37,10 +37,6 @@
#include <linux/uaccess.h>
#include <asm/exceptions.h>
#if defined(CONFIG_KGDB)
int debugger_kernel_faults = 1;
#endif
static unsigned long pte_misses; /* updated by do_page_fault() */
static unsigned long pte_errors; /* updated by do_page_fault() */
......@@ -81,10 +77,6 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
}
/* kernel has accessed a bad area */
#if defined(CONFIG_KGDB)
if (debugger_kernel_faults)
debugger(regs);
#endif
die("kernel access of bad area", regs, sig);
}
......@@ -115,13 +107,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
#if defined(CONFIG_KGDB)
if (debugger_fault_handler && regs->trap == 0x300) {
debugger_fault_handler(regs);
return;
}
#endif /* CONFIG_KGDB */
if (unlikely(in_atomic() || !mm)) {
if (kernel_mode(regs))
goto bad_area_nosemaphore;
......@@ -226,7 +211,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
survive:
fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
......
......@@ -134,13 +134,8 @@ void __init setup_memory(void)
* for 4GB of memory, using 4kB pages), plus 1 page
* (in case the address isn't page-aligned).
*/
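	/*
	 * (For scale: 4 GB in 4 kB pages is 2^20 pages; at one bitmap bit
	 * per page that is 2^20 bits, i.e. 128 kB of map, before the extra
	 * page added for alignment.)
	 */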
#ifndef CONFIG_MMU
map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)klimit)),
min_low_pfn, max_low_pfn);
#else
map_size = init_bootmem_node(&contig_page_data,
map_size = init_bootmem_node(NODE_DATA(0),
PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
#endif
memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
/* free bootmem is whole main memory */
......
......@@ -528,7 +528,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE
select KALLSYMS
select KALLSYMS_ALL
......@@ -958,13 +958,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
select FRAME_POINTER if !PPC && !S390
select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
help
Provide stacktrace filter for fault-injection capabilities
config LATENCYTOP
bool "Latency measuring infrastructure"
select FRAME_POINTER if !MIPS && !PPC && !S390
select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
......