Commit e839ca52 authored by David Howells

Disintegrate asm/system.h for SH

Disintegrate asm/system.h for SH.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-sh@vger.kernel.org
parent 4eb14db4
......@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <mach/microdev.h>
......
#ifndef __ASM_SH_ATOMIC_IRQ_H
#define __ASM_SH_ATOMIC_IRQ_H
#include <linux/irqflags.h>
/*
* To get proper branch prediction for the main line, we must branch
* forward to code at the end of this object's .text section, then
......
......@@ -9,7 +9,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
......
......@@ -33,4 +33,6 @@
#define AT_L1D_CACHESHAPE 35
#define AT_L2_CACHESHAPE 36
#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
#endif /* __ASM_SH_AUXVEC_H */
/*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Paul Mundt
*/
#ifndef __ASM_SH_BARRIER_H
#define __ASM_SH_BARRIER_H
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#include <asm/cache_insns.h>
#endif
/*
* A brief note on ctrl_barrier(), the control register write barrier.
*
* Legacy SH cores typically require a sequence of 8 nops after
* modification of a control register in order for the changes to take
* effect. On newer cores (like the sh4a and sh5) this is accomplished
* with icbi.
*
* Also note that on sh4a in the icbi case we can forego a synco for the
* write barrier, as it's not necessary for control registers.
*
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier() __icbi(PAGE_OFFSET)
#define read_barrier_depends() do { } while(0)
#else
#define mb() __asm__ __volatile__ ("": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends() do { } while(0)
#endif
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#endif /* __ASM_SH_BARRIER_H */
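The ctrl_barrier() comment above is the heart of this new header. As a hedged illustration only (not part of the patch; write_mmucr() is a hypothetical helper), a control-register update would pair the write with ctrl_barrier() so that the eight nops (or the icbi on SH-4A/SH-5) let the new value take effect before dependent code runs:

/* Illustrative sketch, not from this patch: update a control register
 * and flush it through the pipeline with the barrier described above. */
static inline void write_mmucr(unsigned long val)
{
	__raw_writel(val, MMUCR);	/* modify the MMU control register */
	ctrl_barrier();			/* 8 nops, or icbi(PAGE_OFFSET) */
}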
......@@ -7,7 +7,6 @@
#error only <linux/bitops.h> can be included directly
#endif
#include <asm/system.h>
/* For __swab32 */
#include <asm/byteorder.h>
......
#ifndef __ASM_SH_BL_BIT_H
#define __ASM_SH_BL_BIT_H
#ifdef CONFIG_SUPERH32
# include "bl_bit_32.h"
#else
# include "bl_bit_64.h"
#endif
#endif /* __ASM_SH_BL_BIT_H */
#ifndef __ASM_SH_BL_BIT_32_H
#define __ASM_SH_BL_BIT_32_H
static inline void set_bl_bit(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"or %2, %0\n\t"
"and %3, %0\n\t"
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "r" (0x10000000), "r" (0xffffff0f)
: "memory"
);
}
static inline void clear_bl_bit(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and %2, %0\n\t"
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "1" (~0x10000000)
: "memory"
);
}
#endif /* __ASM_SH_BL_BIT_32_H */
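As a usage sketch (hypothetical, not from the patch): SR.BL blocks exception and interrupt delivery on SH-3/SH-4, so a short sequence that must not be disturbed can be bracketed with these helpers:

/* Hedged example only: bracket a short, non-faulting critical
 * sequence with the SR.BL helpers defined above. */
static void critical_sequence(void)
{
	set_bl_bit();		/* SR.BL = 1: block exceptions/interrupts */
	/* ... short, non-faulting work ... */
	clear_bl_bit();		/* SR.BL = 0: normal delivery resumes */
}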
#ifndef __ASM_SH_SYSTEM_64_H
#define __ASM_SH_SYSTEM_64_H
/*
* include/asm-sh/system_64.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
......@@ -12,44 +7,10 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <cpu/registers.h>
#include <asm/processor.h>
/*
* switch_to() should switch execution over to the next task
*/
struct thread_struct;
struct task_struct *sh64_switch_to(struct task_struct *prev,
struct thread_struct *prev_thread,
struct task_struct *next,
struct thread_struct *next_thread);
#define switch_to(prev,next,last) \
do { \
if (last_task_used_math != next) { \
struct pt_regs *regs = next->thread.uregs; \
if (regs) regs->sr |= SR_FD; \
} \
last = sh64_switch_to(prev, &prev->thread, next, \
&next->thread); \
} while (0)
#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
#ifndef __ASM_SH_BL_BIT_64_H
#define __ASM_SH_BL_BIT_64_H
static inline reg_size_t register_align(void *val)
{
return (unsigned long long)(signed long long)(signed long)val;
}
extern void phys_stext(void);
static inline void trigger_address_error(void)
{
phys_stext();
}
#include <asm/processor.h>
#define SR_BL_LL 0x0000000010000000LL
......@@ -76,4 +37,4 @@ static inline void clear_bl_bit(void)
: "r" (__dummy1));
}
#endif /* __ASM_SH_SYSTEM_64_H */
#endif /* __ASM_SH_BL_BIT_64_H */
#ifndef __ASM_SH_BUG_H
#define __ASM_SH_BUG_H
#include <linux/linkage.h>
#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
#define BUGFLAG_UNWINDER (1 << 1)
......@@ -107,4 +109,7 @@ do { \
#include <asm-generic/bug.h>
struct pt_regs;
extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
#endif /* __ASM_SH_BUG_H */
#ifndef __ASM_SH_CACHE_INSNS_H
#define __ASM_SH_CACHE_INSNS_H
#ifdef CONFIG_SUPERH32
# include "cache_insns_32.h"
#else
# include "cache_insns_64.h"
#endif
#endif /* __ASM_SH_CACHE_INSNS_H */
#ifndef __ASM_SH_CACHE_INSNS_32_H
#define __ASM_SH_CACHE_INSNS_32_H
#include <linux/types.h>
#if defined(CONFIG_CPU_SH4A)
#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
#else
#define __icbi(addr) mb()
#endif
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
static inline reg_size_t register_align(void *val)
{
return (unsigned long)(signed long)val;
}
#endif /* __ASM_SH_CACHE_INSNS_32_H */
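A hedged example of the operand-cache helpers (writeback_range() is hypothetical, not from the patch): write back a buffer's dirty lines with ocbwb, stepping by the L1 line size from asm/cache.h:

/* Illustrative only: write back each operand-cache line covering
 * the buffer, without invalidating it. */
static void writeback_range(void *p, unsigned long len)
{
	unsigned long v = (unsigned long)p & ~(L1_CACHE_BYTES - 1);
	unsigned long end = (unsigned long)p + len;

	for (; v < end; v += L1_CACHE_BYTES)
		__ocbwb(v);		/* write back one cache line */
}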
/*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_CACHE_INSNS_64_H
#define __ASM_SH_CACHE_INSNS_64_H
#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
static inline reg_size_t register_align(void *val)
{
return (unsigned long long)(signed long long)(signed long)val;
}
#endif /* __ASM_SH_CACHE_INSNS_64_H */
#ifndef __ASM_SH_CMPXCHG_IRQ_H
#define __ASM_SH_CMPXCHG_IRQ_H
#include <linux/irqflags.h>
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
unsigned long flags, retval;
......
#ifndef __ASM_SH_CMPXCHG_H
#define __ASM_SH_CMPXCHG_H
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#include <linux/compiler.h>
#include <linux/types.h>
#if defined(CONFIG_GUSA_RB)
#include <asm/cmpxchg-grb.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/cmpxchg-llsc.h>
#else
#include <asm/cmpxchg-irq.h>
#endif
extern void __xchg_called_with_bad_pointer(void);
#define __xchg(ptr, x, size) \
({ \
unsigned long __xchg__res; \
volatile void *__xchg_ptr = (ptr); \
switch (size) { \
case 4: \
__xchg__res = xchg_u32(__xchg_ptr, x); \
break; \
case 1: \
__xchg__res = xchg_u8(__xchg_ptr, x); \
break; \
default: \
__xchg_called_with_bad_pointer(); \
__xchg__res = x; \
break; \
} \
\
__xchg__res; \
})
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 4:
return __cmpxchg_u32(ptr, old, new);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
#endif /* __ASM_SH_CMPXCHG_H */
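For context, a typical cmpxchg() retry loop looks like the sketch below (illustrative only; atomic_add_sketch() is not part of this patch). It applies an update atomically by re-reading until no other CPU raced in between:

/* Hedged sketch of the canonical cmpxchg() usage pattern. */
static inline void atomic_add_sketch(volatile unsigned int *p, unsigned int n)
{
	unsigned int old;

	do {
		old = *p;			/* snapshot current value */
	} while (cmpxchg(p, old, old + n) != old);	/* retry on race */
}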
/*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Paul Mundt
*/
#ifndef __ASM_SH_EXEC_H
#define __ASM_SH_EXEC_H
#define arch_align_stack(x) (x)
#endif /* __ASM_SH_EXEC_H */
#ifndef __ASM_SH_FUTEX_IRQ_H
#define __ASM_SH_FUTEX_IRQ_H
#include <asm/system.h>
static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
int *oldval)
......
......@@ -14,7 +14,6 @@
*/
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
......
......@@ -101,6 +101,10 @@ extern struct sh_cpuinfo cpu_data[];
#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
#define cpu_relax() barrier()
void default_idle(void);
void cpu_idle_wait(void);
void stop_this_cpu(void *);
/* Forward decl */
struct seq_operations;
struct task_struct;
......@@ -161,6 +165,17 @@ int vsyscall_init(void);
#define vsyscall_init() do { } while (0)
#endif
/*
* SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
*/
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#elif defined(CONFIG_SUPERH32)
#define instruction_size(insn) (2)
#else
#define instruction_size(insn) (4)
#endif
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_SUPERH32
......
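The instruction_size() helper declared in the hunk above is used by fault and single-step paths to advance the PC correctly, since SH-2A mixes 16- and 32-bit opcodes. A hedged sketch (skip_insn() is hypothetical, not from this patch):

/* Illustrative only: step the PC past the current instruction. */
static void skip_insn(struct pt_regs *regs)
{
	insn_size_t insn = *(insn_size_t *)regs->pc;	/* fetch opcode */

	regs->pc += instruction_size(insn);		/* 2 or 4 bytes */
}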
......@@ -37,7 +37,6 @@
#include <linux/thread_info.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/system.h>
#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
......
......@@ -20,6 +20,7 @@
void sh_mv_setup(void);
void check_for_initrd(void);
void per_cpu_trap_init(void);
#endif /* __KERNEL__ */
......
/*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_SWITCH_TO_H
#define __ASM_SH_SWITCH_TO_H
#ifdef CONFIG_SUPERH32
# include "switch_to_32.h"
#else
# include "switch_to_64.h"
#endif
#endif /* __ASM_SH_SWITCH_TO_H */
#ifndef __ASM_SH_SYSTEM_32_H
#define __ASM_SH_SYSTEM_32_H
#include <linux/types.h>
#include <asm/mmu.h>
#ifndef __ASM_SH_SWITCH_TO_32_H
#define __ASM_SH_SWITCH_TO_32_H
#ifdef CONFIG_SH_DSP
......@@ -32,7 +29,6 @@ do { \
: : "r" (__ts2)); \
} while (0)
#define __save_dsp(tsk) \
do { \
register u32 *__ts2 __asm__ ("r2") = \
......@@ -64,16 +60,6 @@ do { \
#define __restore_dsp(tsk) do { } while (0)
#endif
#if defined(CONFIG_CPU_SH4A)
#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
#else
#define __icbi(addr) mb()
#endif
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next);
......@@ -145,92 +131,4 @@ do { \
__restore_dsp(prev); \
} while (0)
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
unsigned long _vec; \
\
__asm__ __volatile__ ( \
"stc r2_bank, %0\n\t" \
: "=r" (_vec) \
); \
\
_vec; \
})
#else
#define lookup_exception_vector() \
({ \
unsigned long _vec; \
__asm__ __volatile__ ( \
"mov r4, %0\n\t" \
: "=r" (_vec) \
); \
\
_vec; \
})
#endif
static inline reg_size_t register_align(void *val)
{
return (unsigned long)(signed long)val;
}
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma, int, unsigned long address);
static inline void trigger_address_error(void)
{
__asm__ __volatile__ (
"ldc %0, sr\n\t"
"mov.l @%1, %0"
:
: "r" (0x10000000), "r" (0x80000001)
);
}
asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address);
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
static inline void set_bl_bit(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"or %2, %0\n\t"
"and %3, %0\n\t"
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "r" (0x10000000), "r" (0xffffff0f)
: "memory"
);
}
static inline void clear_bl_bit(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and %2, %0\n\t"
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "1" (~0x10000000)
: "memory"
);
}
#endif /* __ASM_SH_SYSTEM_32_H */
#endif /* __ASM_SH_SWITCH_TO_32_H */
/*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_SWITCH_TO_64_H
#define __ASM_SH_SWITCH_TO_64_H
struct thread_struct;
struct task_struct;
/*
* switch_to() should switch execution over to the next task
*/
struct task_struct *sh64_switch_to(struct task_struct *prev,
struct thread_struct *prev_thread,
struct task_struct *next,
struct thread_struct *next_thread);
#define switch_to(prev,next,last) \
do { \
if (last_task_used_math != next) { \
struct pt_regs *regs = next->thread.uregs; \
if (regs) regs->sr |= SR_FD; \
} \
last = sh64_switch_to(prev, &prev->thread, next, \
&next->thread); \
} while (0)
#endif /* __ASM_SH_SWITCH_TO_64_H */
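The scheduler invokes this macro from its low-level context-switch path; a simplified, hedged sketch of the call site (do_switch() is hypothetical, not from this patch):

/* Illustrative only: how the core scheduler would use switch_to().
 * SR.FD is set for tasks that don't own the FPU, so their first FPU
 * use after the switch traps and the state is restored lazily. */
static struct task_struct *do_switch(struct task_struct *prev,
				     struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);	/* 'last' gets the previous task */
	return last;
}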
#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H
/*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Paul Mundt
*/
#include <linux/irqflags.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/types.h>
#include <asm/uncached.h>
#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
/*
* A brief note on ctrl_barrier(), the control register write barrier.
*
* Legacy SH cores typically require a sequence of 8 nops after
* modification of a control register in order for the changes to take
* effect. On newer cores (like the sh4a and sh5) this is accomplished
* with icbi.
*
* Also note that on sh4a in the icbi case we can forego a synco for the
* write barrier, as it's not necessary for control registers.
*
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier() __icbi(PAGE_OFFSET)
#define read_barrier_depends() do { } while(0)
#else
#define mb() __asm__ __volatile__ ("": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends() do { } while(0)
#endif
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#ifdef CONFIG_GUSA_RB
#include <asm/cmpxchg-grb.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/cmpxchg-llsc.h>
#else
#include <asm/cmpxchg-irq.h>
#endif
extern void __xchg_called_with_bad_pointer(void);
#define __xchg(ptr, x, size) \
({ \
unsigned long __xchg__res; \
volatile void *__xchg_ptr = (ptr); \
switch (size) { \
case 4: \
__xchg__res = xchg_u32(__xchg_ptr, x); \
break; \
case 1: \
__xchg__res = xchg_u8(__xchg_ptr, x); \
break; \
default: \
__xchg_called_with_bad_pointer(); \
__xchg__res = x; \
break; \
} \
\
__xchg__res; \
})
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 4:
return __cmpxchg_u32(ptr, old, new);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
struct pt_regs;
extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
#include <asm/barrier.h>
#include <asm/bl_bit.h>
#include <asm/cache_insns.h>
#include <asm/cmpxchg.h>
#include <asm/exec.h>
#include <asm/switch_to.h>
#include <asm/traps.h>
void free_initmem(void);
void free_initrd_mem(unsigned long start, unsigned long end);
extern void *set_exception_table_vec(unsigned int vec, void *handler);
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
return set_exception_table_vec(evt >> 5, handler);
}
/*
* SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
*/
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#elif defined(CONFIG_SUPERH32)
#define instruction_size(insn) (2)
#else
#define instruction_size(insn) (4)
#endif
void per_cpu_trap_init(void);
void default_idle(void);
void cpu_idle_wait(void);
void stop_this_cpu(void *);
#ifdef CONFIG_SUPERH32
#define BUILD_TRAP_HANDLER(name) \
asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
unsigned long r6, unsigned long r7, \
struct pt_regs __regs)
#define TRAP_HANDLER_DECL \
struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \
unsigned int vec = regs->tra; \
(void)vec;
#else
#define BUILD_TRAP_HANDLER(name) \
asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
#define TRAP_HANDLER_DECL
#endif
BUILD_TRAP_HANDLER(address_error);
BUILD_TRAP_HANDLER(debug);
BUILD_TRAP_HANDLER(bug);
BUILD_TRAP_HANDLER(breakpoint);
BUILD_TRAP_HANDLER(singlestep);
BUILD_TRAP_HANDLER(fpu_error);
BUILD_TRAP_HANDLER(fpu_state_restore);
BUILD_TRAP_HANDLER(nmi);
#define arch_align_stack(x) (x)
struct mem_access {
unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};
#ifdef CONFIG_SUPERH32
# include "system_32.h"
#else
# include "system_64.h"
#endif
#endif
#ifndef __ASM_SH_TRAPS_H
#define __ASM_SH_TRAPS_H
#include <linux/compiler.h>
#ifdef CONFIG_SUPERH32
# include "traps_32.h"
#else
# include "traps_64.h"
#endif
BUILD_TRAP_HANDLER(address_error);
BUILD_TRAP_HANDLER(debug);
BUILD_TRAP_HANDLER(bug);
BUILD_TRAP_HANDLER(breakpoint);
BUILD_TRAP_HANDLER(singlestep);
BUILD_TRAP_HANDLER(fpu_error);
BUILD_TRAP_HANDLER(fpu_state_restore);
BUILD_TRAP_HANDLER(nmi);
#endif /* __ASM_SH_TRAPS_H */
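A hedged example of how the BUILD_TRAP_HANDLER()/TRAP_HANDLER_DECL pair is used (the handler name here is hypothetical; real users include the debug and fpu_error traps):

/* Illustrative handler definition: the macros hide the ABI difference
 * between 32-bit (register args + RELOC_HIDE) and 64-bit (vec + regs). */
BUILD_TRAP_HANDLER(example)
{
	TRAP_HANDLER_DECL;

	/* 'regs' is in scope on both ABIs; 'vec' holds the trap vector */
}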
#ifndef __ASM_SH_TRAPS_32_H
#define __ASM_SH_TRAPS_32_H
#include <linux/types.h>
#include <asm/mmu.h>
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
unsigned long _vec; \
\
__asm__ __volatile__ ( \
"stc r2_bank, %0\n\t" \
: "=r" (_vec) \
); \
\
_vec; \
})
#else
#define lookup_exception_vector() \
({ \
unsigned long _vec; \
__asm__ __volatile__ ( \
"mov r4, %0\n\t" \
: "=r" (_vec) \
); \
\
_vec; \
})
#endif
static inline void trigger_address_error(void)
{
__asm__ __volatile__ (
"ldc %0, sr\n\t"
"mov.l @%1, %0"
:
: "r" (0x10000000), "r" (0x80000001)
);
}
asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address);
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
#define BUILD_TRAP_HANDLER(name) \
asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
unsigned long r6, unsigned long r7, \
struct pt_regs __regs)
#define TRAP_HANDLER_DECL \
struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \
unsigned int vec = regs->tra; \
(void)vec;
#endif /* __ASM_SH_TRAPS_32_H */
/*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_TRAPS_64_H
#define __ASM_SH_TRAPS_64_H
extern void phys_stext(void);
static inline void trigger_address_error(void)
{
phys_stext();
}
#define BUILD_TRAP_HANDLER(name) \
asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
#define TRAP_HANDLER_DECL
#endif /* __ASM_SH_TRAPS_64_H */
......@@ -254,5 +254,19 @@ int fixup_exception(struct pt_regs *regs);
unsigned long search_exception_table(unsigned long addr);
const struct exception_table_entry *search_exception_tables(unsigned long addr);
extern void *set_exception_table_vec(unsigned int vec, void *handler);
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
return set_exception_table_vec(evt >> 5, handler);
}
struct mem_access {
unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma, int, unsigned long address);
#endif /* __ASM_SH_UACCESS_H */
......@@ -18,13 +18,13 @@
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/sh_bios.h>
#include <asm/setup.h>
#ifdef CONFIG_SH_FPU
#define cpu_has_fpu 1
......
......@@ -19,7 +19,6 @@
#include <linux/cache.h>
#include <linux/irq.h>
#include <linux/bitmap.h>
#include <asm/system.h>
#include <asm/irq.h>
/* Bitmap of IRQ masked */
......
......@@ -10,7 +10,6 @@
* for more details.
*/
#include <linux/kernel.h>
#include <asm/system.h>
/*
* Instructions on SH are generally fixed at 16-bits, however, SH-2A
......
......@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <cpu/fpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/fpu.h>
/* The PR (precision) bit in the FP Status Register must be clear when
......
......@@ -22,6 +22,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
/*
* Stores the breakpoints currently in use on each breakpoint address
......
......@@ -18,9 +18,9 @@
#include <linux/smp.h>
#include <linux/cpuidle.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/bl_bit.h>
void (*pm_idle)(void);
......
......@@ -15,7 +15,6 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
......
......@@ -24,7 +24,6 @@
#include <linux/prefetch.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/fpu.h>
#include <asm/syscalls.h>
......
......@@ -30,6 +30,7 @@
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/switch_to.h>
struct task_struct *last_task_used_math = NULL;
......
......@@ -28,7 +28,6 @@
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
......
......@@ -34,11 +34,11 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
......
......@@ -8,8 +8,8 @@
#endif
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
......
......@@ -25,7 +25,6 @@
#include <linux/freezer.h>
#include <linux/io.h>
#include <linux/tracehook.h>
#include <asm/system.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
......
......@@ -23,7 +23,6 @@
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
......
......@@ -7,7 +7,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/unwinder.h>
#include <asm/system.h>
#include <asm/traps.h>
#ifdef CONFIG_GENERIC_BUG
static void handle_BUG(struct pt_regs *regs)
......
......@@ -27,10 +27,11 @@
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/bl_bit.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
......
......@@ -25,7 +25,6 @@
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
......
......@@ -14,7 +14,6 @@
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
......
......@@ -17,9 +17,9 @@
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
......
......@@ -33,7 +33,6 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
......
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
/*
* Write back the dirty D-caches, but not invalidate them.
......
......@@ -25,7 +25,6 @@
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
......
......@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
......
......@@ -20,7 +20,6 @@
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
......
......@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
......
......@@ -22,7 +22,6 @@
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
......