Commit 17dbe27d authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'xtensa-next-20121018' of git://github.com/czankel/xtensa-linux

Pull Xtensa patchset from Chris Zankel:
 "These are all limited to the xtensa subtree and include some important
  changes (adding long missing system calls for newer libc versions and
  other fixes) and the UAPI changes"

* tag 'xtensa-next-20121018' of git://github.com/czankel/xtensa-linux:
  xtensa: add missing system calls to the syscall table
  xtensa: minor compiler warning fix
  xtensa: Use Kbuild infrastructure to handle asm-generic headers
  UAPI: (Scripted) Disintegrate arch/xtensa/include/asm
  xtensa: fix unaligned usermode access
  xtensa: reorganize SR referencing
  xtensa: fix boot parameters parsing
  xtensa: fix missing return in do_page_fault for SIGBUS case
  xtensa: copy_thread with CLONE_VM must not copy live parent AR windows
  xtensa: fix memmove(), bcopy(), and memcpy().
  xtensa: ISS: fix rs_put_char
  xtensa: ISS: fix specific simcalls
parents b05e585d 7216cabf
...@@ -51,17 +51,17 @@ _start: ...@@ -51,17 +51,17 @@ _start:
/* 'reset' window registers */ /* 'reset' window registers */
movi a4, 1 movi a4, 1
wsr a4, PS wsr a4, ps
rsync rsync
rsr a5, WINDOWBASE rsr a5, windowbase
ssl a5 ssl a5
sll a4, a4 sll a4, a4
wsr a4, WINDOWSTART wsr a4, windowstart
rsync rsync
movi a4, 0x00040000 movi a4, 0x00040000
wsr a4, PS wsr a4, ps
rsync rsync
/* copy the loader to its address /* copy the loader to its address
......
include include/asm-generic/Kbuild.asm generic-y += bitsperlong.h
generic-y += bug.h
generic-y += clkdev.h generic-y += clkdev.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += fcntl.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += ioctl.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += percpu.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += siginfo.h
generic-y += statfs.h
generic-y += termios.h
generic-y += topology.h
generic-y += xor.h
...@@ -73,7 +73,7 @@ static inline void atomic_add(int i, atomic_t * v) ...@@ -73,7 +73,7 @@ static inline void atomic_add(int i, atomic_t * v)
"l32i %0, %2, 0 \n\t" "l32i %0, %2, 0 \n\t"
"add %0, %0, %1 \n\t" "add %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t" "s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t" "wsr a15, ps \n\t"
"rsync \n" "rsync \n"
: "=&a" (vval) : "=&a" (vval)
: "a" (i), "a" (v) : "a" (i), "a" (v)
...@@ -97,7 +97,7 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -97,7 +97,7 @@ static inline void atomic_sub(int i, atomic_t *v)
"l32i %0, %2, 0 \n\t" "l32i %0, %2, 0 \n\t"
"sub %0, %0, %1 \n\t" "sub %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t" "s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t" "wsr a15, ps \n\t"
"rsync \n" "rsync \n"
: "=&a" (vval) : "=&a" (vval)
: "a" (i), "a" (v) : "a" (i), "a" (v)
...@@ -118,7 +118,7 @@ static inline int atomic_add_return(int i, atomic_t * v) ...@@ -118,7 +118,7 @@ static inline int atomic_add_return(int i, atomic_t * v)
"l32i %0, %2, 0 \n\t" "l32i %0, %2, 0 \n\t"
"add %0, %0, %1 \n\t" "add %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t" "s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t" "wsr a15, ps \n\t"
"rsync \n" "rsync \n"
: "=&a" (vval) : "=&a" (vval)
: "a" (i), "a" (v) : "a" (i), "a" (v)
...@@ -137,7 +137,7 @@ static inline int atomic_sub_return(int i, atomic_t * v) ...@@ -137,7 +137,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
"l32i %0, %2, 0 \n\t" "l32i %0, %2, 0 \n\t"
"sub %0, %0, %1 \n\t" "sub %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t" "s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t" "wsr a15, ps \n\t"
"rsync \n" "rsync \n"
: "=&a" (vval) : "=&a" (vval)
: "a" (i), "a" (v) : "a" (i), "a" (v)
...@@ -260,7 +260,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) ...@@ -260,7 +260,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
"xor %1, %4, %3 \n\t" "xor %1, %4, %3 \n\t"
"and %0, %0, %4 \n\t" "and %0, %0, %4 \n\t"
"s32i %0, %2, 0 \n\t" "s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t" "wsr a15, ps \n\t"
"rsync \n" "rsync \n"
: "=&a" (vval), "=a" (mask) : "=&a" (vval), "=a" (mask)
: "a" (v), "a" (all_f), "1" (mask) : "a" (v), "a" (all_f), "1" (mask)
...@@ -277,7 +277,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) ...@@ -277,7 +277,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
"l32i %0, %2, 0 \n\t" "l32i %0, %2, 0 \n\t"
"or %0, %0, %1 \n\t" "or %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t" "s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t" "wsr a15, ps \n\t"
"rsync \n" "rsync \n"
: "=&a" (vval) : "=&a" (vval)
: "a" (mask), "a" (v) : "a" (mask), "a" (v)
......
#include <asm-generic/bitsperlong.h>
...@@ -165,7 +165,7 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*, ...@@ -165,7 +165,7 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
static inline u32 xtensa_get_cacheattr(void) static inline u32 xtensa_get_cacheattr(void)
{ {
u32 r; u32 r;
asm volatile(" rsr %0, CACHEATTR" : "=a"(r)); asm volatile(" rsr %0, cacheattr" : "=a"(r));
return r; return r;
} }
......
...@@ -27,7 +27,7 @@ __cmpxchg_u32(volatile int *p, int old, int new) ...@@ -27,7 +27,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
"bne %0, %2, 1f \n\t" "bne %0, %2, 1f \n\t"
"s32i %3, %1, 0 \n\t" "s32i %3, %1, 0 \n\t"
"1: \n\t" "1: \n\t"
"wsr a15, "__stringify(PS)" \n\t" "wsr a15, ps \n\t"
"rsync \n\t" "rsync \n\t"
: "=&a" (old) : "=&a" (old)
: "a" (p), "a" (old), "r" (new) : "a" (p), "a" (old), "r" (new)
...@@ -97,7 +97,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) ...@@ -97,7 +97,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %1, 0 \n\t" "l32i %0, %1, 0 \n\t"
"s32i %2, %1, 0 \n\t" "s32i %2, %1, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t" "wsr a15, ps \n\t"
"rsync \n\t" "rsync \n\t"
: "=&a" (tmp) : "=&a" (tmp)
: "a" (m), "a" (val) : "a" (m), "a" (val)
......
...@@ -94,11 +94,10 @@ ...@@ -94,11 +94,10 @@
#if XCHAL_HAVE_CP #if XCHAL_HAVE_CP
#define RSR_CPENABLE(x) do { \ #define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \ __asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
} while(0); } while(0);
#define WSR_CPENABLE(x) do { \ #define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" \ __asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
:: "a" (x)); \
} while(0); } while(0);
#endif /* XCHAL_HAVE_CP */ #endif /* XCHAL_HAVE_CP */
......
#ifndef _XTENSA_CPUTIME_H
#define _XTENSA_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* _XTENSA_CPUTIME_H */
...@@ -27,7 +27,7 @@ static inline void __delay(unsigned long loops) ...@@ -27,7 +27,7 @@ static inline void __delay(unsigned long loops)
static __inline__ u32 xtensa_get_ccount(void) static __inline__ u32 xtensa_get_ccount(void)
{ {
u32 ccount; u32 ccount;
asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount)); asm volatile ("rsr %0, ccount\n" : "=r" (ccount));
return ccount; return ccount;
} }
......
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#include <asm-generic/device.h>
/*
* include/asm-xtensa/div64.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_DIV64_H
#define _XTENSA_DIV64_H
#include <asm-generic/div64.h>
#endif /* _XTENSA_DIV64_H */
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_EMERGENCY_RESTART_H */
/*
* include/asm-xtensa/errno.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ERRNO_H
#define _XTENSA_ERRNO_H
#include <asm-generic/errno.h>
#endif /* _XTENSA_ERRNO_H */
#include <asm-generic/fcntl.h>
#include <asm-generic/futex.h>
/*
* include/asm-xtensa/hardirq.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_HARDIRQ_H
#define _XTENSA_HARDIRQ_H
#include <asm-generic/hardirq.h>
#endif /* _XTENSA_HARDIRQ_H */
#include <asm-generic/ioctl.h>
#include <asm-generic/irq_regs.h>
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
static inline unsigned long arch_local_save_flags(void) static inline unsigned long arch_local_save_flags(void)
{ {
unsigned long flags; unsigned long flags;
asm volatile("rsr %0,"__stringify(PS) : "=a" (flags)); asm volatile("rsr %0, ps" : "=a" (flags));
return flags; return flags;
} }
...@@ -41,7 +41,7 @@ static inline void arch_local_irq_enable(void) ...@@ -41,7 +41,7 @@ static inline void arch_local_irq_enable(void)
static inline void arch_local_irq_restore(unsigned long flags) static inline void arch_local_irq_restore(unsigned long flags)
{ {
asm volatile("wsr %0, "__stringify(PS)" ; rsync" asm volatile("wsr %0, ps; rsync"
:: "a" (flags) : "memory"); :: "a" (flags) : "memory");
} }
......
#include <asm-generic/kdebug.h>
#ifndef _XTENSA_KMAP_TYPES_H
#define _XTENSA_KMAP_TYPES_H
#include <asm-generic/kmap_types.h>
#endif /* _XTENSA_KMAP_TYPES_H */
#include <asm-generic/kvm_para.h>
/*
* include/asm-xtensa/local.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_LOCAL_H
#define _XTENSA_LOCAL_H
#include <asm-generic/local.h>
#endif /* _XTENSA_LOCAL_H */
#include <asm-generic/local64.h>
...@@ -51,14 +51,14 @@ extern unsigned long asid_cache; ...@@ -51,14 +51,14 @@ extern unsigned long asid_cache;
static inline void set_rasid_register (unsigned long val) static inline void set_rasid_register (unsigned long val)
{ {
__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t" __asm__ __volatile__ (" wsr %0, rasid\n\t"
" isync\n" : : "a" (val)); " isync\n" : : "a" (val));
} }
static inline unsigned long get_rasid_register (void) static inline unsigned long get_rasid_register (void)
{ {
unsigned long tmp; unsigned long tmp;
__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp)); __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
return tmp; return tmp;
} }
......
...@@ -7,28 +7,12 @@ ...@@ -7,28 +7,12 @@
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2005 Tensilica Inc.
*/ */
#ifndef _XTENSA_PARAM_H #ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H #define _XTENSA_PARAM_H
#ifdef __KERNEL__ #include <uapi/asm/param.h>
# define HZ CONFIG_HZ /* internal timer frequency */ # define HZ CONFIG_HZ /* internal timer frequency */
# define USER_HZ 100 /* for user interfaces in "ticks" */ # define USER_HZ 100 /* for user interfaces in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */ # define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */
#else
# define HZ 100
#endif
#define EXEC_PAGESIZE 4096
#ifndef NGROUPS
#define NGROUPS 32
#endif
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#endif /* _XTENSA_PARAM_H */ #endif /* _XTENSA_PARAM_H */
/*
* linux/include/asm-xtensa/percpu.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PERCPU__
#define _XTENSA_PERCPU__
#include <asm-generic/percpu.h>
#endif /* _XTENSA_PERCPU__ */
...@@ -7,73 +7,11 @@ ...@@ -7,73 +7,11 @@
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2005 Tensilica Inc.
*/ */
#ifndef _XTENSA_PTRACE_H #ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H #define _XTENSA_PTRACE_H
/* #include <uapi/asm/ptrace.h>
* Kernel stack
*
* +-----------------------+ -------- STACK_SIZE
* | register file | |
* +-----------------------+ |
* | struct pt_regs | |
* +-----------------------+ | ------ PT_REGS_OFFSET
* double : 16 bytes spill area : | ^
* excetion :- - - - - - - - - - - -: | |
* frame : struct pt_regs : | |
* :- - - - - - - - - - - -: | |
* | | | |
* | memory stack | | |
* | | | |
* ~ ~ ~ ~
* ~ ~ ~ ~
* | | | |
* | | | |
* +-----------------------+ | | --- STACK_BIAS
* | struct task_struct | | | ^
* current --> +-----------------------+ | | |
* | struct thread_info | | | |
* +-----------------------+ --------
*/
#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
#define EXC_TABLE_SIZE 0x400
/* Registers used by strace */
#define REG_A_BASE 0x0000
#define REG_AR_BASE 0x0100
#define REG_PC 0x0020
#define REG_PS 0x02e6
#define REG_WB 0x0248
#define REG_WS 0x0249
#define REG_LBEG 0x0200
#define REG_LEND 0x0201
#define REG_LCOUNT 0x0202
#define REG_SAR 0x0203
#define SYSCALL_NR 0x00ff
/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETXTREGS 18
#define PTRACE_SETXTREGS 19
#ifdef __KERNEL__
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -132,6 +70,4 @@ struct pt_regs { ...@@ -132,6 +70,4 @@ struct pt_regs {
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _XTENSA_PTRACE_H */ #endif /* _XTENSA_PTRACE_H */
...@@ -27,52 +27,15 @@ ...@@ -27,52 +27,15 @@
/* Special registers. */ /* Special registers. */
#define LBEG 0 #define SREG_MR 32
#define LEND 1 #define SREG_IBREAKA 128
#define LCOUNT 2 #define SREG_DBREAKA 144
#define SAR 3 #define SREG_DBREAKC 160
#define BR 4 #define SREG_EPC 176
#define SCOMPARE1 12 #define SREG_EPS 192
#define ACCHI 16 #define SREG_EXCSAVE 208
#define ACCLO 17 #define SREG_CCOMPARE 240
#define MR 32 #define SREG_MISC 244
#define WINDOWBASE 72
#define WINDOWSTART 73
#define PTEVADDR 83
#define RASID 90
#define ITLBCFG 91
#define DTLBCFG 92
#define IBREAKENABLE 96
#define DDR 104
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
#define EPC 176
#define EPC_1 177
#define DEPC 192
#define EPS 192
#define EPS_1 193
#define EXCSAVE 208
#define EXCSAVE_1 209
#define INTERRUPT 226
#define INTENABLE 228
#define PS 230
#define THREADPTR 231
#define EXCCAUSE 232
#define DEBUGCAUSE 233
#define CCOUNT 234
#define PRID 235
#define ICOUNT 236
#define ICOUNTLEVEL 237
#define EXCVADDR 238
#define CCOMPARE 240
#define MISC_SR 244
/* Special names for read-only and write-only interrupt registers. */
#define INTREAD 226
#define INTSET 226
#define INTCLEAR 227
/* EXCCAUSE register fields */ /* EXCCAUSE register fields */
......
/*
* include/asm-xtensa/resource.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*/
#ifndef _XTENSA_RESOURCE_H
#define _XTENSA_RESOURCE_H
#include <asm-generic/resource.h>
#endif /* _XTENSA_RESOURCE_H */
/*
* include/asm-xtensa/scatterlist.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SCATTERLIST_H
#define _XTENSA_SCATTERLIST_H
#include <asm-generic/scatterlist.h>
#endif /* _XTENSA_SCATTERLIST_H */
/*
* include/asm-xtensa/sections.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SECTIONS_H
#define _XTENSA_SECTIONS_H
#include <asm-generic/sections.h>
#endif /* _XTENSA_SECTIONS_H */
/*
* include/asm-xtensa/siginfo.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SIGINFO_H
#define _XTENSA_SIGINFO_H
#include <asm-generic/siginfo.h>
#endif /* _XTENSA_SIGINFO_H */
...@@ -9,117 +9,12 @@ ...@@ -9,117 +9,12 @@
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2005 Tensilica Inc.
*/ */
#ifndef _XTENSA_SIGNAL_H #ifndef _XTENSA_SIGNAL_H
#define _XTENSA_SIGNAL_H #define _XTENSA_SIGNAL_H
#include <uapi/asm/signal.h>
#define _NSIG 64
#define _NSIG_BPW 32
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
#ifndef __ASSEMBLY__
#include <linux/types.h>
/* Avoid too many header ordering problems. */
struct siginfo;
typedef unsigned long old_sigset_t; /* at least 32 bits */
typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
#endif
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/* #define SIGLOST 29 */
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX (_NSIG-1)
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
#define SA_SIGINFO 0x00000004
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#define SIG_BLOCK 0 /* for blocking signals */
#define SIG_UNBLOCK 1 /* for unblocking signals */
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
#ifdef __KERNEL__
struct sigaction { struct sigaction {
__sighandler_t sa_handler; __sighandler_t sa_handler;
unsigned long sa_flags; unsigned long sa_flags;
...@@ -131,35 +26,8 @@ struct k_sigaction { ...@@ -131,35 +26,8 @@ struct k_sigaction {
struct sigaction sa; struct sigaction sa;
}; };
#else
/* Here we must cater to libcs that poke about in kernel headers. */
struct sigaction {
union {
__sighandler_t _sa_handler;
void (*_sa_sigaction)(int, struct siginfo *, void *);
} _u;
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
#define sa_handler _u._sa_handler
#define sa_sigaction _u._sa_sigaction
#endif /* __KERNEL__ */
typedef struct sigaltstack {
void *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
#ifdef __KERNEL__
#include <asm/sigcontext.h> #include <asm/sigcontext.h>
#define ptrace_signal_deliver(regs, cookie) do { } while (0) #define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _XTENSA_SIGNAL_H */ #endif /* _XTENSA_SIGNAL_H */
/*
* include/asm-xtensa/statfs.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*/
#ifndef _XTENSA_STATFS_H
#define _XTENSA_STATFS_H
#include <asm-generic/statfs.h>
#endif /* _XTENSA_STATFS_H */
/*
* include/asm-xtensa/termios.h
*
* Copied from SH.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_TERMIOS_H
#define _XTENSA_TERMIOS_H
#include <asm/termbits.h>
#include <asm/ioctls.h>
struct winsize {
unsigned short ws_row;
unsigned short ws_col;
unsigned short ws_xpixel;
unsigned short ws_ypixel;
};
#define NCC 8
struct termio {
unsigned short c_iflag; /* input mode flags */
unsigned short c_oflag; /* output mode flags */
unsigned short c_cflag; /* control mode flags */
unsigned short c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
/* Modem lines */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002
#define TIOCM_RTS 0x004
#define TIOCM_ST 0x008
#define TIOCM_SR 0x010
#define TIOCM_CTS 0x020
#define TIOCM_CAR 0x040
#define TIOCM_RNG 0x080
#define TIOCM_DSR 0x100
#define TIOCM_CD TIOCM_CAR
#define TIOCM_RI TIOCM_RNG
#define TIOCM_OUT1 0x2000
#define TIOCM_OUT2 0x4000
#define TIOCM_LOOP 0x8000
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
#ifdef __KERNEL__
/* intr=^C quit=^\ erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
start=^Q stop=^S susp=^Z eol=\0
reprint=^R discard=^U werase=^W lnext=^V
eol2=\0
*/
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
unsigned short __tmp; \
get_user(__tmp,&(termio)->x); \
*(unsigned short *) &(termios)->x = __tmp; \
}
#define user_termio_to_kernel_termios(termios, termio) \
({ \
SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
})
/*
* Translate a "termios" structure into a "termio". Ugh.
*/
#define kernel_termios_to_user_termio(termio, termios) \
({ \
put_user((termios)->c_iflag, &(termio)->c_iflag); \
put_user((termios)->c_oflag, &(termio)->c_oflag); \
put_user((termios)->c_cflag, &(termio)->c_cflag); \
put_user((termios)->c_lflag, &(termio)->c_lflag); \
put_user((termios)->c_line, &(termio)->c_line); \
copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
})
#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
#endif /* __KERNEL__ */
#endif /* _XTENSA_TERMIOS_H */
...@@ -63,10 +63,10 @@ extern cycles_t cacheflush_time; ...@@ -63,10 +63,10 @@ extern cycles_t cacheflush_time;
* Register access. * Register access.
*/ */
#define WSR_CCOUNT(r) asm volatile ("wsr %0,"__stringify(CCOUNT) :: "a" (r)) #define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
#define RSR_CCOUNT(r) asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (r)) #define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r)) #define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r)) #define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
static inline unsigned long get_ccount (void) static inline unsigned long get_ccount (void)
{ {
......
...@@ -86,26 +86,26 @@ static inline void invalidate_dtlb_entry_no_isync (unsigned entry) ...@@ -86,26 +86,26 @@ static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
static inline void set_itlbcfg_register (unsigned long val) static inline void set_itlbcfg_register (unsigned long val)
{ {
__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t" __asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
: : "a" (val)); : : "a" (val));
} }
static inline void set_dtlbcfg_register (unsigned long val) static inline void set_dtlbcfg_register (unsigned long val)
{ {
__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t" __asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
: : "a" (val)); : : "a" (val));
} }
static inline void set_ptevaddr_register (unsigned long val) static inline void set_ptevaddr_register (unsigned long val)
{ {
__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n" __asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
: : "a" (val)); : : "a" (val));
} }
static inline unsigned long read_ptevaddr_register (void) static inline unsigned long read_ptevaddr_register (void)
{ {
unsigned long tmp; unsigned long tmp;
__asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp)); __asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
return tmp; return tmp;
} }
......
/*
* include/asm-xtensa/topology.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_TOPOLOGY_H
#define _XTENSA_TOPOLOGY_H
#include <asm-generic/topology.h>
#endif /* _XTENSA_TOPOLOGY_H */
...@@ -7,30 +7,17 @@ ...@@ -7,30 +7,17 @@
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2005 Tensilica Inc.
*/ */
#ifndef _XTENSA_TYPES_H #ifndef _XTENSA_TYPES_H
#define _XTENSA_TYPES_H #define _XTENSA_TYPES_H
#include <asm-generic/int-ll64.h> #include <uapi/asm/types.h>
#ifdef __ASSEMBLY__
# define __XTENSA_UL(x) (x)
# define __XTENSA_UL_CONST(x) x
#else
# define __XTENSA_UL(x) ((unsigned long)(x))
# define __XTENSA_UL_CONST(x) x##UL
#endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* /*
* These aren't exported outside the kernel to avoid name space clashes * These aren't exported outside the kernel to avoid name space clashes
*/ */
#ifdef __KERNEL__
#define BITS_PER_LONG 32 #define BITS_PER_LONG 32
#endif /* __KERNEL__ */
#endif #endif
#endif /* _XTENSA_TYPES_H */ #endif /* _XTENSA_TYPES_H */
...@@ -8,705 +8,8 @@ ...@@ -8,705 +8,8 @@
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2005 Tensilica Inc.
*/ */
#ifndef _XTENSA_UNISTD_H #include <uapi/asm/unistd.h>
#define _XTENSA_UNISTD_H
#ifndef __SYSCALL
# define __SYSCALL(nr,func,nargs)
#endif
#define __NR_spill 0
__SYSCALL( 0, sys_ni_syscall, 0)
#define __NR_xtensa 1
__SYSCALL( 1, sys_ni_syscall, 0)
#define __NR_available4 2
__SYSCALL( 2, sys_ni_syscall, 0)
#define __NR_available5 3
__SYSCALL( 3, sys_ni_syscall, 0)
#define __NR_available6 4
__SYSCALL( 4, sys_ni_syscall, 0)
#define __NR_available7 5
__SYSCALL( 5, sys_ni_syscall, 0)
#define __NR_available8 6
__SYSCALL( 6, sys_ni_syscall, 0)
#define __NR_available9 7
__SYSCALL( 7, sys_ni_syscall, 0)
/* File Operations */
#define __NR_open 8
__SYSCALL( 8, sys_open, 3)
#define __NR_close 9
__SYSCALL( 9, sys_close, 1)
#define __NR_dup 10
__SYSCALL( 10, sys_dup, 1)
#define __NR_dup2 11
__SYSCALL( 11, sys_dup2, 2)
#define __NR_read 12
__SYSCALL( 12, sys_read, 3)
#define __NR_write 13
__SYSCALL( 13, sys_write, 3)
#define __NR_select 14
__SYSCALL( 14, sys_select, 5)
#define __NR_lseek 15
__SYSCALL( 15, sys_lseek, 3)
#define __NR_poll 16
__SYSCALL( 16, sys_poll, 3)
#define __NR__llseek 17
__SYSCALL( 17, sys_llseek, 5)
#define __NR_epoll_wait 18
__SYSCALL( 18, sys_epoll_wait, 4)
#define __NR_epoll_ctl 19
__SYSCALL( 19, sys_epoll_ctl, 4)
#define __NR_epoll_create 20
__SYSCALL( 20, sys_epoll_create, 1)
#define __NR_creat 21
__SYSCALL( 21, sys_creat, 2)
#define __NR_truncate 22
__SYSCALL( 22, sys_truncate, 2)
#define __NR_ftruncate 23
__SYSCALL( 23, sys_ftruncate, 2)
#define __NR_readv 24
__SYSCALL( 24, sys_readv, 3)
#define __NR_writev 25
__SYSCALL( 25, sys_writev, 3)
#define __NR_fsync 26
__SYSCALL( 26, sys_fsync, 1)
#define __NR_fdatasync 27
__SYSCALL( 27, sys_fdatasync, 1)
#define __NR_truncate64 28
__SYSCALL( 28, sys_truncate64, 2)
#define __NR_ftruncate64 29
__SYSCALL( 29, sys_ftruncate64, 2)
#define __NR_pread64 30
__SYSCALL( 30, sys_pread64, 6)
#define __NR_pwrite64 31
__SYSCALL( 31, sys_pwrite64, 6)
#define __NR_link 32
__SYSCALL( 32, sys_link, 2)
#define __NR_rename 33
__SYSCALL( 33, sys_rename, 2)
#define __NR_symlink 34
__SYSCALL( 34, sys_symlink, 2)
#define __NR_readlink 35
__SYSCALL( 35, sys_readlink, 3)
#define __NR_mknod 36
__SYSCALL( 36, sys_mknod, 3)
#define __NR_pipe 37
__SYSCALL( 37, sys_pipe, 1)
#define __NR_unlink 38
__SYSCALL( 38, sys_unlink, 1)
#define __NR_rmdir 39
__SYSCALL( 39, sys_rmdir, 1)
#define __NR_mkdir 40
__SYSCALL( 40, sys_mkdir, 2)
#define __NR_chdir 41
__SYSCALL( 41, sys_chdir, 1)
#define __NR_fchdir 42
__SYSCALL( 42, sys_fchdir, 1)
#define __NR_getcwd 43
__SYSCALL( 43, sys_getcwd, 2)
#define __NR_chmod 44
__SYSCALL( 44, sys_chmod, 2)
#define __NR_chown 45
__SYSCALL( 45, sys_chown, 3)
#define __NR_stat 46
__SYSCALL( 46, sys_newstat, 2)
#define __NR_stat64 47
__SYSCALL( 47, sys_stat64, 2)
#define __NR_lchown 48
__SYSCALL( 48, sys_lchown, 3)
#define __NR_lstat 49
__SYSCALL( 49, sys_newlstat, 2)
#define __NR_lstat64 50
__SYSCALL( 50, sys_lstat64, 2)
#define __NR_available51 51
__SYSCALL( 51, sys_ni_syscall, 0)
#define __NR_fchmod 52
__SYSCALL( 52, sys_fchmod, 2)
#define __NR_fchown 53
__SYSCALL( 53, sys_fchown, 3)
#define __NR_fstat 54
__SYSCALL( 54, sys_newfstat, 2)
#define __NR_fstat64 55
__SYSCALL( 55, sys_fstat64, 2)
#define __NR_flock 56
__SYSCALL( 56, sys_flock, 2)
#define __NR_access 57
__SYSCALL( 57, sys_access, 2)
#define __NR_umask 58
__SYSCALL( 58, sys_umask, 1)
#define __NR_getdents 59
__SYSCALL( 59, sys_getdents, 3)
#define __NR_getdents64 60
__SYSCALL( 60, sys_getdents64, 3)
#define __NR_fcntl64 61
__SYSCALL( 61, sys_fcntl64, 3)
#define __NR_available62 62
__SYSCALL( 62, sys_ni_syscall, 0)
#define __NR_fadvise64_64 63
__SYSCALL( 63, xtensa_fadvise64_64, 6)
#define __NR_utime 64 /* glibc 2.3.3 ?? */
__SYSCALL( 64, sys_utime, 2)
#define __NR_utimes 65
__SYSCALL( 65, sys_utimes, 2)
#define __NR_ioctl 66
__SYSCALL( 66, sys_ioctl, 3)
#define __NR_fcntl 67
__SYSCALL( 67, sys_fcntl, 3)
#define __NR_setxattr 68
__SYSCALL( 68, sys_setxattr, 5)
#define __NR_getxattr 69
__SYSCALL( 69, sys_getxattr, 4)
#define __NR_listxattr 70
__SYSCALL( 70, sys_listxattr, 3)
#define __NR_removexattr 71
__SYSCALL( 71, sys_removexattr, 2)
#define __NR_lsetxattr 72
__SYSCALL( 72, sys_lsetxattr, 5)
#define __NR_lgetxattr 73
__SYSCALL( 73, sys_lgetxattr, 4)
#define __NR_llistxattr 74
__SYSCALL( 74, sys_llistxattr, 3)
#define __NR_lremovexattr 75
__SYSCALL( 75, sys_lremovexattr, 2)
#define __NR_fsetxattr 76
__SYSCALL( 76, sys_fsetxattr, 5)
#define __NR_fgetxattr 77
__SYSCALL( 77, sys_fgetxattr, 4)
#define __NR_flistxattr 78
__SYSCALL( 78, sys_flistxattr, 3)
#define __NR_fremovexattr 79
__SYSCALL( 79, sys_fremovexattr, 2)
/* File Map / Shared Memory Operations */
#define __NR_mmap2 80
__SYSCALL( 80, sys_mmap_pgoff, 6)
#define __NR_munmap 81
__SYSCALL( 81, sys_munmap, 2)
#define __NR_mprotect 82
__SYSCALL( 82, sys_mprotect, 3)
#define __NR_brk 83
__SYSCALL( 83, sys_brk, 1)
#define __NR_mlock 84
__SYSCALL( 84, sys_mlock, 2)
#define __NR_munlock 85
__SYSCALL( 85, sys_munlock, 2)
#define __NR_mlockall 86
__SYSCALL( 86, sys_mlockall, 1)
#define __NR_munlockall 87
__SYSCALL( 87, sys_munlockall, 0)
#define __NR_mremap 88
__SYSCALL( 88, sys_mremap, 4)
#define __NR_msync 89
__SYSCALL( 89, sys_msync, 3)
#define __NR_mincore 90
__SYSCALL( 90, sys_mincore, 3)
#define __NR_madvise 91
__SYSCALL( 91, sys_madvise, 3)
#define __NR_shmget 92
__SYSCALL( 92, sys_shmget, 4)
#define __NR_shmat 93
__SYSCALL( 93, xtensa_shmat, 4)
#define __NR_shmctl 94
__SYSCALL( 94, sys_shmctl, 4)
#define __NR_shmdt 95
__SYSCALL( 95, sys_shmdt, 4)
/* Socket Operations */
#define __NR_socket 96
__SYSCALL( 96, sys_socket, 3)
#define __NR_setsockopt 97
__SYSCALL( 97, sys_setsockopt, 5)
#define __NR_getsockopt 98
__SYSCALL( 98, sys_getsockopt, 5)
#define __NR_shutdown 99
__SYSCALL( 99, sys_shutdown, 2)
#define __NR_bind 100
__SYSCALL(100, sys_bind, 3)
#define __NR_connect 101
__SYSCALL(101, sys_connect, 3)
#define __NR_listen 102
__SYSCALL(102, sys_listen, 2)
#define __NR_accept 103
__SYSCALL(103, sys_accept, 3)
#define __NR_getsockname 104
__SYSCALL(104, sys_getsockname, 3)
#define __NR_getpeername 105
__SYSCALL(105, sys_getpeername, 3)
#define __NR_sendmsg 106
__SYSCALL(106, sys_sendmsg, 3)
#define __NR_recvmsg 107
__SYSCALL(107, sys_recvmsg, 3)
#define __NR_send 108
__SYSCALL(108, sys_send, 4)
#define __NR_recv 109
__SYSCALL(109, sys_recv, 4)
#define __NR_sendto 110
__SYSCALL(110, sys_sendto, 6)
#define __NR_recvfrom 111
__SYSCALL(111, sys_recvfrom, 6)
#define __NR_socketpair 112
__SYSCALL(112, sys_socketpair, 4)
#define __NR_sendfile 113
__SYSCALL(113, sys_sendfile, 4)
#define __NR_sendfile64 114
__SYSCALL(114, sys_sendfile64, 4)
#define __NR_available115 115
__SYSCALL(115, sys_ni_syscall, 0)
/* Process Operations */
#define __NR_clone 116
__SYSCALL(116, xtensa_clone, 5)
#define __NR_execve 117
__SYSCALL(117, xtensa_execve, 3)
#define __NR_exit 118
__SYSCALL(118, sys_exit, 1)
#define __NR_exit_group 119
__SYSCALL(119, sys_exit_group, 1)
#define __NR_getpid 120
__SYSCALL(120, sys_getpid, 0)
#define __NR_wait4 121
__SYSCALL(121, sys_wait4, 4)
#define __NR_waitid 122
__SYSCALL(122, sys_waitid, 5)
#define __NR_kill 123
__SYSCALL(123, sys_kill, 2)
#define __NR_tkill 124
__SYSCALL(124, sys_tkill, 2)
#define __NR_tgkill 125
__SYSCALL(125, sys_tgkill, 3)
#define __NR_set_tid_address 126
__SYSCALL(126, sys_set_tid_address, 1)
#define __NR_gettid 127
__SYSCALL(127, sys_gettid, 0)
#define __NR_setsid 128
__SYSCALL(128, sys_setsid, 0)
#define __NR_getsid 129
__SYSCALL(129, sys_getsid, 1)
#define __NR_prctl 130
__SYSCALL(130, sys_prctl, 5)
#define __NR_personality 131
__SYSCALL(131, sys_personality, 1)
#define __NR_getpriority 132
__SYSCALL(132, sys_getpriority, 2)
#define __NR_setpriority 133
__SYSCALL(133, sys_setpriority, 3)
#define __NR_setitimer 134
__SYSCALL(134, sys_setitimer, 3)
#define __NR_getitimer 135
__SYSCALL(135, sys_getitimer, 2)
#define __NR_setuid 136
__SYSCALL(136, sys_setuid, 1)
#define __NR_getuid 137
__SYSCALL(137, sys_getuid, 0)
#define __NR_setgid 138
__SYSCALL(138, sys_setgid, 1)
#define __NR_getgid 139
__SYSCALL(139, sys_getgid, 0)
#define __NR_geteuid 140
__SYSCALL(140, sys_geteuid, 0)
#define __NR_getegid 141
__SYSCALL(141, sys_getegid, 0)
#define __NR_setreuid 142
__SYSCALL(142, sys_setreuid, 2)
#define __NR_setregid 143
__SYSCALL(143, sys_setregid, 2)
#define __NR_setresuid 144
__SYSCALL(144, sys_setresuid, 3)
#define __NR_getresuid 145
__SYSCALL(145, sys_getresuid, 3)
#define __NR_setresgid 146
__SYSCALL(146, sys_setresgid, 3)
#define __NR_getresgid 147
__SYSCALL(147, sys_getresgid, 3)
#define __NR_setpgid 148
__SYSCALL(148, sys_setpgid, 2)
#define __NR_getpgid 149
__SYSCALL(149, sys_getpgid, 1)
#define __NR_getppid 150
__SYSCALL(150, sys_getppid, 0)
#define __NR_getpgrp 151
__SYSCALL(151, sys_getpgrp, 0)
#define __NR_reserved152 152 /* set_thread_area */
__SYSCALL(152, sys_ni_syscall, 0)
#define __NR_reserved153 153 /* get_thread_area */
__SYSCALL(153, sys_ni_syscall, 0)
#define __NR_times 154
__SYSCALL(154, sys_times, 1)
#define __NR_acct 155
__SYSCALL(155, sys_acct, 1)
#define __NR_sched_setaffinity 156
__SYSCALL(156, sys_sched_setaffinity, 3)
#define __NR_sched_getaffinity 157
__SYSCALL(157, sys_sched_getaffinity, 3)
#define __NR_capget 158
__SYSCALL(158, sys_capget, 2)
#define __NR_capset 159
__SYSCALL(159, sys_capset, 2)
#define __NR_ptrace 160
__SYSCALL(160, sys_ptrace, 4)
#define __NR_semtimedop 161
__SYSCALL(161, sys_semtimedop, 5)
#define __NR_semget 162
__SYSCALL(162, sys_semget, 4)
#define __NR_semop 163
__SYSCALL(163, sys_semop, 4)
#define __NR_semctl 164
__SYSCALL(164, sys_semctl, 4)
#define __NR_available165 165
__SYSCALL(165, sys_ni_syscall, 0)
#define __NR_msgget 166
__SYSCALL(166, sys_msgget, 4)
#define __NR_msgsnd 167
__SYSCALL(167, sys_msgsnd, 4)
#define __NR_msgrcv 168
__SYSCALL(168, sys_msgrcv, 4)
#define __NR_msgctl 169
__SYSCALL(169, sys_msgctl, 4)
#define __NR_available170 170
__SYSCALL(170, sys_ni_syscall, 0)
#define __NR_available171 171
__SYSCALL(171, sys_ni_syscall, 0)
/* File System */
#define __NR_mount 172
__SYSCALL(172, sys_mount, 5)
#define __NR_swapon 173
__SYSCALL(173, sys_swapon, 2)
#define __NR_chroot 174
__SYSCALL(174, sys_chroot, 1)
#define __NR_pivot_root 175
__SYSCALL(175, sys_pivot_root, 2)
#define __NR_umount 176
__SYSCALL(176, sys_umount, 2)
#define __NR_swapoff 177
__SYSCALL(177, sys_swapoff, 1)
#define __NR_sync 178
__SYSCALL(178, sys_sync, 0)
#define __NR_available179 179
__SYSCALL(179, sys_ni_syscall, 0)
#define __NR_setfsuid 180
__SYSCALL(180, sys_setfsuid, 1)
#define __NR_setfsgid 181
__SYSCALL(181, sys_setfsgid, 1)
#define __NR_sysfs 182
__SYSCALL(182, sys_sysfs, 3)
#define __NR_ustat 183
__SYSCALL(183, sys_ustat, 2)
#define __NR_statfs 184
__SYSCALL(184, sys_statfs, 2)
#define __NR_fstatfs 185
__SYSCALL(185, sys_fstatfs, 2)
#define __NR_statfs64 186
__SYSCALL(186, sys_statfs64, 3)
#define __NR_fstatfs64 187
__SYSCALL(187, sys_fstatfs64, 3)
/* System */
#define __NR_setrlimit 188
__SYSCALL(188, sys_setrlimit, 2)
#define __NR_getrlimit 189
__SYSCALL(189, sys_getrlimit, 2)
#define __NR_getrusage 190
__SYSCALL(190, sys_getrusage, 2)
#define __NR_futex 191
__SYSCALL(191, sys_futex, 5)
#define __NR_gettimeofday 192
__SYSCALL(192, sys_gettimeofday, 2)
#define __NR_settimeofday 193
__SYSCALL(193, sys_settimeofday, 2)
#define __NR_adjtimex 194
__SYSCALL(194, sys_adjtimex, 1)
#define __NR_nanosleep 195
__SYSCALL(195, sys_nanosleep, 2)
#define __NR_getgroups 196
__SYSCALL(196, sys_getgroups, 2)
#define __NR_setgroups 197
__SYSCALL(197, sys_setgroups, 2)
#define __NR_sethostname 198
__SYSCALL(198, sys_sethostname, 2)
#define __NR_setdomainname 199
__SYSCALL(199, sys_setdomainname, 2)
#define __NR_syslog 200
__SYSCALL(200, sys_syslog, 3)
#define __NR_vhangup 201
__SYSCALL(201, sys_vhangup, 0)
#define __NR_uselib 202
__SYSCALL(202, sys_uselib, 1)
#define __NR_reboot 203
__SYSCALL(203, sys_reboot, 3)
#define __NR_quotactl 204
__SYSCALL(204, sys_quotactl, 4)
#define __NR_nfsservctl 205
__SYSCALL(205, sys_ni_syscall, 0)
#define __NR__sysctl 206
__SYSCALL(206, sys_sysctl, 1)
#define __NR_bdflush 207
__SYSCALL(207, sys_bdflush, 2)
#define __NR_uname 208
__SYSCALL(208, sys_newuname, 1)
#define __NR_sysinfo 209
__SYSCALL(209, sys_sysinfo, 1)
#define __NR_init_module 210
__SYSCALL(210, sys_init_module, 2)
#define __NR_delete_module 211
__SYSCALL(211, sys_delete_module, 1)
#define __NR_sched_setparam 212
__SYSCALL(212, sys_sched_setparam, 2)
#define __NR_sched_getparam 213
__SYSCALL(213, sys_sched_getparam, 2)
#define __NR_sched_setscheduler 214
__SYSCALL(214, sys_sched_setscheduler, 3)
#define __NR_sched_getscheduler 215
__SYSCALL(215, sys_sched_getscheduler, 1)
#define __NR_sched_get_priority_max 216
__SYSCALL(216, sys_sched_get_priority_max, 1)
#define __NR_sched_get_priority_min 217
__SYSCALL(217, sys_sched_get_priority_min, 1)
#define __NR_sched_rr_get_interval 218
__SYSCALL(218, sys_sched_rr_get_interval, 2)
#define __NR_sched_yield 219
__SYSCALL(219, sys_sched_yield, 0)
#define __NR_available222 222
__SYSCALL(222, sys_ni_syscall, 0)
/* Signal Handling */
#define __NR_restart_syscall 223
__SYSCALL(223, sys_restart_syscall, 0)
#define __NR_sigaltstack 224
__SYSCALL(224, xtensa_sigaltstack, 2)
#define __NR_rt_sigreturn 225
__SYSCALL(225, xtensa_rt_sigreturn, 1)
#define __NR_rt_sigaction 226
__SYSCALL(226, sys_rt_sigaction, 4)
#define __NR_rt_sigprocmask 227
__SYSCALL(227, sys_rt_sigprocmask, 4)
#define __NR_rt_sigpending 228
__SYSCALL(228, sys_rt_sigpending, 2)
#define __NR_rt_sigtimedwait 229
__SYSCALL(229, sys_rt_sigtimedwait, 4)
#define __NR_rt_sigqueueinfo 230
__SYSCALL(230, sys_rt_sigqueueinfo, 3)
#define __NR_rt_sigsuspend 231
__SYSCALL(231, sys_rt_sigsuspend, 2)
/* Message */
#define __NR_mq_open 232
__SYSCALL(232, sys_mq_open, 4)
#define __NR_mq_unlink 233
__SYSCALL(233, sys_mq_unlink, 1)
#define __NR_mq_timedsend 234
__SYSCALL(234, sys_mq_timedsend, 5)
#define __NR_mq_timedreceive 235
__SYSCALL(235, sys_mq_timedreceive, 5)
#define __NR_mq_notify 236
__SYSCALL(236, sys_mq_notify, 2)
#define __NR_mq_getsetattr 237
__SYSCALL(237, sys_mq_getsetattr, 3)
#define __NR_available238 238
__SYSCALL(238, sys_ni_syscall, 0)
/* IO */
#define __NR_io_setup 239
__SYSCALL(239, sys_io_setup, 2)
#define __NR_io_destroy 240
__SYSCALL(240, sys_io_destroy, 1)
#define __NR_io_submit 241
__SYSCALL(241, sys_io_submit, 3)
#define __NR_io_getevents 242
__SYSCALL(242, sys_io_getevents, 5)
#define __NR_io_cancel 243
__SYSCALL(243, sys_io_cancel, 3)
#define __NR_clock_settime 244
__SYSCALL(244, sys_clock_settime, 2)
#define __NR_clock_gettime 245
__SYSCALL(245, sys_clock_gettime, 2)
#define __NR_clock_getres 246
__SYSCALL(246, sys_clock_getres, 2)
#define __NR_clock_nanosleep 247
__SYSCALL(247, sys_clock_nanosleep, 4)
/* Timer */
#define __NR_timer_create 248
__SYSCALL(248, sys_timer_create, 3)
#define __NR_timer_delete 249
__SYSCALL(249, sys_timer_delete, 1)
#define __NR_timer_settime 250
__SYSCALL(250, sys_timer_settime, 4)
#define __NR_timer_gettime 251
__SYSCALL(251, sys_timer_gettime, 2)
#define __NR_timer_getoverrun 252
__SYSCALL(252, sys_timer_getoverrun, 1)
/* System */
#define __NR_reserved244 253
__SYSCALL(253, sys_ni_syscall, 0)
#define __NR_lookup_dcookie 254
__SYSCALL(254, sys_lookup_dcookie, 4)
#define __NR_available255 255
__SYSCALL(255, sys_ni_syscall, 0)
#define __NR_add_key 256
__SYSCALL(256, sys_add_key, 5)
#define __NR_request_key 257
__SYSCALL(257, sys_request_key, 5)
#define __NR_keyctl 258
__SYSCALL(258, sys_keyctl, 5)
#define __NR_available259 259
__SYSCALL(259, sys_ni_syscall, 0)
#define __NR_readahead 260
__SYSCALL(260, sys_readahead, 5)
#define __NR_remap_file_pages 261
__SYSCALL(261, sys_remap_file_pages, 5)
#define __NR_migrate_pages 262
__SYSCALL(262, sys_migrate_pages, 0)
#define __NR_mbind 263
__SYSCALL(263, sys_mbind, 6)
#define __NR_get_mempolicy 264
__SYSCALL(264, sys_get_mempolicy, 5)
#define __NR_set_mempolicy 265
__SYSCALL(265, sys_set_mempolicy, 3)
#define __NR_unshare 266
__SYSCALL(266, sys_unshare, 1)
#define __NR_move_pages 267
__SYSCALL(267, sys_move_pages, 0)
#define __NR_splice 268
__SYSCALL(268, sys_splice, 0)
#define __NR_tee 269
__SYSCALL(269, sys_tee, 0)
#define __NR_vmsplice 270
__SYSCALL(270, sys_vmsplice, 0)
#define __NR_available271 271
__SYSCALL(271, sys_ni_syscall, 0)
#define __NR_pselect6 272
__SYSCALL(272, sys_pselect6, 0)
#define __NR_ppoll 273
__SYSCALL(273, sys_ppoll, 0)
#define __NR_epoll_pwait 274
__SYSCALL(274, sys_epoll_pwait, 0)
#define __NR_available275 275
__SYSCALL(275, sys_ni_syscall, 0)
#define __NR_inotify_init 276
__SYSCALL(276, sys_inotify_init, 0)
#define __NR_inotify_add_watch 277
__SYSCALL(277, sys_inotify_add_watch, 3)
#define __NR_inotify_rm_watch 278
__SYSCALL(278, sys_inotify_rm_watch, 2)
#define __NR_available279 279
__SYSCALL(279, sys_ni_syscall, 0)
#define __NR_getcpu 280
__SYSCALL(280, sys_getcpu, 0)
#define __NR_kexec_load 281
__SYSCALL(281, sys_ni_syscall, 0)
#define __NR_ioprio_set 282
__SYSCALL(282, sys_ioprio_set, 2)
#define __NR_ioprio_get 283
__SYSCALL(283, sys_ioprio_get, 3)
#define __NR_set_robust_list 284
__SYSCALL(284, sys_set_robust_list, 3)
#define __NR_get_robust_list 285
__SYSCALL(285, sys_get_robust_list, 3)
#define __NR_reserved286 286 /* sync_file_rangeX */
__SYSCALL(286, sys_ni_syscall, 3)
#define __NR_available287 287
__SYSCALL(287, sys_faccessat, 0)
/* Relative File Operations */
#define __NR_openat 288
__SYSCALL(288, sys_openat, 4)
#define __NR_mkdirat 289
__SYSCALL(289, sys_mkdirat, 3)
#define __NR_mknodat 290
__SYSCALL(290, sys_mknodat, 4)
#define __NR_unlinkat 291
__SYSCALL(291, sys_unlinkat, 3)
#define __NR_renameat 292
__SYSCALL(292, sys_renameat, 4)
#define __NR_linkat 293
__SYSCALL(293, sys_linkat, 5)
#define __NR_symlinkat 294
__SYSCALL(294, sys_symlinkat, 3)
#define __NR_readlinkat 295
__SYSCALL(295, sys_readlinkat, 4)
#define __NR_utimensat 296
__SYSCALL(296, sys_utimensat, 0)
#define __NR_fchownat 297
__SYSCALL(297, sys_fchownat, 5)
#define __NR_futimesat 298
__SYSCALL(298, sys_futimesat, 4)
#define __NR_fstatat64 299
__SYSCALL(299, sys_fstatat64, 0)
#define __NR_fchmodat 300
__SYSCALL(300, sys_fchmodat, 4)
#define __NR_faccessat 301
__SYSCALL(301, sys_faccessat, 4)
#define __NR_available302 302
__SYSCALL(302, sys_ni_syscall, 0)
#define __NR_available303 303
__SYSCALL(303, sys_ni_syscall, 0)
#define __NR_signalfd 304
__SYSCALL(304, sys_signalfd, 3)
/* 305 was __NR_timerfd */
__SYSCALL(305, sys_ni_syscall, 0)
#define __NR_eventfd 306
__SYSCALL(306, sys_eventfd, 1)
#define __NR_recvmmsg 307
__SYSCALL(307, sys_recvmmsg, 5)
#define __NR_setns 308
__SYSCALL(308, sys_setns, 2)
#define __NR_syscall_count 309
/*
* sysxtensa syscall handler
*
* int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
* int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
* int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
* int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
* a2 a6 a3 a4 a5
*/
#define SYS_XTENSA_RESERVED 0 /* don't use this */
#define SYS_XTENSA_ATOMIC_SET 1 /* set variable */
#define SYS_XTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
#define SYS_XTENSA_ATOMIC_ADD 3 /* add to memory */
#define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
#define SYS_XTENSA_COUNT 5 /* count */
#ifdef __KERNEL__
/* /*
* "Conditional" syscalls * "Conditional" syscalls
...@@ -734,6 +37,3 @@ __SYSCALL(308, sys_setns, 2) ...@@ -734,6 +37,3 @@ __SYSCALL(308, sys_setns, 2)
#define __IGNORE_mmap /* use mmap2 */ #define __IGNORE_mmap /* use mmap2 */
#define __IGNORE_vfork /* use clone */ #define __IGNORE_vfork /* use clone */
#define __IGNORE_fadvise64 /* use fadvise64_64 */ #define __IGNORE_fadvise64 /* use fadvise64_64 */
#endif /* __KERNEL__ */
#endif /* _XTENSA_UNISTD_H */
# UAPI Header export list # UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm include include/uapi/asm-generic/Kbuild.asm
header-y += auxvec.h
header-y += byteorder.h
header-y += ioctls.h
header-y += ipcbuf.h
header-y += mman.h
header-y += msgbuf.h
header-y += param.h
header-y += poll.h
header-y += posix_types.h
header-y += ptrace.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
header-y += sigcontext.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
header-y += stat.h
header-y += swab.h
header-y += termbits.h
header-y += types.h
header-y += unistd.h
/* /*
* include/asm-xtensa/xor.h * include/asm-xtensa/param.h
* *
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
...@@ -8,9 +8,23 @@ ...@@ -8,9 +8,23 @@
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2005 Tensilica Inc.
*/ */
#ifndef _XTENSA_XOR_H #ifndef _UAPI_XTENSA_PARAM_H
#define _XTENSA_XOR_H #define _UAPI_XTENSA_PARAM_H
#include <asm-generic/xor.h> #ifndef __KERNEL__
# define HZ 100
#endif
#define EXEC_PAGESIZE 4096
#ifndef NGROUPS
#define NGROUPS 32
#endif
#ifndef NOGROUP
#define NOGROUP (-1)
#endif #endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#endif /* _UAPI_XTENSA_PARAM_H */
/*
* include/asm-xtensa/ptrace.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _UAPI_XTENSA_PTRACE_H
#define _UAPI_XTENSA_PTRACE_H
/*
* Kernel stack
*
* +-----------------------+ -------- STACK_SIZE
* | register file | |
* +-----------------------+ |
* | struct pt_regs | |
* +-----------------------+ | ------ PT_REGS_OFFSET
* double : 16 bytes spill area : | ^
 * exception :- - - - - - - - - - - -: | |
* frame : struct pt_regs : | |
* :- - - - - - - - - - - -: | |
* | | | |
* | memory stack | | |
* | | | |
* ~ ~ ~ ~
* ~ ~ ~ ~
* | | | |
* | | | |
* +-----------------------+ | | --- STACK_BIAS
* | struct task_struct | | | ^
* current --> +-----------------------+ | | |
* | struct thread_info | | | |
* +-----------------------+ --------
*/
#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
#define EXC_TABLE_SIZE 0x400
/* Registers used by strace */
#define REG_A_BASE 0x0000
#define REG_AR_BASE 0x0100
#define REG_PC 0x0020
#define REG_PS 0x02e6
#define REG_WB 0x0248
#define REG_WS 0x0249
#define REG_LBEG 0x0200
#define REG_LEND 0x0201
#define REG_LCOUNT 0x0202
#define REG_SAR 0x0203
#define SYSCALL_NR 0x00ff
/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETXTREGS 18
#define PTRACE_SETXTREGS 19
#endif /* _UAPI_XTENSA_PTRACE_H */
/*
* include/asm-xtensa/signal.h
*
* Swiped from SH.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _UAPI_XTENSA_SIGNAL_H
#define _UAPI_XTENSA_SIGNAL_H
#define _NSIG 64
#define _NSIG_BPW 32
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
#ifndef __ASSEMBLY__
#include <linux/types.h>
/* Avoid too many header ordering problems. */
/* Forward declaration only; full definition comes from <asm-generic/siginfo.h>
 * via other headers — avoids header-ordering problems for libc. */
struct siginfo;

/* Legacy one-word signal mask used by the old (non-rt) signal calls. */
typedef unsigned long old_sigset_t;		/* at least 32 bits */

/* Full signal mask: _NSIG (64) bits split across _NSIG_WORDS words
 * of _NSIG_BPW (32) bits each. */
typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
#endif
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/* #define SIGLOST 29 */
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX (_NSIG-1)
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
#define SA_SIGINFO 0x00000004
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
#ifndef __ASSEMBLY__
#define SIG_BLOCK 0 /* for blocking signals */
#define SIG_UNBLOCK 1 /* for unblocking signals */
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
#ifndef __KERNEL__
/* Here we must cater to libcs that poke about in kernel headers. */
/*
 * Userspace-visible sigaction layout (the kernel builds its own view when
 * __KERNEL__ is defined; this branch exists only for libcs that read
 * kernel headers directly).  The union lets a single slot carry either
 * the classic handler or the SA_SIGINFO three-argument handler.
 */
struct sigaction {
union {
__sighandler_t _sa_handler;
void (*_sa_sigaction)(int, struct siginfo *, void *);
} _u;
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
/* Accessor aliases so user code can write sa.sa_handler / sa.sa_sigaction
 * even though the two share storage in the _u union. */
#define sa_handler _u._sa_handler
#define sa_sigaction _u._sa_sigaction
#endif /* !__KERNEL__ */
/* Alternate signal-stack descriptor for sigaltstack(2). */
typedef struct sigaltstack {
void *ss_sp;		/* base address of the stack area */
int ss_flags;		/* SS_ONSTACK / SS_DISABLE (defined above) */
size_t ss_size;		/* size in bytes of the area at ss_sp */
} stack_t;
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_XTENSA_SIGNAL_H */
/* /*
* include/asm-xtensa/bug.h * include/asm-xtensa/types.h
*
* Macros to cause a 'bug' message.
* *
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
...@@ -10,9 +8,21 @@ ...@@ -10,9 +8,21 @@
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2005 Tensilica Inc.
*/ */
#ifndef _XTENSA_BUG_H #ifndef _UAPI_XTENSA_TYPES_H
#define _XTENSA_BUG_H #define _UAPI_XTENSA_TYPES_H
#include <asm-generic/int-ll64.h>
#ifdef __ASSEMBLY__
# define __XTENSA_UL(x) (x)
# define __XTENSA_UL_CONST(x) x
#else
# define __XTENSA_UL(x) ((unsigned long)(x))
# define __XTENSA_UL_CONST(x) x##UL
#endif
#ifndef __ASSEMBLY__
#include <asm-generic/bug.h> #endif
#endif /* _XTENSA_BUG_H */ #endif /* _UAPI_XTENSA_TYPES_H */
/*
* include/asm-xtensa/unistd.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2012 Tensilica Inc.
*/
#ifndef _UAPI_XTENSA_UNISTD_H
#define _UAPI_XTENSA_UNISTD_H
#ifndef __SYSCALL
# define __SYSCALL(nr,func,nargs)
#endif
#define __NR_spill 0
__SYSCALL( 0, sys_ni_syscall, 0)
#define __NR_xtensa 1
__SYSCALL( 1, sys_ni_syscall, 0)
#define __NR_available4 2
__SYSCALL( 2, sys_ni_syscall, 0)
#define __NR_available5 3
__SYSCALL( 3, sys_ni_syscall, 0)
#define __NR_available6 4
__SYSCALL( 4, sys_ni_syscall, 0)
#define __NR_available7 5
__SYSCALL( 5, sys_ni_syscall, 0)
#define __NR_available8 6
__SYSCALL( 6, sys_ni_syscall, 0)
#define __NR_available9 7
__SYSCALL( 7, sys_ni_syscall, 0)
/* File Operations */
#define __NR_open 8
__SYSCALL( 8, sys_open, 3)
#define __NR_close 9
__SYSCALL( 9, sys_close, 1)
#define __NR_dup 10
__SYSCALL( 10, sys_dup, 1)
#define __NR_dup2 11
__SYSCALL( 11, sys_dup2, 2)
#define __NR_read 12
__SYSCALL( 12, sys_read, 3)
#define __NR_write 13
__SYSCALL( 13, sys_write, 3)
#define __NR_select 14
__SYSCALL( 14, sys_select, 5)
#define __NR_lseek 15
__SYSCALL( 15, sys_lseek, 3)
#define __NR_poll 16
__SYSCALL( 16, sys_poll, 3)
#define __NR__llseek 17
__SYSCALL( 17, sys_llseek, 5)
#define __NR_epoll_wait 18
__SYSCALL( 18, sys_epoll_wait, 4)
#define __NR_epoll_ctl 19
__SYSCALL( 19, sys_epoll_ctl, 4)
#define __NR_epoll_create 20
__SYSCALL( 20, sys_epoll_create, 1)
#define __NR_creat 21
__SYSCALL( 21, sys_creat, 2)
#define __NR_truncate 22
__SYSCALL( 22, sys_truncate, 2)
#define __NR_ftruncate 23
__SYSCALL( 23, sys_ftruncate, 2)
#define __NR_readv 24
__SYSCALL( 24, sys_readv, 3)
#define __NR_writev 25
__SYSCALL( 25, sys_writev, 3)
#define __NR_fsync 26
__SYSCALL( 26, sys_fsync, 1)
#define __NR_fdatasync 27
__SYSCALL( 27, sys_fdatasync, 1)
#define __NR_truncate64 28
__SYSCALL( 28, sys_truncate64, 2)
#define __NR_ftruncate64 29
__SYSCALL( 29, sys_ftruncate64, 2)
#define __NR_pread64 30
__SYSCALL( 30, sys_pread64, 6)
#define __NR_pwrite64 31
__SYSCALL( 31, sys_pwrite64, 6)
#define __NR_link 32
__SYSCALL( 32, sys_link, 2)
#define __NR_rename 33
__SYSCALL( 33, sys_rename, 2)
#define __NR_symlink 34
__SYSCALL( 34, sys_symlink, 2)
#define __NR_readlink 35
__SYSCALL( 35, sys_readlink, 3)
#define __NR_mknod 36
__SYSCALL( 36, sys_mknod, 3)
#define __NR_pipe 37
__SYSCALL( 37, sys_pipe, 1)
#define __NR_unlink 38
__SYSCALL( 38, sys_unlink, 1)
#define __NR_rmdir 39
__SYSCALL( 39, sys_rmdir, 1)
#define __NR_mkdir 40
__SYSCALL( 40, sys_mkdir, 2)
#define __NR_chdir 41
__SYSCALL( 41, sys_chdir, 1)
#define __NR_fchdir 42
__SYSCALL( 42, sys_fchdir, 1)
#define __NR_getcwd 43
__SYSCALL( 43, sys_getcwd, 2)
#define __NR_chmod 44
__SYSCALL( 44, sys_chmod, 2)
#define __NR_chown 45
__SYSCALL( 45, sys_chown, 3)
#define __NR_stat 46
__SYSCALL( 46, sys_newstat, 2)
#define __NR_stat64 47
__SYSCALL( 47, sys_stat64, 2)
#define __NR_lchown 48
__SYSCALL( 48, sys_lchown, 3)
#define __NR_lstat 49
__SYSCALL( 49, sys_newlstat, 2)
#define __NR_lstat64 50
__SYSCALL( 50, sys_lstat64, 2)
#define __NR_available51 51
__SYSCALL( 51, sys_ni_syscall, 0)
#define __NR_fchmod 52
__SYSCALL( 52, sys_fchmod, 2)
#define __NR_fchown 53
__SYSCALL( 53, sys_fchown, 3)
#define __NR_fstat 54
__SYSCALL( 54, sys_newfstat, 2)
#define __NR_fstat64 55
__SYSCALL( 55, sys_fstat64, 2)
#define __NR_flock 56
__SYSCALL( 56, sys_flock, 2)
#define __NR_access 57
__SYSCALL( 57, sys_access, 2)
#define __NR_umask 58
__SYSCALL( 58, sys_umask, 1)
#define __NR_getdents 59
__SYSCALL( 59, sys_getdents, 3)
#define __NR_getdents64 60
__SYSCALL( 60, sys_getdents64, 3)
#define __NR_fcntl64 61
__SYSCALL( 61, sys_fcntl64, 3)
#define __NR_fallocate 62
__SYSCALL( 62, sys_fallocate, 6)
#define __NR_fadvise64_64 63
__SYSCALL( 63, xtensa_fadvise64_64, 6)
#define __NR_utime 64 /* glibc 2.3.3 ?? */
__SYSCALL( 64, sys_utime, 2)
#define __NR_utimes 65
__SYSCALL( 65, sys_utimes, 2)
#define __NR_ioctl 66
__SYSCALL( 66, sys_ioctl, 3)
#define __NR_fcntl 67
__SYSCALL( 67, sys_fcntl, 3)
#define __NR_setxattr 68
__SYSCALL( 68, sys_setxattr, 5)
#define __NR_getxattr 69
__SYSCALL( 69, sys_getxattr, 4)
#define __NR_listxattr 70
__SYSCALL( 70, sys_listxattr, 3)
#define __NR_removexattr 71
__SYSCALL( 71, sys_removexattr, 2)
#define __NR_lsetxattr 72
__SYSCALL( 72, sys_lsetxattr, 5)
#define __NR_lgetxattr 73
__SYSCALL( 73, sys_lgetxattr, 4)
#define __NR_llistxattr 74
__SYSCALL( 74, sys_llistxattr, 3)
#define __NR_lremovexattr 75
__SYSCALL( 75, sys_lremovexattr, 2)
#define __NR_fsetxattr 76
__SYSCALL( 76, sys_fsetxattr, 5)
#define __NR_fgetxattr 77
__SYSCALL( 77, sys_fgetxattr, 4)
#define __NR_flistxattr 78
__SYSCALL( 78, sys_flistxattr, 3)
#define __NR_fremovexattr 79
__SYSCALL( 79, sys_fremovexattr, 2)
/* File Map / Shared Memory Operations */
#define __NR_mmap2 80
__SYSCALL( 80, sys_mmap_pgoff, 6)
#define __NR_munmap 81
__SYSCALL( 81, sys_munmap, 2)
#define __NR_mprotect 82
__SYSCALL( 82, sys_mprotect, 3)
#define __NR_brk 83
__SYSCALL( 83, sys_brk, 1)
#define __NR_mlock 84
__SYSCALL( 84, sys_mlock, 2)
#define __NR_munlock 85
__SYSCALL( 85, sys_munlock, 2)
#define __NR_mlockall 86
__SYSCALL( 86, sys_mlockall, 1)
#define __NR_munlockall 87
__SYSCALL( 87, sys_munlockall, 0)
#define __NR_mremap 88
__SYSCALL( 88, sys_mremap, 4)
#define __NR_msync 89
__SYSCALL( 89, sys_msync, 3)
#define __NR_mincore 90
__SYSCALL( 90, sys_mincore, 3)
#define __NR_madvise 91
__SYSCALL( 91, sys_madvise, 3)
#define __NR_shmget 92
__SYSCALL( 92, sys_shmget, 4)
#define __NR_shmat 93
__SYSCALL( 93, xtensa_shmat, 4)
#define __NR_shmctl 94
__SYSCALL( 94, sys_shmctl, 4)
#define __NR_shmdt 95
__SYSCALL( 95, sys_shmdt, 4)
/* Socket Operations */
#define __NR_socket 96
__SYSCALL( 96, sys_socket, 3)
#define __NR_setsockopt 97
__SYSCALL( 97, sys_setsockopt, 5)
#define __NR_getsockopt 98
__SYSCALL( 98, sys_getsockopt, 5)
#define __NR_shutdown 99
__SYSCALL( 99, sys_shutdown, 2)
#define __NR_bind 100
__SYSCALL(100, sys_bind, 3)
#define __NR_connect 101
__SYSCALL(101, sys_connect, 3)
#define __NR_listen 102
__SYSCALL(102, sys_listen, 2)
#define __NR_accept 103
__SYSCALL(103, sys_accept, 3)
#define __NR_getsockname 104
__SYSCALL(104, sys_getsockname, 3)
#define __NR_getpeername 105
__SYSCALL(105, sys_getpeername, 3)
#define __NR_sendmsg 106
__SYSCALL(106, sys_sendmsg, 3)
#define __NR_recvmsg 107
__SYSCALL(107, sys_recvmsg, 3)
#define __NR_send 108
__SYSCALL(108, sys_send, 4)
#define __NR_recv 109
__SYSCALL(109, sys_recv, 4)
#define __NR_sendto 110
__SYSCALL(110, sys_sendto, 6)
#define __NR_recvfrom 111
__SYSCALL(111, sys_recvfrom, 6)
#define __NR_socketpair 112
__SYSCALL(112, sys_socketpair, 4)
#define __NR_sendfile 113
__SYSCALL(113, sys_sendfile, 4)
#define __NR_sendfile64 114
__SYSCALL(114, sys_sendfile64, 4)
#define __NR_sendmmsg 115
__SYSCALL(115, sys_sendmmsg, 4)
/* Process Operations */
#define __NR_clone 116
__SYSCALL(116, xtensa_clone, 5)
#define __NR_execve 117
__SYSCALL(117, xtensa_execve, 3)
#define __NR_exit 118
__SYSCALL(118, sys_exit, 1)
#define __NR_exit_group 119
__SYSCALL(119, sys_exit_group, 1)
#define __NR_getpid 120
__SYSCALL(120, sys_getpid, 0)
#define __NR_wait4 121
__SYSCALL(121, sys_wait4, 4)
#define __NR_waitid 122
__SYSCALL(122, sys_waitid, 5)
#define __NR_kill 123
__SYSCALL(123, sys_kill, 2)
#define __NR_tkill 124
__SYSCALL(124, sys_tkill, 2)
#define __NR_tgkill 125
__SYSCALL(125, sys_tgkill, 3)
#define __NR_set_tid_address 126
__SYSCALL(126, sys_set_tid_address, 1)
#define __NR_gettid 127
__SYSCALL(127, sys_gettid, 0)
#define __NR_setsid 128
__SYSCALL(128, sys_setsid, 0)
#define __NR_getsid 129
__SYSCALL(129, sys_getsid, 1)
#define __NR_prctl 130
__SYSCALL(130, sys_prctl, 5)
#define __NR_personality 131
__SYSCALL(131, sys_personality, 1)
/* Scheduling priority, interval timers, and process credential
 * (uid/gid) syscalls (132-151). */
#define __NR_getpriority 132
__SYSCALL(132, sys_getpriority, 2)
#define __NR_setpriority 133
__SYSCALL(133, sys_setpriority, 3)
#define __NR_setitimer 134
__SYSCALL(134, sys_setitimer, 3)
#define __NR_getitimer 135
__SYSCALL(135, sys_getitimer, 2)
#define __NR_setuid 136
__SYSCALL(136, sys_setuid, 1)
#define __NR_getuid 137
__SYSCALL(137, sys_getuid, 0)
#define __NR_setgid 138
__SYSCALL(138, sys_setgid, 1)
#define __NR_getgid 139
__SYSCALL(139, sys_getgid, 0)
#define __NR_geteuid 140
__SYSCALL(140, sys_geteuid, 0)
#define __NR_getegid 141
__SYSCALL(141, sys_getegid, 0)
#define __NR_setreuid 142
__SYSCALL(142, sys_setreuid, 2)
#define __NR_setregid 143
__SYSCALL(143, sys_setregid, 2)
#define __NR_setresuid 144
__SYSCALL(144, sys_setresuid, 3)
#define __NR_getresuid 145
__SYSCALL(145, sys_getresuid, 3)
#define __NR_setresgid 146
__SYSCALL(146, sys_setresgid, 3)
#define __NR_getresgid 147
__SYSCALL(147, sys_getresgid, 3)
#define __NR_setpgid 148
__SYSCALL(148, sys_setpgid, 2)
#define __NR_getpgid 149
__SYSCALL(149, sys_getpgid, 1)
#define __NR_getppid 150
__SYSCALL(150, sys_getppid, 0)
#define __NR_getpgrp 151
__SYSCALL(151, sys_getpgrp, 0)
/* Reserved slots, accounting, CPU affinity, capabilities, ptrace, and
 * SysV semaphore/message IPC (152-170). "reserved" and "available" slots
 * route to sys_ni_syscall (returns -ENOSYS) so the numbering stays stable. */
#define __NR_reserved152 152 /* set_thread_area */
__SYSCALL(152, sys_ni_syscall, 0)
#define __NR_reserved153 153 /* get_thread_area */
__SYSCALL(153, sys_ni_syscall, 0)
#define __NR_times 154
__SYSCALL(154, sys_times, 1)
#define __NR_acct 155
__SYSCALL(155, sys_acct, 1)
#define __NR_sched_setaffinity 156
__SYSCALL(156, sys_sched_setaffinity, 3)
#define __NR_sched_getaffinity 157
__SYSCALL(157, sys_sched_getaffinity, 3)
#define __NR_capget 158
__SYSCALL(158, sys_capget, 2)
#define __NR_capset 159
__SYSCALL(159, sys_capset, 2)
#define __NR_ptrace 160
__SYSCALL(160, sys_ptrace, 4)
#define __NR_semtimedop 161
__SYSCALL(161, sys_semtimedop, 5)
#define __NR_semget 162
__SYSCALL(162, sys_semget, 4)
#define __NR_semop 163
__SYSCALL(163, sys_semop, 4)
#define __NR_semctl 164
__SYSCALL(164, sys_semctl, 4)
#define __NR_available165 165
__SYSCALL(165, sys_ni_syscall, 0)
#define __NR_msgget 166
__SYSCALL(166, sys_msgget, 4)
#define __NR_msgsnd 167
__SYSCALL(167, sys_msgsnd, 4)
#define __NR_msgrcv 168
__SYSCALL(168, sys_msgrcv, 4)
#define __NR_msgctl 169
__SYSCALL(169, sys_msgctl, 4)
#define __NR_available170 170
__SYSCALL(170, sys_ni_syscall, 0)
/* File System */
/* Mount/swap/statfs syscalls (171-187). Note both __NR_umount2 (171) and
 * the legacy __NR_umount (176) dispatch to the same sys_umount handler. */
#define __NR_umount2 171
__SYSCALL(171, sys_umount, 2)
#define __NR_mount 172
__SYSCALL(172, sys_mount, 5)
#define __NR_swapon 173
__SYSCALL(173, sys_swapon, 2)
#define __NR_chroot 174
__SYSCALL(174, sys_chroot, 1)
#define __NR_pivot_root 175
__SYSCALL(175, sys_pivot_root, 2)
#define __NR_umount 176
__SYSCALL(176, sys_umount, 2)
#define __NR_swapoff 177
__SYSCALL(177, sys_swapoff, 1)
#define __NR_sync 178
__SYSCALL(178, sys_sync, 0)
#define __NR_syncfs 179
__SYSCALL(179, sys_syncfs, 1)
#define __NR_setfsuid 180
__SYSCALL(180, sys_setfsuid, 1)
#define __NR_setfsgid 181
__SYSCALL(181, sys_setfsgid, 1)
#define __NR_sysfs 182
__SYSCALL(182, sys_sysfs, 3)
#define __NR_ustat 183
__SYSCALL(183, sys_ustat, 2)
#define __NR_statfs 184
__SYSCALL(184, sys_statfs, 2)
#define __NR_fstatfs 185
__SYSCALL(185, sys_fstatfs, 2)
#define __NR_statfs64 186
__SYSCALL(186, sys_statfs64, 3)
#define __NR_fstatfs64 187
__SYSCALL(187, sys_fstatfs64, 3)
/* System */
/* Resource limits, time-of-day, hostname, module loading, and
 * scheduler-policy syscalls (188-222). */
#define __NR_setrlimit 188
__SYSCALL(188, sys_setrlimit, 2)
#define __NR_getrlimit 189
__SYSCALL(189, sys_getrlimit, 2)
#define __NR_getrusage 190
__SYSCALL(190, sys_getrusage, 2)
#define __NR_futex 191
__SYSCALL(191, sys_futex, 5)
#define __NR_gettimeofday 192
__SYSCALL(192, sys_gettimeofday, 2)
#define __NR_settimeofday 193
__SYSCALL(193, sys_settimeofday, 2)
#define __NR_adjtimex 194
__SYSCALL(194, sys_adjtimex, 1)
#define __NR_nanosleep 195
__SYSCALL(195, sys_nanosleep, 2)
#define __NR_getgroups 196
__SYSCALL(196, sys_getgroups, 2)
#define __NR_setgroups 197
__SYSCALL(197, sys_setgroups, 2)
#define __NR_sethostname 198
__SYSCALL(198, sys_sethostname, 2)
#define __NR_setdomainname 199
__SYSCALL(199, sys_setdomainname, 2)
#define __NR_syslog 200
__SYSCALL(200, sys_syslog, 3)
#define __NR_vhangup 201
__SYSCALL(201, sys_vhangup, 0)
#define __NR_uselib 202
__SYSCALL(202, sys_uselib, 1)
#define __NR_reboot 203
__SYSCALL(203, sys_reboot, 3)
#define __NR_quotactl 204
__SYSCALL(204, sys_quotactl, 4)
#define __NR_nfsservctl 205
__SYSCALL(205, sys_ni_syscall, 0) /* old nfsservctl */
#define __NR__sysctl 206
__SYSCALL(206, sys_sysctl, 1)
#define __NR_bdflush 207
__SYSCALL(207, sys_bdflush, 2)
#define __NR_uname 208
__SYSCALL(208, sys_newuname, 1)
#define __NR_sysinfo 209
__SYSCALL(209, sys_sysinfo, 1)
#define __NR_init_module 210
__SYSCALL(210, sys_init_module, 2)
#define __NR_delete_module 211
__SYSCALL(211, sys_delete_module, 1)
#define __NR_sched_setparam 212
__SYSCALL(212, sys_sched_setparam, 2)
#define __NR_sched_getparam 213
__SYSCALL(213, sys_sched_getparam, 2)
#define __NR_sched_setscheduler 214
__SYSCALL(214, sys_sched_setscheduler, 3)
#define __NR_sched_getscheduler 215
__SYSCALL(215, sys_sched_getscheduler, 1)
#define __NR_sched_get_priority_max 216
__SYSCALL(216, sys_sched_get_priority_max, 1)
#define __NR_sched_get_priority_min 217
__SYSCALL(217, sys_sched_get_priority_min, 1)
#define __NR_sched_rr_get_interval 218
__SYSCALL(218, sys_sched_rr_get_interval, 2)
#define __NR_sched_yield 219
__SYSCALL(219, sys_sched_yield, 0)
/* NOTE(review): numbers 220-221 have no entries in this chunk -- confirm
 * the gap is intentional (the dispatch table fills holes via sys_ni_syscall). */
#define __NR_available222 222
__SYSCALL(222, sys_ni_syscall, 0)
/* Signal Handling */
/* Signal syscalls (223-231). xtensa_sigaltstack and xtensa_rt_sigreturn
 * are presumably arch-specific handlers (sigreturn must rebuild the
 * register-window state) -- confirm in arch/xtensa/kernel/signal.c. */
#define __NR_restart_syscall 223
__SYSCALL(223, sys_restart_syscall, 0)
#define __NR_sigaltstack 224
__SYSCALL(224, xtensa_sigaltstack, 2)
#define __NR_rt_sigreturn 225
__SYSCALL(225, xtensa_rt_sigreturn, 1)
#define __NR_rt_sigaction 226
__SYSCALL(226, sys_rt_sigaction, 4)
#define __NR_rt_sigprocmask 227
__SYSCALL(227, sys_rt_sigprocmask, 4)
#define __NR_rt_sigpending 228
__SYSCALL(228, sys_rt_sigpending, 2)
#define __NR_rt_sigtimedwait 229
__SYSCALL(229, sys_rt_sigtimedwait, 4)
#define __NR_rt_sigqueueinfo 230
__SYSCALL(230, sys_rt_sigqueueinfo, 3)
#define __NR_rt_sigsuspend 231
__SYSCALL(231, sys_rt_sigsuspend, 2)
/* Message */
/* POSIX message queue syscalls (232-238). */
#define __NR_mq_open 232
__SYSCALL(232, sys_mq_open, 4)
#define __NR_mq_unlink 233
__SYSCALL(233, sys_mq_unlink, 1)
#define __NR_mq_timedsend 234
__SYSCALL(234, sys_mq_timedsend, 5)
#define __NR_mq_timedreceive 235
__SYSCALL(235, sys_mq_timedreceive, 5)
#define __NR_mq_notify 236
__SYSCALL(236, sys_mq_notify, 2)
#define __NR_mq_getsetattr 237
__SYSCALL(237, sys_mq_getsetattr, 3)
#define __NR_available238 238
__SYSCALL(238, sys_ni_syscall, 0)
/* IO */
/* Kernel AIO and POSIX clock syscalls (239-247). */
#define __NR_io_setup 239
__SYSCALL(239, sys_io_setup, 2)
#define __NR_io_destroy 240
__SYSCALL(240, sys_io_destroy, 1)
#define __NR_io_submit 241
__SYSCALL(241, sys_io_submit, 3)
#define __NR_io_getevents 242
__SYSCALL(242, sys_io_getevents, 5)
#define __NR_io_cancel 243
__SYSCALL(243, sys_io_cancel, 3)
#define __NR_clock_settime 244
__SYSCALL(244, sys_clock_settime, 2)
#define __NR_clock_gettime 245
__SYSCALL(245, sys_clock_gettime, 2)
#define __NR_clock_getres 246
__SYSCALL(246, sys_clock_getres, 2)
#define __NR_clock_nanosleep 247
__SYSCALL(247, sys_clock_nanosleep, 4)
/* Timer */
/* POSIX per-process timer syscalls (248-252). */
#define __NR_timer_create 248
__SYSCALL(248, sys_timer_create, 3)
#define __NR_timer_delete 249
__SYSCALL(249, sys_timer_delete, 1)
#define __NR_timer_settime 250
__SYSCALL(250, sys_timer_settime, 4)
#define __NR_timer_gettime 251
__SYSCALL(251, sys_timer_gettime, 2)
#define __NR_timer_getoverrun 252
__SYSCALL(252, sys_timer_getoverrun, 1)
/* System */
/* Key management, readahead, NUMA memory policy, and splice-family
 * syscalls (253-271). NOTE(review): several multi-argument calls here
 * (migrate_pages, move_pages, splice, tee, vmsplice) list an nargs of 0;
 * this suggests the nargs field is informational only -- confirm it is
 * unused by the dispatcher before relying on it. */
#define __NR_reserved253 253
__SYSCALL(253, sys_ni_syscall, 0)
#define __NR_lookup_dcookie 254
__SYSCALL(254, sys_lookup_dcookie, 4)
#define __NR_available255 255
__SYSCALL(255, sys_ni_syscall, 0)
#define __NR_add_key 256
__SYSCALL(256, sys_add_key, 5)
#define __NR_request_key 257
__SYSCALL(257, sys_request_key, 5)
#define __NR_keyctl 258
__SYSCALL(258, sys_keyctl, 5)
#define __NR_available259 259
__SYSCALL(259, sys_ni_syscall, 0)
#define __NR_readahead 260
__SYSCALL(260, sys_readahead, 5)
#define __NR_remap_file_pages 261
__SYSCALL(261, sys_remap_file_pages, 5)
#define __NR_migrate_pages 262
__SYSCALL(262, sys_migrate_pages, 0)
#define __NR_mbind 263
__SYSCALL(263, sys_mbind, 6)
#define __NR_get_mempolicy 264
__SYSCALL(264, sys_get_mempolicy, 5)
#define __NR_set_mempolicy 265
__SYSCALL(265, sys_set_mempolicy, 3)
#define __NR_unshare 266
__SYSCALL(266, sys_unshare, 1)
#define __NR_move_pages 267
__SYSCALL(267, sys_move_pages, 0)
#define __NR_splice 268
__SYSCALL(268, sys_splice, 0)
#define __NR_tee 269
__SYSCALL(269, sys_tee, 0)
#define __NR_vmsplice 270
__SYSCALL(270, sys_vmsplice, 0)
#define __NR_available271 271
__SYSCALL(271, sys_ni_syscall, 0)
/* Polling, epoll, inotify, I/O priority, and robust-futex syscalls
 * (272-287). __NR_kexec_load (281) is defined but routed to
 * sys_ni_syscall, i.e. the number is reserved without an implementation. */
#define __NR_pselect6 272
__SYSCALL(272, sys_pselect6, 0)
#define __NR_ppoll 273
__SYSCALL(273, sys_ppoll, 0)
#define __NR_epoll_pwait 274
__SYSCALL(274, sys_epoll_pwait, 0)
#define __NR_epoll_create1 275
__SYSCALL(275, sys_epoll_create1, 1)
#define __NR_inotify_init 276
__SYSCALL(276, sys_inotify_init, 0)
#define __NR_inotify_add_watch 277
__SYSCALL(277, sys_inotify_add_watch, 3)
#define __NR_inotify_rm_watch 278
__SYSCALL(278, sys_inotify_rm_watch, 2)
#define __NR_inotify_init1 279
__SYSCALL(279, sys_inotify_init1, 1)
#define __NR_getcpu 280
__SYSCALL(280, sys_getcpu, 0)
#define __NR_kexec_load 281
__SYSCALL(281, sys_ni_syscall, 0)
#define __NR_ioprio_set 282
__SYSCALL(282, sys_ioprio_set, 2)
#define __NR_ioprio_get 283
__SYSCALL(283, sys_ioprio_get, 3)
#define __NR_set_robust_list 284
__SYSCALL(284, sys_set_robust_list, 3)
#define __NR_get_robust_list 285
__SYSCALL(285, sys_get_robust_list, 3)
#define __NR_available286 286
__SYSCALL(286, sys_ni_syscall, 0)
#define __NR_available287 287
__SYSCALL(287, sys_ni_syscall, 0)
/* Relative File Operations */
/* dirfd-relative (*at) syscalls plus signalfd (288-304). */
#define __NR_openat 288
__SYSCALL(288, sys_openat, 4)
#define __NR_mkdirat 289
__SYSCALL(289, sys_mkdirat, 3)
#define __NR_mknodat 290
__SYSCALL(290, sys_mknodat, 4)
#define __NR_unlinkat 291
__SYSCALL(291, sys_unlinkat, 3)
#define __NR_renameat 292
__SYSCALL(292, sys_renameat, 4)
#define __NR_linkat 293
__SYSCALL(293, sys_linkat, 5)
#define __NR_symlinkat 294
__SYSCALL(294, sys_symlinkat, 3)
#define __NR_readlinkat 295
__SYSCALL(295, sys_readlinkat, 4)
#define __NR_utimensat 296
__SYSCALL(296, sys_utimensat, 0)
#define __NR_fchownat 297
__SYSCALL(297, sys_fchownat, 5)
#define __NR_futimesat 298
__SYSCALL(298, sys_futimesat, 4)
#define __NR_fstatat64 299
__SYSCALL(299, sys_fstatat64, 0)
#define __NR_fchmodat 300
__SYSCALL(300, sys_fchmodat, 4)
#define __NR_faccessat 301
__SYSCALL(301, sys_faccessat, 4)
#define __NR_available302 302
__SYSCALL(302, sys_ni_syscall, 0)
#define __NR_available303 303
__SYSCALL(303, sys_ni_syscall, 0)
#define __NR_signalfd 304
__SYSCALL(304, sys_signalfd, 3)
/* 305 was __NR_timerfd */
/* eventfd/timerfd family, vectored I/O, fanotify, and newer syscalls
 * through kcmp (305-331). */
__SYSCALL(305, sys_ni_syscall, 0)
#define __NR_eventfd 306
__SYSCALL(306, sys_eventfd, 1)
#define __NR_recvmmsg 307
__SYSCALL(307, sys_recvmmsg, 5)
#define __NR_setns 308
__SYSCALL(308, sys_setns, 2)
#define __NR_signalfd4 309
__SYSCALL(309, sys_signalfd4, 4)
#define __NR_dup3 310
__SYSCALL(310, sys_dup3, 3)
#define __NR_pipe2 311
__SYSCALL(311, sys_pipe2, 2)
#define __NR_timerfd_create 312
__SYSCALL(312, sys_timerfd_create, 2)
#define __NR_timerfd_settime 313
__SYSCALL(313, sys_timerfd_settime, 4)
#define __NR_timerfd_gettime 314
__SYSCALL(314, sys_timerfd_gettime, 2)
#define __NR_available315 315
__SYSCALL(315, sys_ni_syscall, 0)
#define __NR_eventfd2 316
__SYSCALL(316, sys_eventfd2, 2)
#define __NR_preadv 317
__SYSCALL(317, sys_preadv, 5)
#define __NR_pwritev 318
__SYSCALL(318, sys_pwritev, 5)
#define __NR_available319 319
__SYSCALL(319, sys_ni_syscall, 0)
#define __NR_fanotify_init 320
__SYSCALL(320, sys_fanotify_init, 2)
#define __NR_fanotify_mark 321
__SYSCALL(321, sys_fanotify_mark, 6)
#define __NR_process_vm_readv 322
__SYSCALL(322, sys_process_vm_readv, 6)
#define __NR_process_vm_writev 323
__SYSCALL(323, sys_process_vm_writev, 6)
#define __NR_name_to_handle_at 324
__SYSCALL(324, sys_name_to_handle_at, 5)
#define __NR_open_by_handle_at 325
__SYSCALL(325, sys_open_by_handle_at, 3)
/* NOTE(review): 326 is named __NR_sync_file_range but dispatches to
 * sys_sync_file_range2 (the variant with reordered arguments) -- confirm
 * the name/handler pairing is intentional for this architecture. */
#define __NR_sync_file_range 326
__SYSCALL(326, sys_sync_file_range2, 6)
#define __NR_perf_event_open 327
__SYSCALL(327, sys_perf_event_open, 5)
#define __NR_rt_tgsigqueueinfo 328
__SYSCALL(328, sys_rt_tgsigqueueinfo, 4)
#define __NR_clock_adjtime 329
__SYSCALL(329, sys_clock_adjtime, 2)
#define __NR_prlimit64 330
__SYSCALL(330, sys_prlimit64, 4)
#define __NR_kcmp 331
__SYSCALL(331, sys_kcmp, 5)
/* Total number of table slots (highest syscall number + 1). */
#define __NR_syscall_count 332
/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 * a2 a6 a3 a4 a5
 */
/* Sub-operation codes for the sysxtensa fast syscall above. */
#define SYS_XTENSA_RESERVED 0 /* don't use this */
#define SYS_XTENSA_ATOMIC_SET 1 /* set variable */
#define SYS_XTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
#define SYS_XTENSA_ATOMIC_ADD 3 /* add to memory */
#define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
#define SYS_XTENSA_COUNT 5 /* count */
#endif /* _UAPI_XTENSA_UNISTD_H */
...@@ -170,15 +170,15 @@ ENTRY(fast_unaligned) ...@@ -170,15 +170,15 @@ ENTRY(fast_unaligned)
s32i a7, a2, PT_AREG7 s32i a7, a2, PT_AREG7
s32i a8, a2, PT_AREG8 s32i a8, a2, PT_AREG8
rsr a0, DEPC rsr a0, depc
xsr a3, EXCSAVE_1 xsr a3, excsave1
s32i a0, a2, PT_AREG2 s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
/* Keep value of SAR in a0 */ /* Keep value of SAR in a0 */
rsr a0, SAR rsr a0, sar
rsr a8, EXCVADDR # load unaligned memory address rsr a8, excvaddr # load unaligned memory address
/* Now, identify one of the following load/store instructions. /* Now, identify one of the following load/store instructions.
* *
...@@ -197,7 +197,7 @@ ENTRY(fast_unaligned) ...@@ -197,7 +197,7 @@ ENTRY(fast_unaligned)
/* Extract the instruction that caused the unaligned access. */ /* Extract the instruction that caused the unaligned access. */
rsr a7, EPC_1 # load exception address rsr a7, epc1 # load exception address
movi a3, ~3 movi a3, ~3
and a3, a3, a7 # mask lower bits and a3, a3, a7 # mask lower bits
...@@ -275,16 +275,16 @@ ENTRY(fast_unaligned) ...@@ -275,16 +275,16 @@ ENTRY(fast_unaligned)
1: 1:
#if XCHAL_HAVE_LOOPS #if XCHAL_HAVE_LOOPS
rsr a5, LEND # check if we reached LEND rsr a5, lend # check if we reached LEND
bne a7, a5, 1f bne a7, a5, 1f
rsr a5, LCOUNT # and LCOUNT != 0 rsr a5, lcount # and LCOUNT != 0
beqz a5, 1f beqz a5, 1f
addi a5, a5, -1 # decrement LCOUNT and set addi a5, a5, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN rsr a7, lbeg # set PC to LBEGIN
wsr a5, LCOUNT wsr a5, lcount
#endif #endif
1: wsr a7, EPC_1 # skip load instruction 1: wsr a7, epc1 # skip load instruction
extui a4, a4, INSN_T, 4 # extract target register extui a4, a4, INSN_T, 4 # extract target register
movi a5, .Lload_table movi a5, .Lload_table
addx8 a4, a4, a5 addx8 a4, a4, a5
...@@ -355,16 +355,16 @@ ENTRY(fast_unaligned) ...@@ -355,16 +355,16 @@ ENTRY(fast_unaligned)
1: 1:
#if XCHAL_HAVE_LOOPS #if XCHAL_HAVE_LOOPS
rsr a4, LEND # check if we reached LEND rsr a4, lend # check if we reached LEND
bne a7, a4, 1f bne a7, a4, 1f
rsr a4, LCOUNT # and LCOUNT != 0 rsr a4, lcount # and LCOUNT != 0
beqz a4, 1f beqz a4, 1f
addi a4, a4, -1 # decrement LCOUNT and set addi a4, a4, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN rsr a7, lbeg # set PC to LBEGIN
wsr a4, LCOUNT wsr a4, lcount
#endif #endif
1: wsr a7, EPC_1 # skip store instruction 1: wsr a7, epc1 # skip store instruction
movi a4, ~3 movi a4, ~3
and a4, a4, a8 # align memory address and a4, a4, a8 # align memory address
...@@ -406,7 +406,7 @@ ENTRY(fast_unaligned) ...@@ -406,7 +406,7 @@ ENTRY(fast_unaligned)
.Lexit: .Lexit:
movi a4, 0 movi a4, 0
rsr a3, EXCSAVE_1 rsr a3, excsave1
s32i a4, a3, EXC_TABLE_FIXUP s32i a4, a3, EXC_TABLE_FIXUP
/* Restore working register */ /* Restore working register */
...@@ -420,7 +420,7 @@ ENTRY(fast_unaligned) ...@@ -420,7 +420,7 @@ ENTRY(fast_unaligned)
/* restore SAR and return */ /* restore SAR and return */
wsr a0, SAR wsr a0, sar
l32i a0, a2, PT_AREG0 l32i a0, a2, PT_AREG0
l32i a2, a2, PT_AREG2 l32i a2, a2, PT_AREG2
rfe rfe
...@@ -438,10 +438,10 @@ ENTRY(fast_unaligned) ...@@ -438,10 +438,10 @@ ENTRY(fast_unaligned)
l32i a6, a2, PT_AREG6 l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5 l32i a5, a2, PT_AREG5
l32i a4, a2, PT_AREG4 l32i a4, a2, PT_AREG4
wsr a0, SAR wsr a0, sar
mov a1, a2 mov a1, a2
rsr a0, PS rsr a0, ps
bbsi.l a2, PS_UM_BIT, 1f # jump if user mode bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
movi a0, _kernel_exception movi a0, _kernel_exception
......
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
/* IO protection is currently unsupported. */ /* IO protection is currently unsupported. */
ENTRY(fast_io_protect) ENTRY(fast_io_protect)
wsr a0, EXCSAVE_1 wsr a0, excsave1
movi a0, unrecoverable_exception movi a0, unrecoverable_exception
callx0 a0 callx0 a0
...@@ -220,7 +220,7 @@ ENTRY(coprocessor_restore) ...@@ -220,7 +220,7 @@ ENTRY(coprocessor_restore)
*/ */
ENTRY(fast_coprocessor_double) ENTRY(fast_coprocessor_double)
wsr a0, EXCSAVE_1 wsr a0, excsave1
movi a0, unrecoverable_exception movi a0, unrecoverable_exception
callx0 a0 callx0 a0
...@@ -229,13 +229,13 @@ ENTRY(fast_coprocessor) ...@@ -229,13 +229,13 @@ ENTRY(fast_coprocessor)
/* Save remaining registers a1-a3 and SAR */ /* Save remaining registers a1-a3 and SAR */
xsr a3, EXCSAVE_1 xsr a3, excsave1
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
rsr a3, SAR rsr a3, sar
s32i a1, a2, PT_AREG1 s32i a1, a2, PT_AREG1
s32i a3, a2, PT_SAR s32i a3, a2, PT_SAR
mov a1, a2 mov a1, a2
rsr a2, DEPC rsr a2, depc
s32i a2, a1, PT_AREG2 s32i a2, a1, PT_AREG2
/* /*
...@@ -248,17 +248,17 @@ ENTRY(fast_coprocessor) ...@@ -248,17 +248,17 @@ ENTRY(fast_coprocessor)
/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */ /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
rsr a3, EXCCAUSE rsr a3, exccause
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/ /* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
ssl a3 # SAR: 32 - coprocessor_number ssl a3 # SAR: 32 - coprocessor_number
movi a2, 1 movi a2, 1
rsr a0, CPENABLE rsr a0, cpenable
sll a2, a2 sll a2, a2
or a0, a0, a2 or a0, a0, a2
wsr a0, CPENABLE wsr a0, cpenable
rsync rsync
/* Retrieve previous owner. (a3 still holds CP number) */ /* Retrieve previous owner. (a3 still holds CP number) */
...@@ -291,7 +291,7 @@ ENTRY(fast_coprocessor) ...@@ -291,7 +291,7 @@ ENTRY(fast_coprocessor)
/* Note that only a0 and a1 were preserved. */ /* Note that only a0 and a1 were preserved. */
2: rsr a3, EXCCAUSE 2: rsr a3, exccause
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
movi a0, coprocessor_owner movi a0, coprocessor_owner
addx4 a0, a3, a0 addx4 a0, a3, a0
...@@ -321,7 +321,7 @@ ENTRY(fast_coprocessor) ...@@ -321,7 +321,7 @@ ENTRY(fast_coprocessor)
l32i a0, a1, PT_SAR l32i a0, a1, PT_SAR
l32i a3, a1, PT_AREG3 l32i a3, a1, PT_AREG3
l32i a2, a1, PT_AREG2 l32i a2, a1, PT_AREG2
wsr a0, SAR wsr a0, sar
l32i a0, a1, PT_AREG0 l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1 l32i a1, a1, PT_AREG1
......
...@@ -112,8 +112,8 @@ ENTRY(user_exception) ...@@ -112,8 +112,8 @@ ENTRY(user_exception)
/* Save a2, a3, and depc, restore excsave_1 and set SP. */ /* Save a2, a3, and depc, restore excsave_1 and set SP. */
xsr a3, EXCSAVE_1 xsr a3, excsave1
rsr a0, DEPC rsr a0, depc
s32i a1, a2, PT_AREG1 s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG2 s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
...@@ -125,16 +125,16 @@ _user_exception: ...@@ -125,16 +125,16 @@ _user_exception:
/* Save SAR and turn off single stepping */ /* Save SAR and turn off single stepping */
movi a2, 0 movi a2, 0
rsr a3, SAR rsr a3, sar
xsr a2, ICOUNTLEVEL xsr a2, icountlevel
s32i a3, a1, PT_SAR s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */ /* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
rsr a2, WINDOWBASE rsr a2, windowbase
rsr a3, WINDOWSTART rsr a3, windowstart
ssr a2 ssr a2
s32i a2, a1, PT_WINDOWBASE s32i a2, a1, PT_WINDOWBASE
s32i a3, a1, PT_WINDOWSTART s32i a3, a1, PT_WINDOWSTART
...@@ -205,12 +205,12 @@ _user_exception: ...@@ -205,12 +205,12 @@ _user_exception:
/* WINDOWBASE still in SAR! */ /* WINDOWBASE still in SAR! */
rsr a2, SAR # original WINDOWBASE rsr a2, sar # original WINDOWBASE
movi a3, 1 movi a3, 1
ssl a2 ssl a2
sll a3, a3 sll a3, a3
wsr a3, WINDOWSTART # set corresponding WINDOWSTART bit wsr a3, windowstart # set corresponding WINDOWSTART bit
wsr a2, WINDOWBASE # and WINDOWSTART wsr a2, windowbase # and WINDOWSTART
rsync rsync
/* We are back to the original stack pointer (a1) */ /* We are back to the original stack pointer (a1) */
...@@ -252,8 +252,8 @@ ENTRY(kernel_exception) ...@@ -252,8 +252,8 @@ ENTRY(kernel_exception)
/* Save a0, a2, a3, DEPC and set SP. */ /* Save a0, a2, a3, DEPC and set SP. */
xsr a3, EXCSAVE_1 # restore a3, excsave_1 xsr a3, excsave1 # restore a3, excsave_1
rsr a0, DEPC # get a2 rsr a0, depc # get a2
s32i a1, a2, PT_AREG1 s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG2 s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
...@@ -265,16 +265,16 @@ _kernel_exception: ...@@ -265,16 +265,16 @@ _kernel_exception:
/* Save SAR and turn off single stepping */ /* Save SAR and turn off single stepping */
movi a2, 0 movi a2, 0
rsr a3, SAR rsr a3, sar
xsr a2, ICOUNTLEVEL xsr a2, icountlevel
s32i a3, a1, PT_SAR s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */ /* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
rsr a2, WINDOWBASE # don't need to save these, we only rsr a2, windowbase # don't need to save these, we only
rsr a3, WINDOWSTART # need shifted windowstart: windowmask rsr a3, windowstart # need shifted windowstart: windowmask
ssr a2 ssr a2
slli a2, a3, 32-WSBITS slli a2, a3, 32-WSBITS
src a2, a3, a2 src a2, a3, a2
...@@ -323,24 +323,24 @@ common_exception: ...@@ -323,24 +323,24 @@ common_exception:
/* Save some registers, disable loops and clear the syscall flag. */ /* Save some registers, disable loops and clear the syscall flag. */
rsr a2, DEBUGCAUSE rsr a2, debugcause
rsr a3, EPC_1 rsr a3, epc1
s32i a2, a1, PT_DEBUGCAUSE s32i a2, a1, PT_DEBUGCAUSE
s32i a3, a1, PT_PC s32i a3, a1, PT_PC
movi a2, -1 movi a2, -1
rsr a3, EXCVADDR rsr a3, excvaddr
s32i a2, a1, PT_SYSCALL s32i a2, a1, PT_SYSCALL
movi a2, 0 movi a2, 0
s32i a3, a1, PT_EXCVADDR s32i a3, a1, PT_EXCVADDR
xsr a2, LCOUNT xsr a2, lcount
s32i a2, a1, PT_LCOUNT s32i a2, a1, PT_LCOUNT
/* It is now save to restore the EXC_TABLE_FIXUP variable. */ /* It is now save to restore the EXC_TABLE_FIXUP variable. */
rsr a0, EXCCAUSE rsr a0, exccause
movi a3, 0 movi a3, 0
rsr a2, EXCSAVE_1 rsr a2, excsave1
s32i a0, a1, PT_EXCCAUSE s32i a0, a1, PT_EXCCAUSE
s32i a3, a2, EXC_TABLE_FIXUP s32i a3, a2, EXC_TABLE_FIXUP
...@@ -352,22 +352,22 @@ common_exception: ...@@ -352,22 +352,22 @@ common_exception:
* (interrupts disabled) and if this exception is not an interrupt. * (interrupts disabled) and if this exception is not an interrupt.
*/ */
rsr a3, PS rsr a3, ps
addi a0, a0, -4 addi a0, a0, -4
movi a2, 1 movi a2, 1
extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0] extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
moveqz a3, a2, a0 # a3 = 1 iff interrupt exception moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
movi a2, 1 << PS_WOE_BIT movi a2, 1 << PS_WOE_BIT
or a3, a3, a2 or a3, a3, a2
rsr a0, EXCCAUSE rsr a0, exccause
xsr a3, PS xsr a3, ps
s32i a3, a1, PT_PS # save ps s32i a3, a1, PT_PS # save ps
/* Save LBEG, LEND */ /* Save lbeg, lend */
rsr a2, LBEG rsr a2, lbeg
rsr a3, LEND rsr a3, lend
s32i a2, a1, PT_LBEG s32i a2, a1, PT_LBEG
s32i a3, a1, PT_LEND s32i a3, a1, PT_LEND
...@@ -432,7 +432,7 @@ common_exception_return: ...@@ -432,7 +432,7 @@ common_exception_return:
load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
wsr a3, PS /* disable interrupts */ wsr a3, ps /* disable interrupts */
_bbci.l a3, PS_UM_BIT, kernel_exception_exit _bbci.l a3, PS_UM_BIT, kernel_exception_exit
...@@ -444,12 +444,12 @@ user_exception_exit: ...@@ -444,12 +444,12 @@ user_exception_exit:
l32i a2, a1, PT_WINDOWBASE l32i a2, a1, PT_WINDOWBASE
l32i a3, a1, PT_WINDOWSTART l32i a3, a1, PT_WINDOWSTART
wsr a1, DEPC # use DEPC as temp storage wsr a1, depc # use DEPC as temp storage
wsr a3, WINDOWSTART # restore WINDOWSTART wsr a3, windowstart # restore WINDOWSTART
ssr a2 # preserve user's WB in the SAR ssr a2 # preserve user's WB in the SAR
wsr a2, WINDOWBASE # switch to user's saved WB wsr a2, windowbase # switch to user's saved WB
rsync rsync
rsr a1, DEPC # restore stack pointer rsr a1, depc # restore stack pointer
l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9) l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
rotw -1 # we restore a4..a7 rotw -1 # we restore a4..a7
_bltui a6, 16, 1f # only have to restore current window? _bltui a6, 16, 1f # only have to restore current window?
...@@ -475,8 +475,8 @@ user_exception_exit: ...@@ -475,8 +475,8 @@ user_exception_exit:
/* Clear unrestored registers (don't leak anything to user-land */ /* Clear unrestored registers (don't leak anything to user-land */
1: rsr a0, WINDOWBASE 1: rsr a0, windowbase
rsr a3, SAR rsr a3, sar
sub a3, a0, a3 sub a3, a0, a3
beqz a3, 2f beqz a3, 2f
extui a3, a3, 0, WBBITS extui a3, a3, 0, WBBITS
...@@ -556,7 +556,7 @@ kernel_exception_exit: ...@@ -556,7 +556,7 @@ kernel_exception_exit:
/* Test WINDOWSTART now. If spilled, do the movsp */ /* Test WINDOWSTART now. If spilled, do the movsp */
rsr a3, WINDOWSTART rsr a3, windowstart
addi a0, a3, -1 addi a0, a3, -1
and a3, a3, a0 and a3, a3, a0
_bnez a3, common_exception_exit _bnez a3, common_exception_exit
...@@ -604,24 +604,24 @@ common_exception_exit: ...@@ -604,24 +604,24 @@ common_exception_exit:
1: l32i a2, a1, PT_PC 1: l32i a2, a1, PT_PC
l32i a3, a1, PT_SAR l32i a3, a1, PT_SAR
wsr a2, EPC_1 wsr a2, epc1
wsr a3, SAR wsr a3, sar
/* Restore LBEG, LEND, LCOUNT */ /* Restore LBEG, LEND, LCOUNT */
l32i a2, a1, PT_LBEG l32i a2, a1, PT_LBEG
l32i a3, a1, PT_LEND l32i a3, a1, PT_LEND
wsr a2, LBEG wsr a2, lbeg
l32i a2, a1, PT_LCOUNT l32i a2, a1, PT_LCOUNT
wsr a3, LEND wsr a3, lend
wsr a2, LCOUNT wsr a2, lcount
/* We control single stepping through the ICOUNTLEVEL register. */ /* We control single stepping through the ICOUNTLEVEL register. */
l32i a2, a1, PT_ICOUNTLEVEL l32i a2, a1, PT_ICOUNTLEVEL
movi a3, -2 movi a3, -2
wsr a2, ICOUNTLEVEL wsr a2, icountlevel
wsr a3, ICOUNT wsr a3, icount
/* Check if it was double exception. */ /* Check if it was double exception. */
...@@ -636,7 +636,7 @@ common_exception_exit: ...@@ -636,7 +636,7 @@ common_exception_exit:
l32i a1, a1, PT_AREG1 l32i a1, a1, PT_AREG1
rfe rfe
1: wsr a0, DEPC 1: wsr a0, depc
l32i a0, a1, PT_AREG0 l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1 l32i a1, a1, PT_AREG1
rfde rfde
...@@ -651,25 +651,25 @@ common_exception_exit: ...@@ -651,25 +651,25 @@ common_exception_exit:
ENTRY(debug_exception) ENTRY(debug_exception)
rsr a0, EPS + XCHAL_DEBUGLEVEL rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
bbsi.l a0, PS_EXCM_BIT, 1f # exception mode bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
/* Set EPC_1 and EXCCAUSE */ /* Set EPC1 and EXCCAUSE */
wsr a2, DEPC # save a2 temporarily wsr a2, depc # save a2 temporarily
rsr a2, EPC + XCHAL_DEBUGLEVEL rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
wsr a2, EPC_1 wsr a2, epc1
movi a2, EXCCAUSE_MAPPED_DEBUG movi a2, EXCCAUSE_MAPPED_DEBUG
wsr a2, EXCCAUSE wsr a2, exccause
/* Restore PS to the value before the debug exc but with PS.EXCM set.*/ /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
movi a2, 1 << PS_EXCM_BIT movi a2, 1 << PS_EXCM_BIT
or a2, a0, a2 or a2, a0, a2
movi a0, debug_exception # restore a3, debug jump vector movi a0, debug_exception # restore a3, debug jump vector
wsr a2, PS wsr a2, ps
xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
/* Switch to kernel/user stack, restore jump vector, and save a0 */ /* Switch to kernel/user stack, restore jump vector, and save a0 */
...@@ -680,19 +680,19 @@ ENTRY(debug_exception) ...@@ -680,19 +680,19 @@ ENTRY(debug_exception)
movi a0, 0 movi a0, 0
s32i a1, a2, PT_AREG1 s32i a1, a2, PT_AREG1
s32i a0, a2, PT_DEPC # mark it as a regular exception s32i a0, a2, PT_DEPC # mark it as a regular exception
xsr a0, DEPC xsr a0, depc
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
s32i a0, a2, PT_AREG2 s32i a0, a2, PT_AREG2
mov a1, a2 mov a1, a2
j _kernel_exception j _kernel_exception
2: rsr a2, EXCSAVE_1 2: rsr a2, excsave1
l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
s32i a0, a2, PT_AREG0 s32i a0, a2, PT_AREG0
movi a0, 0 movi a0, 0
s32i a1, a2, PT_AREG1 s32i a1, a2, PT_AREG1
s32i a0, a2, PT_DEPC s32i a0, a2, PT_DEPC
xsr a0, DEPC xsr a0, depc
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
s32i a0, a2, PT_AREG2 s32i a0, a2, PT_AREG2
mov a1, a2 mov a1, a2
...@@ -732,12 +732,12 @@ ENTRY(unrecoverable_exception) ...@@ -732,12 +732,12 @@ ENTRY(unrecoverable_exception)
movi a0, 1 movi a0, 1
movi a1, 0 movi a1, 0
wsr a0, WINDOWSTART wsr a0, windowstart
wsr a1, WINDOWBASE wsr a1, windowbase
rsync rsync
movi a1, (1 << PS_WOE_BIT) | 1 movi a1, (1 << PS_WOE_BIT) | 1
wsr a1, PS wsr a1, ps
rsync rsync
movi a1, init_task movi a1, init_task
...@@ -793,7 +793,7 @@ ENTRY(fast_alloca) ...@@ -793,7 +793,7 @@ ENTRY(fast_alloca)
l32i a0, a2, PT_DEPC l32i a0, a2, PT_DEPC
_bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
rsr a0, DEPC # get a2 rsr a0, depc # get a2
s32i a4, a2, PT_AREG4 # save a4 and s32i a4, a2, PT_AREG4 # save a4 and
s32i a0, a2, PT_AREG2 # a2 to stack s32i a0, a2, PT_AREG2 # a2 to stack
...@@ -804,8 +804,8 @@ ENTRY(fast_alloca) ...@@ -804,8 +804,8 @@ ENTRY(fast_alloca)
/* Restore a3, excsave_1 */ /* Restore a3, excsave_1 */
xsr a3, EXCSAVE_1 # make sure excsave_1 is valid for dbl. xsr a3, excsave1 # make sure excsave_1 is valid for dbl.
rsr a4, EPC_1 # get exception address rsr a4, epc1 # get exception address
s32i a3, a2, PT_AREG3 # save a3 to stack s32i a3, a2, PT_AREG3 # save a3 to stack
#ifdef ALLOCA_EXCEPTION_IN_IRAM #ifdef ALLOCA_EXCEPTION_IN_IRAM
...@@ -820,7 +820,7 @@ ENTRY(fast_alloca) ...@@ -820,7 +820,7 @@ ENTRY(fast_alloca)
jx a3 jx a3
.Lunhandled_double: .Lunhandled_double:
wsr a0, EXCSAVE_1 wsr a0, excsave1
movi a0, unrecoverable_exception movi a0, unrecoverable_exception
callx0 a0 callx0 a0
...@@ -852,7 +852,7 @@ ENTRY(fast_alloca) ...@@ -852,7 +852,7 @@ ENTRY(fast_alloca)
#endif #endif
addi a4, a4, 3 # step over movsp addi a4, a4, 3 # step over movsp
_EXTUI_MOVSP_DST(a0) # extract destination register _EXTUI_MOVSP_DST(a0) # extract destination register
wsr a4, EPC_1 # save new epc_1 wsr a4, epc1 # save new epc_1
_bnei a0, 1, 1f # no 'movsp a1, ax': jump _bnei a0, 1, 1f # no 'movsp a1, ax': jump
...@@ -953,14 +953,14 @@ ENTRY(fast_syscall_kernel) ...@@ -953,14 +953,14 @@ ENTRY(fast_syscall_kernel)
/* Skip syscall. */ /* Skip syscall. */
rsr a0, EPC_1 rsr a0, epc1
addi a0, a0, 3 addi a0, a0, 3
wsr a0, EPC_1 wsr a0, epc1
l32i a0, a2, PT_DEPC l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
rsr a0, DEPC # get syscall-nr rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers _beqz a0, fast_syscall_spill_registers
_beqi a0, __NR_xtensa, fast_syscall_xtensa _beqi a0, __NR_xtensa, fast_syscall_xtensa
...@@ -970,14 +970,14 @@ ENTRY(fast_syscall_user) ...@@ -970,14 +970,14 @@ ENTRY(fast_syscall_user)
/* Skip syscall. */ /* Skip syscall. */
rsr a0, EPC_1 rsr a0, epc1
addi a0, a0, 3 addi a0, a0, 3
wsr a0, EPC_1 wsr a0, epc1
l32i a0, a2, PT_DEPC l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
rsr a0, DEPC # get syscall-nr rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers _beqz a0, fast_syscall_spill_registers
_beqi a0, __NR_xtensa, fast_syscall_xtensa _beqi a0, __NR_xtensa, fast_syscall_xtensa
...@@ -988,10 +988,10 @@ ENTRY(fast_syscall_unrecoverable) ...@@ -988,10 +988,10 @@ ENTRY(fast_syscall_unrecoverable)
/* Restore all states. */ /* Restore all states. */
l32i a0, a2, PT_AREG0 # restore a0 l32i a0, a2, PT_AREG0 # restore a0
xsr a2, DEPC # restore a2, depc xsr a2, depc # restore a2, depc
rsr a3, EXCSAVE_1 rsr a3, excsave1
wsr a0, EXCSAVE_1 wsr a0, excsave1
movi a0, unrecoverable_exception movi a0, unrecoverable_exception
callx0 a0 callx0 a0
...@@ -1047,7 +1047,7 @@ ENTRY(fast_syscall_unrecoverable) ...@@ -1047,7 +1047,7 @@ ENTRY(fast_syscall_unrecoverable)
ENTRY(fast_syscall_xtensa) ENTRY(fast_syscall_xtensa)
xsr a3, EXCSAVE_1 # restore a3, excsave1 xsr a3, excsave1 # restore a3, excsave1
s32i a7, a2, PT_AREG7 # we need an additional register s32i a7, a2, PT_AREG7 # we need an additional register
movi a7, 4 # sizeof(unsigned int) movi a7, 4 # sizeof(unsigned int)
...@@ -1124,13 +1124,13 @@ ENTRY(fast_syscall_spill_registers) ...@@ -1124,13 +1124,13 @@ ENTRY(fast_syscall_spill_registers)
movi a0, fast_syscall_spill_registers_fixup movi a0, fast_syscall_spill_registers_fixup
s32i a0, a3, EXC_TABLE_FIXUP s32i a0, a3, EXC_TABLE_FIXUP
rsr a0, WINDOWBASE rsr a0, windowbase
s32i a0, a3, EXC_TABLE_PARAM s32i a0, a3, EXC_TABLE_PARAM
/* Save a3 and SAR on stack. */ /* Save a3 and SAR on stack. */
rsr a0, SAR rsr a0, sar
xsr a3, EXCSAVE_1 # restore a3 and excsave_1 xsr a3, excsave1 # restore a3 and excsave_1
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
s32i a4, a2, PT_AREG4 s32i a4, a2, PT_AREG4
s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5 s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
...@@ -1148,7 +1148,7 @@ ENTRY(fast_syscall_spill_registers) ...@@ -1148,7 +1148,7 @@ ENTRY(fast_syscall_spill_registers)
l32i a3, a2, PT_AREG5 l32i a3, a2, PT_AREG5
l32i a4, a2, PT_AREG4 l32i a4, a2, PT_AREG4
l32i a0, a2, PT_AREG0 l32i a0, a2, PT_AREG0
wsr a3, SAR wsr a3, sar
l32i a3, a2, PT_AREG3 l32i a3, a2, PT_AREG3
/* Restore clobbered registers. */ /* Restore clobbered registers. */
...@@ -1173,8 +1173,8 @@ ENTRY(fast_syscall_spill_registers) ...@@ -1173,8 +1173,8 @@ ENTRY(fast_syscall_spill_registers)
fast_syscall_spill_registers_fixup: fast_syscall_spill_registers_fixup:
rsr a2, WINDOWBASE # get current windowbase (a2 is saved) rsr a2, windowbase # get current windowbase (a2 is saved)
xsr a0, DEPC # restore depc and a0 xsr a0, depc # restore depc and a0
ssl a2 # set shift (32 - WB) ssl a2 # set shift (32 - WB)
/* We need to make sure the current registers (a0-a3) are preserved. /* We need to make sure the current registers (a0-a3) are preserved.
...@@ -1182,12 +1182,12 @@ fast_syscall_spill_registers_fixup: ...@@ -1182,12 +1182,12 @@ fast_syscall_spill_registers_fixup:
* in WS, so that the exception handlers save them to the task stack. * in WS, so that the exception handlers save them to the task stack.
*/ */
rsr a3, EXCSAVE_1 # get spill-mask rsr a3, excsave1 # get spill-mask
slli a2, a3, 1 # shift left by one slli a2, a3, 1 # shift left by one
slli a3, a2, 32-WSBITS slli a3, a2, 32-WSBITS
src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy...... src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy......
wsr a2, WINDOWSTART # set corrected windowstart wsr a2, windowstart # set corrected windowstart
movi a3, exc_table movi a3, exc_table
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
...@@ -1201,7 +1201,7 @@ fast_syscall_spill_registers_fixup: ...@@ -1201,7 +1201,7 @@ fast_syscall_spill_registers_fixup:
* excsave_1: a3 * excsave_1: a3
*/ */
wsr a3, WINDOWBASE wsr a3, windowbase
rsync rsync
/* We are now in the original frame when we entered _spill_registers: /* We are now in the original frame when we entered _spill_registers:
...@@ -1227,7 +1227,7 @@ fast_syscall_spill_registers_fixup: ...@@ -1227,7 +1227,7 @@ fast_syscall_spill_registers_fixup:
/* Jump to the exception handler. */ /* Jump to the exception handler. */
movi a3, exc_table movi a3, exc_table
rsr a0, EXCCAUSE rsr a0, exccause
addx4 a0, a0, a3 # find entry in table addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a0, a0, EXC_TABLE_FAST_USER # load handler
jx a0 jx a0
...@@ -1236,28 +1236,28 @@ fast_syscall_spill_registers_fixup_return: ...@@ -1236,28 +1236,28 @@ fast_syscall_spill_registers_fixup_return:
/* When we return here, all registers have been restored (a2: DEPC) */ /* When we return here, all registers have been restored (a2: DEPC) */
wsr a2, DEPC # exception address wsr a2, depc # exception address
/* Restore fixup handler. */ /* Restore fixup handler. */
xsr a3, EXCSAVE_1 xsr a3, excsave1
movi a2, fast_syscall_spill_registers_fixup movi a2, fast_syscall_spill_registers_fixup
s32i a2, a3, EXC_TABLE_FIXUP s32i a2, a3, EXC_TABLE_FIXUP
rsr a2, WINDOWBASE rsr a2, windowbase
s32i a2, a3, EXC_TABLE_PARAM s32i a2, a3, EXC_TABLE_PARAM
l32i a2, a3, EXC_TABLE_KSTK l32i a2, a3, EXC_TABLE_KSTK
/* Load WB at the time the exception occurred. */ /* Load WB at the time the exception occurred. */
rsr a3, SAR # WB is still in SAR rsr a3, sar # WB is still in SAR
neg a3, a3 neg a3, a3
wsr a3, WINDOWBASE wsr a3, windowbase
rsync rsync
/* Restore a3 and return. */ /* Restore a3 and return. */
movi a3, exc_table movi a3, exc_table
xsr a3, EXCSAVE_1 xsr a3, excsave1
rfde rfde
...@@ -1283,8 +1283,8 @@ ENTRY(_spill_registers) ...@@ -1283,8 +1283,8 @@ ENTRY(_spill_registers)
* Rotate ws right so that a4 = yyxxxwww1. * Rotate ws right so that a4 = yyxxxwww1.
*/ */
rsr a4, WINDOWBASE rsr a4, windowbase
rsr a3, WINDOWSTART # a3 = xxxwww1yy rsr a3, windowstart # a3 = xxxwww1yy
ssr a4 # holds WB ssr a4 # holds WB
slli a4, a3, WSBITS slli a4, a3, WSBITS
or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
...@@ -1302,7 +1302,7 @@ ENTRY(_spill_registers) ...@@ -1302,7 +1302,7 @@ ENTRY(_spill_registers)
/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
wsr a3, WINDOWSTART # save shifted windowstart wsr a3, windowstart # save shifted windowstart
neg a4, a3 neg a4, a3
and a3, a4, a3 # first bit set from right: 000010000 and a3, a4, a3 # first bit set from right: 000010000
...@@ -1311,12 +1311,12 @@ ENTRY(_spill_registers) ...@@ -1311,12 +1311,12 @@ ENTRY(_spill_registers)
sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
ssr a4 # save in SAR for later. ssr a4 # save in SAR for later.
rsr a3, WINDOWBASE rsr a3, windowbase
add a3, a3, a4 add a3, a3, a4
wsr a3, WINDOWBASE wsr a3, windowbase
rsync rsync
rsr a3, WINDOWSTART rsr a3, windowstart
srl a3, a3 # shift windowstart srl a3, a3 # shift windowstart
/* WB is now just one frame below the oldest frame in the register /* WB is now just one frame below the oldest frame in the register
...@@ -1364,11 +1364,11 @@ ENTRY(_spill_registers) ...@@ -1364,11 +1364,11 @@ ENTRY(_spill_registers)
.Lexit: /* Done. Do the final rotation, set WS, and return. */ .Lexit: /* Done. Do the final rotation, set WS, and return. */
rotw 1 rotw 1
rsr a3, WINDOWBASE rsr a3, windowbase
ssl a3 ssl a3
movi a3, 1 movi a3, 1
sll a3, a3 sll a3, a3
wsr a3, WINDOWSTART wsr a3, windowstart
ret ret
.Lc4: s32e a4, a9, -16 .Lc4: s32e a4, a9, -16
...@@ -1429,7 +1429,7 @@ ENTRY(_spill_registers) ...@@ -1429,7 +1429,7 @@ ENTRY(_spill_registers)
* however, this condition is unrecoverable in kernel space. * however, this condition is unrecoverable in kernel space.
*/ */
rsr a0, PS rsr a0, ps
_bbci.l a0, PS_UM_BIT, 1f _bbci.l a0, PS_UM_BIT, 1f
/* User space: Setup a dummy frame and kill application. /* User space: Setup a dummy frame and kill application.
...@@ -1439,18 +1439,18 @@ ENTRY(_spill_registers) ...@@ -1439,18 +1439,18 @@ ENTRY(_spill_registers)
movi a0, 1 movi a0, 1
movi a1, 0 movi a1, 0
wsr a0, WINDOWSTART wsr a0, windowstart
wsr a1, WINDOWBASE wsr a1, windowbase
rsync rsync
movi a0, 0 movi a0, 0
movi a3, exc_table movi a3, exc_table
l32i a1, a3, EXC_TABLE_KSTK l32i a1, a3, EXC_TABLE_KSTK
wsr a3, EXCSAVE_1 wsr a3, excsave1
movi a4, (1 << PS_WOE_BIT) | 1 movi a4, (1 << PS_WOE_BIT) | 1
wsr a4, PS wsr a4, ps
rsync rsync
movi a6, SIGSEGV movi a6, SIGSEGV
...@@ -1459,7 +1459,7 @@ ENTRY(_spill_registers) ...@@ -1459,7 +1459,7 @@ ENTRY(_spill_registers)
1: /* Kernel space: PANIC! */ 1: /* Kernel space: PANIC! */
wsr a0, EXCSAVE_1 wsr a0, excsave1
movi a0, unrecoverable_exception movi a0, unrecoverable_exception
callx0 a0 # should not return callx0 a0 # should not return
1: j 1b 1: j 1b
...@@ -1524,7 +1524,7 @@ ENTRY(fast_second_level_miss) ...@@ -1524,7 +1524,7 @@ ENTRY(fast_second_level_miss)
/* We deliberately destroy a3 that holds the exception table. */ /* We deliberately destroy a3 that holds the exception table. */
8: rsr a3, EXCVADDR # fault address 8: rsr a3, excvaddr # fault address
_PGD_OFFSET(a0, a3, a1) _PGD_OFFSET(a0, a3, a1)
l32i a0, a0, 0 # read pmdval l32i a0, a0, 0 # read pmdval
beqz a0, 2f beqz a0, 2f
...@@ -1542,7 +1542,7 @@ ENTRY(fast_second_level_miss) ...@@ -1542,7 +1542,7 @@ ENTRY(fast_second_level_miss)
* pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
*/ */
movi a1, -PAGE_OFFSET movi a1, (-PAGE_OFFSET) & 0xffffffff
add a0, a0, a1 # pmdval - PAGE_OFFSET add a0, a0, a1 # pmdval - PAGE_OFFSET
extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
xor a0, a0, a1 xor a0, a0, a1
...@@ -1561,7 +1561,7 @@ ENTRY(fast_second_level_miss) ...@@ -1561,7 +1561,7 @@ ENTRY(fast_second_level_miss)
*/ */
extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3 extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
rsr a1, PTEVADDR rsr a1, ptevaddr
addx2 a3, a3, a3 # -> 0,3,6,9 addx2 a3, a3, a3 # -> 0,3,6,9
srli a1, a1, PAGE_SHIFT srli a1, a1, PAGE_SHIFT
extui a3, a3, 2, 2 # -> 0,0,1,2 extui a3, a3, 2, 2 # -> 0,0,1,2
...@@ -1583,18 +1583,18 @@ ENTRY(fast_second_level_miss) ...@@ -1583,18 +1583,18 @@ ENTRY(fast_second_level_miss)
l32i a0, a2, PT_AREG0 l32i a0, a2, PT_AREG0
l32i a1, a2, PT_AREG1 l32i a1, a2, PT_AREG1
l32i a2, a2, PT_DEPC l32i a2, a2, PT_DEPC
xsr a3, EXCSAVE_1 xsr a3, excsave1
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
/* Restore excsave1 and return. */ /* Restore excsave1 and return. */
rsr a2, DEPC rsr a2, depc
rfe rfe
/* Return from double exception. */ /* Return from double exception. */
1: xsr a2, DEPC 1: xsr a2, depc
esync esync
rfde rfde
...@@ -1618,7 +1618,7 @@ ENTRY(fast_second_level_miss) ...@@ -1618,7 +1618,7 @@ ENTRY(fast_second_level_miss)
/* Make sure the exception originated in the special functions */ /* Make sure the exception originated in the special functions */
movi a0, __tlbtemp_mapping_start movi a0, __tlbtemp_mapping_start
rsr a3, EPC_1 rsr a3, epc1
bltu a3, a0, 2f bltu a3, a0, 2f
movi a0, __tlbtemp_mapping_end movi a0, __tlbtemp_mapping_end
bgeu a3, a0, 2f bgeu a3, a0, 2f
...@@ -1626,7 +1626,7 @@ ENTRY(fast_second_level_miss) ...@@ -1626,7 +1626,7 @@ ENTRY(fast_second_level_miss)
/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
movi a3, TLBTEMP_BASE_1 movi a3, TLBTEMP_BASE_1
rsr a0, EXCVADDR rsr a0, excvaddr
bltu a0, a3, 2f bltu a0, a3, 2f
addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
...@@ -1635,7 +1635,7 @@ ENTRY(fast_second_level_miss) ...@@ -1635,7 +1635,7 @@ ENTRY(fast_second_level_miss)
/* Check if we have to restore an ITLB mapping. */ /* Check if we have to restore an ITLB mapping. */
movi a1, __tlbtemp_mapping_itlb movi a1, __tlbtemp_mapping_itlb
rsr a3, EPC_1 rsr a3, epc1
sub a3, a3, a1 sub a3, a3, a1
/* Calculate VPN */ /* Calculate VPN */
...@@ -1671,13 +1671,13 @@ ENTRY(fast_second_level_miss) ...@@ -1671,13 +1671,13 @@ ENTRY(fast_second_level_miss)
2: /* Invalid PGD, default exception handling */ 2: /* Invalid PGD, default exception handling */
movi a3, exc_table movi a3, exc_table
rsr a1, DEPC rsr a1, depc
xsr a3, EXCSAVE_1 xsr a3, excsave1
s32i a1, a2, PT_AREG2 s32i a1, a2, PT_AREG2
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
mov a1, a2 mov a1, a2
rsr a2, PS rsr a2, ps
bbsi.l a2, PS_UM_BIT, 1f bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception j _kernel_exception
1: j _user_exception 1: j _user_exception
...@@ -1712,7 +1712,7 @@ ENTRY(fast_store_prohibited) ...@@ -1712,7 +1712,7 @@ ENTRY(fast_store_prohibited)
l32i a0, a1, TASK_MM # tsk->mm l32i a0, a1, TASK_MM # tsk->mm
beqz a0, 9f beqz a0, 9f
8: rsr a1, EXCVADDR # fault address 8: rsr a1, excvaddr # fault address
_PGD_OFFSET(a0, a1, a4) _PGD_OFFSET(a0, a1, a4)
l32i a0, a0, 0 l32i a0, a0, 0
beqz a0, 2f beqz a0, 2f
...@@ -1725,7 +1725,7 @@ ENTRY(fast_store_prohibited) ...@@ -1725,7 +1725,7 @@ ENTRY(fast_store_prohibited)
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
or a4, a4, a1 or a4, a4, a1
rsr a1, EXCVADDR rsr a1, excvaddr
s32i a4, a0, 0 s32i a4, a0, 0
/* We need to flush the cache if we have page coloring. */ /* We need to flush the cache if we have page coloring. */
...@@ -1749,15 +1749,15 @@ ENTRY(fast_store_prohibited) ...@@ -1749,15 +1749,15 @@ ENTRY(fast_store_prohibited)
/* Restore excsave1 and a3. */ /* Restore excsave1 and a3. */
xsr a3, EXCSAVE_1 xsr a3, excsave1
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
rsr a2, DEPC rsr a2, depc
rfe rfe
/* Double exception. Restore FIXUP handler and return. */ /* Double exception. Restore FIXUP handler and return. */
1: xsr a2, DEPC 1: xsr a2, depc
esync esync
rfde rfde
...@@ -1766,14 +1766,14 @@ ENTRY(fast_store_prohibited) ...@@ -1766,14 +1766,14 @@ ENTRY(fast_store_prohibited)
2: /* If there was a problem, handle fault in C */ 2: /* If there was a problem, handle fault in C */
rsr a4, DEPC # still holds a2 rsr a4, depc # still holds a2
xsr a3, EXCSAVE_1 xsr a3, excsave1
s32i a4, a2, PT_AREG2 s32i a4, a2, PT_AREG2
s32i a3, a2, PT_AREG3 s32i a3, a2, PT_AREG3
l32i a4, a2, PT_AREG4 l32i a4, a2, PT_AREG4
mov a1, a2 mov a1, a2
rsr a2, PS rsr a2, ps
bbsi.l a2, PS_UM_BIT, 1f bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception j _kernel_exception
1: j _user_exception 1: j _user_exception
...@@ -1901,8 +1901,8 @@ ENTRY(_switch_to) ...@@ -1901,8 +1901,8 @@ ENTRY(_switch_to)
/* Disable ints while we manipulate the stack pointer. */ /* Disable ints while we manipulate the stack pointer. */
movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
xsr a14, PS xsr a14, ps
rsr a3, EXCSAVE_1 rsr a3, excsave1
rsync rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
...@@ -1910,7 +1910,7 @@ ENTRY(_switch_to) ...@@ -1910,7 +1910,7 @@ ENTRY(_switch_to)
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
l32i a3, a5, THREAD_CPENABLE l32i a3, a5, THREAD_CPENABLE
xsr a3, CPENABLE xsr a3, cpenable
s32i a3, a4, THREAD_CPENABLE s32i a3, a4, THREAD_CPENABLE
#endif #endif
...@@ -1924,7 +1924,7 @@ ENTRY(_switch_to) ...@@ -1924,7 +1924,7 @@ ENTRY(_switch_to)
* we return from kernel space. * we return from kernel space.
*/ */
rsr a3, EXCSAVE_1 # exc_table rsr a3, excsave1 # exc_table
movi a6, 0 movi a6, 0
addi a7, a5, PT_REGS_OFFSET addi a7, a5, PT_REGS_OFFSET
s32i a6, a3, EXC_TABLE_FIXUP s32i a6, a3, EXC_TABLE_FIXUP
...@@ -1937,7 +1937,7 @@ ENTRY(_switch_to) ...@@ -1937,7 +1937,7 @@ ENTRY(_switch_to)
load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
wsr a14, PS wsr a14, ps
mov a2, a12 # return 'prev' mov a2, a12 # return 'prev'
rsync rsync
......
...@@ -61,18 +61,18 @@ _startup: ...@@ -61,18 +61,18 @@ _startup:
/* Disable interrupts and exceptions. */ /* Disable interrupts and exceptions. */
movi a0, LOCKLEVEL movi a0, LOCKLEVEL
wsr a0, PS wsr a0, ps
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
wsr a2, EXCSAVE_1 wsr a2, excsave1
/* Start with a fresh windowbase and windowstart. */ /* Start with a fresh windowbase and windowstart. */
movi a1, 1 movi a1, 1
movi a0, 0 movi a0, 0
wsr a1, WINDOWSTART wsr a1, windowstart
wsr a0, WINDOWBASE wsr a0, windowbase
rsync rsync
/* Set a0 to 0 for the remaining initialization. */ /* Set a0 to 0 for the remaining initialization. */
...@@ -82,46 +82,46 @@ _startup: ...@@ -82,46 +82,46 @@ _startup:
/* Clear debugging registers. */ /* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG #if XCHAL_HAVE_DEBUG
wsr a0, IBREAKENABLE wsr a0, ibreakenable
wsr a0, ICOUNT wsr a0, icount
movi a1, 15 movi a1, 15
wsr a0, ICOUNTLEVEL wsr a0, icountlevel
.set _index, 0 .set _index, 0
.rept XCHAL_NUM_DBREAK - 1 .rept XCHAL_NUM_DBREAK - 1
wsr a0, DBREAKC + _index wsr a0, SREG_DBREAKC + _index
.set _index, _index + 1 .set _index, _index + 1
.endr .endr
#endif #endif
/* Clear CCOUNT (not really necessary, but nice) */ /* Clear CCOUNT (not really necessary, but nice) */
wsr a0, CCOUNT # not really necessary, but nice wsr a0, ccount # not really necessary, but nice
/* Disable zero-loops. */ /* Disable zero-loops. */
#if XCHAL_HAVE_LOOPS #if XCHAL_HAVE_LOOPS
wsr a0, LCOUNT wsr a0, lcount
#endif #endif
/* Disable all timers. */ /* Disable all timers. */
.set _index, 0 .set _index, 0
.rept XCHAL_NUM_TIMERS - 1 .rept XCHAL_NUM_TIMERS - 1
wsr a0, CCOMPARE + _index wsr a0, SREG_CCOMPARE + _index
.set _index, _index + 1 .set _index, _index + 1
.endr .endr
/* Interrupt initialization. */ /* Interrupt initialization. */
movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
wsr a0, INTENABLE wsr a0, intenable
wsr a2, INTCLEAR wsr a2, intclear
/* Disable coprocessors. */ /* Disable coprocessors. */
#if XCHAL_CP_NUM > 0 #if XCHAL_CP_NUM > 0
wsr a0, CPENABLE wsr a0, cpenable
#endif #endif
/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0 /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
...@@ -132,7 +132,7 @@ _startup: ...@@ -132,7 +132,7 @@ _startup:
*/ */
movi a1, 1 movi a1, 1
wsr a1, PS wsr a1, ps
rsync rsync
/* Initialize the caches. /* Initialize the caches.
...@@ -206,18 +206,18 @@ _startup: ...@@ -206,18 +206,18 @@ _startup:
addi a1, a1, KERNEL_STACK_SIZE addi a1, a1, KERNEL_STACK_SIZE
movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0 movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
wsr a2, PS # (enable reg-windows; progmode stack) wsr a2, ps # (enable reg-windows; progmode stack)
rsync rsync
/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/ /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
movi a2, debug_exception movi a2, debug_exception
wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
/* Set up EXCSAVE[1] to point to the exc_table. */ /* Set up EXCSAVE[1] to point to the exc_table. */
movi a6, exc_table movi a6, exc_table
xsr a6, EXCSAVE_1 xsr a6, excsave1
/* init_arch kick-starts the linux kernel */ /* init_arch kick-starts the linux kernel */
......
...@@ -72,13 +72,13 @@ int arch_show_interrupts(struct seq_file *p, int prec) ...@@ -72,13 +72,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
static void xtensa_irq_mask(struct irq_data *d) static void xtensa_irq_mask(struct irq_data *d)
{ {
cached_irq_mask &= ~(1 << d->irq); cached_irq_mask &= ~(1 << d->irq);
set_sr (cached_irq_mask, INTENABLE); set_sr (cached_irq_mask, intenable);
} }
static void xtensa_irq_unmask(struct irq_data *d) static void xtensa_irq_unmask(struct irq_data *d)
{ {
cached_irq_mask |= 1 << d->irq; cached_irq_mask |= 1 << d->irq;
set_sr (cached_irq_mask, INTENABLE); set_sr (cached_irq_mask, intenable);
} }
static void xtensa_irq_enable(struct irq_data *d) static void xtensa_irq_enable(struct irq_data *d)
...@@ -95,7 +95,7 @@ static void xtensa_irq_disable(struct irq_data *d) ...@@ -95,7 +95,7 @@ static void xtensa_irq_disable(struct irq_data *d)
static void xtensa_irq_ack(struct irq_data *d) static void xtensa_irq_ack(struct irq_data *d)
{ {
set_sr(1 << d->irq, INTCLEAR); set_sr(1 << d->irq, intclear);
} }
static int xtensa_irq_retrigger(struct irq_data *d) static int xtensa_irq_retrigger(struct irq_data *d)
......
...@@ -173,6 +173,16 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) ...@@ -173,6 +173,16 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
* *
* Note: This is a pristine frame, so we don't need any spill region on top of * Note: This is a pristine frame, so we don't need any spill region on top of
* childregs. * childregs.
*
* The fun part: if we're keeping the same VM (i.e. cloning a thread,
* not an entire process), we're normally given a new usp, and we CANNOT share
* any live address register windows. If we just copy those live frames over,
* the two threads (parent and child) will overflow the same frames onto the
* parent stack at different times, likely corrupting the parent stack (esp.
* if the parent returns from functions that called clone() and calls new
* ones, before the child overflows its now old copies of its parent windows).
* One solution is to spill windows to the parent stack, but that's fairly
* involved. Much simpler to just not copy those live frames across.
*/ */
int copy_thread(unsigned long clone_flags, unsigned long usp, int copy_thread(unsigned long clone_flags, unsigned long usp,
...@@ -180,10 +190,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -180,10 +190,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
struct task_struct * p, struct pt_regs * regs) struct task_struct * p, struct pt_regs * regs)
{ {
struct pt_regs *childregs; struct pt_regs *childregs;
struct thread_info *ti;
unsigned long tos; unsigned long tos;
int user_mode = user_mode(regs); int user_mode = user_mode(regs);
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
struct thread_info *ti;
#endif
/* Set up new TSS. */ /* Set up new TSS. */
tos = (unsigned long)task_stack_page(p) + THREAD_SIZE; tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
if (user_mode) if (user_mode)
...@@ -191,13 +204,14 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -191,13 +204,14 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
else else
childregs = (struct pt_regs*)tos - 1; childregs = (struct pt_regs*)tos - 1;
/* This does not copy all the regs. In a bout of brilliance or madness,
ARs beyond a0-a15 exist past the end of the struct. */
*childregs = *regs; *childregs = *regs;
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */ /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
*((int*)childregs - 3) = (unsigned long)childregs; *((int*)childregs - 3) = (unsigned long)childregs;
*((int*)childregs - 4) = 0; *((int*)childregs - 4) = 0;
childregs->areg[1] = tos;
childregs->areg[2] = 0; childregs->areg[2] = 0;
p->set_child_tid = p->clear_child_tid = NULL; p->set_child_tid = p->clear_child_tid = NULL;
p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1); p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
...@@ -205,10 +219,14 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -205,10 +219,14 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
if (user_mode(regs)) { if (user_mode(regs)) {
int len = childregs->wmask & ~0xf;
childregs->areg[1] = usp; childregs->areg[1] = usp;
if (clone_flags & CLONE_VM) {
childregs->wmask = 1; /* can't share live windows */
} else {
int len = childregs->wmask & ~0xf;
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4], memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
&regs->areg[XCHAL_NUM_AREGS - len/4], len); &regs->areg[XCHAL_NUM_AREGS - len/4], len);
}
// FIXME: we need to set THREADPTR in thread_info... // FIXME: we need to set THREADPTR in thread_info...
if (clone_flags & CLONE_SETTLS) if (clone_flags & CLONE_SETTLS)
childregs->areg[2] = childregs->areg[6]; childregs->areg[2] = childregs->areg[6];
...@@ -216,6 +234,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -216,6 +234,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
} else { } else {
/* In kernel space, we start a new thread with a new stack. */ /* In kernel space, we start a new thread with a new stack. */
childregs->wmask = 1; childregs->wmask = 1;
childregs->areg[1] = tos;
} }
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
......
...@@ -100,7 +100,7 @@ typedef struct tagtable { ...@@ -100,7 +100,7 @@ typedef struct tagtable {
} tagtable_t; } tagtable_t;
#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \ #define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
__attribute__((unused, __section__(".taglist"))) = { tag, fn } __attribute__((used, section(".taglist"))) = { tag, fn }
/* parse current tag */ /* parse current tag */
...@@ -120,7 +120,7 @@ static int __init parse_tag_mem(const bp_tag_t *tag) ...@@ -120,7 +120,7 @@ static int __init parse_tag_mem(const bp_tag_t *tag)
} }
sysmem.bank[sysmem.nr_banks].type = mi->type; sysmem.bank[sysmem.nr_banks].type = mi->type;
sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start); sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_SIZE; sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_MASK;
sysmem.nr_banks++; sysmem.nr_banks++;
return 0; return 0;
......
...@@ -34,7 +34,6 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= { ...@@ -34,7 +34,6 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
#undef __SYSCALL #undef __SYSCALL
#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol, #define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
#undef _XTENSA_UNISTD_H
#undef __KERNEL_SYSCALLS__ #undef __KERNEL_SYSCALLS__
#include <asm/unistd.h> #include <asm/unistd.h>
}; };
......
...@@ -97,7 +97,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { ...@@ -97,7 +97,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */ /* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* EXCCAUSE_PRIVILEGED unhandled */ /* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_UNALIGNED_USER #ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned }, { EXCCAUSE_UNALIGNED, USER, fast_unaligned },
#else #else
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, { EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
...@@ -202,8 +202,8 @@ extern void do_IRQ(int, struct pt_regs *); ...@@ -202,8 +202,8 @@ extern void do_IRQ(int, struct pt_regs *);
void do_interrupt (struct pt_regs *regs) void do_interrupt (struct pt_regs *regs)
{ {
unsigned long intread = get_sr (INTREAD); unsigned long intread = get_sr (interrupt);
unsigned long intenable = get_sr (INTENABLE); unsigned long intenable = get_sr (intenable);
int i, mask; int i, mask;
/* Handle all interrupts (no priorities). /* Handle all interrupts (no priorities).
...@@ -213,7 +213,7 @@ void do_interrupt (struct pt_regs *regs) ...@@ -213,7 +213,7 @@ void do_interrupt (struct pt_regs *regs)
for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) { for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
if (mask & (intread & intenable)) { if (mask & (intread & intenable)) {
set_sr (mask, INTCLEAR); set_sr (mask, intclear);
do_IRQ (i,regs); do_IRQ (i,regs);
} }
} }
...@@ -244,7 +244,7 @@ do_illegal_instruction(struct pt_regs *regs) ...@@ -244,7 +244,7 @@ do_illegal_instruction(struct pt_regs *regs)
*/ */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifndef CONFIG_UNALIGNED_USER #ifndef CONFIG_XTENSA_UNALIGNED_USER
void void
do_unaligned_user (struct pt_regs *regs) do_unaligned_user (struct pt_regs *regs)
{ {
...@@ -339,7 +339,7 @@ void __init trap_init(void) ...@@ -339,7 +339,7 @@ void __init trap_init(void)
/* Initialize EXCSAVE_1 to hold the address of the exception table. */ /* Initialize EXCSAVE_1 to hold the address of the exception table. */
i = (unsigned long)exc_table; i = (unsigned long)exc_table;
__asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i)); __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i));
} }
/* /*
...@@ -386,16 +386,16 @@ static inline void spill_registers(void) ...@@ -386,16 +386,16 @@ static inline void spill_registers(void)
unsigned int a0, ps; unsigned int a0, ps;
__asm__ __volatile__ ( __asm__ __volatile__ (
"movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t" "movi a14, " __stringify(PS_EXCM_BIT | 1) "\n\t"
"mov a12, a0\n\t" "mov a12, a0\n\t"
"rsr a13," __stringify(SAR) "\n\t" "rsr a13, sar\n\t"
"xsr a14," __stringify(PS) "\n\t" "xsr a14, ps\n\t"
"movi a0, _spill_registers\n\t" "movi a0, _spill_registers\n\t"
"rsync\n\t" "rsync\n\t"
"callx0 a0\n\t" "callx0 a0\n\t"
"mov a0, a12\n\t" "mov a0, a12\n\t"
"wsr a13," __stringify(SAR) "\n\t" "wsr a13, sar\n\t"
"wsr a14," __stringify(PS) "\n\t" "wsr a14, ps\n\t"
:: "a" (&a0), "a" (&ps) :: "a" (&a0), "a" (&ps)
: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory"); : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
} }
......
...@@ -69,11 +69,11 @@ ...@@ -69,11 +69,11 @@
ENTRY(_UserExceptionVector) ENTRY(_UserExceptionVector)
xsr a3, EXCSAVE_1 # save a3 and get dispatch table xsr a3, excsave1 # save a3 and get dispatch table
wsr a2, DEPC # save a2 wsr a2, depc # save a2
l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2 l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
s32i a0, a2, PT_AREG0 # save a0 to ESF s32i a0, a2, PT_AREG0 # save a0 to ESF
rsr a0, EXCCAUSE # retrieve exception cause rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 # find entry in table addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a0, a0, EXC_TABLE_FAST_USER # load handler
...@@ -93,11 +93,11 @@ ENTRY(_UserExceptionVector) ...@@ -93,11 +93,11 @@ ENTRY(_UserExceptionVector)
ENTRY(_KernelExceptionVector) ENTRY(_KernelExceptionVector)
xsr a3, EXCSAVE_1 # save a3, and get dispatch table xsr a3, excsave1 # save a3, and get dispatch table
wsr a2, DEPC # save a2 wsr a2, depc # save a2
addi a2, a1, -16-PT_SIZE # adjust stack pointer addi a2, a1, -16-PT_SIZE # adjust stack pointer
s32i a0, a2, PT_AREG0 # save a0 to ESF s32i a0, a2, PT_AREG0 # save a0 to ESF
rsr a0, EXCCAUSE # retrieve exception cause rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 # find entry in table addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
...@@ -205,17 +205,17 @@ ENTRY(_DoubleExceptionVector) ...@@ -205,17 +205,17 @@ ENTRY(_DoubleExceptionVector)
/* Deliberately destroy excsave (don't assume it's value was valid). */ /* Deliberately destroy excsave (don't assume it's value was valid). */
wsr a3, EXCSAVE_1 # save a3 wsr a3, excsave1 # save a3
/* Check for kernel double exception (usually fatal). */ /* Check for kernel double exception (usually fatal). */
rsr a3, PS rsr a3, ps
_bbci.l a3, PS_UM_BIT, .Lksp _bbci.l a3, PS_UM_BIT, .Lksp
/* Check if we are currently handling a window exception. */ /* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */ /* Note: We don't need to indicate that we enter a critical section. */
xsr a0, DEPC # get DEPC, save a0 xsr a0, depc # get DEPC, save a0
movi a3, XCHAL_WINDOW_VECTORS_VADDR movi a3, XCHAL_WINDOW_VECTORS_VADDR
_bltu a0, a3, .Lfixup _bltu a0, a3, .Lfixup
...@@ -243,21 +243,21 @@ ENTRY(_DoubleExceptionVector) ...@@ -243,21 +243,21 @@ ENTRY(_DoubleExceptionVector)
* Note: We can trash the current window frame (a0...a3) and depc! * Note: We can trash the current window frame (a0...a3) and depc!
*/ */
wsr a2, DEPC # save stack pointer temporarily wsr a2, depc # save stack pointer temporarily
rsr a0, PS rsr a0, ps
extui a0, a0, PS_OWB_SHIFT, 4 extui a0, a0, PS_OWB_SHIFT, 4
wsr a0, WINDOWBASE wsr a0, windowbase
rsync rsync
/* We are now in the previous window frame. Save registers again. */ /* We are now in the previous window frame. Save registers again. */
xsr a2, DEPC # save a2 and get stack pointer xsr a2, depc # save a2 and get stack pointer
s32i a0, a2, PT_AREG0 s32i a0, a2, PT_AREG0
wsr a3, EXCSAVE_1 # save a3 wsr a3, excsave1 # save a3
movi a3, exc_table movi a3, exc_table
rsr a0, EXCCAUSE rsr a0, exccause
s32i a0, a2, PT_DEPC # mark it as a regular exception s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER l32i a0, a0, EXC_TABLE_FAST_USER
...@@ -290,14 +290,14 @@ ENTRY(_DoubleExceptionVector) ...@@ -290,14 +290,14 @@ ENTRY(_DoubleExceptionVector)
/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */ /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
xsr a3, DEPC xsr a3, depc
s32i a0, a2, PT_DEPC s32i a0, a2, PT_DEPC
s32i a3, a2, PT_AREG0 s32i a3, a2, PT_AREG0
/* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */ /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
movi a3, exc_table movi a3, exc_table
rsr a0, EXCCAUSE rsr a0, exccause
addx4 a0, a0, a3 addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER l32i a0, a0, EXC_TABLE_FAST_USER
jx a0 jx a0
...@@ -312,7 +312,7 @@ ENTRY(_DoubleExceptionVector) ...@@ -312,7 +312,7 @@ ENTRY(_DoubleExceptionVector)
.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ .Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
rsr a3, EXCCAUSE rsr a3, exccause
beqi a3, EXCCAUSE_ITLB_MISS, 1f beqi a3, EXCCAUSE_ITLB_MISS, 1f
addi a3, a3, -EXCCAUSE_DTLB_MISS addi a3, a3, -EXCCAUSE_DTLB_MISS
bnez a3, .Lunrecoverable bnez a3, .Lunrecoverable
...@@ -328,11 +328,11 @@ ENTRY(_DoubleExceptionVector) ...@@ -328,11 +328,11 @@ ENTRY(_DoubleExceptionVector)
.Lunrecoverable_fixup: .Lunrecoverable_fixup:
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a0, DEPC xsr a0, depc
.Lunrecoverable: .Lunrecoverable:
rsr a3, EXCSAVE_1 rsr a3, excsave1
wsr a0, EXCSAVE_1 wsr a0, excsave1
movi a0, unrecoverable_exception movi a0, unrecoverable_exception
callx0 a0 callx0 a0
...@@ -349,7 +349,7 @@ ENTRY(_DoubleExceptionVector) ...@@ -349,7 +349,7 @@ ENTRY(_DoubleExceptionVector)
.section .DebugInterruptVector.text, "ax" .section .DebugInterruptVector.text, "ax"
ENTRY(_DebugInterruptVector) ENTRY(_DebugInterruptVector)
xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
jx a0 jx a0
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2002 - 2005 Tensilica Inc. * Copyright (C) 2002 - 2012 Tensilica Inc.
*/ */
#include <variant/core.h> #include <variant/core.h>
...@@ -27,14 +27,11 @@ ...@@ -27,14 +27,11 @@
#endif #endif
.endm .endm
/* /*
* void *memcpy(void *dst, const void *src, size_t len); * void *memcpy(void *dst, const void *src, size_t len);
* void *memmove(void *dst, const void *src, size_t len);
* void *bcopy(const void *src, void *dst, size_t len);
* *
* This function is intended to do the same thing as the standard * This function is intended to do the same thing as the standard
* library function memcpy() (or bcopy()) for most cases. * library function memcpy() for most cases.
* However, where the source and/or destination references * However, where the source and/or destination references
* an instruction RAM or ROM or a data RAM or ROM, that * an instruction RAM or ROM or a data RAM or ROM, that
* source and/or destination will always be accessed with * source and/or destination will always be accessed with
...@@ -45,9 +42,6 @@ ...@@ -45,9 +42,6 @@
* !!!!!!! Handling of IRAM/IROM has not yet * !!!!!!! Handling of IRAM/IROM has not yet
* !!!!!!! been implemented. * !!!!!!! been implemented.
* *
* The bcopy version is provided here to avoid the overhead
* of an extra call, for callers that require this convention.
*
* The (general case) algorithm is as follows: * The (general case) algorithm is as follows:
* If destination is unaligned, align it by conditionally * If destination is unaligned, align it by conditionally
* copying 1 and 2 bytes. * copying 1 and 2 bytes.
...@@ -76,17 +70,6 @@ ...@@ -76,17 +70,6 @@
*/ */
.text .text
.align 4
.global bcopy
.type bcopy,@function
bcopy:
entry sp, 16 # minimal stack frame
# a2=src, a3=dst, a4=len
mov a5, a3 # copy dst so that a2 is return value
mov a3, a2
mov a2, a5
j .Lcommon # go to common code for memcpy+bcopy
/* /*
* Byte by byte copy * Byte by byte copy
...@@ -107,7 +90,7 @@ bcopy: ...@@ -107,7 +90,7 @@ bcopy:
s8i a6, a5, 0 s8i a6, a5, 0
addi a5, a5, 1 addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a7, .Lnextbyte bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Lbytecopydone: .Lbytecopydone:
retw retw
...@@ -144,9 +127,6 @@ bcopy: ...@@ -144,9 +127,6 @@ bcopy:
.global memcpy .global memcpy
.type memcpy,@function .type memcpy,@function
memcpy: memcpy:
.global memmove
.type memmove,@function
memmove:
entry sp, 16 # minimal stack frame entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
...@@ -182,7 +162,7 @@ memmove: ...@@ -182,7 +162,7 @@ memmove:
s32i a7, a5, 12 s32i a7, a5, 12
addi a5, a5, 16 addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a8, .Loop1 bne a3, a8, .Loop1 # continue loop if a3:src != a8:src_end
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Loop1done: .Loop1done:
bbci.l a4, 3, .L2 bbci.l a4, 3, .L2
...@@ -260,7 +240,7 @@ memmove: ...@@ -260,7 +240,7 @@ memmove:
s32i a9, a5, 12 s32i a9, a5, 12
addi a5, a5, 16 addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a10, .Loop2 bne a3, a10, .Loop2 # continue loop if a3:src != a10:src_end
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Loop2done: .Loop2done:
bbci.l a4, 3, .L12 bbci.l a4, 3, .L12
...@@ -305,6 +285,285 @@ memmove: ...@@ -305,6 +285,285 @@ memmove:
l8ui a6, a3, 0 l8ui a6, a3, 0
s8i a6, a5, 0 s8i a6, a5, 0
retw retw
/*
* void bcopy(const void *src, void *dest, size_t n);
*/
.align 4
.global bcopy
.type bcopy,@function
bcopy:
entry sp, 16 # minimal stack frame
# a2=src, a3=dst, a4=len
mov a5, a3
mov a3, a2
mov a2, a5
j .Lmovecommon # go to common code for memmove+bcopy
/*
* void *memmove(void *dst, const void *src, size_t len);
*
* This function is intended to do the same thing as the standard
* library function memmove() for most cases.
* However, where the source and/or destination references
* an instruction RAM or ROM or a data RAM or ROM, that
* source and/or destination will always be accessed with
* 32-bit load and store instructions (as required for these
* types of devices).
*
* !!!!!!! XTFIXME:
* !!!!!!! Handling of IRAM/IROM has not yet
* !!!!!!! been implemented.
*
* The (general case) algorithm is as follows:
* If end of source doesn't overlap destination then use memcpy.
* Otherwise do memcpy backwards.
*
* Register use:
* a0/ return address
* a1/ stack pointer
* a2/ return value
* a3/ src
* a4/ length
* a5/ dst
* a6/ tmp
* a7/ tmp
* a8/ tmp
* a9/ tmp
* a10/ tmp
* a11/ tmp
*/
/*
* Byte by byte copy
*/
.align 4
.byte 0 # 1 mod 4 alignment for LOOPNEZ
# (0 mod 4 alignment for LBEG)
.Lbackbytecopy:
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lbackbytecopydone
#else /* !XCHAL_HAVE_LOOPS */
beqz a4, .Lbackbytecopydone
sub a7, a3, a4 # a7 = start address for source
#endif /* !XCHAL_HAVE_LOOPS */
.Lbacknextbyte:
addi a3, a3, -1
l8ui a6, a3, 0
addi a5, a5, -1
s8i a6, a5, 0
#if !XCHAL_HAVE_LOOPS
bne a3, a7, .Lbacknextbyte # continue loop if
# $a3:src != $a7:src_start
#endif /* !XCHAL_HAVE_LOOPS */
.Lbackbytecopydone:
retw
/*
* Destination is unaligned
*/
.align 4
.Lbackdst1mod2: # dst is only byte aligned
_bltui a4, 7, .Lbackbytecopy # do short copies byte by byte
# copy 1 byte
addi a3, a3, -1
l8ui a6, a3, 0
addi a5, a5, -1
s8i a6, a5, 0
addi a4, a4, -1
_bbci.l a5, 1, .Lbackdstaligned # if dst is now aligned, then
# return to main algorithm
.Lbackdst2mod4: # dst 16-bit aligned
# copy 2 bytes
_bltui a4, 6, .Lbackbytecopy # do short copies byte by byte
addi a3, a3, -2
l8ui a6, a3, 0
l8ui a7, a3, 1
addi a5, a5, -2
s8i a6, a5, 0
s8i a7, a5, 1
addi a4, a4, -2
j .Lbackdstaligned # dst is now aligned,
# return to main algorithm
.align 4
.global memmove
.type memmove,@function
memmove:
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
mov a5, a2 # copy dst so that a2 is return value
.Lmovecommon:
sub a6, a5, a3
bgeu a6, a4, .Lcommon
add a5, a5, a4
add a3, a3, a4
_bbsi.l a5, 0, .Lbackdst1mod2 # if dst is 1 mod 2
_bbsi.l a5, 1, .Lbackdst2mod4 # if dst is 2 mod 4
.Lbackdstaligned: # return here from .Lbackdst?mod? once dst is aligned
srli a7, a4, 4 # number of loop iterations with 16B
# per iteration
movi a8, 3 # if source is not aligned,
_bany a3, a8, .Lbacksrcunaligned # then use shifting copy
/*
* Destination and source are word-aligned, use word copy.
*/
# copy 16 bytes per iteration for word-aligned dst and word-aligned src
#if XCHAL_HAVE_LOOPS
loopnez a7, .backLoop1done
#else /* !XCHAL_HAVE_LOOPS */
beqz a7, .backLoop1done
slli a8, a7, 4
sub a8, a3, a8 # a8 = start of first 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.backLoop1:
addi a3, a3, -16
l32i a7, a3, 12
l32i a6, a3, 8
addi a5, a5, -16
s32i a7, a5, 12
l32i a7, a3, 4
s32i a6, a5, 8
l32i a6, a3, 0
s32i a7, a5, 4
s32i a6, a5, 0
#if !XCHAL_HAVE_LOOPS
bne a3, a8, .backLoop1 # continue loop if a3:src != a8:src_start
#endif /* !XCHAL_HAVE_LOOPS */
.backLoop1done:
bbci.l a4, 3, .Lback2
# copy 8 bytes
addi a3, a3, -8
l32i a6, a3, 0
l32i a7, a3, 4
addi a5, a5, -8
s32i a6, a5, 0
s32i a7, a5, 4
.Lback2:
bbsi.l a4, 2, .Lback3
bbsi.l a4, 1, .Lback4
bbsi.l a4, 0, .Lback5
retw
.Lback3:
# copy 4 bytes
addi a3, a3, -4
l32i a6, a3, 0
addi a5, a5, -4
s32i a6, a5, 0
bbsi.l a4, 1, .Lback4
bbsi.l a4, 0, .Lback5
retw
.Lback4:
# copy 2 bytes
addi a3, a3, -2
l16ui a6, a3, 0
addi a5, a5, -2
s16i a6, a5, 0
bbsi.l a4, 0, .Lback5
retw
.Lback5:
# copy 1 byte
addi a3, a3, -1
l8ui a6, a3, 0
addi a5, a5, -1
s8i a6, a5, 0
retw
/*
* Destination is aligned, Source is unaligned
*/
.align 4
.Lbacksrcunaligned:
_beqz a4, .Lbackdone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src
ssa8 a3 # set shift amount from byte offset
#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with
* the lint or ferret client, or 0
* to save a few cycles */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
and a11, a3, a8 # save unalignment offset for below
sub a3, a3, a11 # align a3
#endif
l32i a6, a3, 0 # load first word
#if XCHAL_HAVE_LOOPS
loopnez a7, .backLoop2done
#else /* !XCHAL_HAVE_LOOPS */
beqz a7, .backLoop2done
slli a10, a7, 4
sub a10, a3, a10 # a10 = start of first 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.backLoop2:
addi a3, a3, -16
l32i a7, a3, 12
l32i a8, a3, 8
addi a5, a5, -16
src_b a6, a7, a6
s32i a6, a5, 12
l32i a9, a3, 4
src_b a7, a8, a7
s32i a7, a5, 8
l32i a6, a3, 0
src_b a8, a9, a8
s32i a8, a5, 4
src_b a9, a6, a9
s32i a9, a5, 0
#if !XCHAL_HAVE_LOOPS
bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start
#endif /* !XCHAL_HAVE_LOOPS */
.backLoop2done:
bbci.l a4, 3, .Lback12
# copy 8 bytes
addi a3, a3, -8
l32i a7, a3, 4
l32i a8, a3, 0
addi a5, a5, -8
src_b a6, a7, a6
s32i a6, a5, 4
src_b a7, a8, a7
s32i a7, a5, 0
mov a6, a8
.Lback12:
bbci.l a4, 2, .Lback13
# copy 4 bytes
addi a3, a3, -4
l32i a7, a3, 0
addi a5, a5, -4
src_b a6, a7, a6
s32i a6, a5, 0
mov a6, a7
.Lback13:
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
add a3, a3, a11 # readjust a3 with correct misalignment
#endif
bbsi.l a4, 1, .Lback14
bbsi.l a4, 0, .Lback15
.Lbackdone:
retw
.Lback14:
# copy 2 bytes
addi a3, a3, -2
l8ui a6, a3, 0
l8ui a7, a3, 1
addi a5, a5, -2
s8i a6, a5, 0
s8i a7, a5, 1
bbsi.l a4, 0, .Lback15
retw
.Lback15:
# copy 1 byte
addi a3, a3, -1
addi a5, a5, -1
l8ui a6, a3, 0
s8i a6, a5, 0
retw
/* /*
* Local Variables: * Local Variables:
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2010 Tensilica Inc.
* *
* Chris Zankel <chris@zankel.net> * Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
...@@ -186,6 +186,7 @@ void do_page_fault(struct pt_regs *regs) ...@@ -186,6 +186,7 @@ void do_page_fault(struct pt_regs *regs)
/* Kernel mode? Handle exceptions or die */ /* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS); bad_page_fault(regs, address, SIGBUS);
return;
vmalloc_fault: vmalloc_fault:
{ {
......
...@@ -91,7 +91,7 @@ static int rs_write(struct tty_struct * tty, ...@@ -91,7 +91,7 @@ static int rs_write(struct tty_struct * tty,
{ {
/* see drivers/char/serialX.c to reference original version */ /* see drivers/char/serialX.c to reference original version */
__simc (SYS_write, 1, (unsigned long)buf, count, 0, 0); simc_write(1, buf, count);
return count; return count;
} }
...@@ -122,12 +122,7 @@ static void rs_poll(unsigned long priv) ...@@ -122,12 +122,7 @@ static void rs_poll(unsigned long priv)
static int rs_put_char(struct tty_struct *tty, unsigned char ch) static int rs_put_char(struct tty_struct *tty, unsigned char ch)
{ {
char buf[2]; return rs_write(tty, &ch, 1);
buf[0] = ch;
buf[1] = '\0'; /* Is this NULL necessary? */
__simc (SYS_write, 1, (unsigned long) buf, 1, 0, 0);
return 1;
} }
static void rs_flush_chars(struct tty_struct *tty) static void rs_flush_chars(struct tty_struct *tty)
......
...@@ -78,8 +78,9 @@ static inline int __simc(int a, int b, int c, int d, int e, int f) ...@@ -78,8 +78,9 @@ static inline int __simc(int a, int b, int c, int d, int e, int f)
return ret; return ret;
} }
static inline int simc_open(char *file, int flags, int mode) static inline int simc_open(const char *file, int flags, int mode)
{ {
wmb();
return __simc(SYS_open, (int) file, flags, mode, 0, 0); return __simc(SYS_open, (int) file, flags, mode, 0, 0);
} }
...@@ -90,16 +91,19 @@ static inline int simc_close(int fd) ...@@ -90,16 +91,19 @@ static inline int simc_close(int fd)
static inline int simc_ioctl(int fd, int request, void *arg) static inline int simc_ioctl(int fd, int request, void *arg)
{ {
wmb();
return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0); return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
} }
static inline int simc_read(int fd, void *buf, size_t count) static inline int simc_read(int fd, void *buf, size_t count)
{ {
rmb();
return __simc(SYS_read, fd, (int) buf, count, 0, 0); return __simc(SYS_read, fd, (int) buf, count, 0, 0);
} }
static inline int simc_write(int fd, void *buf, size_t count) static inline int simc_write(int fd, const void *buf, size_t count)
{ {
wmb();
return __simc(SYS_write, fd, (int) buf, count, 0, 0); return __simc(SYS_write, fd, (int) buf, count, 0, 0);
} }
...@@ -107,6 +111,7 @@ static inline int simc_poll(int fd) ...@@ -107,6 +111,7 @@ static inline int simc_poll(int fd)
{ {
struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
wmb();
return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv, return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,
0, 0); 0, 0);
} }
......
...@@ -61,13 +61,13 @@ void platform_restart(void) ...@@ -61,13 +61,13 @@ void platform_restart(void)
* jump to the reset vector. */ * jump to the reset vector. */
__asm__ __volatile__("movi a2, 15\n\t" __asm__ __volatile__("movi a2, 15\n\t"
"wsr a2, " __stringify(ICOUNTLEVEL) "\n\t" "wsr a2, icountlevel\n\t"
"movi a2, 0\n\t" "movi a2, 0\n\t"
"wsr a2, " __stringify(ICOUNT) "\n\t" "wsr a2, icount\n\t"
"wsr a2, " __stringify(IBREAKENABLE) "\n\t" "wsr a2, ibreakenable\n\t"
"wsr a2, " __stringify(LCOUNT) "\n\t" "wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t" "movi a2, 0x1f\n\t"
"wsr a2, " __stringify(PS) "\n\t" "wsr a2, ps\n\t"
"isync\n\t" "isync\n\t"
"jx %0\n\t" "jx %0\n\t"
: :
......
...@@ -66,13 +66,13 @@ void platform_restart(void) ...@@ -66,13 +66,13 @@ void platform_restart(void)
* jump to the reset vector. */ * jump to the reset vector. */
__asm__ __volatile__ ("movi a2, 15\n\t" __asm__ __volatile__ ("movi a2, 15\n\t"
"wsr a2, " __stringify(ICOUNTLEVEL) "\n\t" "wsr a2, icountlevel\n\t"
"movi a2, 0\n\t" "movi a2, 0\n\t"
"wsr a2, " __stringify(ICOUNT) "\n\t" "wsr a2, icount\n\t"
"wsr a2, " __stringify(IBREAKENABLE) "\n\t" "wsr a2, ibreakenable\n\t"
"wsr a2, " __stringify(LCOUNT) "\n\t" "wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t" "movi a2, 0x1f\n\t"
"wsr a2, " __stringify(PS) "\n\t" "wsr a2, ps\n\t"
"isync\n\t" "isync\n\t"
"jx %0\n\t" "jx %0\n\t"
: :
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment