Commit 96263573 authored by Linus Torvalds

Merge tag 'xtensa-next-20130225' of git://github.com/czankel/xtensa-linux

Pull xtensa update from Chris Zankel:
 "Added features:
   - add support for thread local storage (TLS)

   - add accept4 and finit_module syscalls

   - support medium-priority interrupts

   - add support for dc232c processor variant

   - support file-based simulated disk for the ISS simulator

  Bug fixes:

   - fix return values of the str[n]cmp functions

   - avoid mmap cache aliasing

   - fix handling of 'windowed registers' in ptrace"

* tag 'xtensa-next-20130225' of git://github.com/czankel/xtensa-linux:
  xtensa: add accept4 syscall
  xtensa: add support for TLS
  xtensa: add missing include asm/uaccess.h to checksum.h
  xtensa: do not enable GENERIC_GPIO by default
  xtensa: complete ptrace handling of register windows
  xtensa: add support for oprofile
  xtensa: move spill_registers to traps.h
  xtensa: ISS: add host file-based simulated disk
  xtensa: fix str[n]cmp return value
  xtensa: avoid mmap cache aliasing
  xtensa: add finit_module syscall
  xtensa: pull signal definitions from signal-defs.h
  xtensa: fix ipc_parse_version selection
  xtensa: dispatch medium-priority interrupts
  xtensa: Add config files for Diamond 233L - Rev C processor variant
  xtensa: use new common dtc rule
  xtensa: rename prom_update_property to of_update_property
parents 2b37e9a2 9cf81c75
@@ -13,9 +13,11 @@ config XTENSA
	select GENERIC_CPU_DEVICES
	select MODULES_USE_ELF_RELA
	select GENERIC_PCI_IOMAP
+	select ARCH_WANT_IPC_PARSE_VERSION
	select ARCH_WANT_OPTIONAL_GPIOLIB
	select CLONE_BACKWARDS
	select IRQ_DOMAIN
+	select HAVE_OPROFILE
	help
	  Xtensa processors are 32-bit RISC machines designed by Tensilica
	  primarily for embedded systems.  These processors are both
@@ -31,7 +33,7 @@ config GENERIC_HWEIGHT
	def_bool y

config GENERIC_GPIO
-	def_bool y
+	bool

config ARCH_HAS_ILOG2_U32
	def_bool n
@@ -71,6 +73,12 @@ config XTENSA_VARIANT_DC232B
	help
	  This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE).

+config XTENSA_VARIANT_DC233C
+	bool "dc233c - Diamond 233L Standard Core Rev.C (LE)"
+	select MMU
+	help
+	  This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE).
+
config XTENSA_VARIANT_S6000
	bool "s6000 - Stretch software configurable processor"
	select VARIANT_IRQ_SWITCH
@@ -197,6 +205,42 @@ config BUILTIN_DTB
	string "DTB to build into the kernel image"
	depends on OF
config BLK_DEV_SIMDISK
tristate "Host file-based simulated block device support"
default n
depends on XTENSA_PLATFORM_ISS
help
Create block devices that map to files in the host file system.
Device binding to host file may be changed at runtime via proc
interface provided the device is not in use.
config BLK_DEV_SIMDISK_COUNT
int "Number of host file-based simulated block devices"
range 1 10
depends on BLK_DEV_SIMDISK
default 2
help
This is the default minimal number of created block devices.
Kernel/module parameter 'simdisk_count' may be used to change this
value at runtime. More file names (but no more than 10) may be
specified as parameters, simdisk_count grows accordingly.
config SIMDISK0_FILENAME
string "Host filename for the first simulated device"
depends on BLK_DEV_SIMDISK = y
default ""
help
Attach a first simdisk to a host file. Conventionally, this file
contains a root file system.
config SIMDISK1_FILENAME
string "Host filename for the second simulated device"
depends on BLK_DEV_SIMDISK = y && BLK_DEV_SIMDISK_COUNT != 1
default ""
help
Another simulated disk in a host file for a buildroot-independent
storage.
source "mm/Kconfig" source "mm/Kconfig"
source "drivers/pcmcia/Kconfig" source "drivers/pcmcia/Kconfig"
......
@@ -15,6 +15,7 @@
variant-$(CONFIG_XTENSA_VARIANT_FSF)          := fsf
variant-$(CONFIG_XTENSA_VARIANT_DC232B)       := dc232b
+variant-$(CONFIG_XTENSA_VARIANT_DC233C)       := dc233c
variant-$(CONFIG_XTENSA_VARIANT_S6000)        := s6000
variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
@@ -86,9 +87,10 @@ core-y		+= arch/xtensa/kernel/ arch/xtensa/mm/
core-y		+= $(buildvar) $(buildplf)

libs-y		+= arch/xtensa/lib/ $(LIBGCC)
+drivers-$(CONFIG_OPROFILE)	+= arch/xtensa/oprofile/

ifneq ($(CONFIG_BUILTIN_DTB),"")
-core-$(CONFIG_OF) += arch/xtensa/boot/
+core-$(CONFIG_OF) += arch/xtensa/boot/dts/
endif

boot		:= arch/xtensa/boot
@@ -101,7 +103,7 @@ zImage: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $@

%.dtb:
-	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+	$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@

define archhelp
  @echo '* zImage      - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
...
@@ -25,18 +25,6 @@ bootdir-$(CONFIG_XTENSA_PLATFORM_ISS)	   += boot-elf
bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000)   += boot-redboot boot-elf boot-uboot
bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA)   += boot-redboot boot-elf boot-uboot

-BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
-ifneq ($(CONFIG_BUILTIN_DTB),"")
-obj-$(CONFIG_OF) += $(BUILTIN_DTB)
-endif
-
-# Rule to build device tree blobs
-$(obj)/%.dtb: $(src)/dts/%.dts FORCE
-	$(call if_changed_dep,dtc)
-
-clean-files := *.dtb.S
-
zImage Image: $(bootdir-y)

$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
...
#
# arch/xtensa/boot/dts/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
#
BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
ifneq ($(CONFIG_BUILTIN_DTB),"")
obj-$(CONFIG_OF) += $(BUILTIN_DTB)
endif
clean-files := *.dtb.S
@@ -7,7 +7,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
@@ -24,11 +24,11 @@

/*
 * This Xtensa implementation assumes that the right mechanism
- * for exclusion is for locking interrupts to level 1.
+ * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
- *    rsil a15, 1
+ *    rsil a15, LOCKLEVEL
 *     <code>
 *    wsr  a15, PS
 *    rsync
...
@@ -12,6 +12,7 @@
#define _XTENSA_CHECKSUM_H

#include <linux/in6.h>
+#include <asm/uaccess.h>
#include <variant/core.h>

/*
...
@@ -84,7 +84,8 @@ typedef struct {
	elf_greg_t sar;
	elf_greg_t windowstart;
	elf_greg_t windowbase;
-	elf_greg_t reserved[8+48];
+	elf_greg_t threadptr;
+	elf_greg_t reserved[7+48];
	elf_greg_t a[64];
} xtensa_gregset_t;
...
@@ -410,6 +410,10 @@ typedef pte_t *pte_addr_t;
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA

#include <asm-generic/pgtable.h>
...
@@ -5,7 +5,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_PROCESSOR_H
@@ -68,7 +68,7 @@
/* LOCKLEVEL defines the interrupt level that masks all
 * general-purpose interrupts.
 */
-#define LOCKLEVEL 1
+#define LOCKLEVEL XCHAL_EXCM_LEVEL

/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
 * registers
...
@@ -38,6 +38,7 @@ struct pt_regs {
	unsigned long syscall;		/*  56 */
	unsigned long icountlevel;	/*  60 */
	unsigned long scompare1;	/*  64 */
+	unsigned long threadptr;	/*  68 */

	/* Additional configurable registers that are used by the compiler. */
	xtregs_opt_t xtregs_opt;
@@ -48,7 +49,7 @@ struct pt_regs {
	/* current register frame.
	 * Note: The ESF for kernel exceptions ends after 16 registers!
	 */
-	unsigned long areg[16];		/* 128 (64) */
+	unsigned long areg[16];
};

#include <variant/core.h>
...
@@ -88,6 +88,7 @@
#define PS_UM_BIT		5
#define PS_EXCM_BIT		4
#define PS_INTLEVEL_SHIFT	0
+#define PS_INTLEVEL_WIDTH	4
#define PS_INTLEVEL_MASK	0x0000000F

/*  DBREAKCn register fields.  */
...
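With PS_INTLEVEL_WIDTH added, exception entry can extract the whole PS.INTLEVEL field instead of only bit 0. A minimal C sketch of that computation, using only the constants shown above (illustration, not kernel code):

#define PS_INTLEVEL_SHIFT	0
#define PS_INTLEVEL_WIDTH	4

/* What the extui in the entry code computes from a saved PS value. */
static inline unsigned long ps_intlevel(unsigned long ps)
{
	return (ps >> PS_INTLEVEL_SHIFT) & ((1UL << PS_INTLEVEL_WIDTH) - 1);
}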
@@ -74,7 +74,7 @@ static inline int strcmp(const char *__cs, const char *__ct)
	    "beqz	%2, 2f\n\t"
	    "beq	%2, %3, 1b\n"
	    "2:\n\t"
-	    "sub	%2, %3, %2"
+	    "sub	%2, %2, %3"
	    : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
	    : "0" (__cs), "1" (__ct));
@@ -99,7 +99,7 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
	    "beqz	%3, 2f\n\t"
	    "beq	%2, %3, 1b\n"
	    "2:\n\t"
-	    "sub	%2, %3, %2"
+	    "sub	%2, %2, %3"
	    : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
	    : "0" (__cs), "1" (__ct), "r" (__cs+__n));
...
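For reference, a minimal C sketch (ref_strcmp is a hypothetical helper, not the kernel assembly) of the sign convention the corrected `sub %2, %2, %3` restores: the result must carry the sign of *cs - *ct at the first mismatch, which the old operand order inverted.

#include <stdio.h>

/* Reference behaviour only, for illustration. */
static int ref_strcmp(const char *cs, const char *ct)
{
	unsigned char a, b;

	do {
		a = (unsigned char)*cs++;
		b = (unsigned char)*ct++;
	} while (a && a == b);

	return a - b;	/* the buggy code effectively computed b - a */
}

int main(void)
{
	printf("%d\n", ref_strcmp("abc", "abd") < 0);	/* prints 1 */
	return 0;
}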
@@ -5,7 +5,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_TIMEX_H
@@ -19,13 +19,13 @@
#define _INTLEVEL(x)	XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x)	_INTLEVEL(x)

-#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) == 1
+#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER     0
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) == 1
+#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER     1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) == 1
+#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER     2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
...
@@ -20,4 +20,28 @@
extern void * __init trap_set_handler(int cause, void *handler);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
static inline void spill_registers(void)
{
unsigned int a0, ps;
__asm__ __volatile__ (
"movi a14, " __stringify(PS_EXCM_BIT | LOCKLEVEL) "\n\t"
"mov a12, a0\n\t"
"rsr a13, sar\n\t"
"xsr a14, ps\n\t"
"movi a0, _spill_registers\n\t"
"rsync\n\t"
"callx0 a0\n\t"
"mov a0, a12\n\t"
"wsr a13, sar\n\t"
"wsr a14, ps\n\t"
: : "a" (&a0), "a" (&ps)
#if defined(CONFIG_FRAME_POINTER)
: "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
#else
: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
#endif
"memory");
}
#endif /* _XTENSA_TRAPS_H */
@@ -102,16 +102,7 @@ typedef struct {

#ifndef __ASSEMBLY__

-#define SIG_BLOCK          0	/* for blocking signals */
-#define SIG_UNBLOCK        1	/* for unblocking signals */
-#define SIG_SETMASK        2	/* for setting the signal mask */
-
-/* Type of a signal handler.  */
-typedef void (*__sighandler_t)(int);
-
-#define SIG_DFL	((__sighandler_t)0)	/* default signal handling */
-#define SIG_IGN	((__sighandler_t)1)	/* ignore signal */
-#define SIG_ERR	((__sighandler_t)-1)	/* error return from signal */
+#include <asm-generic/signal-defs.h>

#ifndef __KERNEL__
...
@@ -728,8 +728,13 @@ __SYSCALL(330, sys_prlimit64, 4)
#define __NR_kcmp				331
__SYSCALL(331, sys_kcmp, 5)

+#define __NR_finit_module			332
+__SYSCALL(332, sys_finit_module, 3)
+
+#define __NR_accept4				333
+__SYSCALL(333, sys_accept4, 4)
+
-#define __NR_syscall_count			332
+#define __NR_syscall_count			334

/*
 * sysxtensa syscall handler
...
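Userspace sketch, illustrative only: until libc grows wrappers, the new numbers can be reached through syscall(2). The wrapper names below are hypothetical; the numbers 332 and 333 are the ones added above.

#include <unistd.h>
#include <sys/socket.h>

/* A current glibc provides accept4() and finit_module() directly. */
static int my_accept4(int fd, struct sockaddr *addr, socklen_t *len, int flags)
{
	return syscall(333 /* __NR_accept4 */, fd, addr, len, flags);
}

static int my_finit_module(int fd, const char *args, int flags)
{
	return syscall(332 /* __NR_finit_module */, fd, args, flags);
}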
@@ -42,6 +42,7 @@ int main(void)
	DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
	DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
	DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+	DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
	DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
	DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
	DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
...
@@ -7,7 +7,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2004-2007 by Tensilica Inc.
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
@@ -130,6 +130,11 @@ _user_exception:
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL
#if XCHAL_HAVE_THREADPTR
rur a2, threadptr
s32i a2, a1, PT_THREADPTR
#endif
	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
@@ -349,15 +354,16 @@ common_exception:
	 * so we can allow exceptions and interrupts (*) again.
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 *
-	 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
-	 *     (interrupts disabled) and if this exception is not an interrupt.
+	 * (*) We only allow interrupts of higher priority than current IRQ
	 */

	rsr	a3, ps
	addi	a0, a0, -4
	movi	a2, 1
-	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
-	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
+	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+					# a3 = PS.INTLEVEL
+	movnez	a2, a3, a3		# a2 = 1: level-1, > 1: high priority
+	moveqz	a3, a2, a0		# a3 = IRQ level iff interrupt
	movi	a2, 1 << PS_WOE_BIT
	or	a3, a3, a2
	rsr	a0, exccause
@@ -398,7 +404,7 @@ common_exception:
	callx4	a4

	/* Jump here for exception exit */
+	.global common_exception_return
common_exception_return:

	/* Jump if we are returning from kernel exceptions. */
@@ -509,6 +515,11 @@ user_exception_exit:
	 * (if we have restored WSBITS-1 frames).
	 */
#if XCHAL_HAVE_THREADPTR
l32i a3, a1, PT_THREADPTR
wur a3, threadptr
#endif
2:	j	common_exception_exit

	/* This is the kernel exception exit.
@@ -641,19 +652,51 @@ common_exception_exit:

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
-	l32i	a2, a1, PT_AREG2
-	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+	_bltui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

-	/* Restore a0...a3 and return */
+	wsr	a0, depc
+	l32i	a2, a1, PT_AREG2
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+	rfde

-	l32i	a0, a1, PT_AREG0
-	l32i	a1, a1, PT_AREG1
-	rfe
+1:
+	/* Restore a0...a3 and return */
+
+	rsr	a0, ps
+	extui	a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	movi	a0, 2f
+	slli	a2, a2, 4
+	add	a0, a2, a0
+	l32i	a2, a1, PT_AREG2
+	jx	a0

-1: 	wsr	a0, depc
+	.macro	irq_exit_level level
+	.align	16
+	.if	XCHAL_EXCM_LEVEL >= \level
+	l32i	a0, a1, PT_PC
+	wsr	a0, epc\level
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+	rfi	\level
+	.endif
+	.endm
+
+	.align	16
+2:
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
-	rfde
+	rfe
+
+	.align	16
+	/* no rfi for level-1 irq, handled by rfe above*/
+	nop
+
+	irq_exit_level 2
+	irq_exit_level 3
+	irq_exit_level 4
+	irq_exit_level 5
+	irq_exit_level 6

ENDPROC(kernel_exception)
@@ -753,7 +796,7 @@ ENTRY(unrecoverable_exception)
	wsr	a1, windowbase
	rsync

-	movi	a1, (1 << PS_WOE_BIT) | 1
+	movi	a1, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a1, ps
	rsync
@@ -1474,7 +1517,7 @@ ENTRY(_spill_registers)
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, excsave1

-	movi	a4, (1 << PS_WOE_BIT) | 1
+	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a4, ps
	rsync
@@ -1922,7 +1965,7 @@ ENTRY(_switch_to)
	s32i	a6, a3, EXC_TABLE_FIXUP
	s32i	a7, a3, EXC_TABLE_KSTK

-	/* restore context of the task that 'next' addresses */
+	/* restore context of the task 'next' */

	l32i	a0, a13, THREAD_RA	# restore return address
	l32i	a1, a13, THREAD_SP	# restore stack pointer
...
@@ -7,7 +7,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -128,14 +128,14 @@ ENTRY(_startup)
	wsr	a0, cpenable
#endif

-	/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
+	/* Set PS.INTLEVEL=LOCKLEVEL, PS.WOE=0, kernel stack, PS.EXCM=0
	 *
	 * Note: PS.EXCM must be cleared before using any loop
	 *	 instructions; otherwise, they are silently disabled, and
	 *	 at most one iteration of the loop is executed.
	 */

-	movi	a1, 1
+	movi	a1, LOCKLEVEL
	wsr	a1, ps
	rsync
@@ -211,7 +211,8 @@ ENTRY(_startup)
	movi	a1, init_thread_union
	addi	a1, a1, KERNEL_STACK_SIZE

-	movi	a2, 0x00040001		# WOE=1, INTLEVEL=1, UM=0
+	movi	a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+					# WOE=1, INTLEVEL=LOCKLEVEL, UM=0
	wsr	a2, ps			# (enable reg-windows; progmode stack)
	rsync
...
@@ -259,9 +259,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
			memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
			       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
		}
-		// FIXME: we need to set THREADPTR in thread_info...
+
+		/* The thread pointer is passed in the '4th argument' (= a5) */
		if (clone_flags & CLONE_SETTLS)
-			childregs->areg[2] = childregs->areg[6];
+			childregs->threadptr = childregs->areg[5];
	} else {
		p->thread.ra = MAKE_RA_FOR_CALL(
				(unsigned long)ret_from_kernel_thread, 1);
...
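From the user side, the TLS pointer that ends up in childregs->threadptr is whatever the caller passes as the tls argument of clone(2) with CLONE_SETTLS. A hedged sketch (pthread_create() normally does this for you; the flag set and stack handling here are illustrative only):

#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>

static int thread_fn(void *arg)
{
	return 0;
}

/* Start a thread whose TLS block is tls_block; the kernel copies the
 * pointer into the new thread's threadptr register. */
static int start_thread_with_tls(void *tls_block)
{
	size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);

	if (!stack)
		return -1;
	return clone(thread_fn, stack + stack_size,
		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		     CLONE_THREAD | CLONE_SETTLS, NULL,
		     NULL, tls_block, NULL);
}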
@@ -53,9 +53,8 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
	struct pt_regs *regs = task_pt_regs(child);
	xtensa_gregset_t __user *gregset = uregs;
-	unsigned long wm = regs->wmask;
	unsigned long wb = regs->windowbase;
-	int live, i;
+	int i;

	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
		return -EIO;
@@ -67,13 +66,11 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
	__put_user(regs->lcount, &gregset->lcount);
	__put_user(regs->windowstart, &gregset->windowstart);
	__put_user(regs->windowbase, &gregset->windowbase);
+	__put_user(regs->threadptr, &gregset->threadptr);

-	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
-
-	for (i = 0; i < live; i++)
-		__put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
-
-	for (i = XCHAL_NUM_AREGS - (wm >> 4) * 4; i < XCHAL_NUM_AREGS; i++)
-		__put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
+	for (i = 0; i < XCHAL_NUM_AREGS; i++)
+		__put_user(regs->areg[i],
+			   gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));

	return 0;
}

@@ -84,7 +81,7 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
	xtensa_gregset_t *gregset = uregs;
	const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
	unsigned long ps;
-	unsigned long wb;
+	unsigned long wb, ws;

	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
		return -EIO;
@@ -94,21 +91,33 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
	__get_user(regs->lbeg, &gregset->lbeg);
	__get_user(regs->lend, &gregset->lend);
	__get_user(regs->lcount, &gregset->lcount);
-	__get_user(regs->windowstart, &gregset->windowstart);
+	__get_user(ws, &gregset->windowstart);
	__get_user(wb, &gregset->windowbase);
+	__get_user(regs->threadptr, &gregset->threadptr);

	regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);

	if (wb >= XCHAL_NUM_AREGS / 4)
		return -EFAULT;

-	regs->windowbase = wb;
+	if (wb != regs->windowbase || ws != regs->windowstart) {
+		unsigned long rotws, wmask;
+
+		rotws = (((ws | (ws << WSBITS)) >> wb) &
+			 ((1 << WSBITS) - 1)) & ~1;
+		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+			(rotws & 0xF) | 1;
+		regs->windowbase = wb;
+		regs->windowstart = ws;
+		regs->wmask = wmask;
+	}

	if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
			gregset->a, wb * 16))
		return -EFAULT;

-	if (__copy_from_user(regs->areg, gregset->a + wb*4, (WSBITS-wb) * 16))
+	if (__copy_from_user(regs->areg, gregset->a + wb * 4,
+			(WSBITS - wb) * 16))
		return -EFAULT;

	return 0;
...
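Debugger-side sketch, illustrative only: with the completed window handling, PTRACE_GETREGS round-trips the full rotated register file plus threadptr in the xtensa_gregset_t layout shown in the elf.h hunk above (a real debugger carries its own copy of that layout).

#include <sys/ptrace.h>
#include <sys/types.h>

/* gregs points at a buffer laid out as the xtensa_gregset_t above. */
static long read_tracee_regs(pid_t pid, void *gregs)
{
	/* After this series, the a[] array holds all XCHAL_NUM_AREGS
	 * registers (rotated by windowbase) and threadptr is filled in. */
	return ptrace(PTRACE_GETREGS, pid, 0, gregs);
}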
@@ -328,6 +328,27 @@ extern char _UserExceptionVector_literal_start;
extern char _UserExceptionVector_text_end;
extern char _DoubleExceptionVector_literal_start;
extern char _DoubleExceptionVector_text_end;
#if XCHAL_EXCM_LEVEL >= 2
extern char _Level2InterruptVector_text_start;
extern char _Level2InterruptVector_text_end;
#endif
#if XCHAL_EXCM_LEVEL >= 3
extern char _Level3InterruptVector_text_start;
extern char _Level3InterruptVector_text_end;
#endif
#if XCHAL_EXCM_LEVEL >= 4
extern char _Level4InterruptVector_text_start;
extern char _Level4InterruptVector_text_end;
#endif
#if XCHAL_EXCM_LEVEL >= 5
extern char _Level5InterruptVector_text_start;
extern char _Level5InterruptVector_text_end;
#endif
#if XCHAL_EXCM_LEVEL >= 6
extern char _Level6InterruptVector_text_start;
extern char _Level6InterruptVector_text_end;
#endif
#ifdef CONFIG_S32C1I_SELFTEST
@@ -482,6 +503,27 @@ void __init setup_arch(char **cmdline_p)
	mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
		    __pa(&_DoubleExceptionVector_text_end), 0);
#if XCHAL_EXCM_LEVEL >= 2
mem_reserve(__pa(&_Level2InterruptVector_text_start),
__pa(&_Level2InterruptVector_text_end), 0);
#endif
#if XCHAL_EXCM_LEVEL >= 3
mem_reserve(__pa(&_Level3InterruptVector_text_start),
__pa(&_Level3InterruptVector_text_end), 0);
#endif
#if XCHAL_EXCM_LEVEL >= 4
mem_reserve(__pa(&_Level4InterruptVector_text_start),
__pa(&_Level4InterruptVector_text_end), 0);
#endif
#if XCHAL_EXCM_LEVEL >= 5
mem_reserve(__pa(&_Level5InterruptVector_text_start),
__pa(&_Level5InterruptVector_text_end), 0);
#endif
#if XCHAL_EXCM_LEVEL >= 6
mem_reserve(__pa(&_Level6InterruptVector_text_start),
__pa(&_Level6InterruptVector_text_end), 0);
#endif
	bootmem_init();

#ifdef CONFIG_OF
...
@@ -337,7 +337,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
	struct rt_sigframe *frame;
	int err = 0;
	int signal;
-	unsigned long sp, ra;
+	unsigned long sp, ra, tp;

	sp = regs->areg[1];
@@ -391,7 +391,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
	 * Return context not modified until this point.
	 */

-	/* Set up registers for signal handler */
+	/* Set up registers for signal handler; preserve the threadptr */
+	tp = regs->threadptr;
	start_thread(regs, (unsigned long) ka->sa.sa_handler,
		     (unsigned long) frame);
@@ -402,6 +403,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
	regs->areg[6] = (unsigned long) signal;
	regs->areg[7] = (unsigned long) &frame->info;
	regs->areg[8] = (unsigned long) &frame->uc;
+	regs->threadptr = tp;

	/* Set access mode to USER_DS.  Nomenclature is outdated, but
	 * functionality is used in uaccess.h
...
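The user-visible effect, shown as a minimal illustrative program: because setup_frame now saves and restores threadptr around start_thread(), __thread variables stay usable inside signal handlers on xtensa.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static __thread volatile int tls_hits;	/* accessed through threadptr */

static void handler(int sig)
{
	tls_hits++;
}

int main(void)
{
	signal(SIGALRM, handler);
	alarm(1);
	pause();
	printf("tls_hits = %d\n", tls_hits);
	return 0;
}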
@@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
#include <uapi/asm/unistd.h>
};

+#define COLOUR_ALIGN(addr, pgoff) \
+	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
@@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
{
	return sys_fadvise64_64(fd, offset, len, advice);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct vm_area_struct *vmm;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
if (len > TASK_SIZE)
return -ENOMEM;
if (!addr)
addr = TASK_UNMAPPED_BASE;
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start)
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr, pgoff);
}
}
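A standalone sketch of the colouring rule used above (the SHMLBA and PAGE_SHIFT values are assumptions made for the example): shared mappings are placed so that the virtual address and the file offset land on the same SHMLBA colour, which is what avoids the mmap cache aliasing mentioned in the commit message.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption for the example */
#define SHMLBA		(4UL << PAGE_SHIFT)	/* assumption for the example */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + SHMLBA - 1) & ~(SHMLBA - 1)) +
	       ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}

int main(void)
{
	unsigned long a = colour_align(0x20000000, 0);
	unsigned long b = colour_align(0x20000000, 3);

	/* (addr - (pgoff << PAGE_SHIFT)) is a multiple of SHMLBA in both cases */
	printf("0x%lx 0x%lx\n", a, b);
	return 0;
}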
@@ -37,6 +37,7 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
+#include <asm/traps.h>

#ifdef CONFIG_KGDB
extern int gdb_enter;
@@ -193,28 +194,49 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
}

/*
- * Level-1 interrupt.
- * We currently have no priority encoding.
+ * IRQ handler.
+ * PS.INTLEVEL is the current IRQ priority level.
 */

-unsigned long ignored_level1_interrupts;
extern void do_IRQ(int, struct pt_regs *);

-void do_interrupt (struct pt_regs *regs)
+void do_interrupt(struct pt_regs *regs)
{
-	unsigned long intread = get_sr (interrupt);
-	unsigned long intenable = get_sr (intenable);
-	int i, mask;
+	static const unsigned int_level_mask[] = {
+		0,
+		XCHAL_INTLEVEL1_MASK,
+		XCHAL_INTLEVEL2_MASK,
+		XCHAL_INTLEVEL3_MASK,
+		XCHAL_INTLEVEL4_MASK,
+		XCHAL_INTLEVEL5_MASK,
+		XCHAL_INTLEVEL6_MASK,
+		XCHAL_INTLEVEL7_MASK,
+	};
+	unsigned level = get_sr(ps) & PS_INTLEVEL_MASK;
+
+	if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask)))
+		return;

-	/* Handle all interrupts (no priorities).
-	 * (Clear the interrupt before processing, in case it's
-	 *  edge-triggered or software-generated)
-	 */
+	for (;;) {
+		unsigned intread = get_sr(interrupt);
+		unsigned intenable = get_sr(intenable);
+		unsigned int_at_level = intread & intenable &
+			int_level_mask[level];
+
+		if (!int_at_level)
+			return;

-	for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
-		if (mask & (intread & intenable)) {
-			set_sr (mask, intclear);
-			do_IRQ (i,regs);
+		/*
+		 * Clear the interrupt before processing, in case it's
+		 * edge-triggered or software-generated
+		 */
+		while (int_at_level) {
+			unsigned i = __ffs(int_at_level);
+			unsigned mask = 1 << i;
+
+			int_at_level ^= mask;
+			set_sr(mask, intclear);
+			do_IRQ(i, regs);
		}
	}
}
@@ -392,26 +414,6 @@ static __always_inline unsigned long *stack_pointer(struct task_struct *task)
	return sp;
}
static inline void spill_registers(void)
{
unsigned int a0, ps;
__asm__ __volatile__ (
"movi a14, " __stringify(PS_EXCM_BIT | 1) "\n\t"
"mov a12, a0\n\t"
"rsr a13, sar\n\t"
"xsr a14, ps\n\t"
"movi a0, _spill_registers\n\t"
"rsync\n\t"
"callx0 a0\n\t"
"mov a0, a12\n\t"
"wsr a13, sar\n\t"
"wsr a14, ps\n\t"
:: "a" (&a0), "a" (&ps)
: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
"memory");
}
void show_trace(struct task_struct *task, unsigned long *sp)
{
	unsigned long a0, a1, pc;
...
@@ -10,7 +10,7 @@
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
- * Copyright (C) 2005 Tensilica, Inc.
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
@@ -366,6 +366,41 @@ ENTRY(_DebugInterruptVector)

ENDPROC(_DebugInterruptVector)
/*
* Medium priority level interrupt vectors
*
* Each takes less than 16 (0x10) bytes, no literals, by placing
* the extra 8 bytes that would otherwise be required in the window
* vectors area where there is space. With relocatable vectors,
* all vectors are within ~ 4 kB range of each other, so we can
* simply jump (J) to another vector without having to use JX.
*
* common_exception code gets current IRQ level in PS.INTLEVEL
* and preserves it for the IRQ handling time.
*/
.macro irq_entry_level level
.if XCHAL_EXCM_LEVEL >= \level
.section .Level\level\()InterruptVector.text, "ax"
ENTRY(_Level\level\()InterruptVector)
wsr a0, epc1
rsr a0, epc\level
xsr a0, epc1
# branch to user or kernel vector
j _SimulateUserKernelVectorException
.endif
.endm
irq_entry_level 2
irq_entry_level 3
irq_entry_level 4
irq_entry_level 5
irq_entry_level 6
/* Window overflow and underflow handlers.
 * The handlers must be 64 bytes apart, first starting with the underflow
 * handlers underflow-4 to underflow-12, then the overflow handlers
@@ -396,6 +431,26 @@ ENTRY_ALIGN64(_WindowOverflow4)

ENDPROC(_WindowOverflow4)
#if XCHAL_EXCM_LEVEL >= 2
/* Not a window vector - but a convenient location
* (where we know there's space) for continuation of
* medium priority interrupt dispatch code.
* On entry here, a0 contains PS, and EPC2 contains saved a0:
*/
.align 4
_SimulateUserKernelVectorException:
wsr a0, excsave2
movi a0, 4 # LEVEL1_INTERRUPT cause
wsr a0, exccause
rsr a0, ps
bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
rsr a0, excsave2 # restore a0
j _KernelExceptionVector # simulate kernel vector exception
1: rsr a0, excsave2 # restore a0
j _UserExceptionVector # simulate user vector exception
#endif
/* 4-Register Window Underflow Vector (Handler) */

ENTRY_ALIGN64(_WindowUnderflow4)
...
@@ -7,7 +7,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -134,6 +134,26 @@ SECTIONS
	RELOCATE_ENTRY(_WindowVectors_text,
			.WindowVectors.text);
#if XCHAL_EXCM_LEVEL >= 2
RELOCATE_ENTRY(_Level2InterruptVector_text,
.Level2InterruptVector.text);
#endif
#if XCHAL_EXCM_LEVEL >= 3
RELOCATE_ENTRY(_Level3InterruptVector_text,
.Level3InterruptVector.text);
#endif
#if XCHAL_EXCM_LEVEL >= 4
RELOCATE_ENTRY(_Level4InterruptVector_text,
.Level4InterruptVector.text);
#endif
#if XCHAL_EXCM_LEVEL >= 5
RELOCATE_ENTRY(_Level5InterruptVector_text,
.Level5InterruptVector.text);
#endif
#if XCHAL_EXCM_LEVEL >= 6
RELOCATE_ENTRY(_Level6InterruptVector_text,
.Level6InterruptVector.text);
#endif
	RELOCATE_ENTRY(_KernelExceptionVector_text,
			.KernelExceptionVector.text);
	RELOCATE_ENTRY(_UserExceptionVector_text,
@@ -177,11 +197,53 @@ SECTIONS
		  XCHAL_DEBUG_VECTOR_VADDR,
		  4,
		  .DebugInterruptVector.literal)
#undef LAST
#define LAST .DebugInterruptVector.text
#if XCHAL_EXCM_LEVEL >= 2
SECTION_VECTOR (_Level2InterruptVector_text,
.Level2InterruptVector.text,
XCHAL_INTLEVEL2_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level2InterruptVector.text
#endif
#if XCHAL_EXCM_LEVEL >= 3
SECTION_VECTOR (_Level3InterruptVector_text,
.Level3InterruptVector.text,
XCHAL_INTLEVEL3_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level3InterruptVector.text
#endif
#if XCHAL_EXCM_LEVEL >= 4
SECTION_VECTOR (_Level4InterruptVector_text,
.Level4InterruptVector.text,
XCHAL_INTLEVEL4_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level4InterruptVector.text
#endif
#if XCHAL_EXCM_LEVEL >= 5
SECTION_VECTOR (_Level5InterruptVector_text,
.Level5InterruptVector.text,
XCHAL_INTLEVEL5_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level5InterruptVector.text
#endif
#if XCHAL_EXCM_LEVEL >= 6
SECTION_VECTOR (_Level6InterruptVector_text,
.Level6InterruptVector.text,
XCHAL_INTLEVEL6_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level6InterruptVector.text
#endif
  SECTION_VECTOR (_KernelExceptionVector_literal,
		  .KernelExceptionVector.literal,
		  XCHAL_KERNEL_VECTOR_VADDR - 4,
-		  SIZEOF(.DebugInterruptVector.text),
-		  .DebugInterruptVector.text)
+		  SIZEOF(LAST), LAST)
+#undef LAST

  SECTION_VECTOR (_KernelExceptionVector_text,
		  .KernelExceptionVector.text,
		  XCHAL_KERNEL_VECTOR_VADDR,
...
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
oprofilefs.o oprofile_stats.o \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
/**
* @file backtrace.c
*
* @remark Copyright 2008 Tensilica Inc.
* @remark Read the file COPYING
*
*/
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/traps.h>
/* Address of common_exception_return, used to check the
* transition from kernel to user space.
*/
extern int common_exception_return;
/* A struct that maps to the part of the frame containing the a0 and
* a1 registers.
*/
struct frame_start {
unsigned long a0;
unsigned long a1;
};
static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth)
{
unsigned long windowstart = regs->windowstart;
unsigned long windowbase = regs->windowbase;
unsigned long a0 = regs->areg[0];
unsigned long a1 = regs->areg[1];
unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc);
int index;
/* First add the current PC to the trace. */
if (pc != 0 && pc <= TASK_SIZE)
oprofile_add_trace(pc);
else
return;
/* Two steps:
*
* 1. Look through the register window for the
* previous PCs in the call trace.
*
* 2. Look on the stack.
*/
/* Step 1. */
/* Rotate WINDOWSTART to move the bit corresponding to
* the current window to the bit #0.
*/
windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
/* Look for bits that are set, they correspond to
* valid windows.
*/
for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
if (windowstart & (1 << index)) {
/* Read a0 and a1 from the
* corresponding position in AREGs.
*/
a0 = regs->areg[index * 4];
a1 = regs->areg[index * 4 + 1];
/* Get the PC from a0 and a1. */
pc = MAKE_PC_FROM_RA(a0, pc);
/* Add the PC to the trace. */
if (pc != 0 && pc <= TASK_SIZE)
oprofile_add_trace(pc);
else
return;
}
/* Step 2. */
/* We are done with the register window, we need to
* look through the stack.
*/
if (depth > 0) {
/* Start from the a1 register. */
/* a1 = regs->areg[1]; */
while (a0 != 0 && depth--) {
struct frame_start frame_start;
/* Get the location for a1, a0 for the
* previous frame from the current a1.
*/
unsigned long *psp = (unsigned long *)a1;
psp -= 4;
/* Check if the region is OK to access. */
if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
return;
/* Copy a1, a0 from user space stack frame. */
if (__copy_from_user_inatomic(&frame_start, psp,
sizeof(frame_start)))
return;
a0 = frame_start.a0;
a1 = frame_start.a1;
pc = MAKE_PC_FROM_RA(a0, pc);
if (pc != 0 && pc <= TASK_SIZE)
oprofile_add_trace(pc);
else
return;
}
}
}
static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth)
{
unsigned long pc = regs->pc;
unsigned long *psp;
unsigned long sp_start, sp_end;
unsigned long a0 = regs->areg[0];
unsigned long a1 = regs->areg[1];
sp_start = a1 & ~(THREAD_SIZE-1);
sp_end = sp_start + THREAD_SIZE;
/* Spill the register window to the stack first. */
spill_registers();
/* Read the stack frames one by one and create the PC
* from the a0 and a1 registers saved there.
*/
while (a1 > sp_start && a1 < sp_end && depth--) {
pc = MAKE_PC_FROM_RA(a0, pc);
/* Add the PC to the trace. */
if (kernel_text_address(pc))
oprofile_add_trace(pc);
if (pc == (unsigned long) &common_exception_return) {
regs = (struct pt_regs *)a1;
if (user_mode(regs)) {
pc = regs->pc;
if (pc != 0 && pc <= TASK_SIZE)
oprofile_add_trace(pc);
else
return;
return xtensa_backtrace_user(regs, depth);
}
a0 = regs->areg[0];
a1 = regs->areg[1];
continue;
}
psp = (unsigned long *)a1;
a0 = *(psp - 4);
a1 = *(psp - 3);
if (a1 <= (unsigned long)psp)
return;
}
return;
}
void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
{
if (user_mode(regs))
xtensa_backtrace_user(regs, depth);
else
xtensa_backtrace_kernel(regs, depth);
}
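The backtracer leans on the windowed-ABI encoding of return addresses. A sketch of what MAKE_PC_FROM_RA is assumed to do here (the constants come from the windowed ABI and are shown only for illustration): the two top bits of a saved a0 hold the caller's window increment, so the PC is rebuilt from the low 30 bits of a0 and the high bits of a nearby known address.

/* Illustration only; the kernel's MAKE_PC_FROM_RA(a0, pc) plays this role. */
static inline unsigned long pc_from_ra(unsigned long ra, unsigned long anchor)
{
	return (ra & 0x3fffffffUL) | (anchor & 0xc0000000UL);
}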
/**
* @file init.c
*
* @remark Copyright 2008 Tensilica Inc.
* @remark Read the file COPYING
*
*/
#include <linux/kernel.h>
#include <linux/oprofile.h>
#include <linux/errno.h>
#include <linux/init.h>
extern void xtensa_backtrace(struct pt_regs *const regs, unsigned int depth);
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = xtensa_backtrace;
return -ENODEV;
}
void oprofile_arch_exit(void)
{
}
@@ -6,3 +6,4 @@
obj-y			= console.o setup.o

obj-$(CONFIG_NET)	+= network.o
+obj-$(CONFIG_BLK_DEV_SIMDISK)	+= simdisk.o
/*
* arch/xtensa/platforms/iss/simdisk.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2013 Tensilica Inc.
* Authors Victor Prupis
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <platform/simcall.h>
#define SIMDISK_MAJOR 240
#define SECTOR_SHIFT 9
#define SIMDISK_MINORS 1
#define MAX_SIMDISK_COUNT 10
struct simdisk {
const char *filename;
spinlock_t lock;
struct request_queue *queue;
struct gendisk *gd;
struct proc_dir_entry *procfile;
int users;
unsigned long size;
int fd;
};
static int simdisk_count = CONFIG_BLK_DEV_SIMDISK_COUNT;
module_param(simdisk_count, int, S_IRUGO);
MODULE_PARM_DESC(simdisk_count, "Number of simdisk units.");
static int n_files;
static const char *filename[MAX_SIMDISK_COUNT] = {
#ifdef CONFIG_SIMDISK0_FILENAME
CONFIG_SIMDISK0_FILENAME,
#ifdef CONFIG_SIMDISK1_FILENAME
CONFIG_SIMDISK1_FILENAME,
#endif
#endif
};
static int simdisk_param_set_filename(const char *val,
const struct kernel_param *kp)
{
if (n_files < ARRAY_SIZE(filename))
filename[n_files++] = val;
else
return -EINVAL;
return 0;
}
static const struct kernel_param_ops simdisk_param_ops_filename = {
.set = simdisk_param_set_filename,
};
module_param_cb(filename, &simdisk_param_ops_filename, &n_files, 0);
MODULE_PARM_DESC(filename, "Backing storage filename.");
static int simdisk_major = SIMDISK_MAJOR;
static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
unsigned long nsect, char *buffer, int write)
{
unsigned long offset = sector << SECTOR_SHIFT;
unsigned long nbytes = nsect << SECTOR_SHIFT;
if (offset > dev->size || dev->size - offset < nbytes) {
pr_notice("Beyond-end %s (%ld %ld)\n",
write ? "write" : "read", offset, nbytes);
return;
}
spin_lock(&dev->lock);
while (nbytes > 0) {
unsigned long io;
__simc(SYS_lseek, dev->fd, offset, SEEK_SET, 0, 0);
if (write)
io = simc_write(dev->fd, buffer, nbytes);
else
io = simc_read(dev->fd, buffer, nbytes);
if (io == -1) {
pr_err("SIMDISK: IO error %d\n", errno);
break;
}
buffer += io;
offset += io;
nbytes -= io;
}
spin_unlock(&dev->lock);
}
static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
{
int i;
struct bio_vec *bvec;
sector_t sector = bio->bi_sector;
bio_for_each_segment(bvec, bio, i) {
char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
unsigned len = bvec->bv_len >> SECTOR_SHIFT;
simdisk_transfer(dev, sector, len, buffer,
bio_data_dir(bio) == WRITE);
sector += len;
__bio_kunmap_atomic(bio, KM_USER0);
}
return 0;
}
static void simdisk_make_request(struct request_queue *q, struct bio *bio)
{
struct simdisk *dev = q->queuedata;
int status = simdisk_xfer_bio(dev, bio);
bio_endio(bio, status);
}
static int simdisk_open(struct block_device *bdev, fmode_t mode)
{
struct simdisk *dev = bdev->bd_disk->private_data;
spin_lock(&dev->lock);
if (!dev->users)
check_disk_change(bdev);
++dev->users;
spin_unlock(&dev->lock);
return 0;
}
static int simdisk_release(struct gendisk *disk, fmode_t mode)
{
struct simdisk *dev = disk->private_data;
spin_lock(&dev->lock);
--dev->users;
spin_unlock(&dev->lock);
return 0;
}
static const struct block_device_operations simdisk_ops = {
.owner = THIS_MODULE,
.open = simdisk_open,
.release = simdisk_release,
};
static struct simdisk *sddev;
static struct proc_dir_entry *simdisk_procdir;
static int simdisk_attach(struct simdisk *dev, const char *filename)
{
int err = 0;
filename = kstrdup(filename, GFP_KERNEL);
if (filename == NULL)
return -ENOMEM;
spin_lock(&dev->lock);
if (dev->fd != -1) {
err = -EBUSY;
goto out;
}
dev->fd = simc_open(filename, O_RDWR, 0);
if (dev->fd == -1) {
pr_err("SIMDISK: Can't open %s: %d\n", filename, errno);
err = -ENODEV;
goto out;
}
dev->size = __simc(SYS_lseek, dev->fd, 0, SEEK_END, 0, 0);
set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
dev->filename = filename;
pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);
out:
if (err)
kfree(filename);
spin_unlock(&dev->lock);
return err;
}
static int simdisk_detach(struct simdisk *dev)
{
int err = 0;
spin_lock(&dev->lock);
if (dev->users != 0) {
err = -EBUSY;
} else if (dev->fd != -1) {
if (simc_close(dev->fd)) {
pr_err("SIMDISK: error closing %s: %d\n",
dev->filename, errno);
err = -EIO;
} else {
pr_info("SIMDISK: %s detached from %s\n",
dev->gd->disk_name, dev->filename);
dev->fd = -1;
kfree(dev->filename);
dev->filename = NULL;
}
}
spin_unlock(&dev->lock);
return err;
}
static int proc_read_simdisk(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
int len;
struct simdisk *dev = (struct simdisk *) data;
len = sprintf(page, "%s\n", dev->filename ? dev->filename : "");
return len;
}
static int proc_write_simdisk(struct file *file, const char *buffer,
unsigned long count, void *data)
{
char *tmp = kmalloc(count + 1, GFP_KERNEL);
struct simdisk *dev = (struct simdisk *) data;
int err;
if (tmp == NULL)
return -ENOMEM;
if (copy_from_user(tmp, buffer, count)) {
err = -EFAULT;
goto out_free;
}
err = simdisk_detach(dev);
if (err != 0)
goto out_free;
if (count > 0 && tmp[count - 1] == '\n')
tmp[count - 1] = 0;
else
tmp[count] = 0;
if (tmp[0])
err = simdisk_attach(dev, tmp);
if (err == 0)
err = count;
out_free:
kfree(tmp);
return err;
}
static int __init simdisk_setup(struct simdisk *dev, int which,
struct proc_dir_entry *procdir)
{
char tmp[2] = { '0' + which, 0 };
dev->fd = -1;
dev->filename = NULL;
spin_lock_init(&dev->lock);
dev->users = 0;
dev->queue = blk_alloc_queue(GFP_KERNEL);
if (dev->queue == NULL) {
pr_err("blk_alloc_queue failed\n");
goto out_alloc_queue;
}
blk_queue_make_request(dev->queue, simdisk_make_request);
dev->queue->queuedata = dev;
dev->gd = alloc_disk(SIMDISK_MINORS);
if (dev->gd == NULL) {
pr_err("alloc_disk failed\n");
goto out_alloc_disk;
}
dev->gd->major = simdisk_major;
dev->gd->first_minor = which;
dev->gd->fops = &simdisk_ops;
dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
set_capacity(dev->gd, 0);
add_disk(dev->gd);
dev->procfile = create_proc_entry(tmp, 0644, procdir);
dev->procfile->data = dev;
dev->procfile->read_proc = proc_read_simdisk;
dev->procfile->write_proc = proc_write_simdisk;
return 0;
out_alloc_disk:
blk_cleanup_queue(dev->queue);
dev->queue = NULL;
out_alloc_queue:
simc_close(dev->fd);
return -EIO;
}
static int __init simdisk_init(void)
{
int i;
if (register_blkdev(simdisk_major, "simdisk") < 0) {
pr_err("SIMDISK: register_blkdev: %d\n", simdisk_major);
return -EIO;
}
pr_info("SIMDISK: major: %d\n", simdisk_major);
if (n_files > simdisk_count)
simdisk_count = n_files;
if (simdisk_count > MAX_SIMDISK_COUNT)
simdisk_count = MAX_SIMDISK_COUNT;
sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
GFP_KERNEL);
if (sddev == NULL)
goto out_unregister;
simdisk_procdir = proc_mkdir("simdisk", 0);
if (simdisk_procdir == NULL)
goto out_free_unregister;
for (i = 0; i < simdisk_count; ++i) {
if (simdisk_setup(sddev + i, i, simdisk_procdir) == 0) {
if (filename[i] != NULL && filename[i][0] != 0 &&
(n_files == 0 || i < n_files))
simdisk_attach(sddev + i, filename[i]);
}
}
return 0;
out_free_unregister:
kfree(sddev);
out_unregister:
unregister_blkdev(simdisk_major, "simdisk");
return -ENOMEM;
}
module_init(simdisk_init);
static void simdisk_teardown(struct simdisk *dev, int which,
struct proc_dir_entry *procdir)
{
char tmp[2] = { '0' + which, 0 };
simdisk_detach(dev);
if (dev->gd)
del_gendisk(dev->gd);
if (dev->queue)
blk_cleanup_queue(dev->queue);
remove_proc_entry(tmp, procdir);
}
static void __exit simdisk_exit(void)
{
int i;
for (i = 0; i < simdisk_count; ++i)
simdisk_teardown(sddev + i, i, simdisk_procdir);
remove_proc_entry("simdisk", 0);
kfree(sddev);
unregister_blkdev(simdisk_major, "simdisk");
}
module_exit(simdisk_exit);
MODULE_ALIAS_BLOCKDEV_MAJOR(SIMDISK_MAJOR);
MODULE_LICENSE("GPL");
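Host-side usage sketch (the image path is illustrative): once the driver is loaded, a simulated disk can be re-attached to a different host file through the /proc/simdisk/<n> entries created in simdisk_setup(), provided it has no users, as the Kconfig help above describes.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/simdisk/0", "w");	/* first simulated disk */

	if (!f)
		return 1;
	/* Writing a filename detaches the old backing file (if unused)
	 * and attaches the new one; see proc_write_simdisk() above. */
	fputs("/tmp/rootfs.img\n", f);			/* assumed host image */
	return fclose(f) ? 1 : 0;
}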
@@ -100,7 +100,7 @@ static void __init update_clock_frequency(struct device_node *node)
	}
	*(u32 *)newfreq->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR);

-	prom_update_property(node, newfreq);
+	of_update_property(node, newfreq);
}

#define MAC_LEN 6
@@ -128,7 +128,7 @@ static void __init update_local_mac(struct device_node *node)
	memcpy(newmac->value, macaddr, MAC_LEN);
	((u8*)newmac->value)[5] = (*(u32*)DIP_SWITCHES_VADDR) & 0x3f;

-	prom_update_property(node, newmac);
+	of_update_property(node, newmac);
}

static int __init machine_setup(void)
...
/*
* xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
* processor CORE configuration
*
* See <xtensa/config/core.h>, which includes this file, for more details.
*/
/* Xtensa processor core configuration information.
Copyright (c) 1999-2010 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _XTENSA_CORE_CONFIGURATION_H
#define _XTENSA_CORE_CONFIGURATION_H
/****************************************************************************
Parameters Useful for Any Code, USER or PRIVILEGED
****************************************************************************/
/*
* Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
* configured, and a value of 0 otherwise. These macros are always defined.
*/
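/*
 * Illustrative only (not part of the generated header): since every
 * XCHAL_HAVE_*** macro is always defined as 0 or 1, code typically tests
 * them at compile time rather than with #ifdef, e.g.:
 *
 *	#if XCHAL_HAVE_S32C1I
 *		(use S32C1I-based compare-and-swap)
 *	#else
 *		(fall back to a non-atomic or interrupt-disable sequence)
 *	#endif
 */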
/*----------------------------------------------------------------------
ISA
----------------------------------------------------------------------*/
#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
#define XCHAL_HAVE_DEBUG 1 /* debug option */
#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
#define XCHAL_HAVE_L32R 1 /* L32R instruction */
#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
#define XCHAL_HAVE_ABS 1 /* ABS instruction */
/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
#define XCHAL_HAVE_SPECULATION 0 /* speculation */
#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
#define XCHAL_NUM_CONTEXTS 1 /* */
#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
#define XCHAL_HAVE_PRID 1 /* processor ID register */
#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
#define XCHAL_HAVE_FP 0 /* floating point pkg */
#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
/*----------------------------------------------------------------------
MISC
----------------------------------------------------------------------*/
#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
/* In T1050, applies to selected core load and store instructions (see ISA): */
#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
#define XCHAL_SW_VERSION 900001 /* sw version of this header */
#define XCHAL_CORE_ID "dc233c" /* alphanum core name
(CoreID) set in the Xtensa
Processor Generator */
#define XCHAL_CORE_DESCRIPTION "dc233c"
#define XCHAL_BUILD_UNIQUE_ID 0x00004B21 /* 22-bit sw build ID */
/*
* These definitions describe the hardware targeted by this software.
*/
#define XCHAL_HW_CONFIGID0 0xC56707FE /* ConfigID hi 32 bits*/
#define XCHAL_HW_CONFIGID1 0x14404B21 /* ConfigID lo 32 bits*/
#define XCHAL_HW_VERSION_NAME "LX4.0.1" /* full version name */
#define XCHAL_HW_VERSION_MAJOR 2400 /* major ver# of targeted hw */
#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
#define XCHAL_HW_VERSION 240001 /* major*100+minor */
#define XCHAL_HW_REL_LX4 1
#define XCHAL_HW_REL_LX4_0 1
#define XCHAL_HW_REL_LX4_0_1 1
#define XCHAL_HW_CONFIGID_RELIABLE 1
/* If software targets a *range* of hardware versions, these are the bounds: */
#define XCHAL_HW_MIN_VERSION_MAJOR 2400 /* major v of earliest tgt hw */
#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
#define XCHAL_HW_MIN_VERSION 240001 /* earliest targeted hw */
#define XCHAL_HW_MAX_VERSION_MAJOR 2400 /* major v of latest tgt hw */
#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
#define XCHAL_HW_MAX_VERSION 240001 /* latest targeted hw */
/*----------------------------------------------------------------------
CACHE
----------------------------------------------------------------------*/
#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
/****************************************************************************
Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
****************************************************************************/
#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
/*----------------------------------------------------------------------
CACHE
----------------------------------------------------------------------*/
#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
/* Number of cache sets in log2(lines per way): */
#define XCHAL_ICACHE_SETWIDTH 7
#define XCHAL_DCACHE_SETWIDTH 7
/* Cache set associativity (number of ways): */
#define XCHAL_ICACHE_WAYS 4
#define XCHAL_DCACHE_WAYS 4
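/*
 * Illustrative check (not part of the generated header): with the values
 * above, cache size = ways * 2^(linewidth + setwidth) = 4 * 2^(5 + 7)
 * = 16384 bytes, matching XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE.
 */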
/* Cache features: */
#define XCHAL_ICACHE_LINE_LOCKABLE 1
#define XCHAL_DCACHE_LINE_LOCKABLE 1
#define XCHAL_ICACHE_ECC_PARITY 0
#define XCHAL_DCACHE_ECC_PARITY 0
/* Cache access size in bytes (affects operation of SICW instruction): */
#define XCHAL_ICACHE_ACCESS_SIZE 4
#define XCHAL_DCACHE_ACCESS_SIZE 4
/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
#define XCHAL_CA_BITS 4
/*----------------------------------------------------------------------
INTERNAL I/D RAM/ROMs and XLMI
----------------------------------------------------------------------*/
#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
/*----------------------------------------------------------------------
INTERRUPTS and TIMERS
----------------------------------------------------------------------*/
#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
(not including level zero) */
#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
/* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
/* Masks of interrupts at each interrupt level: */
#define XCHAL_INTLEVEL1_MASK 0x001F80FF
#define XCHAL_INTLEVEL2_MASK 0x00000100
#define XCHAL_INTLEVEL3_MASK 0x00200E00
#define XCHAL_INTLEVEL4_MASK 0x00001000
#define XCHAL_INTLEVEL5_MASK 0x00002000
#define XCHAL_INTLEVEL6_MASK 0x00000000
#define XCHAL_INTLEVEL7_MASK 0x00004000
/* Masks of interrupts at each range 1..n of interrupt levels: */
#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
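/*
 * Illustrative check (not part of the generated header): each ANDBELOW mask
 * is the OR of the per-level masks up to that level, e.g.
 *	XCHAL_INTLEVEL2_ANDBELOW_MASK == 0x001F80FF | 0x00000100 == 0x001F81FF
 */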
/* Level of each interrupt: */
#define XCHAL_INT0_LEVEL 1
#define XCHAL_INT1_LEVEL 1
#define XCHAL_INT2_LEVEL 1
#define XCHAL_INT3_LEVEL 1
#define XCHAL_INT4_LEVEL 1
#define XCHAL_INT5_LEVEL 1
#define XCHAL_INT6_LEVEL 1
#define XCHAL_INT7_LEVEL 1
#define XCHAL_INT8_LEVEL 2
#define XCHAL_INT9_LEVEL 3
#define XCHAL_INT10_LEVEL 3
#define XCHAL_INT11_LEVEL 3
#define XCHAL_INT12_LEVEL 4
#define XCHAL_INT13_LEVEL 5
#define XCHAL_INT14_LEVEL 7
#define XCHAL_INT15_LEVEL 1
#define XCHAL_INT16_LEVEL 1
#define XCHAL_INT17_LEVEL 1
#define XCHAL_INT18_LEVEL 1
#define XCHAL_INT19_LEVEL 1
#define XCHAL_INT20_LEVEL 1
#define XCHAL_INT21_LEVEL 3
#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
EXCSAVE/EPS/EPC_n, RFI n) */
/* Type of each interrupt: */
#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
/* Masks of interrupts for each type of interrupt: */
#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
#define XCHAL_INTTYPE_MASK_NMI 0x00004000
#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
/* Interrupt numbers assigned to specific interrupt sources: */
#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
/* Interrupt numbers for levels at which only one interrupt is configured: */
#define XCHAL_INTLEVEL2_NUM 8
#define XCHAL_INTLEVEL4_NUM 12
#define XCHAL_INTLEVEL5_NUM 13
#define XCHAL_INTLEVEL7_NUM 14
/* (There are many interrupts each at level(s) 1, 3.) */
/*
* External interrupt vectors/levels.
* These macros describe how Xtensa processor interrupt numbers
* (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
* map to external BInterrupt<n> pins, for those interrupts
* configured as external (level-triggered, edge-triggered, or NMI).
* See the Xtensa processor databook for more details.
*/
/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
/*----------------------------------------------------------------------
EXCEPTIONS and VECTORS
----------------------------------------------------------------------*/
#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
number: 1 == XEA1 (old)
2 == XEA2 (new)
0 == XEAX (extern) or TX */
#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
#define XCHAL_HAVE_HALT 0 /* halt architecture option */
#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
#define XCHAL_VECBASE_RESET_PADDR 0x00002000
#define XCHAL_RESET_VECBASE_OVERLAP 0
#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
#define XCHAL_USER_VECOFS 0x00000340
#define XCHAL_USER_VECTOR_VADDR 0x00002340
#define XCHAL_USER_VECTOR_PADDR 0x00002340
#define XCHAL_KERNEL_VECOFS 0x00000300
#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
#define XCHAL_INTLEVEL2_VECOFS 0x00000180
#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
#define XCHAL_INTLEVEL4_VECOFS 0x00000200
#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
#define XCHAL_INTLEVEL5_VECOFS 0x00000240
#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
#define XCHAL_INTLEVEL6_VECOFS 0x00000280
#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
#define XCHAL_NMI_VECOFS 0x000002C0
#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
/*----------------------------------------------------------------------
DEBUG
----------------------------------------------------------------------*/
#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
/*----------------------------------------------------------------------
MMU
----------------------------------------------------------------------*/
/* See core-matmap.h header file for more details. */
#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
[autorefill] and protection)
usable for an MMU-based OS */
/* If none of the above last 4 are set, it's a custom TLB configuration. */
#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
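/*
 * Illustrative note (not part of the generated header): with the values
 * above, each auto-refill TLB way holds 2^2 == 4 entries, ASIDs span up to
 * 2^8 == 256 values, and the 2 RING bits encode the 4 protection rings.
 */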
#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
#endif /* _XTENSA_CORE_CONFIGURATION_H */
/*
* tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
*
* NOTE: This header file is not meant to be included directly.
*/
/* This header file contains assembly-language definitions (assembly
macros, etc.) for this specific Xtensa processor's TIE extensions
and options. It is customized to this Xtensa processor configuration.
Copyright (c) 1999-2010 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _XTENSA_CORE_TIE_ASM_H
#define _XTENSA_CORE_TIE_ASM_H
/* Selection parameter values for save-area save/restore macros: */
/* Option vs. TIE: */
#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
/* Whether used automatically by compiler: */
#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
/* ABI handling across function calls: */
#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
/* Misc */
#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
| ((ccuse) & XTHAL_SAS_ANYCC) \
| ((abi) & XTHAL_SAS_ANYABI) )
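/*
 * Illustrative only (not part of the generated header): for example,
 *	XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_CC, XTHAL_SAS_CALR)
 * evaluates to 0x0002 | 0x0008 | 0x0010 == 0x001A, i.e. "optional state,
 * used by the compiler by default, caller-saved".
 */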
/*
* Macro to save all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Required parameters:
* ptr Save area pointer address register (clobbered)
* (register must contain a 4 byte aligned address).
* at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
* registers are clobbered, the remaining are unused).
* Optional parameters:
* continue If macro invoked as part of a larger store sequence, set to 1
* if this is not the first in the sequence. Defaults to 0.
* ofs Offset from start of larger sequence (from value of first ptr
* in sequence) at which to store. Defaults to next available space
* (or 0 if <continue> is 0).
* select Select what category(ies) of registers to store, as a bitmask
* (see XTHAL_SAS_xxx constants). Defaults to all registers.
* alloc Select what category(ies) of registers to allocate; if any
* category is selected here that is not in <select>, space for
* the corresponding registers is skipped without doing any store.
*/
.macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
// Optional global register used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
xchal_sa_align \ptr, 0, 1020, 4, 4
rur.THREADPTR \at1 // threadptr option
s32i \at1, \ptr, .Lxchal_ofs_+0
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1020, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
// Optional caller-saved registers used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1016, 4, 4
rsr \at1, ACCLO // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+0
rsr \at1, ACCHI // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1016, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.endif
// Optional caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1004, 4, 4
rsr \at1, M0 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+0
rsr \at1, M1 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+4
rsr \at1, M2 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+8
rsr \at1, M3 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+12
rsr \at1, SCOMPARE1 // conditional store option
s32i \at1, \ptr, .Lxchal_ofs_+16
.set .Lxchal_ofs_, .Lxchal_ofs_ + 20
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1004, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 20
.endif
.endm // xchal_ncp_store
/*
* Macro to restore all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Required parameters:
* ptr Save area pointer address register (clobbered)
* (register must contain a 4 byte aligned address).
* at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
* registers are clobbered, the remaining are unused).
* Optional parameters:
* continue If macro invoked as part of a larger load sequence, set to 1
* if this is not the first in the sequence. Defaults to 0.
* ofs Offset from start of larger sequence (from value of first ptr
* in sequence) at which to load. Defaults to next available space
* (or 0 if <continue> is 0).
* select Select what category(ies) of registers to load, as a bitmask
* (see XTHAL_SAS_xxx constants). Defaults to all registers.
* alloc Select what category(ies) of registers to allocate; if any
* category is selected here that is not in <select>, space for
* the corresponding registers is skipped without doing any load.
*/
.macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
// Optional global register used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
xchal_sa_align \ptr, 0, 1020, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.THREADPTR \at1 // threadptr option
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1020, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
// Optional caller-saved registers used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1016, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wsr \at1, ACCLO // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+4
wsr \at1, ACCHI // MAC16 option
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1016, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.endif
// Optional caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1004, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wsr \at1, M0 // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+4
wsr \at1, M1 // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+8
wsr \at1, M2 // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+12
wsr \at1, M3 // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+16
wsr \at1, SCOMPARE1 // conditional store option
.set .Lxchal_ofs_, .Lxchal_ofs_ + 20
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1004, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 20
.endif
.endm // xchal_ncp_load
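/*
 * Illustrative only (not part of the generated header): a minimal sketch of
 * invoking the two macros above from save/restore code, assuming a2 holds a
 * 4-byte-aligned save-area pointer and a3..a6 are free scratch registers:
 *
 *	xchal_ncp_store	a2, a3, a4, a5, a6	// save optional/TIE state
 *	...
 *	xchal_ncp_load	a2, a3, a4, a5, a6	// restore it later
 *
 * Only the first XCHAL_NCP_NUM_ATMPS temporaries are actually clobbered.
 */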
#define XCHAL_NCP_NUM_ATMPS 1
#define XCHAL_SA_NUM_ATMPS 1
#endif /*_XTENSA_CORE_TIE_ASM_H*/
/*
* tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
*
* NOTE: This header file is not meant to be included directly.
*/
/* This header file describes this specific Xtensa processor's TIE extensions
that extend basic Xtensa core functionality. It is customized to this
Xtensa processor configuration.
Copyright (c) 1999-2010 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _XTENSA_CORE_TIE_H
#define _XTENSA_CORE_TIE_H
#define XCHAL_CP_NUM 1 /* number of coprocessors */
#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
#define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
/* Basic parameters of each coprocessor: */
#define XCHAL_CP7_NAME "XTIOP"
#define XCHAL_CP7_IDENT XTIOP
#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
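/*
 * Illustrative check (not part of the generated header): XTIOP is the only
 * configured coprocessor, so XCHAL_CP_NUM == 1 and
 * XCHAL_CP_MASK == (1 << XCHAL_CP_ID_XTIOP) == 0x80.
 */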
/* Filler info for unassigned coprocessors, to simplify arrays etc: */
#define XCHAL_CP0_SA_SIZE 0
#define XCHAL_CP0_SA_ALIGN 1
#define XCHAL_CP1_SA_SIZE 0
#define XCHAL_CP1_SA_ALIGN 1
#define XCHAL_CP2_SA_SIZE 0
#define XCHAL_CP2_SA_ALIGN 1
#define XCHAL_CP3_SA_SIZE 0
#define XCHAL_CP3_SA_ALIGN 1
#define XCHAL_CP4_SA_SIZE 0
#define XCHAL_CP4_SA_ALIGN 1
#define XCHAL_CP5_SA_SIZE 0
#define XCHAL_CP5_SA_ALIGN 1
#define XCHAL_CP6_SA_SIZE 0
#define XCHAL_CP6_SA_ALIGN 1
/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 32
#define XCHAL_NCP_SA_ALIGN 4
/* Total save area for optional and custom state (NCP + CPn): */
#define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */
#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
/*
* Detailed contents of save areas.
* NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
* before expanding the XCHAL_xxx_SA_LIST() macros.
*
* XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
* dbnum,base,regnum,bitsz,gapsz,reset,x...)
*
* s = passed from XCHAL_*_LIST(s), eg. to select how to expand
* ccused = set if used by compiler without special options or code
* abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
* kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
* opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
* name = lowercase reg name (no quotes)
* galign = group byte alignment (power of 2) (galign >= align)
* align = register byte alignment (power of 2)
* asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
* (not including any pad bytes required to galign this or next reg)
* dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
* base = reg shortname w/o index (or sr=special, ur=TIE user reg)
* regnum = reg index in regfile, or special/TIE-user reg number
* bitsz = number of significant bits (regfile width, or ur/sr mask bits)
* gapsz = intervening bits, if bitsz bits not stored contiguously
* (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
* reset = register reset value (or 0 if undefined at reset)
* x = reserved for future use (0 until then)
*
* To filter out certain registers, e.g. to expand only the non-global
* registers used by the compiler, you can do something like this:
*
* #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
* #define SELCC0(p...)
* #define SELCC1(abikind,p...) SELAK##abikind(p)
* #define SELAK0(p...) REG(p)
* #define SELAK1(p...) REG(p)
* #define SELAK2(p...)
* #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
* ...what you want to expand...
*/
#define XCHAL_NCP_SA_NUM 8
#define XCHAL_NCP_SA_LIST(s) \
XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0)
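/*
 * Illustrative only (not part of the generated header): a minimal sketch of
 * expanding XCHAL_NCP_SA_LIST() above, here into a table of register names.
 * The identifier ncp_reg_names is hypothetical; the block is kept under
 * "#if 0" so it has no effect unless deliberately enabled.
 */
#if 0
#define XCHAL_SA_REG(s, ccused, abikind, kind, opt, name, galign, align, \
		asize, dbnum, base, regnum, bitsz, gapsz, reset, x...)	#name,
static const char * const ncp_reg_names[XCHAL_NCP_SA_NUM] = {
	XCHAL_NCP_SA_LIST(0)	/* "threadptr", "acclo", ..., "scompare1" */
};
#undef XCHAL_SA_REG
#endif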
#define XCHAL_CP0_SA_NUM 0
#define XCHAL_CP0_SA_LIST(s) /* empty */
#define XCHAL_CP1_SA_NUM 0
#define XCHAL_CP1_SA_LIST(s) /* empty */
#define XCHAL_CP2_SA_NUM 0
#define XCHAL_CP2_SA_LIST(s) /* empty */
#define XCHAL_CP3_SA_NUM 0
#define XCHAL_CP3_SA_LIST(s) /* empty */
#define XCHAL_CP4_SA_NUM 0
#define XCHAL_CP4_SA_LIST(s) /* empty */
#define XCHAL_CP5_SA_NUM 0
#define XCHAL_CP5_SA_LIST(s) /* empty */
#define XCHAL_CP6_SA_NUM 0
#define XCHAL_CP6_SA_LIST(s) /* empty */
#define XCHAL_CP7_SA_NUM 0
#define XCHAL_CP7_SA_LIST(s) /* empty */
/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
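/*
 * Illustrative note (not part of the generated header): the lengths above
 * range from 2 to 3 bytes, consistent with XCHAL_HAVE_DENSITY (16-bit narrow
 * encodings) and XCHAL_MAX_INSTRUCTION_SIZE == 3 defined for this variant.
 */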
#endif /*_XTENSA_CORE_TIE_H*/