Commit 5a0015d6 authored by Chris Zankel's avatar Chris Zankel Committed by Linus Torvalds

[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 3

The attached patch provides part 3 of an architecture implementation for the
Tensilica Xtensa CPU series.
Signed-off-by: default avatarChris Zankel <chris@zankel.net>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent 4bedea94
#
# Makefile for the Linux/Xtensa kernel.
#
extra-y := head.o vmlinux.lds
obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \
pci-dma.o
## windowspill.o
obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
This diff is collapsed.
/*
* arch/xtensa/kernel/asm-offsets.c
*
* Generates definitions from c-type structures used by assembly sources.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#include <asm/processor.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
/*
 * Emit assembler-visible constants (structure offsets and sizes).
 *
 * Each DEFINE() expands to a "->SYM value" marker in the generated
 * assembly; the Kbuild asm-offsets machinery extracts these markers
 * into asm-offsets.h for use by entry.S and friends.
 */
int main(void)
{
	/* struct pt_regs: offsets of the saved exception frame fields. */
	DEFINE(PT_PC, offsetof (struct pt_regs, pc));
	DEFINE(PT_PS, offsetof (struct pt_regs, ps));
	DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
	DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
	DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
	DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
	DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
	DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
	DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
	DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
	DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
	DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
	/* PT_AREG is the base of the address-register save area;
	 * PT_AREG0..15 are individual entries for convenience. */
	DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
	DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
	DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
	DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
	DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
	DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
	DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
	DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
	DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
	DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
	DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
	DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
	DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
	DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
	DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
	DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
	DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
	DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
	DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	/* PT_AREG_END and PT_USER_SIZE are intentionally the same value:
	 * the frame up to the end of the areg[] array. */
	DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
	DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
	BLANK();

	/* struct task_struct */
	DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
	DEFINE(TASK_MM, offsetof (struct task_struct, mm));
	DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
	DEFINE(TASK_PID, offsetof (struct task_struct, pid));
	DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
	DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
	DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
	BLANK();

	/* struct thread_struct (offsets are relative to task_struct). */
	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
	DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
	BLANK();

	/* struct mm_struct */
	DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
	DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
	DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
	BLANK();

	/* Re-export the C constant so assembly can use it by the same name. */
	DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
	return 0;
}
/*
* arch/xtensa/kernel/coprocessor.S
*
* Xtensa processor configuration-specific table of coprocessor and
* other custom register layout information.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*
* Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
*/
/*
* This module contains a table that describes the layout of the various
* custom registers and states associated with each coprocessor, as well
* as those not associated with any coprocessor ("extra state").
* This table is included with core dumps and is available via the ptrace
* interface, allowing the layout of such register/state information to
* be modified in the kernel without affecting the debugger. Each
* register or state is identified using a 32-bit "libdb target number"
* assigned when the Xtensa processor is generated.
*/
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#if XCHAL_HAVE_CP

#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)

/*
 * void release_coprocessors (struct task_struct *task)
 *
 * Walk the coprocessor owner table from the last entry down and,
 * for entries associated with \task, clear the owner field and the
 * corresponding CPENABLE bit.  Runs with interrupts raised to
 * LOCKLEVEL; the previous PS is restored on exit.
 */
ENTRY(release_coprocessors)

	entry	a1, 16
						# a2: task
	movi	a3, 1 << XCHAL_CP_MAX		# a3: coprocessor-bit
	movi	a4, coprocessor_info+CP_LAST	# a4: owner-table
						# a5: tmp
	movi	a6, 0				# a6: 0
	rsil	a7, LOCKLEVEL			# a7: saved PS

1:	/* Check if task is coprocessor owner of coprocessor[i]. */

	l32i	a5, a4, COPROCESSOR_INFO_OWNER
	srli	a3, a3, 1
	beqz	a3, 1f				# all bits consumed -> done
	addi	a4, a4, -8
	/* NOTE(review): this branch loops (skips the clear) when
	 * task == owner, which contradicts the "Found an entry"
	 * comment below; a 'bne' would match the stated intent.
	 * Confirm the intended sense before changing. */
	beq	a2, a5, 1b

	/* Found an entry: Clear entry CPENABLE bit to disable CP. */
	/* NOTE(review): a4 was already decremented above, so this
	 * store clears the owner field of the *next* (lower) entry,
	 * not the one just tested — verify against the layout
	 * implied by COPROCESSOR_INFO_SIZE. */

	rsr	a5, CPENABLE
	s32i	a6, a4, COPROCESSOR_INFO_OWNER
	xor	a5, a3, a5
	wsr	a5, CPENABLE

	bnez	a3, 1b

1:	wsr	a7, PS				# restore interrupt level
	rsync
	retw
/*
 * void disable_coprocessor (int cp_index)
 *
 * Clear the CPENABLE bit selected by a2 (the coprocessor index).
 * Interrupts are raised to LOCKLEVEL around the read-modify-write.
 */
ENTRY(disable_coprocessor)
	entry	sp, 16
	rsil	a7, LOCKLEVEL		# a7 = saved PS
	rsr	a3, CPENABLE
	movi	a4, 1
	ssl	a2			# shift amount = cp_index
	sll	a4, a4			# a4 = 1 << cp_index
	and	a4, a3, a4		# isolate the bit (0 if already clear)
	xor	a3, a3, a4		# clear it
	wsr	a3, CPENABLE
	wsr	a7, PS			# restore interrupt level
	rsync
	retw

/*
 * void enable_coprocessor (int cp_index)
 *
 * Set the CPENABLE bit selected by a2, symmetric to the above.
 */
ENTRY(enable_coprocessor)
	entry	sp, 16
	rsil	a7, LOCKLEVEL
	rsr	a3, CPENABLE
	movi	a4, 1
	ssl	a2
	sll	a4, a4			# a4 = 1 << cp_index
	or	a3, a3, a4		# set the bit
	wsr	a3, CPENABLE
	wsr	a7, PS
	rsync
	retw

#endif
/*
 * Save/restore routines for "extra" (non-coprocessor custom) state and
 * for coprocessor register state.  The bodies are emitted by the Xtensa
 * HAL macros for this particular core configuration; the calling
 * convention (presumably the save-area pointer in a2 — confirm against
 * the HAL documentation) is defined by those macros.
 */
ENTRY(save_coprocessor_extra)
	entry	sp, 16
	xchal_extra_store_funcbody
	retw

ENTRY(restore_coprocessor_extra)
	entry	sp, 16
	xchal_extra_load_funcbody
	retw

ENTRY(save_coprocessor_registers)
	entry	sp, 16
	xchal_cpi_store_funcbody
	retw

ENTRY(restore_coprocessor_registers)
	entry	sp, 16
	xchal_cpi_load_funcbody
	retw
/*
* The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
* describe the contents of coprocessor & extra save areas in terms of
* undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
* latter macros here; they expand into a table of the format we want.
* The general format is:
*
* CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
* bitmask, rsv2, rsv3)
* CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
* bitmask, rsv2, rsv3)
* CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
* numentries, contentsize, regname_base,
* regfile_name, rsv2, rsv3)
*
* For this table, we only care about the <libdbnum>, <offset> and <size>
* fields.
*/
/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below:
 * only the libdb number, offset and size fields are used; the rest of
 * each macro's arguments are ignored. */
#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
			    bitmask, rsv2, rsv3) \
	reg_entry libdbnum, offset, size ;
#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
			    bitmask, rsv2, rsv3) \
	reg_entry libdbnum, offset, size ;
#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
			    numentries, contentsize, regname_base, \
			    regfile_name, rsv2, rsv3) \
	reg_entry libdbnum, offset, size ;

/* A single table entry.  If the running offset (__last_offset) does not
 * match the entry's offset within the current group, a 0xFCxxxxxx padding
 * word encoding the gap is emitted first; then the libdb number itself. */
	.macro reg_entry libdbnum, offset, size
	 .ifne (__last_offset-(__last_group_offset+\offset))
	  /* padding entry */
	  .word (0xFC000000+__last_offset-(__last_group_offset+\offset))
	 .endif
	 .word \libdbnum				/* actual entry */
	 .set __last_offset, __last_group_offset+\offset+\size
	.endm	/* reg_entry */

/* Table entry that marks the beginning of a group (coprocessor or "extra"):
 * aligns the group's base offset and, when the group is non-empty, emits a
 * 0xFD marker word carrying the coprocessor number and entry count. */
	.macro reg_group cpnum, num_entries, align
	 .set __last_group_offset, (__last_offset + \align- 1) & -\align
	 .ifne \num_entries
	  .word 0xFD000000+(\cpnum<<16)+\num_entries
	 .endif
	.endm	/* reg_group */
/*
* Register info tables.
*/
/*
 * Register info tables: one group for the "extra" state (pseudo-CP 0xFF)
 * followed by one group per possible coprocessor (0..7).  The table is
 * consumed via ptrace and included in core dumps; 0xFC000000 terminates it.
 */
	.section .rodata, "a"
	.globl _xtensa_reginfo_tables
	.globl _xtensa_reginfo_table_size
	.align 4
_xtensa_reginfo_table_size:
	.word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
_xtensa_reginfo_tables:
	.set __last_offset, 0
	reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
	XCHAL_EXTRA_SA_CONTENTS_LIBDB
	reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
	XCHAL_CP0_SA_CONTENTS_LIBDB
	reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
	XCHAL_CP1_SA_CONTENTS_LIBDB
	reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
	XCHAL_CP2_SA_CONTENTS_LIBDB
	reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
	XCHAL_CP3_SA_CONTENTS_LIBDB
	reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
	XCHAL_CP4_SA_CONTENTS_LIBDB
	reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
	XCHAL_CP5_SA_CONTENTS_LIBDB
	reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
	XCHAL_CP6_SA_CONTENTS_LIBDB
	reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
	XCHAL_CP7_SA_CONTENTS_LIBDB
	.word 0xFC000000	/* invalid register number, marks end of table */
_xtensa_reginfo_table_end:
This diff is collapsed.
/*
* arch/xtensa/kernel/head.S
*
* Xtensa Processor startup code.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Kevin Chea
*/
#include <xtensa/cacheasm.h>
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
* This module contains the entry code for kernel images. It performs the
* minimal setup needed to call the generic C routines.
*
* Prerequisites:
*
* - The kernel image has been loaded to the actual address where it was
* compiled to.
* - a2 contains either 0 or a pointer to a list of boot parameters.
* (see setup.c for more details)
*
*/
/*
 * iterate from, to, cmd
 *
 * Expand \cmd \from for each value in [\from, \to] by recursing with
 * \from+1.  The .ifeq guard fires only while (\to - \from) is in
 * [0, 0xfff]: once \from exceeds \to the difference goes negative,
 * the high bits become non-zero, and the recursion stops.
 */
	.macro	iterate	from, to , cmd
		.ifeq	((\to - \from) & ~0xfff)
			\cmd	\from
			iterate	"(\from+1)", \to, \cmd
		.endif
	.endm
/*
* _start
*
* The bootloader passes a pointer to a list of boot parameters in a2.
*/
/* The first bytes of the kernel image must be an instruction, so we
* manually allocate and define the literal constant we need for a jx
* instruction.
*/
/* The first bytes of the kernel image must be an instruction, so we
 * manually allocate and define the literal constant we need for a jx
 * instruction: jump over the literal, load it, and jump to _startup.
 */
	.section .head.text, "ax"
	.globl _start
_start:	_j	2f
	.align	4
1:	.word	_startup
2:	l32r	a0, 1b
	jx	a0
.text
.align 4
_startup:

	/* Disable interrupts and exceptions. */

	movi	a0, XCHAL_PS_EXCM_MASK
	wsr	a0, PS

	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */

	wsr	a2, EXCSAVE_1

	/* Start with a fresh windowbase and windowstart.  */

	movi	a1, 1
	movi	a0, 0
	wsr	a1, WINDOWSTART
	wsr	a0, WINDOWBASE
	rsync

	/* Set a0 to 0 for the remaining initialization. */

	movi	a0, 0

	/* Clear debugging registers. */

#if XCHAL_HAVE_DEBUG
	wsr	a0, IBREAKENABLE
	wsr	a0, ICOUNT
	/* NOTE(review): a1 is loaded with 15 but never used here;
	 * ICOUNTLEVEL is written with a0 (0).  Confirm whether a1
	 * was meant to be written instead. */
	movi	a1, 15
	wsr	a0, ICOUNTLEVEL

	.macro reset_dbreak num
	wsr	a0, DBREAKC + \num
	.endm

	/* NOTE(review): DBREAKC registers are iterated using the
	 * IBREAK count (XCHAL_NUM_IBREAK); verify this should not be
	 * XCHAL_NUM_DBREAK. */
	iterate	0, XCHAL_NUM_IBREAK-1, reset_dbreak
#endif

	/* Clear CCOUNT (not really necessary, but nice) */

	wsr	a0, CCOUNT	# not really necessary, but nice

	/* Disable zero-loops. */

#if XCHAL_HAVE_LOOPS
	wsr	a0, LCOUNT
#endif

	/* Disable all timers. */

	.macro	reset_timer	num
	wsr	a0, CCOMPARE_0 + \num
	.endm
	iterate	0, XCHAL_NUM_TIMERS-1, reset_timer

	/* Interrupt initialization: mask everything, ack pending
	 * software/edge interrupts. */

	movi	a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
	wsr	a0, INTENABLE
	wsr	a2, INTCLEAR

	/* Disable coprocessors. */

#if XCHAL_CP_NUM > 0
	wsr	a0, CPENABLE
#endif

	/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
	 *
	 * Note: PS.EXCM must be cleared before using any loop
	 *	 instructions; otherwise, they are silently disabled, and
	 *	 at most one iteration of the loop is executed.
	 */

	movi	a1, 1
	wsr	a1, PS
	rsync

	/* Initialize the caches.
	 * Does not include flushing writeback d-cache.
	 * a6, a7 are just working registers (clobbered).
	 */

	icache_reset  a2, a3
	dcache_reset  a2, a3

	/* Unpack data sections
	 *
	 * The linker script used to build the Linux kernel image
	 * creates a table located at __boot_reloc_table_start
	 * that contains the information what data needs to be unpacked.
	 *
	 * Uses a2-a7.
	 */

	movi	a2, __boot_reloc_table_start
	movi	a3, __boot_reloc_table_end

1:	beq	a2, a3, 3f	# no more entries?
	l32i	a4, a2, 0	# start destination (in RAM)
	l32i	a5, a2, 4	# end destination (in RAM)
	l32i	a6, a2, 8	# start source (in ROM)
	addi	a2, a2, 12	# next entry
	beq	a4, a5, 1b	# skip, empty entry
	beq	a4, a6, 1b	# skip, source and dest. are the same

2:	l32i	a7, a6, 0	# load word
	addi	a6, a6, 4
	s32i	a7, a4, 0	# store word
	addi	a4, a4, 4
	bltu	a4, a5, 2b
	j	1b

3:
	/* All code and initialized data segments have been copied.
	 * Now clear the BSS segment.
	 */
movi a2, _bss_start # start of BSS
movi a3, _bss_end # end of BSS
1: addi a2, a2, 4
s32i a0, a2, 0
blt a2, a3, 1b
#if XCHAL_DCACHE_IS_WRITEBACK

	/* After unpacking, flush the writeback cache to memory so the
	 * instructions/data are available.
	 */

	dcache_writeback_all	a2, a3
#endif

	/* Setup stack and enable window exceptions (keep irqs disabled) */

	movi	a1, init_thread_union
	addi	a1, a1, KERNEL_STACK_SIZE

	movi	a2, 0x00040001		# WOE=1, INTLEVEL=1, UM=0
	wsr	a2, PS			# (enable reg-windows; progmode stack)
	rsync

	/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/

	movi	a2, debug_exception
	wsr	a2, EXCSAVE + XCHAL_DEBUGLEVEL

	/* Set up EXCSAVE[1] to point to the exc_table. */

	movi	a6, exc_table
	xsr	a6, EXCSAVE_1

	/* init_arch kick-starts the linux kernel */

	movi	a4, init_arch
	callx4	a4

	movi	a4, start_kernel
	callx4	a4

should_never_return:
	j	should_never_return	# start_kernel() must not return

	/* Define some common data structures here.  We define them
	 * here in this assembly file due to their unusual alignment
	 * requirements (each is one page, page-aligned).
	 */

	.comm	swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
	.comm	empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
	.comm	empty_bad_page,PAGE_SIZE,PAGE_SIZE
	.comm	empty_zero_page,PAGE_SIZE,PAGE_SIZE
/*
* linux/arch/xtensa/kernel/irq.c
*
* Xtensa built-in interrupt controller and some generic functions copied
* from i386.
*
* Copyright (C) 2002 - 2005 Tensilica, Inc.
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
*
* Chris Zankel <chris@zankel.net>
* Kevin Chea
*
*/
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <asm/uaccess.h>
#include <asm/platform.h>
static void enable_xtensa_irq(unsigned int irq);
static void disable_xtensa_irq(unsigned int irq);
static void mask_and_ack_xtensa(unsigned int irq);
static void end_xtensa_irq(unsigned int irq);
static unsigned int cached_irq_mask;
atomic_t irq_err_count;
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
/*
 * Arch hook called by the generic IRQ layer when an interrupt arrives
 * on a vector with no registered handler; just log it.
 */
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ trap at vector %02x\n", irq);
}
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Fix: sizeof(thread_info) does not name a type in C — it must be
 * sizeof(struct thread_info); as written the stack-overflow check
 * failed to compile whenever CONFIG_DEBUG_STACKOVERFLOW was enabled.
 */
unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		unsigned long sp;

		__asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
		sp &= THREAD_SIZE - 1;

		if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
			printk("Stack overflow in do_IRQ: %ld\n",
			       sp - sizeof(struct thread_info));
	}
#endif
	__do_IRQ(irq, regs);

	irq_exit();

	return 1;
}
/*
* Generic, controller-independent functions:
*/
/*
 * /proc/interrupts implementation (seq_file callback).
 *
 * Row 0 prints the CPU header; rows [0, NR_IRQS) print one line per
 * wired-up IRQ (skipping those with no action) under the descriptor
 * lock; the final row (i == NR_IRQS) prints NMI and error counters.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;	/* no handler registered for this IRQ */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		/* Remaining shared handlers on the same line. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	}
	return 0;
}
/* shutdown is same as "disable" */
#define shutdown_xtensa_irq disable_xtensa_irq
static unsigned int startup_xtensa_irq(unsigned int irq)
{
enable_xtensa_irq(irq);
return 0; /* never anything pending */
}
static struct hw_interrupt_type xtensa_irq_type = {
"Xtensa-IRQ",
startup_xtensa_irq,
shutdown_xtensa_irq,
enable_xtensa_irq,
disable_xtensa_irq,
mask_and_ack_xtensa,
end_xtensa_irq
};
/* Clear the IRQ's bit in the software mask cache and push the cache to
 * the INTENABLE special register.  Caller must keep interrupts disabled
 * around the read-modify-write of cached_irq_mask. */
static inline void mask_irq(unsigned int irq)
{
	cached_irq_mask &= ~(1 << irq);
	set_sr (cached_irq_mask, INTENABLE);
}

/* Set the IRQ's bit in the mask cache and push it to INTENABLE.
 * Same locking requirement as mask_irq(). */
static inline void unmask_irq(unsigned int irq)
{
	cached_irq_mask |= 1 << irq;
	set_sr (cached_irq_mask, INTENABLE);
}
/*
 * Mask an IRQ line with interrupts disabled.
 *
 * Fix: local_save_flags() only *reads* the interrupt state — it does
 * not disable interrupts — so the read-modify-write of cached_irq_mask
 * in mask_irq() could race with an interrupt handler.  Use
 * local_irq_save(), as the i386 code this was derived from does.
 */
static void disable_xtensa_irq(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	mask_irq(irq);
	local_irq_restore(flags);
}
/*
 * Unmask an IRQ line with interrupts disabled.
 *
 * Fix: as in disable_xtensa_irq(), local_save_flags() does not actually
 * block interrupts, leaving the cached_irq_mask update racy; use
 * local_irq_save() instead.
 */
static void enable_xtensa_irq(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	unmask_irq(irq);
	local_irq_restore(flags);
}
/* ->ack hook: this controller has no distinct ack operation, so simply
 * mask the line; it is re-enabled by end_xtensa_irq() after handling. */
static void mask_and_ack_xtensa(unsigned int irq)
{
	disable_xtensa_irq(irq);
}

/* ->end hook: re-enable the line once the handler has run. */
static void end_xtensa_irq(unsigned int irq)
{
	enable_xtensa_irq(irq);
}
/* Boot-time IRQ setup: attach the Xtensa controller callbacks to every
 * IRQ descriptor, start with all interrupts masked, then let the
 * platform hook register its own interrupts. */
void __init init_IRQ(void)
{
	int i;

	for (i=0; i < XTENSA_NR_IRQS; i++)
		irq_desc[i].handler = &xtensa_irq_type;

	cached_irq_mask = 0;	/* everything masked until requested */

	platform_init_irq();
}
/*
* arch/xtensa/kernel/platform.c
*
* Module support.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*
*/
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cache.h>
/* Declared for the module loader core; unused until module loading is
 * actually implemented for this architecture. */
LIST_HEAD(module_buf_list);

/*
 * Mandatory architecture hooks for the generic module loader.
 * Module loading is not yet implemented on xtensa, so every hook
 * panics if it is ever reached (these are only built with
 * CONFIG_MODULES, see the Makefile).
 */

void *module_alloc(unsigned long size)
{
  panic("module_alloc not implemented");
}

void module_free(struct module *mod, void *module_region)
{
  panic("module_free not implemented");
}

int module_frob_arch_sections(Elf32_Ehdr *hdr,
    			      Elf32_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
  panic("module_frob_arch_sections not implemented");
}

int apply_relocate(Elf32_Shdr *sechdrs,
    		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *module)
{
  panic ("apply_relocate not implemented");
}

int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *module)
{
  panic("apply_relocate_add not implemented");
}

int module_finalize(const Elf_Ehdr *hdr,
    		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
  panic ("module_finalize not implemented");
}

void module_arch_cleanup(struct module *mod)
{
  panic("module_arch_cleanup not implemented");
}

struct bug_entry *module_find_bug(unsigned long bugaddr)
{
  panic("module_find_bug not implemented");
}
/*
* arch/xtensa/pci-dma.c
*
* DMA coherent memory allocation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*
* Based on version for i386.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
/*
* Note: We assume that the full memory space is always mapped to 'kseg'
* Otherwise we have to use page attributes (not implemented).
*/
/*
 * Allocate a DMA-coherent buffer: grab zeroed pages, report their bus
 * address through *handle, and return an uncached (bypass) mapping of
 * them.  We assume the full memory space is always mapped to 'kseg';
 * otherwise we would have to use page attributes (not implemented).
 *
 * Fix: on allocation failure the old code fell through and returned
 * BYPASS_ADDR(0) — a bogus non-NULL pointer the caller would happily
 * dereference.  Return NULL on failure, per the dma_alloc_coherent
 * contract.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret == NULL)
		return NULL;

	memset(ret, 0, size);
	*handle = virt_to_bus(ret);

	return (void*) BYPASS_ADDR((unsigned long)ret);
}
/* Release a buffer from dma_alloc_coherent(): map the uncached (bypass)
 * address back to its cached alias before handing the pages back. */
void dma_free_coherent(struct device *hwdev, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
}
/*
 * Make a streaming buffer visible to/from a device by operating on the
 * data cache over [vaddr, vaddr+size).  FROMDEVICE only needs an
 * invalidate; TODEVICE and BIDIRECTIONAL write back (and invalidate).
 * PCI_DMA_NONE is a caller bug; BUG() does not return, so the missing
 * break after it is harmless.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:        /* invalidate only */
		__invalidate_dcache_range((unsigned long)vaddr,
				          (unsigned long)size);
		break;

	case PCI_DMA_TODEVICE:          /* writeback only */
	case PCI_DMA_BIDIRECTIONAL:     /* writeback and invalidate */
		__flush_invalidate_dcache_range((unsigned long)vaddr,
				    		(unsigned long)size);
		break;
	}
}
This diff is collapsed.
/*
* arch/xtensa/kernel/platform.c
*
* Default platform functions.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <asm/platform.h>
#include <asm/timex.h>
/* _F(ret, name, args, body) defines __platform_<name> with the given
 * body and makes platform_<name> a weak alias for it, so a platform
 * port can override any hook simply by defining its own strong
 * platform_<name>. */
#define _F(r,f,a,b)							\
	r __platform_##f a b;                                    	\
	r platform_##f a __attribute__((weak, alias("__platform_"#f)))

/*
 * Default functions that are used if no platform specific function is defined.
 * (Please, refer to include/asm-xtensa/platform.h for more information)
 */

_F(void, setup, (char** cmd), { });
_F(void, init_irq, (void), { });
_F(void, restart, (void), { while(1); });	/* spin: no generic reset */
_F(void, halt, (void), { while(1); });
_F(void, power_off, (void), { while(1); });
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
_F(void, heartbeat, (void), { });
_F(int,  pcibios_fixup, (void), { return 0; });
_F(int, get_rtc_time, (time_t* t), { return 0; });
_F(int, set_rtc_time, (time_t t), { return 0; });
/* Use #ifdef, not #if, to test a kconfig symbol: #if on an undefined
 * macro trips -Wundef and deviates from kernel convention.  The default
 * hook cannot measure the clock, so it warns and assumes 100 MHz. */
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void),
{
  printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
  ccount_per_jiffy = 100 * (1000000UL/HZ);
});
#endif
This diff is collapsed.
// TODO some minor issues
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
* Scott Foehner<sfoehner@yahoo.com>,
* Kevin Chea
* Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/elf.h>
#define TEST_KERNEL // verify kernel operations FIXME: remove
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
/*
 * Called by kernel/ptrace.c when detaching; architectures normally use
 * it to clear single-step state.  Xtensa keeps single-step state in
 * child->ptrace (PT_SINGLESTEP), which the core clears, so there is
 * nothing to do here.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}
/*
 * Xtensa-specific ptrace() system call.
 *
 * Reference counting: once the child has been looked up, we hold a
 * reference taken by get_task_struct(); every exit path from that
 * point must go through 'out_tsk' so put_task_struct() drops it.
 * Fix: the pid==1 check, the PEEKTEXT/PEEKUSR success paths, the
 * POKETEXT error path and the default case previously jumped straight
 * to 'out' and leaked that reference on every call.
 */
int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret = -EPERM;

	lock_kernel();

#if 0
	if ((int)request != 1)
	printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
	       (int) request, (int) pid, (unsigned long) addr,
	       (unsigned long) data);
#endif

	if (request == PTRACE_TRACEME) {

		/* Are we already being traced? */

		if (current->ptrace & PT_PTRACED)
			goto out;

		if ((ret = security_ptrace(current->parent, current)))
			goto out;

		/* Set the ptrace bit in the process flags. */

		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}

	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;	/* was 'goto out': leaked the reference */

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	if ((ret = ptrace_check_attach(child, request == PTRACE_KILL)) < 0)
		goto out_tsk;

	switch (request) {
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
	{
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp,(unsigned long *) data);

		goto out_tsk;	/* was 'goto out': leaked the reference */
	}

	/* Read the word at location addr in the USER area.  */

	case PTRACE_PEEKUSR:
		{
		struct pt_regs *regs;
		unsigned long tmp;

		regs = xtensa_pt_regs(child);
		tmp = 0;  /* Default return value. */

		switch(addr) {

		case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
			{
			/* Convert the physical AR number into an offset
			 * relative to the live window (windowbase). */
			int ar = addr - REG_AR_BASE - regs->windowbase * 4;
			ar &= (XCHAL_NUM_AREGS - 1);
			if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
				tmp = regs->areg[ar];
			else
				ret = -EIO;
			break;
			}
		case REG_A_BASE ... REG_A_BASE + 15:
			tmp = regs->areg[addr - REG_A_BASE];
			break;
		case REG_PC:
			tmp = regs->pc;
			break;
		case REG_PS:
			/* Note:  PS.EXCM is not set while user task is running;
			 * its being set in regs is for exception handling
			 * convenience.  */
			tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
			break;
		case REG_WB:
			tmp = regs->windowbase;
			break;
		case REG_WS:
			tmp = regs->windowstart;
			break;
		case REG_LBEG:
			tmp = regs->lbeg;
			break;
		case REG_LEND:
			tmp = regs->lend;
			break;
		case REG_LCOUNT:
			tmp = regs->lcount;
			break;
		case REG_SAR:
			tmp = regs->sar;
			break;
		case REG_DEPC:
			tmp = regs->depc;
			break;
		case REG_EXCCAUSE:
			tmp = regs->exccause;
			break;
		case REG_EXCVADDR:
			tmp = regs->excvaddr;
			break;
		case SYSCALL_NR:
			tmp = regs->syscall;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out_tsk;	/* was 'goto out': leaked child */
		}
		ret = put_user(tmp, (unsigned long *) data);
		goto out_tsk;		/* was 'goto out': leaked child */
		}

	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		if (access_process_vm(child, addr, &data, sizeof(data), 1)
		    == sizeof(data))
			break;
		ret = -EIO;
		goto out_tsk;		/* was 'goto out': leaked child */

	case PTRACE_POKEUSR:
		{
		struct pt_regs *regs;
		regs = xtensa_pt_regs(child);

		switch (addr) {
		case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
			{
			int ar = addr - REG_AR_BASE - regs->windowbase * 4;
			if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
				regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
			else
				ret = -EIO;
			break;
			}
		case REG_A_BASE ... REG_A_BASE + 15:
			regs->areg[addr - REG_A_BASE] = data;
			break;
		case REG_PC:
			regs->pc = data;
			break;
		case SYSCALL_NR:
			regs->syscall = data;
			break;
#ifdef TEST_KERNEL
		case REG_WB:
			regs->windowbase = data;
			break;
		case REG_WS:
			regs->windowstart = data;
			break;
#endif

		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
		}

	/* continue and stop at next (return from) syscall */
	case PTRACE_SYSCALL:
	case PTRACE_CONT: /* restart after signal. */
	{
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
		/* Make sure the single step bit is not set. */
		child->ptrace &= ~PT_SINGLESTEP;
		wake_up_process(child);
		ret = 0;
		break;
	}

	/*
	 * make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL:
		ret = 0;
		/* NOTE(review): kernels of this vintage keep EXIT_ZOMBIE in
		 * ->exit_state, not ->state — confirm this comparison. */
		if (child->state == EXIT_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		child->ptrace &= ~PT_SINGLESTEP;
		wake_up_process(child);
		break;

	case PTRACE_SINGLESTEP:
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->ptrace |= PT_SINGLESTEP;
		child->exit_code = data;
		wake_up_process(child);
		ret = 0;
		break;

	case PTRACE_GETREGS:
	{
		/* 'data' points to user memory in which to write.
		 * Mainly due to the non-live register values, we
		 * reformat the register values into something more
		 * standard.  For convenience, we use the handy
		 * elf_gregset_t format. */

		xtensa_gregset_t format;
		struct pt_regs *regs = xtensa_pt_regs(child);

		do_copy_regs (&format, regs, child);

		/* Now, copy to user space nice and easy... */
		ret = 0;
		if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
			ret = -EFAULT;
		break;
	}

	case PTRACE_SETREGS:
	{
		/* 'data' points to user memory that contains the new
		 * values in the elf_gregset_t format. */

		xtensa_gregset_t format;
		struct pt_regs *regs = xtensa_pt_regs(child);

		if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
			ret = -EFAULT;
			break;
		}

		/* FIXME: Perhaps we want some sanity checks on
		 * these user-space values?  See ARM version.  Are
		 * debuggers a security concern? */

		do_restore_regs (&format, regs, child);

		ret = 0;
		break;
	}

	case PTRACE_GETFPREGS:
	{
		/* 'data' points to user memory in which to write.
		 * For convenience, we use the handy
		 * elf_fpregset_t format. */

		elf_fpregset_t fpregs;
		struct pt_regs *regs = xtensa_pt_regs(child);

		do_save_fpregs (&fpregs, regs, child);

		/* Now, copy to user space nice and easy... */
		ret = 0;
		if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
			ret = -EFAULT;

		break;
	}

	case PTRACE_SETFPREGS:
	{
		/* 'data' points to user memory that contains the new
		 * values in the elf_fpregset_t format.
		 */
		elf_fpregset_t fpregs;
		struct pt_regs *regs = xtensa_pt_regs(child);

		ret = 0;
		if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
			ret = -EFAULT;
			break;
		}

		if (do_restore_fpregs (&fpregs, regs, child))
			ret = -EIO;
		break;
	}

	case PTRACE_GETFPREGSIZE:
		/* 'data' points to 'unsigned long' set to the size
		 * of elf_fpregset_t
		 */
		ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
		break;

	case PTRACE_DETACH: /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		goto out_tsk;	/* was 'goto out': leaked child */
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
/*
 * Notify the tracer on syscall entry/exit when syscall tracing is
 * active for the current task.
 */
void do_syscall_trace(void)
{
	/* Bail out unless this task is both flagged for syscall tracing
	 * and actually being ptraced. */
	if (!test_thread_flag(TIF_SYSCALL_TRACE) ||
	    !(current->ptrace & PT_PTRACED))
		return;

	/*
	 * The 0x80 provides a way for the tracing parent to distinguish
	 * between a syscall stop and SIGTRAP delivery
	 */
	ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (!current->exit_code)
		return;

	send_sig(current->exit_code, current, 1);
	current->exit_code = 0;
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* arch/xtensa/kernel/xtensa_ksyms.c
*
* Export Xtensa-specific functions for loadable modules.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com>
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <linux/in6.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/semaphore.h>
#ifdef CONFIG_BLK_DEV_FD
#include <asm/floppy.h>
#endif
#ifdef CONFIG_NET
#include <net/checksum.h>
#endif /* CONFIG_NET */
/*
* String functions
*/
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);
/*
* gcc internal math functions
*/
extern long long __ashrdi3(long long, int);
extern long long __ashldi3(long long, int);
extern long long __lshrdi3(long long, int);
extern int __divsi3(int, int);
extern int __modsi3(int, int);
extern long long __muldi3(long long, long long);
extern int __mulsi3(int, int);
extern unsigned int __udivsi3(unsigned int, unsigned int);
extern unsigned int __umodsi3(unsigned int, unsigned int);
extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);
/*
* Semaphore operations
*/
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);
#ifdef CONFIG_NET
/*
* Networking support
*/
EXPORT_SYMBOL(csum_partial_copy_generic);
#endif /* CONFIG_NET */
/*
* Architecture-specific symbols
*/
EXPORT_SYMBOL(__xtensa_copy_user);
/*
* Kernel hacking ...
*/
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
// FIXME EXPORT_SYMBOL(screen_info);
#endif
EXPORT_SYMBOL(get_wchan);
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment