Commit c658eac6 authored by Chris Zankel

[XTENSA] Add support for configurable registers and coprocessors

The Xtensa architecture allows custom instructions and registers to
be defined. Registers that are bound to a coprocessor are only
accessible if the corresponding enable bit is set, which allows a
'lazy' context switch mechanism to be implemented. Other registers
need to be saved and restored at context-switch time or during
interrupt handling.

This patch adds support for these additional states:

- save and restore registers used by the compiler on interrupt entry
  and exit
- context-switch additional registers that are not bound to any
  coprocessor
- 'lazy' context switch of registers bound to a coprocessor
- ptrace interface providing access to the additional registers
- updated configuration files in include/asm-xtensa/variant-fsf
Signed-off-by: Chris Zankel <chris@zankel.net>
parent 71d28e6c
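
As a rough illustration of the lazy mechanism described above (this is not code
from the patch; the actual exception handler is implemented in assembly and is
not shown here): a coprocessor stays disabled through CPENABLE until a task
actually issues an instruction for it, and the resulting coprocessor-disabled
exception spills the previous owner's state and reloads the current task's.
The sketch below reuses helpers introduced by this patch (coprocessor_flush,
coprocessor_restore, coprocessor_owner, the thread_info cpenable field); the
function name and exact control flow are assumptions.

	#include <linux/sched.h>
	#include <asm/coprocessor.h>

	/* Illustrative only: the real dispatch lives in assembly. */
	static void lazy_coprocessor_switch(int cp)
	{
		struct thread_info *ti = current_thread_info();
		struct thread_info *owner = coprocessor_owner[cp];
		unsigned long cpenable;

		if (owner != ti) {
			/* Spill the previous owner's live registers to its save area. */
			if (owner != NULL) {
				coprocessor_flush(owner, cp);
				owner->cpenable &= ~(1 << cp);
			}
			/* Reload this task's saved state and take ownership. */
			coprocessor_restore(ti, cp);
			coprocessor_owner[cp] = ti;
		}

		/* Re-enable the coprocessor so the faulting instruction can retry. */
		ti->cpenable |= 1 << cp;
		RSR_CPENABLE(cpenable);
		WSR_CPENABLE(cpenable | (1 << cp));
	}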
...@@ -63,6 +63,8 @@ int main(void) ...@@ -63,6 +63,8 @@ int main(void)
DEFINE(PT_SIZE, sizeof(struct pt_regs)); DEFINE(PT_SIZE, sizeof(struct pt_regs));
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS])); DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS])); DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));
/* struct task_struct */ /* struct task_struct */
DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace)); DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
...@@ -76,7 +78,19 @@ int main(void) ...@@ -76,7 +78,19 @@ int main(void)
/* struct thread_info (offset from start_struct) */ /* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save)); DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
#if XTENSA_HAVE_COPROCESSORS
DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
#endif
DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds)); DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
/* struct mm_struct */ /* struct mm_struct */
......
...@@ -52,6 +52,55 @@ void (*pm_power_off)(void) = NULL; ...@@ -52,6 +52,55 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off); EXPORT_SYMBOL(pm_power_off);
#if XTENSA_HAVE_COPROCESSORS
void coprocessor_release_all(struct thread_info *ti)
{
unsigned long cpenable;
int i;
/* Make sure we don't switch tasks during this operation. */
preempt_disable();
/* Walk through all cp owners and release it for the requested one. */
cpenable = ti->cpenable;
for (i = 0; i < XCHAL_CP_MAX; i++) {
if (coprocessor_owner[i] == ti) {
coprocessor_owner[i] = 0;
cpenable &= ~(1 << i);
}
}
ti->cpenable = cpenable;
coprocessor_clear_cpenable();
preempt_enable();
}
void coprocessor_flush_all(struct thread_info *ti)
{
unsigned long cpenable;
int i;
preempt_disable();
cpenable = ti->cpenable;
for (i = 0; i < XCHAL_CP_MAX; i++) {
if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
coprocessor_flush(ti, i);
cpenable >>= 1;
}
preempt_enable();
}
#endif
/* /*
* Powermanagement idle function, if any is provided by the platform. * Powermanagement idle function, if any is provided by the platform.
*/ */
...@@ -71,15 +120,36 @@ void cpu_idle(void) ...@@ -71,15 +120,36 @@ void cpu_idle(void)
} }
/* /*
* Free current thread data structures etc.. * This is called when the thread calls exit().
*/ */
void exit_thread(void) void exit_thread(void)
{ {
#if XTENSA_HAVE_COPROCESSORS
coprocessor_release_all(current_thread_info());
#endif
} }
/*
* Flush thread state. This is called when a thread does an execve()
* Note that we flush coprocessor registers for the case execve fails.
*/
void flush_thread(void) void flush_thread(void)
{ {
#if XTENSA_HAVE_COPROCESSORS
struct thread_info *ti = current_thread_info();
coprocessor_flush_all(ti);
coprocessor_release_all(ti);
#endif
}
/*
* This is called before the thread is copied.
*/
void prepare_to_copy(struct task_struct *tsk)
{
#if XTENSA_HAVE_COPROCESSORS
coprocessor_flush_all(task_thread_info(tsk));
#endif
} }
/* /*
...@@ -107,6 +177,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, ...@@ -107,6 +177,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
struct task_struct * p, struct pt_regs * regs) struct task_struct * p, struct pt_regs * regs)
{ {
struct pt_regs *childregs; struct pt_regs *childregs;
struct thread_info *ti;
unsigned long tos; unsigned long tos;
int user_mode = user_mode(regs); int user_mode = user_mode(regs);
...@@ -128,13 +199,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, ...@@ -128,13 +199,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
p->set_child_tid = p->clear_child_tid = NULL; p->set_child_tid = p->clear_child_tid = NULL;
p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1); p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
p->thread.sp = (unsigned long)childregs; p->thread.sp = (unsigned long)childregs;
if (user_mode(regs)) { if (user_mode(regs)) {
int len = childregs->wmask & ~0xf; int len = childregs->wmask & ~0xf;
childregs->areg[1] = usp; childregs->areg[1] = usp;
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4], memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
&regs->areg[XCHAL_NUM_AREGS - len/4], len); &regs->areg[XCHAL_NUM_AREGS - len/4], len);
// FIXME: we need to set THREADPTR in thread_info...
if (clone_flags & CLONE_SETTLS) if (clone_flags & CLONE_SETTLS)
childregs->areg[2] = childregs->areg[6]; childregs->areg[2] = childregs->areg[6];
...@@ -142,6 +214,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, ...@@ -142,6 +214,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
/* In kernel space, we start a new thread with a new stack. */ /* In kernel space, we start a new thread with a new stack. */
childregs->wmask = 1; childregs->wmask = 1;
} }
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
ti = task_thread_info(p);
ti->cpenable = 0;
#endif
return 0; return 0;
} }
...@@ -179,10 +257,6 @@ unsigned long get_wchan(struct task_struct *p) ...@@ -179,10 +257,6 @@ unsigned long get_wchan(struct task_struct *p)
} }
/* /*
* do_copy_regs() gathers information from 'struct pt_regs' and
* 'current->thread.areg[]' to fill in the xtensa_gregset_t
* structure.
*
* xtensa_gregset_t and 'struct pt_regs' are vastly different formats * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
* of processor registers. Besides different ordering, * of processor registers. Besides different ordering,
* xtensa_gregset_t contains non-live register information that * xtensa_gregset_t contains non-live register information that
...@@ -191,9 +265,20 @@ unsigned long get_wchan(struct task_struct *p) ...@@ -191,9 +265,20 @@ unsigned long get_wchan(struct task_struct *p)
* *
*/ */
void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs, void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
struct task_struct *tsk)
{ {
unsigned long wb, ws, wm;
int live, last;
wb = regs->windowbase;
ws = regs->windowstart;
wm = regs->wmask;
ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
/* Don't leak any random bits. */
memset(elfregs, 0, sizeof (elfregs));
/* Note: PS.EXCM is not set while user task is running; its /* Note: PS.EXCM is not set while user task is running; its
* being set in regs->ps is for exception handling convenience. * being set in regs->ps is for exception handling convenience.
*/ */
...@@ -204,159 +289,18 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs, ...@@ -204,159 +289,18 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
elfregs->lend = regs->lend; elfregs->lend = regs->lend;
elfregs->lcount = regs->lcount; elfregs->lcount = regs->lcount;
elfregs->sar = regs->sar; elfregs->sar = regs->sar;
elfregs->windowstart = ws;
memcpy (elfregs->a, regs->areg, sizeof(elfregs->a)); live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
} last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
memcpy(elfregs->a, regs->areg, live * 4);
void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs) memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
{
do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
}
/* The inverse of do_copy_regs(). No error or sanity checking. */
void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
struct task_struct *tsk)
{
const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
unsigned long ps;
/* Note: PS.EXCM is not set while user task is running; it
* needs to be set in regs->ps is for exception handling convenience.
*/
ps = (regs->ps & ~ps_mask) | (elfregs->ps & ps_mask) | (1<<PS_EXCM_BIT);
regs->ps = ps;
regs->pc = elfregs->pc;
regs->lbeg = elfregs->lbeg;
regs->lend = elfregs->lend;
regs->lcount = elfregs->lcount;
regs->sar = elfregs->sar;
memcpy (regs->areg, elfregs->a, sizeof(regs->areg));
}
/*
* do_save_fpregs() gathers information from 'struct pt_regs' and
* 'current->thread' to fill in the elf_fpregset_t structure.
*
* Core files and ptrace use elf_fpregset_t.
*/
void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
struct task_struct *tsk)
{
#if XCHAL_HAVE_CP
extern unsigned char _xtensa_reginfo_tables[];
extern unsigned _xtensa_reginfo_table_size;
int i;
unsigned long flags;
/* Before dumping coprocessor state from memory,
* ensure any live coprocessor contents for this
* task are first saved to memory:
*/
local_irq_save(flags);
for (i = 0; i < XCHAL_CP_MAX; i++) {
if (tsk == coprocessor_info[i].owner) {
enable_coprocessor(i);
save_coprocessor_registers(
tsk->thread.cp_save+coprocessor_info[i].offset,i);
disable_coprocessor(i);
}
}
local_irq_restore(flags);
/* Now dump coprocessor & extra state: */
memcpy((unsigned char*)fpregs,
_xtensa_reginfo_tables, _xtensa_reginfo_table_size);
memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
#endif
} }
/* int dump_fpu(void)
* The inverse of do_save_fpregs().
* Copies coprocessor and extra state from fpregs into regs and tsk->thread.
* Returns 0 on success, non-zero if layout doesn't match.
*/
int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
struct task_struct *tsk)
{ {
#if XCHAL_HAVE_CP
extern unsigned char _xtensa_reginfo_tables[];
extern unsigned _xtensa_reginfo_table_size;
int i;
unsigned long flags;
/* Make sure save area layouts match.
* FIXME: in the future we could allow restoring from
* a different layout of the same registers, by comparing
* fpregs' table with _xtensa_reginfo_tables and matching
* entries and copying registers one at a time.
* Not too sure yet whether that's very useful.
*/
if( memcmp((unsigned char*)fpregs,
_xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
return -1;
}
/* Before restoring coprocessor state from memory,
* ensure any live coprocessor contents for this
* task are first invalidated.
*/
local_irq_save(flags);
for (i = 0; i < XCHAL_CP_MAX; i++) {
if (tsk == coprocessor_info[i].owner) {
enable_coprocessor(i);
save_coprocessor_registers(
tsk->thread.cp_save+coprocessor_info[i].offset,i);
coprocessor_info[i].owner = 0;
disable_coprocessor(i);
}
}
local_irq_restore(flags);
/* Now restore coprocessor & extra state: */
memcpy(tsk->thread.cp_save,
(unsigned char*)fpregs + _xtensa_reginfo_table_size,
XTENSA_CP_EXTRA_SIZE);
#endif
return 0; return 0;
} }
/*
* Fill in the CP structure for a core dump for a particular task.
*/
int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
return 0; /* no coprocessors active on this processor */
}
/*
* Fill in the CP structure for a core dump.
* This includes any FPU coprocessor.
* Here, we dump all coprocessors, and other ("extra") custom state.
*
* This function is called by elf_core_dump() in fs/binfmt_elf.c
* (in which case 'regs' comes from calls to do_coredump, see signals.c).
*/
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
return dump_task_fpu(regs, current, r);
}
asmlinkage asmlinkage
long xtensa_clone(unsigned long clone_flags, unsigned long newsp, long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
...@@ -370,8 +314,8 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp, ...@@ -370,8 +314,8 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
} }
/* /*
* * xtensa_execve() executes a new program. * xtensa_execve() executes a new program.
* */ */
asmlinkage asmlinkage
long xtensa_execve(char __user *name, char __user * __user *argv, long xtensa_execve(char __user *name, char __user * __user *argv,
...@@ -386,7 +330,6 @@ long xtensa_execve(char __user *name, char __user * __user *argv, ...@@ -386,7 +330,6 @@ long xtensa_execve(char __user *name, char __user * __user *argv,
error = PTR_ERR(filename); error = PTR_ERR(filename);
if (IS_ERR(filename)) if (IS_ERR(filename))
goto out; goto out;
// FIXME: release coprocessor??
error = do_execve(filename, argv, envp, regs); error = do_execve(filename, argv, envp, regs);
if (error == 0) { if (error == 0) {
task_lock(current); task_lock(current);
......
This diff is collapsed.
...@@ -35,13 +35,17 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); ...@@ -35,13 +35,17 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
extern struct task_struct *coproc_owners[]; extern struct task_struct *coproc_owners[];
extern void release_all_cp (struct task_struct *);
struct rt_sigframe struct rt_sigframe
{ {
struct siginfo info; struct siginfo info;
struct ucontext uc; struct ucontext uc;
cp_state_t cpstate; struct {
xtregs_opt_t opt;
xtregs_user_t user;
#if XTENSA_HAVE_COPROCESSORS
xtregs_coprocessor_t cp;
#endif
} xtregs;
unsigned char retcode[6]; unsigned char retcode[6];
unsigned int window[4]; unsigned int window[4];
}; };
...@@ -132,9 +136,10 @@ flush_window_regs_user(struct pt_regs *regs) ...@@ -132,9 +136,10 @@ flush_window_regs_user(struct pt_regs *regs)
*/ */
static int static int
setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate, setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
struct pt_regs *regs)
{ {
struct sigcontext __user *sc = &frame->uc.uc_mcontext;
struct thread_info *ti = current_thread_info();
int err = 0; int err = 0;
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) #define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
...@@ -148,21 +153,32 @@ setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate, ...@@ -148,21 +153,32 @@ setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate,
err |= flush_window_regs_user(regs); err |= flush_window_regs_user(regs);
err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4); err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
err |= __put_user(0, &sc->sc_xtregs);
// err |= __copy_to_user (sc->sc_a, regs->areg, XCHAL_NUM_AREGS * 4) if (err)
return err;
#if XCHAL_HAVE_CP #if XTENSA_HAVE_COPROCESSORS
# error Coprocessors unsupported coprocessor_flush_all(ti);
err |= save_cpextra(cpstate); coprocessor_release_all(ti);
err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate); err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
sizeof (frame->xtregs.cp));
#endif #endif
err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
sizeof (xtregs_opt_t));
err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
sizeof (xtregs_user_t));
err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
return err; return err;
} }
static int static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
{ {
struct sigcontext __user *sc = &frame->uc.uc_mcontext;
struct thread_info *ti = current_thread_info();
unsigned int err = 0; unsigned int err = 0;
unsigned long ps; unsigned long ps;
...@@ -180,6 +196,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) ...@@ -180,6 +196,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
regs->windowbase = 0; regs->windowbase = 0;
regs->windowstart = 1; regs->windowstart = 1;
regs->syscall = -1; /* disable syscall checks */
/* For PS, restore only PS.CALLINC. /* For PS, restore only PS.CALLINC.
* Assume that all other bits are either the same as for the signal * Assume that all other bits are either the same as for the signal
* handler, or the user mode value doesn't matter (e.g. PS.OWB). * handler, or the user mode value doesn't matter (e.g. PS.OWB).
...@@ -195,8 +213,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) ...@@ -195,8 +213,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4); err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
#if XCHAL_HAVE_CP if (err)
# error Coprocessors unsupported return err;
/* The signal handler may have used coprocessors in which /* The signal handler may have used coprocessors in which
* case they are still enabled. We disable them to force a * case they are still enabled. We disable them to force a
* reloading of the original task's CP state by the lazy * reloading of the original task's CP state by the lazy
...@@ -204,20 +223,20 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) ...@@ -204,20 +223,20 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
* Also, we essentially discard any coprocessor state that the * Also, we essentially discard any coprocessor state that the
* signal handler created. */ * signal handler created. */
if (!err) { #if XTENSA_HAVE_COPROCESSORS
struct task_struct *tsk = current; coprocessor_release_all(ti);
release_all_cp(tsk); err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
err |= __copy_from_user(tsk->thread.cpextra, sc->sc_cpstate, sizeof (frame->xtregs.cp));
XTENSA_CP_EXTRA_SIZE);
}
#endif #endif
err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
sizeof (xtregs_user_t));
err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
sizeof (xtregs_opt_t));
regs->syscall = -1; /* disable syscall checks */
return err; return err;
} }
/* /*
* Do a signal return; undo the signal stack. * Do a signal return; undo the signal stack.
*/ */
...@@ -246,7 +265,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3, ...@@ -246,7 +265,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock); spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) if (restore_sigcontext(regs, frame))
goto badframe; goto badframe;
ret = regs->areg[2]; ret = regs->areg[2];
...@@ -359,7 +378,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -359,7 +378,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(sas_ss_flags(regs->areg[1]), err |= __put_user(sas_ss_flags(regs->areg[1]),
&frame->uc.uc_stack.ss_flags); &frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate, regs); err |= setup_sigcontext(frame, regs);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Create sys_rt_sigreturn syscall in stack frame */ /* Create sys_rt_sigreturn syscall in stack frame */
......
...@@ -118,28 +118,28 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { ...@@ -118,28 +118,28 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */ /* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if (XCHAL_CP_MASK & 1) #if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0), COPROCESSOR(0),
#endif #endif
#if (XCHAL_CP_MASK & 2) #if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1), COPROCESSOR(1),
#endif #endif
#if (XCHAL_CP_MASK & 4) #if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2), COPROCESSOR(2),
#endif #endif
#if (XCHAL_CP_MASK & 8) #if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3), COPROCESSOR(3),
#endif #endif
#if (XCHAL_CP_MASK & 16) #if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4), COPROCESSOR(4),
#endif #endif
#if (XCHAL_CP_MASK & 32) #if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5), COPROCESSOR(5),
#endif #endif
#if (XCHAL_CP_MASK & 64) #if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6), COPROCESSOR(6),
#endif #endif
#if (XCHAL_CP_MASK & 128) #if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7), COPROCESSOR(7),
#endif #endif
{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug }, { EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
......
...@@ -5,81 +5,168 @@ ...@@ -5,81 +5,168 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2003 - 2005 Tensilica Inc. * Copyright (C) 2003 - 2007 Tensilica Inc.
*/ */
#ifndef _XTENSA_COPROCESSOR_H #ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H #define _XTENSA_COPROCESSOR_H
#include <asm/variant/core.h> #include <linux/stringify.h>
#include <asm/variant/tie.h> #include <asm/variant/tie.h>
#include <asm/types.h>
#ifdef __ASSEMBLY__
# include <asm/variant/tie-asm.h>
.macro xchal_sa_start a b
.set .Lxchal_pofs_, 0
.set .Lxchal_ofs_, 0
.endm
.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
.set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
.set .Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
.endm
#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
| XTHAL_SAS_CC \
| XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
.if XTREGS_OPT_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
.if XTREGS_OPT_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
#undef _SELECT
#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
| XTHAL_SAS_NOCC \
| XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
.if XTREGS_USER_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
.if XTREGS_USER_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
#undef _SELECT
#endif /* __ASSEMBLY__ */
#if !XCHAL_HAVE_CP
#define XTENSA_CP_EXTRA_OFFSET 0
#define XTENSA_CP_EXTRA_ALIGN 1 /* must be a power of 2 */
#define XTENSA_CP_EXTRA_SIZE 0
#else
#define XTOFS(last_start,last_size,align) \
((last_start+last_size+align-1) & -align)
#define XTENSA_CP_EXTRA_OFFSET 0
#define XTENSA_CP_EXTRA_ALIGN XCHAL_EXTRA_SA_ALIGN
#define XTENSA_CPE_CP0_OFFSET \
XTOFS(XTENSA_CP_EXTRA_OFFSET, XCHAL_EXTRA_SA_SIZE, XCHAL_CP0_SA_ALIGN)
#define XTENSA_CPE_CP1_OFFSET \
XTOFS(XTENSA_CPE_CP0_OFFSET, XCHAL_CP0_SA_SIZE, XCHAL_CP1_SA_ALIGN)
#define XTENSA_CPE_CP2_OFFSET \
XTOFS(XTENSA_CPE_CP1_OFFSET, XCHAL_CP1_SA_SIZE, XCHAL_CP2_SA_ALIGN)
#define XTENSA_CPE_CP3_OFFSET \
XTOFS(XTENSA_CPE_CP2_OFFSET, XCHAL_CP2_SA_SIZE, XCHAL_CP3_SA_ALIGN)
#define XTENSA_CPE_CP4_OFFSET \
XTOFS(XTENSA_CPE_CP3_OFFSET, XCHAL_CP3_SA_SIZE, XCHAL_CP4_SA_ALIGN)
#define XTENSA_CPE_CP5_OFFSET \
XTOFS(XTENSA_CPE_CP4_OFFSET, XCHAL_CP4_SA_SIZE, XCHAL_CP5_SA_ALIGN)
#define XTENSA_CPE_CP6_OFFSET \
XTOFS(XTENSA_CPE_CP5_OFFSET, XCHAL_CP5_SA_SIZE, XCHAL_CP6_SA_ALIGN)
#define XTENSA_CPE_CP7_OFFSET \
XTOFS(XTENSA_CPE_CP6_OFFSET, XCHAL_CP6_SA_SIZE, XCHAL_CP7_SA_ALIGN)
#define XTENSA_CP_EXTRA_SIZE \
XTOFS(XTENSA_CPE_CP7_OFFSET, XCHAL_CP7_SA_SIZE, 16)
#if XCHAL_CP_NUM > 0
# ifndef __ASSEMBLY__
/* /*
* Tasks that own contents of (last user) each coprocessor. * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
* Entries are 0 for not-owned or non-existent coprocessors. *
* Note: The size of this structure is fixed to 8 bytes in entry.S * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
*
*/ */
typedef struct {
struct task_struct *owner; /* owner */
int offset; /* offset in cpextra space. */
} coprocessor_info_t;
# else
# define COPROCESSOR_INFO_OWNER 0
# define COPROCESSOR_INFO_OFFSET 4
# define COPROCESSOR_INFO_SIZE 8
# endif
#endif
#endif /* XCHAL_HAVE_CP */
#define XTENSA_HAVE_COPROCESSOR(x) \
((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
#define XTENSA_HAVE_COPROCESSORS \
(XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
#define XTENSA_HAVE_IO_PORT(x) \
(XCHAL_CP_PORT_MASK & (1 << (x)))
#define XTENSA_HAVE_IO_PORTS \
XCHAL_CP_PORT_MASK
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
# if XCHAL_CP_NUM > 0
struct task_struct;
extern void release_coprocessors (struct task_struct*);
extern void save_coprocessor_registers(void*, int);
# else
# define release_coprocessors(task)
# endif
typedef unsigned char cp_state_t[XTENSA_CP_EXTRA_SIZE]
__attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));
#endif /* !__ASSEMBLY__ */ #if XCHAL_HAVE_CP
#define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" \
:: "a" (x)); \
} while(0);
#endif /* XCHAL_HAVE_CP */
/*
* Additional registers.
* We define three types of additional registers:
* ext: extra registers that are used by the compiler
* cpn: optional registers that can be used by a user application
* cpX: coprocessor registers that can only be used if the corresponding
* CPENABLE bit is set.
*/
#define XCHAL_SA_REG(list,compiler,x,type,y,name,z,align,size,...) \
__REG ## list (compiler, type, name, size, align)
#define __REG0(compiler,t,name,s,a) __REG0_ ## compiler (name)
#define __REG1(compiler,t,name,s,a) __REG1_ ## compiler (name)
#define __REG2(c,type,...) __REG2_ ## type (__VA_ARGS__)
#define __REG0_0(name)
#define __REG0_1(name) __u32 name;
#define __REG1_0(name) __u32 name;
#define __REG1_1(name)
#define __REG2_0(n,s,a) __u32 name;
#define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
#if XTENSA_HAVE_COPROCESSORS
typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
__attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
__attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
__attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
__attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
__attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
__attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
__attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
extern void coprocessor_save(void*, int);
extern void coprocessor_load(void*, int);
extern void coprocessor_flush(struct thread_info*, int);
extern void coprocessor_restore(struct thread_info*, int);
extern void coprocessor_release_all(struct thread_info*);
extern void coprocessor_flush_all(struct thread_info*);
static inline void coprocessor_clear_cpenable(void)
{
unsigned long i = 0;
WSR_CPENABLE(i);
}
#endif /* XTENSA_HAVE_COPROCESSORS */
#endif /* !__ASSEMBLY__ */
#endif /* _XTENSA_COPROCESSOR_H */ #endif /* _XTENSA_COPROCESSOR_H */
...@@ -173,6 +173,21 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); ...@@ -173,6 +173,21 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
_r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
} while (0) } while (0)
typedef struct {
xtregs_opt_t opt;
xtregs_user_t user;
#if XTENSA_HAVE_COPROCESSORS
xtregs_cp0_t cp0;
xtregs_cp1_t cp1;
xtregs_cp2_t cp2;
xtregs_cp3_t cp3;
xtregs_cp4_t cp4;
xtregs_cp5_t cp5;
xtregs_cp6_t cp6;
xtregs_cp7_t cp7;
#endif
} elf_xtregs_t;
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) #define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
struct task_struct; struct task_struct;
......
...@@ -103,10 +103,6 @@ struct thread_struct { ...@@ -103,10 +103,6 @@ struct thread_struct {
unsigned long dbreaka[XCHAL_NUM_DBREAK]; unsigned long dbreaka[XCHAL_NUM_DBREAK];
unsigned long dbreakc[XCHAL_NUM_DBREAK]; unsigned long dbreakc[XCHAL_NUM_DBREAK];
/* Allocate storage for extra state and coprocessor state. */
unsigned char cp_save[XTENSA_CP_EXTRA_SIZE]
__attribute__ ((aligned(XTENSA_CP_EXTRA_ALIGN)));
/* Make structure 16 bytes aligned. */ /* Make structure 16 bytes aligned. */
int align[0] __attribute__ ((aligned(16))); int align[0] __attribute__ ((aligned(16)));
}; };
...@@ -162,21 +158,16 @@ struct thread_struct { ...@@ -162,21 +158,16 @@ struct thread_struct {
struct task_struct; struct task_struct;
struct mm_struct; struct mm_struct;
// FIXME: do we need release_thread for CP??
/* Free all resources held by a thread. */ /* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0) #define release_thread(thread) do { } while(0)
// FIXME: do we need prepare_to_copy (lazy status) for CP??
/* Prepare to copy thread state - unlazy all lazy status */ /* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0) extern void prepare_to_copy(struct task_struct*);
/* /* Create a kernel thread without removing it from tasklists */
* create a kernel thread without removing it from tasklists
*/
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/* Copy and release all segment info associated with a VM */ /* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while(0) #define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0) #define release_segments(mm) do { } while(0)
#define forget_segments() do { } while (0) #define forget_segments() do { } while (0)
......
...@@ -53,33 +53,30 @@ ...@@ -53,33 +53,30 @@
/* Registers used by strace */ /* Registers used by strace */
#define REG_A_BASE 0xfc000000 #define REG_A_BASE 0x0000
#define REG_AR_BASE 0x04000000 #define REG_AR_BASE 0x0100
#define REG_PC 0x14000000 #define REG_PC 0x0020
#define REG_PS 0x080000e6 #define REG_PS 0x02e6
#define REG_WB 0x08000048 #define REG_WB 0x0248
#define REG_WS 0x08000049 #define REG_WS 0x0249
#define REG_LBEG 0x08000000 #define REG_LBEG 0x0200
#define REG_LEND 0x08000001 #define REG_LEND 0x0201
#define REG_LCOUNT 0x08000002 #define REG_LCOUNT 0x0202
#define REG_SAR 0x08000003 #define REG_SAR 0x0203
#define REG_DEPC 0x080000c0
#define REG_EXCCAUSE 0x080000e8 #define SYSCALL_NR 0x00ff
#define REG_EXCVADDR 0x080000ee
#define SYSCALL_NR 0x1
#define AR_REGNO_TO_A_REGNO(ar, wb) (ar - wb*4) & ~(XCHAL_NUM_AREGS - 1)
/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */ /* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
#define PTRACE_GETREGS 12 #define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13 #define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14 #define PTRACE_GETXTREGS 18
#define PTRACE_SETFPREGS 15 #define PTRACE_SETXTREGS 19
#define PTRACE_GETFPREGSIZE 18
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#ifdef __KERNEL__
/* /*
* This struct defines the way the registers are stored on the * This struct defines the way the registers are stored on the
* kernel stack during a system call or other kernel entry. * kernel stack during a system call or other kernel entry.
...@@ -102,6 +99,9 @@ struct pt_regs { ...@@ -102,6 +99,9 @@ struct pt_regs {
unsigned long icountlevel; /* 60 */ unsigned long icountlevel; /* 60 */
int reserved[1]; /* 64 */ int reserved[1]; /* 64 */
/* Additional configurable registers that are used by the compiler. */
xtregs_opt_t xtregs_opt;
/* Make sure the areg field is 16 bytes aligned. */ /* Make sure the areg field is 16 bytes aligned. */
int align[0] __attribute__ ((aligned(16))); int align[0] __attribute__ ((aligned(16)));
...@@ -111,8 +111,6 @@ struct pt_regs { ...@@ -111,8 +111,6 @@ struct pt_regs {
unsigned long areg[16]; /* 128 (64) */ unsigned long areg[16]; /* 128 (64) */
}; };
#ifdef __KERNEL__
#include <asm/variant/core.h> #include <asm/variant/core.h>
# define task_pt_regs(tsk) ((struct pt_regs*) \ # define task_pt_regs(tsk) ((struct pt_regs*) \
......
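
The old PTRACE_GETFPREGS/PTRACE_SETFPREGS requests are replaced above by
PTRACE_GETXTREGS/PTRACE_SETXTREGS. A minimal user-space sketch of how a
debugger might fetch the extended register set follows; the buffer size and
layout (matching the kernel's elf_xtregs_t) and the requirement that the child
is already stopped are assumptions not spelled out by this diff.

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	#define PTRACE_GETXTREGS 18	/* from the patched asm/ptrace.h */

	/* Buffer sized generously; the exact layout follows elf_xtregs_t. */
	static unsigned char xtregs[4096];

	static int read_xtregs(pid_t pid)
	{
		/* The traced child must already be stopped, e.g. after waitpid(). */
		if (ptrace((enum __ptrace_request)PTRACE_GETXTREGS, pid,
			   (void *)0, xtregs) < 0) {
			perror("PTRACE_GETXTREGS");
			return -1;
		}
		return 0;
	}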
...@@ -100,7 +100,14 @@ ...@@ -100,7 +100,14 @@
#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27 #define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28 #define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29 #define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
#define EXCCAUSE_FLOATING_POINT 40 #define EXCCAUSE_COPROCESSOR0_DISABLED 32
#define EXCCAUSE_COPROCESSOR1_DISABLED 33
#define EXCCAUSE_COPROCESSOR2_DISABLED 34
#define EXCCAUSE_COPROCESSOR3_DISABLED 35
#define EXCCAUSE_COPROCESSOR4_DISABLED 36
#define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39
/* PS register fields. */ /* PS register fields. */
......
...@@ -22,6 +22,7 @@ struct sigcontext { ...@@ -22,6 +22,7 @@ struct sigcontext {
unsigned long sc_acclo; unsigned long sc_acclo;
unsigned long sc_acchi; unsigned long sc_acchi;
unsigned long sc_a[16]; unsigned long sc_a[16];
void *sc_xtregs;
}; };
#endif /* _XTENSA_SIGCONTEXT_H */ #endif /* _XTENSA_SIGCONTEXT_H */
...@@ -46,42 +46,6 @@ static inline int irqs_disabled(void) ...@@ -46,42 +46,6 @@ static inline int irqs_disabled(void)
return flags & 0xf; return flags & 0xf;
} }
#define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync" \
:: "a" (x));} while(0);
#define clear_cpenable() __clear_cpenable()
static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
unsigned long i = 0;
WSR_CPENABLE(i);
#endif
}
static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
int cp;
RSR_CPENABLE(cp);
cp |= 1 << i;
WSR_CPENABLE(cp);
#endif
}
static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
int cp;
RSR_CPENABLE(cp);
cp &= ~(1 << i);
WSR_CPENABLE(cp);
#endif
}
#define smp_read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while(0)
...@@ -111,7 +75,6 @@ extern void *_switch_to(void *last, void *next); ...@@ -111,7 +75,6 @@ extern void *_switch_to(void *last, void *next);
#define switch_to(prev,next,last) \ #define switch_to(prev,next,last) \
do { \ do { \
clear_cpenable(); \
(last) = _switch_to(prev, next); \ (last) = _switch_to(prev, next); \
} while(0) } while(0)
...@@ -244,7 +207,7 @@ static inline void spill_registers(void) ...@@ -244,7 +207,7 @@ static inline void spill_registers(void)
"wsr a13," __stringify(SAR) "\n\t" "wsr a13," __stringify(SAR) "\n\t"
"wsr a14," __stringify(PS) "\n\t" "wsr a14," __stringify(PS) "\n\t"
:: "a" (&a0), "a" (&ps) :: "a" (&a0), "a" (&ps)
: "a2", "a3", "a12", "a13", "a14", "a15", "memory"); : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
} }
#define arch_align_stack(x) (x) #define arch_align_stack(x) (x)
......
...@@ -27,6 +27,21 @@ ...@@ -27,6 +27,21 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#if XTENSA_HAVE_COPROCESSORS
typedef struct xtregs_coprocessor {
xtregs_cp0_t cp0;
xtregs_cp1_t cp1;
xtregs_cp2_t cp2;
xtregs_cp3_t cp3;
xtregs_cp4_t cp4;
xtregs_cp5_t cp5;
xtregs_cp6_t cp6;
xtregs_cp7_t cp7;
} xtregs_coprocessor_t;
#endif
struct thread_info { struct thread_info {
struct task_struct *task; /* main task structure */ struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */ struct exec_domain *exec_domain; /* execution domain */
...@@ -38,7 +53,13 @@ struct thread_info { ...@@ -38,7 +53,13 @@ struct thread_info {
mm_segment_t addr_limit; /* thread address space */ mm_segment_t addr_limit; /* thread address space */
struct restart_block restart_block; struct restart_block restart_block;
unsigned long cpenable;
/* Allocate storage for extra user states and coprocessor states. */
#if XTENSA_HAVE_COPROCESSORS
xtregs_coprocessor_t xtregs_cp;
#endif
xtregs_user_t xtregs_user;
}; };
#else /* !__ASSEMBLY__ */ #else /* !__ASSEMBLY__ */
......
/*
* This header file contains assembly-language definitions (assembly
* macros, etc.) for this specific Xtensa processor's TIE extensions
* and options. It is customized to this Xtensa processor configuration.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2008 Tensilica Inc.
*/
#ifndef _XTENSA_CORE_TIE_ASM_H
#define _XTENSA_CORE_TIE_ASM_H
/* Selection parameter values for save-area save/restore macros: */
/* Option vs. TIE: */
#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
/* Whether used automatically by compiler: */
#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
/* ABI handling across function calls: */
#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
/* Misc */
#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
/* Macro to save all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Save area ptr (clobbered): ptr (1 byte aligned)
* Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
*/
.macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
xchal_sa_start \continue, \ofs
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
xchal_sa_align \ptr, 0, 1024-4, 4, 4
rur \at1, THREADPTR // threadptr option
s32i \at1, \ptr, .Lxchal_ofs_ + 0
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
.endm // xchal_ncp_store
/* Macro to save all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Save area ptr (clobbered): ptr (1 byte aligned)
* Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
*/
.macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
xchal_sa_start \continue, \ofs
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
xchal_sa_align \ptr, 0, 1024-4, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_ + 0
wur \at1, THREADPTR // threadptr option
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
.endm // xchal_ncp_load
#define XCHAL_NCP_NUM_ATMPS 1
#define XCHAL_SA_NUM_ATMPS 1
#endif /*_XTENSA_CORE_TIE_ASM_H*/
/* /*
* Xtensa processor core configuration information. * This header file describes this specific Xtensa processor's TIE extensions
* that extend basic Xtensa core functionality. It is customized to this
* Xtensa processor configuration.
* *
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 1999-2006 Tensilica Inc. * Copyright (C) 1999-2007 Tensilica Inc.
*/ */
#ifndef XTENSA_TIE_H #ifndef _XTENSA_CORE_TIE_H
#define XTENSA_TIE_H #define _XTENSA_CORE_TIE_H
/*----------------------------------------------------------------------
COPROCESSORS and EXTRA STATE
----------------------------------------------------------------------*/
#define XCHAL_CP_NUM 0 /* number of coprocessors */ #define XCHAL_CP_NUM 0 /* number of coprocessors */
#define XCHAL_CP_MASK 0x00 #define XCHAL_CP_MAX 0 /* max CP ID + 1 (0 if none) */
#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
/* Basic parameters of each coprocessor: */
#define XCHAL_CP7_NAME "XTIOP"
#define XCHAL_CP7_IDENT XTIOP
#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
/* Filler info for unassigned coprocessors, to simplify arrays etc: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1
#define XCHAL_CP0_SA_SIZE 0
#define XCHAL_CP0_SA_ALIGN 1
#define XCHAL_CP1_SA_SIZE 0
#define XCHAL_CP1_SA_ALIGN 1
#define XCHAL_CP2_SA_SIZE 0
#define XCHAL_CP2_SA_ALIGN 1
#define XCHAL_CP3_SA_SIZE 0
#define XCHAL_CP3_SA_ALIGN 1
#define XCHAL_CP4_SA_SIZE 0
#define XCHAL_CP4_SA_ALIGN 1
#define XCHAL_CP5_SA_SIZE 0
#define XCHAL_CP5_SA_ALIGN 1
#define XCHAL_CP6_SA_SIZE 0
#define XCHAL_CP6_SA_ALIGN 1
/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1
/* Total save area for optional and custom state (NCP + CPn): */
#define XCHAL_TOTAL_SA_SIZE 0 /* with 16-byte align padding */
#define XCHAL_TOTAL_SA_ALIGN 1 /* actual minimum alignment */
#define XCHAL_NCP_SA_NUM 0
#define XCHAL_NCP_SA_LIST(s)
#define XCHAL_CP0_SA_NUM 0
#define XCHAL_CP0_SA_LIST(s)
#define XCHAL_CP1_SA_NUM 0
#define XCHAL_CP1_SA_LIST(s)
#define XCHAL_CP2_SA_NUM 0
#define XCHAL_CP2_SA_LIST(s)
#define XCHAL_CP3_SA_NUM 0
#define XCHAL_CP3_SA_LIST(s)
#define XCHAL_CP4_SA_NUM 0
#define XCHAL_CP4_SA_LIST(s)
#define XCHAL_CP5_SA_NUM 0
#define XCHAL_CP5_SA_LIST(s)
#define XCHAL_CP6_SA_NUM 0
#define XCHAL_CP6_SA_LIST(s)
#define XCHAL_CP7_SA_NUM 0
#define XCHAL_CP7_SA_LIST(s)
/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
#endif /*XTENSA_CONFIG_TIE_H*/ #endif /*_XTENSA_CORE_TIE_H*/