Commit 0a2796da authored by Greg Ungerer

m68k: add ColdFire FPU support for the V4e ColdFire CPUs

The V4e ColdFire CPU family also has an integrated FPU (as well as the MMU).
So add code to support this hardware alongside the existing m68k FPU code.

The ColdFire FPU is of course different from all previous 68k FP units. It is
close in operation to the 68060, but not completely compatible. The biggest
issue to deal with is that the ColdFire FPU multi-move instructions are
different. It does not support multi-moving the FP control registers, and
the multi-move of the FP data registers uses a different instruction
mnemonic.
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: Matt Waddel <mwaddel@yahoo.com>
Acked-by: Kurt Mahan <kmahan@xmission.com>
parent e9fcffa4
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
#define FPSTATESIZE (96) #define FPSTATESIZE (96)
#elif defined(CONFIG_M68KFPU_EMU) #elif defined(CONFIG_M68KFPU_EMU)
#define FPSTATESIZE (28) #define FPSTATESIZE (28)
#elif defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
#define FPSTATESIZE (16)
#elif defined(CONFIG_M68060) #elif defined(CONFIG_M68060)
#define FPSTATESIZE (12) #define FPSTATESIZE (12)
#else #else
......
...@@ -172,9 +172,7 @@ void flush_thread(void) ...@@ -172,9 +172,7 @@ void flush_thread(void)
current->thread.fs = __USER_DS; current->thread.fs = __USER_DS;
if (!FPU_IS_EMU) if (!FPU_IS_EMU)
asm volatile (".chip 68k/68881\n\t" asm volatile ("frestore %0@" : : "a" (&zero) : "memory");
"frestore %0@\n\t"
".chip 68k" : : "a" (&zero));
} }
/* /*
...@@ -248,11 +246,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -248,11 +246,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Copy the current fpu state */ /* Copy the current fpu state */
asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory"); asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t" if (CPU_IS_COLDFIRE) {
"fmoveml %/fpiar/%/fpcr/%/fpsr,%1" asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
: : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0]) "fmovel %/fpiar,%1\n\t"
: "memory"); "fmovel %/fpcr,%2\n\t"
"fmovel %/fpsr,%3"
:
: "m" (p->thread.fp[0]),
"m" (p->thread.fpcntl[0]),
"m" (p->thread.fpcntl[1]),
"m" (p->thread.fpcntl[2])
: "memory");
} else {
asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
"fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
:
: "m" (p->thread.fp[0]),
"m" (p->thread.fpcntl[0])
: "memory");
}
}
/* Restore the state in case the fpu was busy */ /* Restore the state in case the fpu was busy */
asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0])); asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
} }
...@@ -285,12 +300,28 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu) ...@@ -285,12 +300,28 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2]) if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
return 0; return 0;
asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0" if (CPU_IS_COLDFIRE) {
:: "m" (fpu->fpcntl[0]) asm volatile ("fmovel %/fpiar,%0\n\t"
: "memory"); "fmovel %/fpcr,%1\n\t"
asm volatile ("fmovemx %/fp0-%/fp7,%0" "fmovel %/fpsr,%2\n\t"
:: "m" (fpu->fpregs[0]) "fmovemd %/fp0-%/fp7,%3"
: "memory"); :
: "m" (fpu->fpcntl[0]),
"m" (fpu->fpcntl[1]),
"m" (fpu->fpcntl[2]),
"m" (fpu->fpregs[0])
: "memory");
} else {
asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
:
: "m" (fpu->fpcntl[0])
: "memory");
asm volatile ("fmovemx %/fp0-%/fp7,%0"
:
: "m" (fpu->fpregs[0])
: "memory");
}
return 1; return 1;
} }
EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(dump_fpu);
......
...@@ -236,7 +236,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -236,7 +236,7 @@ void __init setup_arch(char **cmdline_p)
* with them, we should add a test to check_bugs() below] */ * with them, we should add a test to check_bugs() below] */
#ifndef CONFIG_M68KFPU_EMU_ONLY #ifndef CONFIG_M68KFPU_EMU_ONLY
/* clear the fpu if we have one */ /* clear the fpu if we have one */
if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) { if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
volatile int zero = 0; volatile int zero = 0;
asm volatile ("frestore %0" : : "m" (zero)); asm volatile ("frestore %0" : : "m" (zero));
} }
......
...@@ -203,7 +203,8 @@ static inline int restore_fpu_state(struct sigcontext *sc) ...@@ -203,7 +203,8 @@ static inline int restore_fpu_state(struct sigcontext *sc)
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
/* Verify the frame format. */ /* Verify the frame format. */
if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version)) if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
(sc->sc_fpstate[0] != fpu_version))
goto out; goto out;
if (CPU_IS_020_OR_030) { if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 && if (m68k_fputype & FPU_68881 &&
...@@ -222,19 +223,43 @@ static inline int restore_fpu_state(struct sigcontext *sc) ...@@ -222,19 +223,43 @@ static inline int restore_fpu_state(struct sigcontext *sc)
sc->sc_fpstate[3] == 0x60 || sc->sc_fpstate[3] == 0x60 ||
sc->sc_fpstate[3] == 0xe0)) sc->sc_fpstate[3] == 0xe0))
goto out; goto out;
} else if (CPU_IS_COLDFIRE) {
if (!(sc->sc_fpstate[0] == 0x00 ||
sc->sc_fpstate[0] == 0x05 ||
sc->sc_fpstate[0] == 0xe5))
goto out;
} else } else
goto out; goto out;
__asm__ volatile (".chip 68k/68881\n\t" if (CPU_IS_COLDFIRE) {
"fmovemx %0,%%fp0-%%fp1\n\t" __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" "fmovel %1,%%fpcr\n\t"
".chip 68k" "fmovel %2,%%fpsr\n\t"
: /* no outputs */ "fmovel %3,%%fpiar"
: "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl)); : /* no outputs */
: "m" (sc->sc_fpregs[0]),
"m" (sc->sc_fpcntl[0]),
"m" (sc->sc_fpcntl[1]),
"m" (sc->sc_fpcntl[2]));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp1\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*sc->sc_fpregs),
"m" (*sc->sc_fpcntl));
}
}
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k"
: : "m" (*sc->sc_fpstate));
} }
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k" : : "m" (*sc->sc_fpstate));
err = 0; err = 0;
out: out:
...@@ -249,7 +274,7 @@ static inline int restore_fpu_state(struct sigcontext *sc) ...@@ -249,7 +274,7 @@ static inline int restore_fpu_state(struct sigcontext *sc)
static inline int rt_restore_fpu_state(struct ucontext __user *uc) static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{ {
unsigned char fpstate[FPCONTEXT_SIZE]; unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0; int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
fpregset_t fpregs; fpregset_t fpregs;
int err = 1; int err = 1;
...@@ -268,10 +293,11 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc) ...@@ -268,10 +293,11 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
goto out; goto out;
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
if (!CPU_IS_060) if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1]; context_size = fpstate[1];
/* Verify the frame format. */ /* Verify the frame format. */
if (!CPU_IS_060 && (fpstate[0] != fpu_version)) if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
(fpstate[0] != fpu_version))
goto out; goto out;
if (CPU_IS_020_OR_030) { if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 && if (m68k_fputype & FPU_68881 &&
...@@ -290,26 +316,50 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc) ...@@ -290,26 +316,50 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
fpstate[3] == 0x60 || fpstate[3] == 0x60 ||
fpstate[3] == 0xe0)) fpstate[3] == 0xe0))
goto out; goto out;
} else if (CPU_IS_COLDFIRE) {
if (!(fpstate[3] == 0x00 ||
fpstate[3] == 0x05 ||
fpstate[3] == 0xe5))
goto out;
} else } else
goto out; goto out;
if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
sizeof(fpregs))) sizeof(fpregs)))
goto out; goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp7\n\t" if (CPU_IS_COLDFIRE) {
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
".chip 68k" "fmovel %1,%%fpcr\n\t"
: /* no outputs */ "fmovel %2,%%fpsr\n\t"
: "m" (*fpregs.f_fpregs), "fmovel %3,%%fpiar"
"m" (*fpregs.f_fpcntl)); : /* no outputs */
: "m" (fpregs.f_fpregs[0]),
"m" (fpregs.f_fpcntl[0]),
"m" (fpregs.f_fpcntl[1]),
"m" (fpregs.f_fpcntl[2]));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp7\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*fpregs.f_fpregs),
"m" (*fpregs.f_fpcntl));
}
} }
if (context_size && if (context_size &&
__copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
context_size)) context_size))
goto out; goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t" if (CPU_IS_COLDFIRE) {
".chip 68k" : : "m" (*fpstate)); __asm__ volatile ("frestore %0" : : "m" (*fpstate));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k"
: : "m" (*fpstate));
}
err = 0; err = 0;
out: out:
...@@ -529,10 +579,15 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) ...@@ -529,10 +579,15 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
return; return;
} }
__asm__ volatile (".chip 68k/68881\n\t" if (CPU_IS_COLDFIRE) {
"fsave %0\n\t" __asm__ volatile ("fsave %0"
".chip 68k" : : "m" (*sc->sc_fpstate) : "memory");
: : "m" (*sc->sc_fpstate) : "memory"); } else {
__asm__ volatile (".chip 68k/68881\n\t"
"fsave %0\n\t"
".chip 68k"
: : "m" (*sc->sc_fpstate) : "memory");
}
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
fpu_version = sc->sc_fpstate[0]; fpu_version = sc->sc_fpstate[0];
...@@ -543,21 +598,35 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) ...@@ -543,21 +598,35 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
if (*(unsigned short *) sc->sc_fpstate == 0x1f38) if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
sc->sc_fpstate[0x38] |= 1 << 3; sc->sc_fpstate[0x38] |= 1 << 3;
} }
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp1,%0\n\t" if (CPU_IS_COLDFIRE) {
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
".chip 68k" "fmovel %%fpcr,%1\n\t"
: "=m" (*sc->sc_fpregs), "fmovel %%fpsr,%2\n\t"
"=m" (*sc->sc_fpcntl) "fmovel %%fpiar,%3"
: /* no inputs */ : "=m" (sc->sc_fpregs[0]),
: "memory"); "=m" (sc->sc_fpcntl[0]),
"=m" (sc->sc_fpcntl[1]),
"=m" (sc->sc_fpcntl[2])
: /* no inputs */
: "memory");
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp1,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: "=m" (*sc->sc_fpregs),
"=m" (*sc->sc_fpcntl)
: /* no inputs */
: "memory");
}
} }
} }
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{ {
unsigned char fpstate[FPCONTEXT_SIZE]; unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0; int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
int err = 0; int err = 0;
if (FPU_IS_EMU) { if (FPU_IS_EMU) {
...@@ -570,15 +639,19 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs * ...@@ -570,15 +639,19 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
return err; return err;
} }
__asm__ volatile (".chip 68k/68881\n\t" if (CPU_IS_COLDFIRE) {
"fsave %0\n\t" __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
".chip 68k" } else {
: : "m" (*fpstate) : "memory"); __asm__ volatile (".chip 68k/68881\n\t"
"fsave %0\n\t"
".chip 68k"
: : "m" (*fpstate) : "memory");
}
err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
fpregset_t fpregs; fpregset_t fpregs;
if (!CPU_IS_060) if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1]; context_size = fpstate[1];
fpu_version = fpstate[0]; fpu_version = fpstate[0];
if (CPU_IS_020_OR_030 && if (CPU_IS_020_OR_030 &&
...@@ -588,14 +661,27 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs * ...@@ -588,14 +661,27 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
if (*(unsigned short *) fpstate == 0x1f38) if (*(unsigned short *) fpstate == 0x1f38)
fpstate[0x38] |= 1 << 3; fpstate[0x38] |= 1 << 3;
} }
__asm__ volatile (".chip 68k/68881\n\t" if (CPU_IS_COLDFIRE) {
"fmovemx %%fp0-%%fp7,%0\n\t" __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" "fmovel %%fpcr,%1\n\t"
".chip 68k" "fmovel %%fpsr,%2\n\t"
: "=m" (*fpregs.f_fpregs), "fmovel %%fpiar,%3"
"=m" (*fpregs.f_fpcntl) : "=m" (fpregs.f_fpregs[0]),
: /* no inputs */ "=m" (fpregs.f_fpcntl[0]),
: "memory"); "=m" (fpregs.f_fpcntl[1]),
"=m" (fpregs.f_fpcntl[2])
: /* no inputs */
: "memory");
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp7,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: "=m" (*fpregs.f_fpregs),
"=m" (*fpregs.f_fpcntl)
: /* no inputs */
: "memory");
}
err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
sizeof(fpregs)); sizeof(fpregs));
} }
...@@ -692,8 +778,7 @@ static inline void push_cache (unsigned long vaddr) ...@@ -692,8 +778,7 @@ static inline void push_cache (unsigned long vaddr)
"cpushl %%bc,(%0)\n\t" "cpushl %%bc,(%0)\n\t"
".chip 68k" ".chip 68k"
: : "a" (temp)); : : "a" (temp));
} } else if (!CPU_IS_COLDFIRE) {
else {
/* /*
* 68030/68020 have no writeback cache; * 68030/68020 have no writeback cache;
* still need to clear icache. * still need to clear icache.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment