Commit afe4b25e authored by Lennert Buytenhek, committed by Russell King

[ARM] 3881/4: xscale: clean up cp0/cp1 handling

XScale cores have either a DSP coprocessor (which contains a single
40-bit accumulator register) or an iWMMXt coprocessor (which contains
sixteen 64-bit registers).

Because of the small amount of state in the DSP coprocessor, access to
the DSP coprocessor (CP0) is always enabled, and DSP context switching
is done unconditionally on every task switch.  Access to the iWMMXt
coprocessor (CP0/CP1) is enabled only when an iWMMXt instruction is
first issued, and iWMMXt context switching is done lazily.
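
To illustrate the lazy scheme (the actual handlers live in arch/arm/kernel/iwmmxt.S
and the undefined-instruction path, which this patch does not touch), here is a
minimal C sketch; all helper names in it (cp_enable, cp_disable, save_iwmmxt_state,
restore_iwmmxt_state) are hypothetical stand-ins, not the kernel's real API:

/* Hypothetical sketch of lazy iWMMXt context switching. */
struct task;					/* stand-in for the kernel's thread_info */

/* hypothetical stand-ins for the CP15 coprocessor-access writes and for
 * the save/restore routines implemented in iwmmxt.S */
void cp_enable(void);
void cp_disable(void);
void save_iwmmxt_state(struct task *t);
void restore_iwmmxt_state(struct task *t);

static struct task *iwmmxt_owner;		/* task whose state is live in CP0/CP1 */

/* context-switch path: cheap, no register state is copied here */
void lazy_switch(struct task *next)
{
	(void)next;
	cp_disable();				/* next iWMMXt insn will trap */
}

/* undefined-instruction trap taken on a task's first iWMMXt instruction */
void lazy_fault(struct task *curr)
{
	cp_enable();
	if (iwmmxt_owner != curr) {
		if (iwmmxt_owner)
			save_iwmmxt_state(iwmmxt_owner);
		restore_iwmmxt_state(curr);
		iwmmxt_owner = curr;
	}
	/* the faulting instruction is then retried and succeeds */
}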

CONFIG_IWMMXT is supposed to mean 'the CPU we will be running on will
have iWMMXt support', but boards have to select this config symbol by
hand, and at least one pxa27x board doesn't get this right.  On that
board, proc-xscale.S incorrectly assumes that we have a DSP coprocessor
and enables CP0 on boot, and we then save only the first iWMMXt
register (wR0) on context switches, which is Bad.

This patch redefines CONFIG_IWMMXT as 'the cpu we will be running on
might have iWMMXt support, and we will enable iWMMXt context switching
if it does.'  This means that with this patch, running a CONFIG_IWMMXT=n
kernel on an iWMMXt-capable CPU will no longer potentially corrupt iWMMXt
state over context switches, and running a CONFIG_IWMMXT=y kernel on a
non-iWMMXt capable CPU will still do DSP context save/restore.

These changes should make iWMMXt work on PXA3xx and, as a side effect,
enable proper acc0 save/restore on non-iWMMXt capable xsc3 cores such
as IOP13xx and IXP23xx (which will not have CONFIG_CPU_XSCALE defined),
as well as make the kernel set and use HWCAP_IWMMXT properly.
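
With HWCAP_IWMMXT now set only when the coprocessor is actually detected,
user space can test for it via the ELF auxiliary vector.  A minimal sketch
(getauxval() is a glibc 2.16+ convenience that postdates this patch; older
user space would parse /proc/self/auxv instead):

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP */
#include <asm/hwcap.h>		/* HWCAP_IWMMXT (ARM) */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_IWMMXT)
		printf("iWMMXt coprocessor available\n");
	else
		printf("no iWMMXt, using plain integer fallback\n");
	return 0;
}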
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent f5236225
arch/arm/Kconfig
@@ -374,6 +374,14 @@ config PLAT_IOP

 source arch/arm/mm/Kconfig

+config IWMMXT
+	bool "Enable iWMMXt support"
+	depends CPU_XSCALE || CPU_XSC3
+	default y if PXA27x
+	help
+	  Enable support for iWMMXt context switching at run time if
+	  running on a CPU that supports it.
+
 #  bool 'Use XScale PMU as timer source' CONFIG_XSCALE_PMU_TIMER
 config XSCALE_PMU
 	bool

arch/arm/kernel/Makefile
@@ -24,7 +24,9 @@ obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
 obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312

-obj-$(CONFIG_IWMMXT)		+= iwmmxt.o iwmmxt-notifier.o
+obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
+obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
+obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt

 ifneq ($(CONFIG_ARCH_EBSA110),y)

arch/arm/kernel/entry-armv.S
@@ -589,10 +589,6 @@ ENTRY(__switch_to)
 	strex	r5, r4, [ip]			@ Clear exclusive monitor
 #endif
 #endif
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
-	mra	r4, r5, acc0
-	stmia	ip, {r4, r5}
-#endif
 #if defined(CONFIG_HAS_TLS_REG)
 	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
 #elif !defined(CONFIG_TLS_REG_EMUL)
@@ -601,11 +597,6 @@ ENTRY(__switch_to)
 #endif
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
-#endif
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
-	add	r4, r2, #TI_CPU_DOMAIN + 40	@ cpu_context_save->extra
-	ldmib	r4, {r4, r5}
-	mar	acc0, r4, r5
 #endif
 	mov	r5, r0
 	add	r4, r2, #TI_CPU_SAVE

arch/arm/kernel/iwmmxt-notifier.c (deleted)

/*
 * linux/arch/arm/kernel/iwmmxt-notifier.c
 *
 * XScale iWMMXt (Concan) context switching and handling
 *
 * Initial code:
 *	Copyright (c) 2003, Intel Corporation
 *
 * Full lazy switching support, optimizations and more, by Nicolas Pitre
 *	Copyright (c) 2003-2004, MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/thread_notify.h>
#include <asm/io.h>

static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/*
		 * flush_thread() zeroes thread->fpstate, so no need
		 * to do anything here.
		 *
		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
		 * initialised state information on the first fault.
		 */

	case THREAD_NOTIFY_RELEASE:
		iwmmxt_task_release(thread);
		break;

	case THREAD_NOTIFY_SWITCH:
		iwmmxt_task_switch(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block iwmmxt_notifier_block = {
	.notifier_call	= iwmmxt_do,
};

static int __init iwmmxt_init(void)
{
	thread_register_notifier(&iwmmxt_notifier_block);

	return 0;
}

late_initcall(iwmmxt_init);

arch/arm/kernel/setup.c
@@ -357,9 +357,6 @@ static void __init setup_processor(void)
 #ifndef CONFIG_VFP
 	elf_hwcap &= ~HWCAP_VFP;
 #endif
-#ifndef CONFIG_IWMMXT
-	elf_hwcap &= ~HWCAP_IWMMXT;
-#endif

 	cpu_proc_init();
 }

arch/arm/kernel/xscale-cp0.c (new file)

/*
 * linux/arch/arm/kernel/xscale-cp0.c
 *
 * XScale DSP and iWMMXt coprocessor context switching and handling
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/thread_notify.h>
#include <asm/io.h>

static inline void dsp_save_state(u32 *state)
{
	__asm__ __volatile__ (
		"mrrc	p0, 0, %0, %1, c0\n"
		: "=r" (state[0]), "=r" (state[1]));
}

static inline void dsp_load_state(u32 *state)
{
	__asm__ __volatile__ (
		"mcrr	p0, 0, %0, %1, c0\n"
		: : "r" (state[0]), "r" (state[1]));
}

static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		thread->cpu_context.extra[0] = 0;
		thread->cpu_context.extra[1] = 0;
		break;

	case THREAD_NOTIFY_SWITCH:
		dsp_save_state(current_thread_info()->cpu_context.extra);
		dsp_load_state(thread->cpu_context.extra);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsp_notifier_block = {
	.notifier_call	= dsp_do,
};


#ifdef CONFIG_IWMMXT
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/*
		 * flush_thread() zeroes thread->fpstate, so no need
		 * to do anything here.
		 *
		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
		 * initialised state information on the first fault.
		 */

	case THREAD_NOTIFY_RELEASE:
		iwmmxt_task_release(thread);
		break;

	case THREAD_NOTIFY_SWITCH:
		iwmmxt_task_switch(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block iwmmxt_notifier_block = {
	.notifier_call	= iwmmxt_do,
};
#endif


static u32 __init xscale_cp_access_read(void)
{
	u32 value;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c15, c1, 0\n\t"
		: "=r" (value));

	return value;
}

static void __init xscale_cp_access_write(u32 value)
{
	u32 temp;

	__asm__ __volatile__ (
		"mcr	p15, 0, %1, c15, c1, 0\n\t"
		"mrc	p15, 0, %0, c15, c1, 0\n\t"
		"mov	%0, %0\n\t"
		"sub	pc, pc, #4\n\t"
		: "=r" (temp) : "r" (value));
}

/*
 * Detect whether we have a MAC coprocessor (40 bit register) or an
 * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000
 * into a coprocessor register and reading it back, and checking
 * whether the upper word survived intact.
 */
static int __init cpu_has_iwmmxt(void)
{
	u32 lo;
	u32 hi;

	/*
	 * This sequence is interpreted by the DSP coprocessor as:
	 *	mar	acc0, %2, %3
	 *	mra	%0, %1, acc0
	 *
	 * And by the iWMMXt coprocessor as:
	 *	tmcrr	wR0, %2, %3
	 *	tmrrc	%0, %1, wR0
	 */
	__asm__ __volatile__ (
		"mcrr	p0, 0, %2, %3, c0\n"
		"mrrc	p0, 0, %0, %1, c0\n"
		: "=r" (lo), "=r" (hi)
		: "r" (0), "r" (0x100));

	return !!hi;
}

/*
 * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
 * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
 * switch code handle iWMMXt context switching.  If on the other
 * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled
 * all the time, and save/restore acc0 on context switch in non-lazy
 * fashion.
 */
static int __init xscale_cp0_init(void)
{
	u32 cp_access;

	cp_access = xscale_cp_access_read() & ~3;
	xscale_cp_access_write(cp_access | 1);

	if (cpu_has_iwmmxt()) {
#ifndef CONFIG_IWMMXT
		printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor "
			"detected, but kernel support is missing.\n");
#else
		printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n");
		elf_hwcap |= HWCAP_IWMMXT;
		thread_register_notifier(&iwmmxt_notifier_block);
#endif
	} else {
		printk(KERN_INFO "XScale DSP coprocessor detected.\n");
		thread_register_notifier(&dsp_notifier_block);
		cp_access |= 1;
	}

	xscale_cp_access_write(cp_access);

	return 0;
}

late_initcall(xscale_cp0_init);

arch/arm/mach-pxa/Kconfig
@@ -13,12 +13,10 @@ config ARCH_LUBBOCK
 config MACH_LOGICPD_PXA270
 	bool "LogicPD PXA270 Card Engine Development Platform"
 	select PXA27x
-	select IWMMXT

 config MACH_MAINSTONE
 	bool "Intel HCDDBBVA0 Development Platform"
 	select PXA27x
-	select IWMMXT

 config ARCH_PXA_IDP
 	bool "Accelent Xscale IDP"
@@ -53,7 +51,6 @@ config PXA_SHARPSL_25x
 config PXA_SHARPSL_27x
 	bool "Sharp PXA270 models (SL-Cxx00)"
 	select PXA27x
-	select IWMMXT

 endchoice
@@ -129,11 +126,6 @@ config PXA27x
 	help
 	  Select code specific to PXA27x variants

-config IWMMXT
-	bool
-	help
-	  Enable support for iWMMXt

 config PXA_SHARP_C7xx
 	bool
 	select PXA_SSP

arch/arm/mach-pxa/pm.c
@@ -83,6 +83,7 @@ int pxa_pm_enter(suspend_state_t state)

 #ifdef CONFIG_IWMMXT
 	/* force any iWMMXt context to ram **/
+	if (elf_hwcap & HWCAP_IWMMXT)
 		iwmmxt_task_disable(NULL);
 #endif

arch/arm/mm/proc-xscale.S
@@ -491,12 +491,7 @@ __xscale_setup:
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
 	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
-#ifdef CONFIG_IWMMXT
-	mov	r0, #0				@ initially disallow access to CP0/CP1
-#else
-	mov	r0, #1				@ Allow access to CP0
-#endif
-	orr	r0, r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
+	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
 	orr	r0, r0, #1 << 13		@ Its undefined whether this
 	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
@@ -909,7 +904,7 @@ __pxa270_proc_info:
 	b	__xscale_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_IWMMXT
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
 	.long	cpu_pxa270_name
 	.long	xscale_processor_functions
 	.long	v4wbi_tlb_fns

include/asm-arm/elf.h
@@ -114,40 +114,24 @@ extern char elf_platform[];
    have no such handler.  */
 #define ELF_PLAT_INIT(_r, load_addr)	(_r)->ARM_r0 = 0

-#ifndef CONFIG_IWMMXT
-
-/* Old NetWinder binaries were compiled in such a way that the iBCS
-   heuristic always trips on them.  Until these binaries become uncommon
-   enough not to care, don't trust the `ibcs' flag here.  In any case
-   there is no other ELF system currently supported by iBCS.
-   @@ Could print a warning message to encourage users to upgrade.  */
-#define SET_PERSONALITY(ex,ibcs2) \
-	set_personality(((ex).e_flags & EF_ARM_APCS26 ? PER_LINUX : PER_LINUX_32BIT))
-
-#else
-
 /*
- * All iWMMXt capable CPUs don't support 26-bit mode.  Yet they can run
- * legacy binaries which used to contain FPA11 floating point instructions
- * that have always been emulated by the kernel.  PFA11 and iWMMXt overlap
- * on coprocessor 1 space though.  We therefore must decide if given task
- * is allowed to use CP 0 and 1 for iWMMXt, or if they should be blocked
- * at all times for the prefetch exception handler to catch FPA11 opcodes
- * and emulate them.  The best indication to discriminate those two cases
- * is the SOFT_FLOAT flag in the ELF header.
+ * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
+ * and CP1, we only enable access to the iWMMXt coprocessor if the
+ * binary is EABI or softfloat (and thus, guaranteed not to use
+ * FPA instructions.)
  */
-#define SET_PERSONALITY(ex,ibcs2) \
+#define SET_PERSONALITY(ex, ibcs2) \
 	do { \
+		if ((ex).e_flags & EF_ARM_APCS26) { \
+			set_personality(PER_LINUX); \
+		} else { \
 			set_personality(PER_LINUX_32BIT); \
-		if (((ex).e_flags & EF_ARM_EABI_MASK) || \
-		    ((ex).e_flags & EF_ARM_SOFT_FLOAT)) \
+			if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \
 				set_thread_flag(TIF_USING_IWMMXT); \
 			else \
 				clear_thread_flag(TIF_USING_IWMMXT); \
+		} \
 	} while (0)

-#endif
-
 #endif