Commit c76c6c4e authored by Ard Biesheuvel, committed by Russell King (Oracle)

ARM: 9294/2: vfp: Fix broken softirq handling with instrumentation enabled

Commit 62b95a7b ("ARM: 9282/1: vfp: Manipulate task VFP state with
softirqs disabled") replaced the en/disable preemption calls inside the
VFP state handling code with en/disabling of soft IRQs, which is
necessary to allow kernel use of the VFP/SIMD unit when handling a soft
IRQ.

Unfortunately, when lockdep is enabled (or other instrumentation that
enables TRACE_IRQFLAGS), the disable path implemented in asm fails to
perform the lockdep and RCU related bookkeeping, resulting in spurious
warnings and other badness.

So let's rework the VFP entry code a little bit so we can make the
local_bh_disable() call from C, with all the instrumentations that
happen to have been configured. Calling local_bh_enable() can be done
from asm, as it is a simple wrapper around __local_bh_enable_ip(), which
is always a callable function.

Link: https://lore.kernel.org/all/ZBBYCSZUJOWBg1s8@localhost.localdomain/

Fixes: 62b95a7b ("ARM: 9282/1: vfp: Manipulate task VFP state with softirqs disabled")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
parent 3a2bdad0
...@@ -244,19 +244,6 @@ THUMB( fpreg .req r7 ) ...@@ -244,19 +244,6 @@ THUMB( fpreg .req r7 )
.endm .endm
#endif #endif
	@ Disable soft IRQs by incrementing the softirq-disable count in
	@ thread_info::preempt_count (\ti = thread_info, \tmp = scratch).
	@ NOTE(review): this is an open-coded preempt_count bump only — it
	@ performs none of the lockdep/RCU bookkeeping that the C-level
	@ local_bh_disable() does, which is exactly the breakage this commit
	@ fixes by deleting the macro and calling local_bh_disable() from C.
	.macro local_bh_disable, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ \tmp = preempt_count
	add	\tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
	str	\tmp, [\ti, #TI_PREEMPT]	@ write back updated count
	.endm
	@ Re-enable soft IRQs by decrementing the softirq-disable count in
	@ thread_info::preempt_count. Re-derives the current thread_info
	@ pointer into \ti first (\tmp = scratch).
	@ NOTE(review): like the matching disable macro, this skips the
	@ lockdep/RCU bookkeeping of the C local_bh_enable(); the commit
	@ replaces it with a tail call to __local_bh_enable_ip().
	.macro local_bh_enable_ti, ti, tmp
	get_thread_info \ti			@ \ti = current thread_info
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ \tmp = preempt_count
	sub	\tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
	str	\tmp, [\ti, #TI_PREEMPT]	@ write back updated count
	.endm
#define USERL(l, x...) \ #define USERL(l, x...) \
9999: x; \ 9999: x; \
.pushsection __ex_table,"a"; \ .pushsection __ex_table,"a"; \
......
...@@ -24,14 +24,5 @@ ...@@ -24,14 +24,5 @@
ENTRY(do_vfp) ENTRY(do_vfp)
mov r1, r10 mov r1, r10
mov r3, r9 mov r3, r9
ldr r4, .LCvfp b vfp_entry
ldr pc, [r4] @ call VFP entry point
ENDPROC(do_vfp) ENDPROC(do_vfp)
ENTRY(vfp_null_entry)
ret lr
ENDPROC(vfp_null_entry)
.align 2
.LCvfp:
.word vfp_vector
...@@ -75,8 +75,6 @@ ...@@ -75,8 +75,6 @@
@ lr = unrecognised instruction return address @ lr = unrecognised instruction return address
@ IRQs enabled. @ IRQs enabled.
ENTRY(vfp_support_entry) ENTRY(vfp_support_entry)
local_bh_disable r1, r4
ldr r11, [r1, #TI_CPU] @ CPU number ldr r11, [r1, #TI_CPU] @ CPU number
add r10, r1, #TI_VFPSTATE @ r10 = workspace add r10, r1, #TI_VFPSTATE @ r10 = workspace
...@@ -179,9 +177,12 @@ vfp_hw_state_valid: ...@@ -179,9 +177,12 @@ vfp_hw_state_valid:
@ else it's one 32-bit instruction, so @ else it's one 32-bit instruction, so
@ always subtract 4 from the following @ always subtract 4 from the following
@ instruction address. @ instruction address.
local_bh_enable_ti r10, r4
ret r3 @ we think we have handled things
mov lr, r3 @ we think we have handled things
local_bh_enable_and_ret:
adr r0, .
mov r1, #SOFTIRQ_DISABLE_OFFSET
b __local_bh_enable_ip @ tail call
look_for_VFP_exceptions: look_for_VFP_exceptions:
@ Check for synchronous or asynchronous exception @ Check for synchronous or asynchronous exception
...@@ -204,8 +205,7 @@ skip: ...@@ -204,8 +205,7 @@ skip:
@ not recognised by VFP @ not recognised by VFP
DBGSTR "not VFP" DBGSTR "not VFP"
local_bh_enable_ti r10, r4 b local_bh_enable_and_ret
ret lr
process_exception: process_exception:
DBGSTR "bounce" DBGSTR "bounce"
......
...@@ -32,10 +32,9 @@ ...@@ -32,10 +32,9 @@
/* /*
* Our undef handlers (in entry.S) * Our undef handlers (in entry.S)
*/ */
asmlinkage void vfp_support_entry(void); asmlinkage void vfp_support_entry(u32, void *, u32, u32);
asmlinkage void vfp_null_entry(void);
asmlinkage void (*vfp_vector)(void) = vfp_null_entry; static bool have_vfp __ro_after_init;
/* /*
* Dual-use variable. * Dual-use variable.
...@@ -645,6 +644,25 @@ static int vfp_starting_cpu(unsigned int unused) ...@@ -645,6 +644,25 @@ static int vfp_starting_cpu(unsigned int unused)
return 0; return 0;
} }
/*
 * C entry point for the VFP undef handler, branched to from do_vfp in asm.
 * Entered with:
 *
 *  r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r1 = thread_info pointer
 *  r2 = PC value to resume execution after successful emulation
 *  r3 = normal "successful" return address
 *  lr = unrecognised instruction return address
 *
 * Disabling softirqs from C (rather than open-coded asm) ensures the
 * lockdep/TRACE_IRQFLAGS and RCU bookkeeping inside local_bh_disable()
 * is performed, which is the bug this function exists to fix.
 */
asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti, u32 resume_pc,
			  u32 resume_return_address)
{
	/* No VFP hardware was detected at init time: treat as unhandled. */
	if (unlikely(!have_vfp))
		return;

	local_bh_disable();
	/*
	 * No matching local_bh_enable() here: vfp_support_entry() re-enables
	 * softirqs on its return paths via a tail call to
	 * __local_bh_enable_ip() (see local_bh_enable_and_ret in the asm).
	 */
	vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
}
#ifdef CONFIG_KERNEL_MODE_NEON #ifdef CONFIG_KERNEL_MODE_NEON
static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr) static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
...@@ -798,7 +816,6 @@ static int __init vfp_init(void) ...@@ -798,7 +816,6 @@ static int __init vfp_init(void)
vfpsid = fmrx(FPSID); vfpsid = fmrx(FPSID);
barrier(); barrier();
unregister_undef_hook(&vfp_detect_hook); unregister_undef_hook(&vfp_detect_hook);
vfp_vector = vfp_null_entry;
pr_info("VFP support v0.3: "); pr_info("VFP support v0.3: ");
if (VFP_arch) { if (VFP_arch) {
...@@ -883,7 +900,7 @@ static int __init vfp_init(void) ...@@ -883,7 +900,7 @@ static int __init vfp_init(void)
"arm/vfp:starting", vfp_starting_cpu, "arm/vfp:starting", vfp_starting_cpu,
vfp_dying_cpu); vfp_dying_cpu);
vfp_vector = vfp_support_entry; have_vfp = true;
thread_register_notifier(&vfp_notifier_block); thread_register_notifier(&vfp_notifier_block);
vfp_pm_init(); vfp_pm_init();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment