Commit 0fbeb318 authored by James Morse, committed by Will Deacon

arm64: explicitly mask all exceptions

There are a few places where we want to mask all exceptions. Today we
do this in a piecemeal fashion: typically we expect the caller to
have masked IRQs and the arch code masks debug exceptions, ignoring
SError, which is probably masked anyway.

Make it clear that 'mask all exceptions' is the intention by adding
helpers to do exactly that.

This will let us unmask SError without having to add 'oh and SError'
to these paths.
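
For illustration (not part of the patch text): the intended calling
pattern with the new helpers, for a hypothetical caller that must not
take any exception around a critical section:

	unsigned long flags;

	flags = local_daif_save();	/* mask D, A, I and F; remember old PSTATE.DAIF */
	/* ... critical section: no debug, SError, IRQ or FIQ ... */
	local_daif_restore(flags);	/* put the DAIF bits back exactly as they were */

This is the pattern the swsusp_arch_suspend() and cpu_suspend() hunks
below switch to.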
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent c10f0d06
arch/arm64/include/asm/assembler.h
@@ -32,6 +32,23 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
+	.macro save_and_disable_daif, flags
+	mrs	\flags, daif
+	msr	daifset, #0xf
+	.endm
+
+	.macro disable_daif
+	msr	daifset, #0xf
+	.endm
+
+	.macro enable_daif
+	msr	daifclr, #0xf
+	.endm
+
+	.macro restore_daif, flags:req
+	msr	daif, \flags
+	.endm
+
 /*
  * Enable and disable interrupts.
  */
new file: arch/arm64/include/asm/daifflags.h

/*
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_DAIFFLAGS_H
#define __ASM_DAIFFLAGS_H

#include <linux/irqflags.h>

/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
{
	asm volatile(
		"msr	daifset, #0xf		// local_daif_mask\n"
		:
		:
		: "memory");
	trace_hardirqs_off();
}

static inline unsigned long local_daif_save(void)
{
	unsigned long flags;

	asm volatile(
		"mrs	%0, daif		// local_daif_save\n"
		: "=r" (flags)
		:
		: "memory");
	local_daif_mask();

	return flags;
}

static inline void local_daif_unmask(void)
{
	trace_hardirqs_on();
	asm volatile(
		"msr	daifclr, #0xf		// local_daif_unmask"
		:
		:
		: "memory");
}

static inline void local_daif_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		trace_hardirqs_on();

	asm volatile(
		"msr	daif, %0		// local_daif_restore"
		:
		: "r" (flags)
		: "memory");

	if (arch_irqs_disabled_flags(flags))
		trace_hardirqs_off();
}

#endif
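
A minimal usage sketch of the mask-only form (hypothetical function,
mirroring the machine_kexec() and ipi_cpu_stop() hunks below), for code
paths that never return and so never restore:

	static void example_halt_cpu(void)
	{
		local_daif_mask();	/* mask everything; nothing below can be interrupted */
		while (1)
			cpu_relax();	/* spin with all exceptions masked */
	}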
arch/arm64/kernel/hibernate.c
@@ -27,6 +27,7 @@
 #include <asm/barrier.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
+#include <asm/daifflags.h>
 #include <asm/irqflags.h>
 #include <asm/kexec.h>
 #include <asm/memory.h>
@@ -285,7 +286,7 @@ int swsusp_arch_suspend(void)
 		return -EBUSY;
 	}
 
-	local_dbg_save(flags);
+	flags = local_daif_save();
 
 	if (__cpu_suspend_enter(&state)) {
 		/* make the crash dump kernel image visible/saveable */
@@ -315,7 +316,7 @@ int swsusp_arch_suspend(void)
 		__cpu_suspend_exit();
 	}
 
-	local_dbg_restore(flags);
+	local_daif_restore(flags);
 
 	return ret;
 }
arch/arm64/kernel/machine_kexec.c
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
 #include <asm/memory.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
@@ -195,8 +196,7 @@ void machine_kexec(struct kimage *kimage)
 
 	pr_info("Bye!\n");
 
-	/* Disable all DAIF exceptions. */
-	asm volatile ("msr daifset, #0xf" : : : "memory");
+	local_daif_mask();
 
 	/*
 	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
arch/arm64/kernel/smp.c
@@ -47,6 +47,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
@@ -370,10 +371,6 @@ void __cpu_die(unsigned int cpu)
 /*
  * Called from the idle thread for the CPU which has been shutdown.
  *
- * Note that we disable IRQs here, but do not re-enable them
- * before returning to the caller. This is also the behaviour
- * of the other hotplug-cpu capable cores, so presumably coming
- * out of idle fixes this.
  */
 void cpu_die(void)
 {
@@ -381,7 +378,7 @@ void cpu_die(void)
 	idle_task_exit();
 
-	local_irq_disable();
+	local_daif_mask();
 
 	/* Tell __cpu_die() that this CPU is now safe to dispose of */
 	(void)cpu_report_death();
@@ -839,7 +836,7 @@ static void ipi_cpu_stop(unsigned int cpu)
 {
 	set_cpu_online(cpu, false);
 
-	local_irq_disable();
+	local_daif_mask();
 
 	while (1)
 		cpu_relax();
arch/arm64/kernel/suspend.c
@@ -4,6 +4,7 @@
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/exec.h>
 #include <asm/pgtable.h>
@@ -56,7 +57,7 @@ void notrace __cpu_suspend_exit(void)
 	/*
 	 * Restore HW breakpoint registers to sane values
 	 * before debug exceptions are possibly reenabled
-	 * through local_dbg_restore.
+	 * by cpu_suspend()s local_daif_restore() call.
 	 */
 	if (hw_breakpoint_restore)
 		hw_breakpoint_restore(cpu);
@@ -80,7 +81,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 * updates to mdscr register (saved and restored along with
 	 * general purpose registers) from kernel debuggers.
 	 */
-	local_dbg_save(flags);
+	flags = local_daif_save();
 
 	/*
 	 * Function graph tracer state gets incosistent when the kernel
@@ -113,7 +114,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 * restored, so from this point onwards, debugging is fully
 	 * renabled if it was enabled when core started shutdown.
 	 */
-	local_dbg_restore(flags);
+	local_daif_restore(flags);
 
 	return ret;
 }
arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
 #include <asm/atomic.h>
 #include <asm/bug.h>
+#include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/insn.h>
@@ -594,7 +595,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 		esr_get_class_string(esr));
 	die("Oops - bad mode", regs, 0);
 
-	local_irq_disable();
+	local_daif_mask();
 	panic("bad mode");
 }
arch/arm64/mm/proc.S
@@ -109,10 +109,10 @@ ENTRY(cpu_do_resume)
 	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
-	 * exception. Mask them until local_dbg_restore() in cpu_suspend()
+	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
-	disable_dbg
+	disable_daif
 	msr	mdscr_el1, x10
 
 	msr	sctlr_el1, x12
@@ -155,8 +155,7 @@ ENDPROC(cpu_do_switch_mm)
  * called by anything else. It can only be executed from a TTBR0 mapping.
  */
 ENTRY(idmap_cpu_replace_ttbr1)
-	mrs	x2, daif
-	msr	daifset, #0xf
+	save_and_disable_daif flags=x2
 
 	adrp	x1, empty_zero_page
 	msr	ttbr1_el1, x1
@@ -169,7 +168,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
 	msr	ttbr1_el1, x0
 	isb
 
-	msr	daif, x2
+	restore_daif x2
 
 	ret
 ENDPROC(idmap_cpu_replace_ttbr1)