Commit 4a503217 authored by Julien Thierry, committed by Catalin Marinas

arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking

Instead of disabling interrupts by setting the PSR.I bit, use a priority
higher than the one used for interrupts to mask them via PMR.

When using PMR to disable interrupts, the value of PMR will be used
instead of PSR.[DAIF] for the irqflags.
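
As a rough mental model (a sketch, not kernel code; the comparison rule
comes from the GICv3 architecture, and the exact GIC_PRIO_IRQON /
GIC_PRIO_IRQOFF values live in the arm64 headers rather than in this
patch):

/*
 * The GIC signals an interrupt to the CPU only if the interrupt's
 * priority is strictly higher than (i.e. numerically less than)
 * the value programmed into ICC_PMR_EL1.
 */
static bool gic_signals_irq(unsigned int irq_prio, unsigned int pmr)
{
	return irq_prio < pmr;
}

Writing GIC_PRIO_IRQOFF (a value at or below the priority value used for
normal interrupts) to PMR therefore masks them, and writing
GIC_PRIO_IRQON (a value above it) unmasks them, all without touching
PSR.I.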
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Suggested-by: Daniel Thompson <daniel.thompson@linaro.org>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 13b210dd
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -44,6 +44,17 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
 
 #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
 
+/*
+ * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not.
+ * And EFI shouldn't really play around with priority masking as it is not aware
+ * which priorities the OS has assigned to its interrupts.
+ */
+#define arch_efi_save_flags(state_flags) \
+	((void)((state_flags) = read_sysreg(daif)))
+
+#define arch_efi_restore_flags(state_flags) write_sysreg(state_flags, daif)
+
 /* arch specific definitions used by the stub code */
 
 /*
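
Expanded, the two macros amount to the following usage sketch
(illustrative only: state_flags always holds the DAIF bits, never a PMR
value, so the firmware sees the same behaviour whether or not the OS
uses priority masking):

	unsigned long state_flags;

	arch_efi_save_flags(state_flags);	/* state_flags = read_sysreg(daif) */
	/* ... EFI runtime service call ... */
	arch_efi_restore_flags(state_flags);	/* write_sysreg(state_flags, daif) */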
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -18,7 +18,9 @@
 
 #ifdef __KERNEL__
 
+#include <asm/alternative.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
 
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
@@ -36,33 +38,27 @@
 /*
  * CPU interrupt mask handling.
  */
-static inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	asm volatile(
-		"mrs	%0, daif		// arch_local_irq_save\n"
-		"msr	daifset, #2"
-		: "=r" (flags)
-		:
-		: "memory");
-	return flags;
-}
-
 static inline void arch_local_irq_enable(void)
 {
-	asm volatile(
-		"msr	daifclr, #2		// arch_local_irq_enable"
-		:
+	asm volatile(ALTERNATIVE(
+		"msr	daifclr, #2		// arch_local_irq_enable\n"
+		"nop",
+		"msr_s	" __stringify(SYS_ICC_PMR_EL1) ",%0\n"
+		"dsb	sy",
+		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
+		: "r" (GIC_PRIO_IRQON)
 		: "memory");
 }
 
 static inline void arch_local_irq_disable(void)
 {
-	asm volatile(
-		"msr	daifset, #2		// arch_local_irq_disable"
-		:
+	asm volatile(ALTERNATIVE(
+		"msr	daifset, #2		// arch_local_irq_disable",
+		"msr_s	" __stringify(SYS_ICC_PMR_EL1) ", %0",
+		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
+		: "r" (GIC_PRIO_IRQOFF)
 		: "memory");
 }
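
For readers less used to ALTERNATIVE(), the two patched functions above
behave like the C sketch below (an approximation, assuming
system_uses_irq_prio_masking() is the C-level test for the
ARM64_HAS_IRQ_PRIO_MASKING capability, as the comment added to
arch_local_save_flags() further down also assumes):

static inline void arch_local_irq_enable_sketch(void)
{
	if (system_uses_irq_prio_masking()) {
		/* raise PMR so normal interrupt priorities pass the filter */
		write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
		dsb(sy);	/* make sure newly unmasked IRQs are delivered */
	} else {
		asm volatile("msr daifclr, #2" ::: "memory");
	}
}

static inline void arch_local_irq_disable_sketch(void)
{
	if (system_uses_irq_prio_masking())
		/* drop PMR below normal interrupt priorities to mask them */
		write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
	else
		asm volatile("msr daifset, #2" ::: "memory");
}

Note that the dsb sy appears only on the unmask path, mirroring the
ALTERNATIVE() asm above.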
@@ -71,12 +67,44 @@ static inline void arch_local_irq_disable(void)
  */
 static inline unsigned long arch_local_save_flags(void)
 {
+	unsigned long daif_bits;
 	unsigned long flags;
-	asm volatile(
-		"mrs	%0, daif		// arch_local_save_flags"
-		: "=r" (flags)
-		:
+
+	daif_bits = read_sysreg(daif);
+
+	/*
+	 * The asm is logically equivalent to:
+	 *
+	 * if (system_uses_irq_prio_masking())
+	 *	flags = (daif_bits & PSR_I_BIT) ?
+	 *			GIC_PRIO_IRQOFF :
+	 *			read_sysreg_s(SYS_ICC_PMR_EL1);
+	 * else
+	 *	flags = daif_bits;
+	 */
+	asm volatile(ALTERNATIVE(
+			"mov	%0, %1\n"
+			"nop\n"
+			"nop",
+			"mrs_s	%0, " __stringify(SYS_ICC_PMR_EL1) "\n"
+			"ands	%1, %1, " __stringify(PSR_I_BIT) "\n"
+			"csel	%0, %0, %2, eq",
+			ARM64_HAS_IRQ_PRIO_MASKING)
+		: "=&r" (flags), "+r" (daif_bits)
+		: "r" (GIC_PRIO_IRQOFF)
 		: "memory");
+
+	return flags;
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	flags = arch_local_save_flags();
+
+	arch_local_irq_disable();
+
 	return flags;
 }
@@ -85,16 +113,32 @@ static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile(
-		"msr	daif, %0		// arch_local_irq_restore"
-		:
-		: "r" (flags)
-		: "memory");
+	asm volatile(ALTERNATIVE(
+			"msr	daif, %0\n"
+			"nop",
+			"msr_s	" __stringify(SYS_ICC_PMR_EL1) ", %0\n"
+			"dsb	sy",
+			ARM64_HAS_IRQ_PRIO_MASKING)
+		: "+r" (flags)
+		:
+		: "memory");
 }
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return flags & PSR_I_BIT;
+	int res;
+
+	asm volatile(ALTERNATIVE(
+			"and	%w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+			"nop",
+			"cmp	%w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
+			"cset	%w0, ls",
+			ARM64_HAS_IRQ_PRIO_MASKING)
+		: "=&r" (res)
+		: "r" ((int) flags)
+		: "memory");
+
+	return res;
 }
 
 #endif
 #endif
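
The last two functions follow the same pattern; in hedged C-sketch form
(the cset "ls" condition is unsigned lower-or-same, so the priority
masking variant of arch_irqs_disabled_flags() reads as
flags <= GIC_PRIO_IRQOFF):

static inline void arch_local_irq_restore_sketch(unsigned long flags)
{
	/*
	 * flags holds either a DAIF value or a PMR value, depending on
	 * which alternative produced it in arch_local_save_flags().
	 */
	if (system_uses_irq_prio_masking()) {
		write_sysreg_s(flags, SYS_ICC_PMR_EL1);
		dsb(sy);
	} else {
		write_sysreg(flags, daif);
	}
}

static inline int arch_irqs_disabled_flags_sketch(unsigned long flags)
{
	if (system_uses_irq_prio_masking())
		/* PMR at or below GIC_PRIO_IRQOFF means IRQs are masked */
		return (unsigned int)flags <= GIC_PRIO_IRQOFF;

	return flags & PSR_I_BIT;
}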