Commit b5efec00 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32s: Move KUEP locking/unlocking in C

This can be done in C, do it.

Unrolling the loop gains approx. 15% performance.

From now on, prepare_transfer_to_handler() is only used for
interrupts coming from the kernel.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/4eadd873927e9a73c3d1dfe2f9497353465514cf.1615552867.git.christophe.leroy@csgroup.eu
parent a2b3e09a
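
The segment-register arithmetic moved by this patch is the same in the removed assembly and in the new C code: each 256MB user segment has its own segment register, the value for the next segment is derived by adding 0x111 (the VSID stride) and masking with 0xf0ffffff so the increment cannot spill out of the VSID field, and SR_NX (the No-Execute bit) is set on lock and cleared on unlock. The following is a minimal userspace sketch of that arithmetic only, not of the privileged mfsr/mtsr accesses; the SR_NX value of 0x10000000 is an assumption taken from asm/reg.h.

#include <stdio.h>

#define SR_NX	0x10000000u	/* assumed: No-Execute bit, as defined in asm/reg.h */

int main(void)
{
	unsigned int sr = 0x00400000u | SR_NX;	/* made-up SR value for segment 0, NX set */
	int seg;

	for (seg = 0; seg < 16; seg++) {
		printf("segment %2d (ea 0x%08x): SR = 0x%08x\n",
		       seg, (unsigned int)seg << 28, sr);
		sr = (sr + 0x111) & 0xf0ffffff;	/* next VSID, clear VSID overflow */
	}
	return 0;
}
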
@@ -7,37 +7,6 @@
  #ifdef __ASSEMBLY__
- .macro kuep_update_sr gpr1, gpr2 /* NEVER use r0 as gpr2 due to addis */
- 101: mtsrin \gpr1, \gpr2
- addi \gpr1, \gpr1, 0x111 /* next VSID */
- rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
- addis \gpr2, \gpr2, 0x1000 /* address of next segment */
- bdnz 101b
- isync
- .endm
- .macro kuep_lock gpr1, gpr2
- #ifdef CONFIG_PPC_KUEP
- li \gpr1, NUM_USER_SEGMENTS
- li \gpr2, 0
- mtctr \gpr1
- mfsrin \gpr1, \gpr2
- oris \gpr1, \gpr1, SR_NX@h /* set Nx */
- kuep_update_sr \gpr1, \gpr2
- #endif
- .endm
- .macro kuep_unlock gpr1, gpr2
- #ifdef CONFIG_PPC_KUEP
- li \gpr1, NUM_USER_SEGMENTS
- li \gpr2, 0
- mtctr \gpr1
- mfsrin \gpr1, \gpr2
- rlwinm \gpr1, \gpr1, 0, ~SR_NX /* Clear Nx */
- kuep_update_sr \gpr1, \gpr2
- #endif
- .endm
  #ifdef CONFIG_PPC_KUAP
  .macro kuap_update_sr gpr1, gpr2, gpr3 /* NEVER use r0 as gpr2 due to addis */
...
@@ -33,8 +33,10 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
  if (!arch_irq_disabled_regs(regs))
  trace_hardirqs_off();
- if (user_mode(regs))
+ if (user_mode(regs)) {
+ kuep_lock();
  account_cpu_user_entry();
+ }
  #endif
  /*
  * Book3E reconciles irq soft mask in asm
@@ -89,6 +91,8 @@ static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt
  exception_exit(state->ctx_state);
  #endif
+ if (user_mode(regs))
+ kuep_unlock();
  /*
  * Book3S exits to user via interrupt_exit_user_prepare(), which does
  * context tracking, which is a cleaner way to handle PREEMPT=y
...
@@ -55,6 +55,14 @@ void setup_kuep(bool disabled);
  static inline void setup_kuep(bool disabled) { }
  #endif /* CONFIG_PPC_KUEP */
+ #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
+ void kuep_lock(void);
+ void kuep_unlock(void);
+ #else
+ static inline void kuep_lock(void) { }
+ static inline void kuep_unlock(void) { }
+ #endif
  #ifdef CONFIG_PPC_KUAP
  void setup_kuap(bool disabled);
  #else
...
@@ -51,16 +51,9 @@
  #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
  .globl prepare_transfer_to_handler
  prepare_transfer_to_handler:
- andi. r12,r9,MSR_PR
  addi r12,r2,THREAD
- beq 2f
- #ifdef CONFIG_PPC_BOOK3S_32
- kuep_lock r11, r12
- #endif
- blr
  /* if from kernel, check interrupted DOZE/NAP mode */
- 2:
  kuap_save_and_lock r11, r12, r9, r5, r6
  lwz r12,TI_LOCAL_FLAGS(r2)
  mtcrf 0x01,r12
@@ -86,9 +79,6 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
  .globl transfer_to_syscall
  transfer_to_syscall:
  SAVE_NVGPRS(r1)
- #ifdef CONFIG_PPC_BOOK3S_32
- kuep_lock r11, r12
- #endif
  /* Calling convention has r9 = orig r0, r10 = regs */
  addi r10,r1,STACK_FRAME_OVERHEAD
@@ -105,9 +95,6 @@ ret_from_syscall:
  cmplwi cr0,r5,0
  bne- 2f
  #endif /* CONFIG_PPC_47x */
- #ifdef CONFIG_PPC_BOOK3S_32
- kuep_unlock r5, r7
- #endif
  kuap_check r2, r4
  lwz r4,_LINK(r1)
  lwz r5,_CCR(r1)
@@ -311,9 +298,6 @@ interrupt_return:
  bne- .Lrestore_nvgprs
  .Lfast_user_interrupt_return:
- #ifdef CONFIG_PPC_BOOK3S_32
- kuep_unlock r10, r11
- #endif
  kuap_check r2, r4
  lwz r11,_NIP(r1)
  lwz r12,_MSR(r1)
...
@@ -133,7 +133,10 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
  .macro prepare_transfer_to_handler
  #ifdef CONFIG_PPC_BOOK3S_32
+ andi. r12,r9,MSR_PR
+ bne 777f
  bl prepare_transfer_to_handler
+ 777:
  #endif
  .endm
...
@@ -105,7 +105,10 @@ END_BTB_FLUSH_SECTION
  .macro prepare_transfer_to_handler
  #ifdef CONFIG_E500
+ andi. r12,r9,MSR_PR
+ bne 777f
  bl prepare_transfer_to_handler
+ 777:
  #endif
  .endm
...
@@ -33,6 +33,8 @@ notrace long system_call_exception(long r3, long r4, long r5,
  {
  syscall_fn f;
+ kuep_lock();
  regs->orig_gpr3 = r3;
  if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
@@ -354,6 +356,8 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
  */
  kuap_user_restore(regs);
  #endif
+ kuep_unlock();
  return ret;
  }
...
@@ -9,3 +9,4 @@ endif
  obj-y += mmu.o mmu_context.o
  obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o
  obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o
+ obj-$(CONFIG_PPC_KUEP) += kuep.o
// SPDX-License-Identifier: GPL-2.0-or-later
#include <asm/kup.h>
#include <asm/reg.h>
#include <asm/task_size_32.h>
#include <asm/mmu.h>
#define KUEP_UPDATE_TWO_USER_SEGMENTS(n) do { \
if (TASK_SIZE > ((n) << 28)) \
mtsr(val1, (n) << 28); \
if (TASK_SIZE > (((n) + 1) << 28)) \
mtsr(val2, ((n) + 1) << 28); \
val1 = (val1 + 0x222) & 0xf0ffffff; \
val2 = (val2 + 0x222) & 0xf0ffffff; \
} while (0)
static __always_inline void kuep_update(u32 val)
{
int val1 = val;
int val2 = (val + 0x111) & 0xf0ffffff;
KUEP_UPDATE_TWO_USER_SEGMENTS(0);
KUEP_UPDATE_TWO_USER_SEGMENTS(2);
KUEP_UPDATE_TWO_USER_SEGMENTS(4);
KUEP_UPDATE_TWO_USER_SEGMENTS(6);
KUEP_UPDATE_TWO_USER_SEGMENTS(8);
KUEP_UPDATE_TWO_USER_SEGMENTS(10);
KUEP_UPDATE_TWO_USER_SEGMENTS(12);
KUEP_UPDATE_TWO_USER_SEGMENTS(14);
}
void kuep_lock(void)
{
kuep_update(mfsr(0) | SR_NX);
}
void kuep_unlock(void)
{
kuep_update(mfsr(0) & ~SR_NX);
}
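
As a point of comparison for the commit message's "approx. 15%" figure, a plain loop over the sixteen user segments would look roughly like the sketch below. kuep_update_loop is a name invented for this illustration and is not part of the patch; it reuses the mtsr() helper and the constants already visible in the new kuep.c. The committed kuep_update() unrolls this loop, handling two segments per step via KUEP_UPDATE_TWO_USER_SEGMENTS, which is where the gain on the kernel entry/exit path comes from according to the commit message.

/*
 * Illustration only: a loop-based equivalent of kuep_update() above,
 * using the same mtsr() helper and constants as the new kuep.c.
 * The committed version unrolls this loop instead.
 */
static __always_inline void kuep_update_loop(u32 val)
{
	unsigned long addr;

	for (addr = 0; addr < TASK_SIZE; addr += 0x10000000) {
		mtsr(val, addr);			/* update the SR of this 256MB segment */
		val = (val + 0x111) & 0xf0ffffff;	/* next VSID, clear VSID overflow */
	}
}
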