Commit c1672883 authored by Christophe Leroy, committed by Michael Ellerman
parent 0b45359a
@@ -5,55 +5,7 @@
 #include <asm/bug.h>
 #include <asm/book3s/32/mmu-hash.h>
 
-#ifdef __ASSEMBLY__
-
-#ifdef CONFIG_PPC_KUAP
-
-.macro kuap_update_sr	gpr1, gpr2, gpr3	/* NEVER use r0 as gpr2 due to addis */
-101:	mtsrin	\gpr1, \gpr2
-	addi	\gpr1, \gpr1, 0x111		/* next VSID */
-	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
-	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
-	cmplw	\gpr2, \gpr3
-	blt-	101b
-	isync
-.endm
-
-.macro kuap_save_and_lock	sp, thread, gpr1, gpr2, gpr3
-	lwz	\gpr2, KUAP(\thread)
-	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
-	stw	\gpr2, STACK_REGS_KUAP(\sp)
-	beq+	102f
-	li	\gpr1, 0
-	stw	\gpr1, KUAP(\thread)
-	mfsrin	\gpr1, \gpr2
-	oris	\gpr1, \gpr1, SR_KS@h	/* set Ks */
-	kuap_update_sr	\gpr1, \gpr2, \gpr3
-102:
-.endm
-
-.macro kuap_restore	sp, current, gpr1, gpr2, gpr3
-	lwz	\gpr2, STACK_REGS_KUAP(\sp)
-	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
-	stw	\gpr2, THREAD + KUAP(\current)
-	beq+	102f
-	mfsrin	\gpr1, \gpr2
-	rlwinm	\gpr1, \gpr1, 0, ~SR_KS	/* Clear Ks */
-	kuap_update_sr	\gpr1, \gpr2, \gpr3
-102:
-.endm
-
-.macro kuap_check	current, gpr
-#ifdef CONFIG_PPC_KUAP_DEBUG
-	lwz	\gpr, THREAD + KUAP(\current)
-999:	twnei	\gpr, 0
-	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-#endif
-.endm
-
-#endif /* CONFIG_PPC_KUAP */
-
-#else /* !__ASSEMBLY__ */
+#ifndef __ASSEMBLY__
 
 #ifdef CONFIG_PPC_KUAP
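
The heart of the removed assembly is the kuap_update_sr loop: it walks every 256MB segment of the user-access window and rewrites that segment register, bumping the VSID by 0x111 per segment and masking off overflow. As a rough C rendering of the same loop (the mtsrin() and isync() inline helpers from <asm/reg.h> and <asm/synch.h> are assumed; this is an illustration, not necessarily the exact code the commit adds):

static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
	while (addr < end) {
		mtsrin(sr, addr);	/* rewrite SR for this 256MB segment */
		sr += 0x111;		/* next VSID */
		sr &= 0xf0ffffff;	/* clear VSID overflow */
		addr += 0x10000000;	/* start of next segment */
	}
	isync();	/* context synchronizing, as after the asm loop */
}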
...
@@ -36,6 +36,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
 	if (user_mode(regs)) {
 		kuep_lock();
 		account_cpu_user_entry();
+	} else {
+		kuap_save_and_lock(regs);
 	}
 #endif
 
 	/*
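
With this change, kernel-mode interrupt entry locks KUAP from C rather than from the assembly prologue. On book3s/32 the C counterpart of the removed kuap_save_and_lock macro plausibly looks like the sketch below: save thread.kuap into pt_regs, clear it, and set Ks across the open window with the kuap_update_sr() loop shown earlier. The thread.kuap layout (start segment in the top nibble, end segment in the bottom nibble) is inferred from the removed assembly, and the regs->kuap field name is an assumption:

static inline void kuap_save_and_lock(struct pt_regs *regs)
{
	unsigned long kuap = current->thread.kuap;
	u32 addr = kuap & 0xf0000000;	/* start of the open user window */
	u32 end = kuap << 28;		/* its end, from the low nibble */

	regs->kuap = kuap;	/* asm did: stw gpr2, STACK_REGS_KUAP(sp) */
	if (likely(!end))	/* asm did: rlwinm. + beq+ (nothing open) */
		return;

	current->thread.kuap = 0;
	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
}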
...
@@ -28,15 +28,6 @@
 #ifdef __ASSEMBLY__
 #ifndef CONFIG_PPC_KUAP
-.macro kuap_save_and_lock	sp, thread, gpr1, gpr2, gpr3
-.endm
-
-.macro kuap_restore	sp, current, gpr1, gpr2, gpr3
-.endm
-
-.macro kuap_check	current, gpr
-.endm
-
 .macro kuap_check_amr	gpr1, gpr2
 .endm
...
@@ -7,30 +7,7 @@
 #ifdef CONFIG_PPC_KUAP
 
-#ifdef __ASSEMBLY__
-
-.macro kuap_save_and_lock	sp, thread, gpr1, gpr2, gpr3
-	lis	\gpr2, MD_APG_KUAP@h	/* only APG0 and APG1 are used */
-	mfspr	\gpr1, SPRN_MD_AP
-	mtspr	SPRN_MD_AP, \gpr2
-	stw	\gpr1, STACK_REGS_KUAP(\sp)
-.endm
-
-.macro kuap_restore	sp, current, gpr1, gpr2, gpr3
-	lwz	\gpr1, STACK_REGS_KUAP(\sp)
-	mtspr	SPRN_MD_AP, \gpr1
-.endm
-
-.macro kuap_check	current, gpr
-#ifdef CONFIG_PPC_KUAP_DEBUG
-	mfspr	\gpr, SPRN_MD_AP
-	rlwinm	\gpr, \gpr, 16, 0xffff
-999:	twnei	\gpr, MD_APG_KUAP@h
-	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-#endif
-.endm
-
-#else /* !__ASSEMBLY__ */
+#ifndef __ASSEMBLY__
 
 #include <asm/reg.h>
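
On the 8xx the same transformation is much simpler, since the whole KUAP state is a single SPR. A sketch of C equivalents of the removed macros (mfspr()/mtspr() are the kernel's SPR accessors; the regs->kuap slot stands in for what the asm addressed as STACK_REGS_KUAP(sp), and these bodies are an assumption rather than a verbatim copy of the commit):

static inline void kuap_save_and_lock(struct pt_regs *regs)
{
	regs->kuap = mfspr(SPRN_MD_AP);	/* save current MD_AP */
	mtspr(SPRN_MD_AP, MD_APG_KUAP);	/* only APG0 and APG1 are used */
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
	mtspr(SPRN_MD_AP, regs->kuap);	/* restore saved MD_AP */
}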
...
@@ -51,10 +51,7 @@
 #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
 	.globl	prepare_transfer_to_handler
 prepare_transfer_to_handler:
-	addi	r12,r2,THREAD
-
 	/* if from kernel, check interrupted DOZE/NAP mode */
-	kuap_save_and_lock r11, r12, r9, r5, r6
 	lwz	r12,TI_LOCAL_FLAGS(r2)
 	mtcrf	0x01,r12
 	bt-	31-TLF_NAPPING,4f
@@ -70,7 +67,6 @@ prepare_transfer_to_handler:
 	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 	rlwinm	r9,r9,0,~MSR_EE
 	lwz	r12,_LINK(r11)		/* and return to address in LR */
-	kuap_restore r11, r2, r3, r4, r5
 	lwz	r2, GPR2(r11)
 	b	fast_exception_return
 _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
@@ -95,7 +91,6 @@ ret_from_syscall:
 	cmplwi	cr0,r5,0
 	bne-	2f
 #endif /* CONFIG_PPC_47x */
-	kuap_check r2, r4
 	lwz	r4,_LINK(r1)
 	lwz	r5,_CCR(r1)
 	mtlr	r4
@@ -207,7 +202,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 	stw	r10,_CCR(r1)
 	stw	r1,KSP(r3)	/* Set old stack pointer */
 
-	kuap_check r2, r0
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all
@@ -298,7 +292,6 @@ interrupt_return:
 	bne-	.Lrestore_nvgprs
 
 .Lfast_user_interrupt_return:
-	kuap_check r2, r4
 	lwz	r11,_NIP(r1)
 	lwz	r12,_MSR(r1)
 	mtspr	SPRN_SRR0,r11
@@ -347,7 +340,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 .Lfast_kernel_interrupt_return:
 	cmpwi	cr1,r3,0
-	kuap_restore r1, r2, r3, r4, r5
 	lwz	r11,_NIP(r1)
 	lwz	r12,_MSR(r1)
 	mtspr	SPRN_SRR0,r11
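
All the kuap_save_and_lock / kuap_restore / kuap_check sites in the entry code vanish because the C entry/exit paths (interrupt_enter_prepare(), syscall_exit_prepare(), interrupt_exit_kernel_prepare()) now do the equivalent work. The debug check formerly done with "twnei gpr, 0" plus EMIT_BUG_ENTRY maps naturally onto a C assertion; a sketch of what a 32-bit kuap_assert_locked() can look like (the helper name appears in the interrupt.c hunks below, but this body is an assumption):

static inline void kuap_assert_locked(void)
{
	/*
	 * The asm kuap_check warned if thread.kuap was non-zero, i.e. a
	 * user access window was left open; WARN_ON_ONCE is the C
	 * analogue of BUGFLAG_WARNING | BUGFLAG_ONCE.
	 */
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
		WARN_ON_ONCE(current->thread.kuap != 0);
}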
...
@@ -34,6 +34,9 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	syscall_fn f;
 
 	kuep_lock();
+#ifdef CONFIG_PPC32
+	kuap_save_and_lock(regs);
+#endif
 
 	regs->orig_gpr3 = r3;
@@ -75,9 +78,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
 		isync();
 	} else
 #endif
-#ifdef CONFIG_PPC64
 	kuap_assert_locked();
-#endif
 
 	booke_restore_dbcr0();
@@ -253,9 +254,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	CT_WARN_ON(ct_state() == CONTEXT_USER);
 
-#ifdef CONFIG_PPC64
 	kuap_assert_locked();
-#endif
 
 	regs->result = r3;
@@ -350,7 +349,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	account_cpu_user_exit();
 
-#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E and ppc32 not using this */
+#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not using this */
 	/*
 	 * We do this at the end so that we do context switch with KERNEL AMR
 	 */
@@ -379,9 +378,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	 * We don't need to restore AMR on the way back to userspace for KUAP.
 	 * AMR can only have been unlocked if we interrupted the kernel.
 	 */
-#ifdef CONFIG_PPC64
 	kuap_assert_locked();
-#endif
 
 	local_irq_save(flags);
@@ -438,9 +435,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	/*
 	 * We do this at the end so that we do context switch with KERNEL AMR
 	 */
-#ifdef CONFIG_PPC64
 	kuap_user_restore(regs);
-#endif
 
 	return ret;
 }
@@ -450,9 +445,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 {
 	unsigned long flags;
 	unsigned long ret = 0;
-#ifdef CONFIG_PPC64
 	unsigned long kuap;
-#endif
 
 	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
 	    unlikely(!(regs->msr & MSR_RI)))
@@ -466,9 +459,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 	if (TRAP(regs) != 0x700)
 		CT_WARN_ON(ct_state() == CONTEXT_USER);
 
-#ifdef CONFIG_PPC64
 	kuap = kuap_get_and_assert_locked();
-#endif
 
 	if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
 		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
@@ -510,9 +501,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 	 * which would cause Read-After-Write stalls. Hence, we take the AMR
 	 * value from the check above.
 	 */
-#ifdef CONFIG_PPC64
 	kuap_kernel_restore(regs, kuap);
-#endif
 
 	return ret;
 }
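
The reason the #ifdef CONFIG_PPC64 guards can simply be dropped around these calls is the usual kernel stub pattern: when KUAP is not configured, the helpers are empty inlines the compiler discards, so the call sites compile unconditionally on both 32- and 64-bit. An assumed shape of those stubs, not a verbatim copy of the header:

#ifndef CONFIG_PPC_KUAP
static inline void kuap_assert_locked(void) { }
static inline void kuap_save_and_lock(struct pt_regs *regs) { }
static inline void kuap_user_restore(struct pt_regs *regs) { }
static inline void kuap_kernel_restore(struct pt_regs *regs,
				       unsigned long kuap) { }
static inline unsigned long kuap_get_and_assert_locked(void)
{
	return 0;	/* nothing to save or restore */
}
#endif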
...
@@ -1256,6 +1256,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 	restore_sprs(old_thread, new_thread);
 
+#ifdef CONFIG_PPC32
+	kuap_assert_locked();
+#endif
 	last = _switch(old_thread, new_thread);
 
 #ifdef CONFIG_PPC_BOOK3S_64
...