Commit 42b682a3 authored by Linus Torvalds

Merge tag 'x86_asm_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Borislav Petkov:

 - A bunch of changes towards streamlining low-level asm helpers'
   calling conventions so that the former can eventually be converted to C

 - Simplify PUSH_AND_CLEAR_REGS so that it can be used at the system
   call entry paths instead of having open-coded, slightly different
   variants of it everywhere (a short sketch follows this list)

 - Misc other fixes
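
For a concrete feel of that consolidation, here is a minimal sketch of the
shared macro at the two SYSCALL entry paths (context elided; the rcx=%rbp
override is the one visible in the compat hunk further down):

	/* native 64-bit SYSCALL path */
	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	/* 32-bit compat SYSCALL path: user %ecx was stashed in %rbp,
	 * so pt_regs->cx is filled from %rbp instead of %rcx */
	PUSH_AND_CLEAR_REGS rcx=%rbp rax=$-ENOSYS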

* tag 'x86_asm_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/entry: Fix register corruption in compat syscall
  objtool: Fix STACK_FRAME_NON_STANDARD reloc type
  linkage: Fix issue with missing symbol size
  x86/entry: Remove skip_r11rcx
  x86/entry: Use PUSH_AND_CLEAR_REGS for compat
  x86/entry: Simplify entry_INT80_compat()
  x86/mm: Simplify RESERVE_BRK()
  x86/entry: Convert SWAPGS to swapgs and remove the definition of SWAPGS
  x86/entry: Don't call error_entry() for XENPV
  x86/entry: Move CLD to the start of the idtentry macro
  x86/entry: Move PUSH_AND_CLEAR_REGS out of error_entry()
  x86/entry: Switch the stack after error_entry() returns
  x86/traps: Use pt_regs directly in fixup_bad_iret()
parents c5a3d3c0 036c07c0
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -63,7 +63,7 @@ For 32-bit we have the following conventions - kernel is built with
  * for assembly code:
  */
-.macro PUSH_REGS rdx=%rdx rax=%rax save_ret=0
+.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
 	.if \save_ret
 	pushq	%rsi		/* pt_regs->si */
 	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
@@ -73,7 +73,7 @@ For 32-bit we have the following conventions - kernel is built with
 	pushq	%rsi		/* pt_regs->si */
 	.endif
 	pushq	\rdx		/* pt_regs->dx */
-	pushq	%rcx		/* pt_regs->cx */
+	pushq	\rcx		/* pt_regs->cx */
 	pushq	\rax		/* pt_regs->ax */
 	pushq	%r8		/* pt_regs->r8 */
 	pushq	%r9		/* pt_regs->r9 */
@@ -99,6 +99,7 @@ For 32-bit we have the following conventions - kernel is built with
 	 * well before they could be put to use in a speculative execution
 	 * gadget.
 	 */
+	xorl	%esi,  %esi	/* nospec si  */
 	xorl	%edx,  %edx	/* nospec dx  */
 	xorl	%ecx,  %ecx	/* nospec cx  */
 	xorl	%r8d,  %r8d	/* nospec r8  */
@@ -114,32 +115,24 @@ For 32-bit we have the following conventions - kernel is built with
 .endm

-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
-	PUSH_REGS rdx=\rdx, rax=\rax, save_ret=\save_ret
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
+	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret
 	CLEAR_REGS
 .endm

-.macro POP_REGS pop_rdi=1 skip_r11rcx=0
+.macro POP_REGS pop_rdi=1
 	popq %r15
 	popq %r14
 	popq %r13
 	popq %r12
 	popq %rbp
 	popq %rbx
-	.if \skip_r11rcx
-	popq %rsi
-	.else
 	popq %r11
-	.endif
 	popq %r10
 	popq %r9
 	popq %r8
 	popq %rax
-	.if \skip_r11rcx
-	popq %rsi
-	.else
 	popq %rcx
-	.endif
 	popq %rdx
 	popq %rsi
 	.if \pop_rdi
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -191,8 +191,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
 	 * perf profiles. Nothing jumps here.
 	 */
 syscall_return_via_sysret:
-	/* rcx and r11 are already restored (see code above) */
-	POP_REGS pop_rdi=0 skip_r11rcx=1
+	POP_REGS pop_rdi=0

 	/*
 	 * Now all regs are restored except RSP and RDI.
@@ -323,6 +322,14 @@ SYM_CODE_END(ret_from_fork)
 #endif
 .endm

+/* Save all registers in pt_regs */
+SYM_CODE_START_LOCAL(push_and_clear_regs)
+	UNWIND_HINT_FUNC
+	PUSH_AND_CLEAR_REGS save_ret=1
+	ENCODE_FRAME_POINTER 8
+	RET
+SYM_CODE_END(push_and_clear_regs)
+
 /**
  * idtentry_body - Macro to emit code calling the C function
  * @cfunc:		C function to be called
@@ -330,7 +337,21 @@ SYM_CODE_END(ret_from_fork)
  */
 .macro idtentry_body cfunc has_error_code:req

-	call	error_entry
+	call	push_and_clear_regs
+	UNWIND_HINT_REGS
+
+	/*
+	 * Call error_entry() and switch to the task stack if from userspace.
+	 *
+	 * When in XENPV, it is already in the task stack, and it can't fault
+	 * for native_iret() nor native_load_gs_index() since XENPV uses its
+	 * own pvops for IRET and load_gs_index().  And it doesn't need to
+	 * switch the CR3.  So it can skip invoking error_entry().
+	 */
+	ALTERNATIVE "call error_entry; movq %rax, %rsp", \
+		"", X86_FEATURE_XENPV
+
+	ENCODE_FRAME_POINTER
 	UNWIND_HINT_REGS

 	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument*/
@@ -363,6 +384,7 @@ SYM_CODE_START(\asmsym)
 	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
 	ENDBR
 	ASM_CLAC
+	cld

 	.if \has_error_code == 0
 	pushq	$-1			/* ORIG_RAX: no syscall to restart */
@@ -431,6 +453,7 @@ SYM_CODE_START(\asmsym)
 	UNWIND_HINT_IRET_REGS
 	ENDBR
 	ASM_CLAC
+	cld

 	pushq	$-1			/* ORIG_RAX: no syscall to restart */
@@ -487,6 +510,7 @@ SYM_CODE_START(\asmsym)
 	UNWIND_HINT_IRET_REGS
 	ENDBR
 	ASM_CLAC
+	cld

 	/*
 	 * If the entry is from userspace, switch stacks and treat it as
@@ -550,6 +574,7 @@ SYM_CODE_START(\asmsym)
 	UNWIND_HINT_IRET_REGS offset=8
 	ENDBR
 	ASM_CLAC
+	cld

 	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
 	call	paranoid_entry
@@ -875,7 +900,6 @@ SYM_CODE_END(xen_failsafe_callback)
  */
 SYM_CODE_START_LOCAL(paranoid_entry)
 	UNWIND_HINT_FUNC
-	cld
 	PUSH_AND_CLEAR_REGS save_ret=1
 	ENCODE_FRAME_POINTER 8
@@ -989,13 +1013,10 @@ SYM_CODE_START_LOCAL(paranoid_exit)
 SYM_CODE_END(paranoid_exit)

 /*
- * Save all registers in pt_regs, and switch GS if needed.
+ * Switch GS and CR3 if needed.
  */
 SYM_CODE_START_LOCAL(error_entry)
 	UNWIND_HINT_FUNC
-	cld
-	PUSH_AND_CLEAR_REGS save_ret=1
-	ENCODE_FRAME_POINTER 8

 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
@@ -1003,19 +1024,15 @@ SYM_CODE_START_LOCAL(error_entry)
 	 * We entered from user mode or we're pretending to have entered
 	 * from user mode due to an IRET fault.
 	 */
-	SWAPGS
+	swapgs
 	FENCE_SWAPGS_USER_ENTRY
 	/* We have user CR3.  Change to kernel CR3. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	leaq	8(%rsp), %rdi			/* arg0 = pt_regs pointer */

 .Lerror_entry_from_usermode_after_swapgs:
 	/* Put us onto the real thread stack. */
-	popq	%r12				/* save return addr in %12 */
-	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
 	call	sync_regs
-	movq	%rax, %rsp			/* switch stack */
-	ENCODE_FRAME_POINTER
-	pushq	%r12
 	RET

 /*
@@ -1039,7 +1056,7 @@ SYM_CODE_START_LOCAL(error_entry)
 	 * gsbase and proceed.  We'll fix up the exception and land in
 	 * .Lgs_change's error handler with kernel gsbase.
 	 */
-	SWAPGS
+	swapgs

 	/*
 	 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
@@ -1047,6 +1064,7 @@ SYM_CODE_START_LOCAL(error_entry)
 	 */
 .Lerror_entry_done_lfence:
 	FENCE_SWAPGS_KERNEL_ENTRY
+	leaq	8(%rsp), %rax			/* return pt_regs pointer */
 	RET

 .Lbstep_iret:
@@ -1059,7 +1077,7 @@ SYM_CODE_START_LOCAL(error_entry)
 	 * We came from an IRET to user mode, so we have user
 	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
 	 */
-	SWAPGS
+	swapgs
 	FENCE_SWAPGS_USER_ENTRY
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
@@ -1067,9 +1085,9 @@ SYM_CODE_START_LOCAL(error_entry)
 	 * Pretend that the exception came from user mode: set up pt_regs
 	 * as if we faulted immediately after IRET.
 	 */
-	mov	%rsp, %rdi
+	leaq	8(%rsp), %rdi			/* arg0 = pt_regs pointer */
 	call	fixup_bad_iret
-	mov	%rax, %rsp
+	mov	%rax, %rdi
 	jmp	.Lerror_entry_from_usermode_after_swapgs
 SYM_CODE_END(error_entry)
@@ -1132,6 +1150,7 @@ SYM_CODE_START(asm_exc_nmi)
 	 */
 	ASM_CLAC
+	cld

 	/* Use %rdx as our temp variable throughout */
 	pushq	%rdx
@@ -1151,7 +1170,6 @@ SYM_CODE_START(asm_exc_nmi)
 	 */
 	swapgs
-	cld
 	FENCE_SWAPGS_USER_ENTRY
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
 	movq	%rsp, %rdx
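Taken together, the idtentry hunks above leave the non-XENPV entry path
looking roughly like this sketch (error-code handling and context elided;
on XENPV the ALTERNATIVE patches the error_entry sequence out):

	call	push_and_clear_regs	/* build pt_regs, clear regs */
	call	error_entry		/* swapgs/CR3 fixup; returns pt_regs in %rax */
	movq	%rax, %rsp		/* switch to the task stack */
	ENCODE_FRAME_POINTER
	movq	%rsp, %rdi		/* pt_regs pointer as first argument */
	call	\cfunc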
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -50,7 +50,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
 	UNWIND_HINT_EMPTY
 	ENDBR
 	/* Interrupts are off on entry. */
-	SWAPGS
+	swapgs

 	pushq	%rax
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
@@ -83,32 +83,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
 	movl	%eax, %eax
 	pushq	%rax			/* pt_regs->orig_ax */
-	pushq	%rdi			/* pt_regs->di */
-	pushq	%rsi			/* pt_regs->si */
-	pushq	%rdx			/* pt_regs->dx */
-	pushq	%rcx			/* pt_regs->cx */
-	pushq	$-ENOSYS		/* pt_regs->ax */
-	pushq	$0			/* pt_regs->r8  = 0 */
-	xorl	%r8d, %r8d		/* nospec   r8 */
-	pushq	$0			/* pt_regs->r9  = 0 */
-	xorl	%r9d, %r9d		/* nospec   r9 */
-	pushq	$0			/* pt_regs->r10 = 0 */
-	xorl	%r10d, %r10d		/* nospec   r10 */
-	pushq	$0			/* pt_regs->r11 = 0 */
-	xorl	%r11d, %r11d		/* nospec   r11 */
-	pushq	%rbx			/* pt_regs->rbx */
-	xorl	%ebx, %ebx		/* nospec   rbx */
-	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
-	xorl	%ebp, %ebp		/* nospec   rbp */
-	pushq	$0			/* pt_regs->r12 = 0 */
-	xorl	%r12d, %r12d		/* nospec   r12 */
-	pushq	$0			/* pt_regs->r13 = 0 */
-	xorl	%r13d, %r13d		/* nospec   r13 */
-	pushq	$0			/* pt_regs->r14 = 0 */
-	xorl	%r14d, %r14d		/* nospec   r14 */
-	pushq	$0			/* pt_regs->r15 = 0 */
-	xorl	%r15d, %r15d		/* nospec   r15 */
+	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
 	UNWIND_HINT_REGS
 	cld
@@ -225,35 +200,7 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
 SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
 	movl	%eax, %eax		/* discard orig_ax high bits */
 	pushq	%rax			/* pt_regs->orig_ax */
-	pushq	%rdi			/* pt_regs->di */
-	pushq	%rsi			/* pt_regs->si */
-	xorl	%esi, %esi		/* nospec   si */
-	pushq	%rdx			/* pt_regs->dx */
-	xorl	%edx, %edx		/* nospec   dx */
-	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
-	xorl	%ecx, %ecx		/* nospec   cx */
-	pushq	$-ENOSYS		/* pt_regs->ax */
-	pushq	$0			/* pt_regs->r8  = 0 */
-	xorl	%r8d, %r8d		/* nospec   r8 */
-	pushq	$0			/* pt_regs->r9  = 0 */
-	xorl	%r9d, %r9d		/* nospec   r9 */
-	pushq	$0			/* pt_regs->r10 = 0 */
-	xorl	%r10d, %r10d		/* nospec   r10 */
-	pushq	$0			/* pt_regs->r11 = 0 */
-	xorl	%r11d, %r11d		/* nospec   r11 */
-	pushq	%rbx			/* pt_regs->rbx */
-	xorl	%ebx, %ebx		/* nospec   rbx */
-	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
-	xorl	%ebp, %ebp		/* nospec   rbp */
-	pushq	$0			/* pt_regs->r12 = 0 */
-	xorl	%r12d, %r12d		/* nospec   r12 */
-	pushq	$0			/* pt_regs->r13 = 0 */
-	xorl	%r13d, %r13d		/* nospec   r13 */
-	pushq	$0			/* pt_regs->r14 = 0 */
-	xorl	%r14d, %r14d		/* nospec   r14 */
-	pushq	$0			/* pt_regs->r15 = 0 */
-	xorl	%r15d, %r15d		/* nospec   r15 */
+	PUSH_AND_CLEAR_REGS rcx=%rbp rax=$-ENOSYS
 	UNWIND_HINT_REGS

 	movq	%rsp, %rdi
@@ -367,54 +314,25 @@ SYM_CODE_START(entry_INT80_compat)
 	/* switch to thread stack expects orig_ax and rdi to be pushed */
 	pushq	%rax			/* pt_regs->orig_ax */
-	pushq	%rdi			/* pt_regs->di */

 	/* Need to switch before accessing the thread stack. */
-	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

 	/* In the Xen PV case we already run on the thread stack. */
 	ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV

-	movq	%rsp, %rdi
+	movq	%rsp, %rax
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

-	pushq	6*8(%rdi)		/* regs->ss */
-	pushq	5*8(%rdi)		/* regs->rsp */
-	pushq	4*8(%rdi)		/* regs->eflags */
-	pushq	3*8(%rdi)		/* regs->cs */
-	pushq	2*8(%rdi)		/* regs->ip */
-	pushq	1*8(%rdi)		/* regs->orig_ax */
-	pushq	(%rdi)			/* pt_regs->di */
+	pushq	5*8(%rax)		/* regs->ss */
+	pushq	4*8(%rax)		/* regs->rsp */
+	pushq	3*8(%rax)		/* regs->eflags */
+	pushq	2*8(%rax)		/* regs->cs */
+	pushq	1*8(%rax)		/* regs->ip */
+	pushq	0*8(%rax)		/* regs->orig_ax */
 .Lint80_keep_stack:

-	pushq	%rsi			/* pt_regs->si */
-	xorl	%esi, %esi		/* nospec   si */
-	pushq	%rdx			/* pt_regs->dx */
-	xorl	%edx, %edx		/* nospec   dx */
-	pushq	%rcx			/* pt_regs->cx */
-	xorl	%ecx, %ecx		/* nospec   cx */
-	pushq	$-ENOSYS		/* pt_regs->ax */
-	pushq	%r8			/* pt_regs->r8 */
-	xorl	%r8d, %r8d		/* nospec   r8 */
-	pushq	%r9			/* pt_regs->r9 */
-	xorl	%r9d, %r9d		/* nospec   r9 */
-	pushq	%r10			/* pt_regs->r10*/
-	xorl	%r10d, %r10d		/* nospec   r10 */
-	pushq	%r11			/* pt_regs->r11 */
-	xorl	%r11d, %r11d		/* nospec   r11 */
-	pushq	%rbx			/* pt_regs->rbx */
-	xorl	%ebx, %ebx		/* nospec   rbx */
-	pushq	%rbp			/* pt_regs->rbp */
-	xorl	%ebp, %ebp		/* nospec   rbp */
-	pushq	%r12			/* pt_regs->r12 */
-	xorl	%r12d, %r12d		/* nospec   r12 */
-	pushq	%r13			/* pt_regs->r13 */
-	xorl	%r13d, %r13d		/* nospec   r13 */
-	pushq	%r14			/* pt_regs->r14 */
-	xorl	%r14d, %r14d		/* nospec   r14 */
-	pushq	%r15			/* pt_regs->r15 */
-	xorl	%r15d, %r15d		/* nospec   r15 */
+	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
 	UNWIND_HINT_REGS
 	cld
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -137,14 +137,6 @@ static __always_inline void arch_local_irq_restore(unsigned long flags)
 	if (!arch_irqs_disabled_flags(flags))
 		arch_local_irq_enable();
 }
-#else
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_XEN_PV
-#define SWAPGS	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
-#else
-#define SWAPGS	swapgs
-#endif
-#endif
 #endif /* !__ASSEMBLY__ */

 #endif
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -108,27 +108,19 @@ extern unsigned long _brk_end;
 void *extend_brk(size_t size, size_t align);

 /*
- * Reserve space in the brk section.  The name must be unique within
- * the file, and somewhat descriptive.  The size is in bytes.  Must be
- * used at file scope.
+ * Reserve space in the brk section.  The name must be unique within the file,
+ * and somewhat descriptive.  The size is in bytes.
  *
- * (This uses a temp function to wrap the asm so we can pass it the
- * size parameter; otherwise we wouldn't be able to.  We can't use a
- * "section" attribute on a normal variable because it always ends up
- * being @progbits, which ends up allocating space in the vmlinux
- * executable.)
+ * The allocation is done using inline asm (rather than using a section
+ * attribute on a normal variable) in order to allow the use of @nobits, so
+ * that it doesn't take up any space in the vmlinux file.
  */
-#define RESERVE_BRK(name,sz)						\
-	static void __section(".discard.text") __noendbr __used notrace	\
-	__brk_reservation_fn_##name##__(void) {				\
-		asm volatile (						\
-			".pushsection .brk_reservation,\"aw\",@nobits;" \
-			".brk." #name ":"				\
-			" 1:.skip %c0;"					\
-			" .size .brk." #name ", . - 1b;"		\
-			" .popsection"					\
-			: : "i" (sz));					\
-	}
+#define RESERVE_BRK(name, size)						\
+	asm(".pushsection .brk_reservation,\"aw\",@nobits\n\t"		\
+	    ".brk." #name ":\n\t"					\
+	    ".skip " __stringify(size) "\n\t"				\
+	    ".size .brk." #name ", " __stringify(size) "\n\t"		\
+	    ".popsection\n\t")

 extern void probe_roms(void);
 #ifdef __i386__
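For illustration (the name and size are examples, modeled on the DMI
reservation in the x86 setup code), a use such as RESERVE_BRK(dmi_alloc,
65536) now expands to plain file-scope asm along these lines:

	.pushsection .brk_reservation,"aw",@nobits
	.brk.dmi_alloc:
		.skip 65536
		.size .brk.dmi_alloc, 65536
	.popsection

Since the section is @nobits, the reservation occupies no bytes in the
vmlinux file; the old dummy-function wrapper existed only to smuggle the
size constant into the asm and is no longer needed.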
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -13,7 +13,7 @@
 #ifdef CONFIG_X86_64
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
 asmlinkage __visible notrace
-struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
+struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs);
 void __init trap_init(void);
 asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *eregs);
 #endif
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -898,14 +898,10 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
 }
 #endif

-struct bad_iret_stack {
-	void *error_entry_ret;
-	struct pt_regs regs;
-};
-
-asmlinkage __visible noinstr
-struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
 {
+	struct pt_regs tmp, *new_stack;
+
 	/*
 	 * This is called from entry_64.S early in handling a fault
 	 * caused by a bad iret to user mode.  To handle the fault
@@ -914,19 +910,18 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 	 * just below the IRET frame) and we want to pretend that the
 	 * exception came from the IRET target.
 	 */
-	struct bad_iret_stack tmp, *new_stack =
-		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
+	new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

 	/* Copy the IRET target to the temporary storage. */
-	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
+	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

 	/* Copy the remainder of the stack from the current stack. */
-	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

 	/* Update the entry stack */
 	__memcpy(new_stack, &tmp, sizeof(tmp));

-	BUG_ON(!user_mode(&new_stack->regs));
+	BUG_ON(!user_mode(new_stack));
 	return new_stack;
 }
 #endif
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -171,12 +171,9 @@
 /* SYM_ALIAS -- use only if you have to */
 #ifndef SYM_ALIAS
-#define SYM_ALIAS(alias, name, sym_type, linkage)			\
+#define SYM_ALIAS(alias, name, linkage)					\
 	linkage(alias) ASM_NL						\
-	.set alias, name ASM_NL						\
-	.type alias sym_type ASM_NL					\
-	.set .L__sym_size_##alias, .L__sym_size_##name ASM_NL		\
-	.size alias, .L__sym_size_##alias
+	.set alias, name ASM_NL
 #endif

 /* === code annotations === */
@@ -261,7 +258,7 @@
  */
 #ifndef SYM_FUNC_ALIAS
 #define SYM_FUNC_ALIAS(alias, name)					\
-	SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_GLOBAL)
+	SYM_ALIAS(alias, name, SYM_L_GLOBAL)
 #endif

 /*
@@ -269,7 +266,7 @@
  */
 #ifndef SYM_FUNC_ALIAS_LOCAL
 #define SYM_FUNC_ALIAS_LOCAL(alias, name)				\
-	SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_LOCAL)
+	SYM_ALIAS(alias, name, SYM_L_LOCAL)
 #endif

 /*
@@ -277,7 +274,7 @@
  */
 #ifndef SYM_FUNC_ALIAS_WEAK
 #define SYM_FUNC_ALIAS_WEAK(alias, name)				\
-	SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK)
+	SYM_ALIAS(alias, name, SYM_L_WEAK)
 #endif

 /* SYM_CODE_START -- use for non-C (special) functions */
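Alias sites keep their shape under the slimmed-down SYM_ALIAS; a minimal
sketch (function names are illustrative, modeled on the x86 string
routines converted elsewhere in this series):

SYM_FUNC_START(__example_copy)		/* illustrative name */
	movq	%rdi, %rax		/* trivial body, for the sketch only */
	RET
SYM_FUNC_END(__example_copy)		/* records the symbol's type and size */

/* expose the same code under a second, global name */
SYM_FUNC_ALIAS(example_copy, __example_copy)

The point of the fix is that SYM_ALIAS no longer duplicates the target's
type and size via the .L__sym_size_* bookkeeping (which is what used to go
missing); the bare ".set alias, name" assignment carries the alias.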
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -40,6 +40,8 @@ struct unwind_hint {
 #ifdef CONFIG_STACK_VALIDATION

+#include <asm/asm.h>
+
 #ifndef __ASSEMBLY__

 #define UNWIND_HINT(sp_reg, sp_offset, type, end)		\
@@ -137,7 +139,7 @@ struct unwind_hint {
 .macro STACK_FRAME_NON_STANDARD func:req
 	.pushsection .discard.func_stack_frame_non_standard, "aw"
-	.long \func - .
+	_ASM_PTR \func
 	.popsection
 .endm
--- a/tools/include/linux/objtool.h
+++ b/tools/include/linux/objtool.h
@@ -40,6 +40,8 @@ struct unwind_hint {
 #ifdef CONFIG_STACK_VALIDATION

+#include <asm/asm.h>
+
 #ifndef __ASSEMBLY__

 #define UNWIND_HINT(sp_reg, sp_offset, type, end)		\
@@ -137,7 +139,7 @@ struct unwind_hint {
 .macro STACK_FRAME_NON_STANDARD func:req
 	.pushsection .discard.func_stack_frame_non_standard, "aw"
-	.long \func - .
+	_ASM_PTR \func
 	.popsection
 .endm
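Usage of the asm-side annotation is unchanged; a hedged sketch (the
function is hypothetical):

SYM_CODE_START(example_weird_func)	/* hypothetical function */
	UNWIND_HINT_EMPTY
	/* ... code objtool cannot validate ... */
	RET
SYM_CODE_END(example_weird_func)

	STACK_FRAME_NON_STANDARD example_weird_func

The reloc-type fix matters because ".long \func - ." emitted a 32-bit
PC-relative entry, whereas objtool and the C-side macro work with a
pointer-sized absolute entry; _ASM_PTR brings the asm variant in line.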