Commit 29b810f5 authored by Jan Beulich, committed by Thomas Gleixner

x86/xen/32: Make xen_iret_crit_fixup() independent of frame layout

Now that SS:ESP always get saved by SAVE_ALL, this also needs to be
accounted for in xen_iret_crit_fixup(). Otherwise the old_ax value gets
interpreted as EFLAGS, and hence VM86 mode appears to be active all the
time, leading to random "vm86_32: no user_vm86: BAD" log messages alongside
processes randomly crashing.

Since following the previous model (sitting after SAVE_ALL) would further
complicate the code _and_ retain the dependency of xen_iret_crit_fixup() on
frame manipulations done by entry_32.S, switch things around and do the
adjustment ahead of SAVE_ALL.

Fixes: 3c88c692 ("x86/stackframe/32: Provide consistent pt_regs")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Stable Team <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/32d8713d-25a7-84ab-b74b-aa3e88abce6b@suse.com
parent 81ff2c37
...@@ -1341,11 +1341,6 @@ END(spurious_interrupt_bug) ...@@ -1341,11 +1341,6 @@ END(spurious_interrupt_bug)
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
ENTRY(xen_hypervisor_callback) ENTRY(xen_hypervisor_callback)
pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
/* /*
* Check to see if we got the event in the critical * Check to see if we got the event in the critical
* region in xen_iret_direct, after we've reenabled * region in xen_iret_direct, after we've reenabled
...@@ -1353,16 +1348,17 @@ ENTRY(xen_hypervisor_callback) ...@@ -1353,16 +1348,17 @@ ENTRY(xen_hypervisor_callback)
* iret instruction's behaviour where it delivers a * iret instruction's behaviour where it delivers a
* pending interrupt when enabling interrupts: * pending interrupt when enabling interrupts:
*/ */
movl PT_EIP(%esp), %eax cmpl $xen_iret_start_crit, (%esp)
cmpl $xen_iret_start_crit, %eax
jb 1f jb 1f
cmpl $xen_iret_end_crit, %eax cmpl $xen_iret_end_crit, (%esp)
jae 1f jae 1f
call xen_iret_crit_fixup
jmp xen_iret_crit_fixup 1:
pushl $-1 /* orig_ax = -1 => not a system call */
ENTRY(xen_do_upcall) SAVE_ALL
1: mov %esp, %eax ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
mov %esp, %eax
call xen_evtchn_do_upcall call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPTION #ifndef CONFIG_PREEMPTION
call xen_maybe_preempt_hcall call xen_maybe_preempt_hcall
......
...@@ -126,10 +126,9 @@ hyper_iret: ...@@ -126,10 +126,9 @@ hyper_iret:
.globl xen_iret_start_crit, xen_iret_end_crit .globl xen_iret_start_crit, xen_iret_end_crit
/* /*
* This is called by xen_hypervisor_callback in entry.S when it sees * This is called by xen_hypervisor_callback in entry_32.S when it sees
* that the EIP at the time of interrupt was between * that the EIP at the time of interrupt was between
* xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in * xen_iret_start_crit and xen_iret_end_crit.
* %eax so we can do a more refined determination of what to do.
* *
* The stack format at this point is: * The stack format at this point is:
* ---------------- * ----------------
...@@ -138,34 +137,23 @@ hyper_iret: ...@@ -138,34 +137,23 @@ hyper_iret:
* eflags } outer exception info * eflags } outer exception info
* cs } * cs }
* eip } * eip }
* ---------------- <- edi (copy dest)
* eax : outer eax if it hasn't been restored
* ---------------- * ----------------
* eflags } nested exception info * eax : outer eax if it hasn't been restored
* cs } (no ss/esp because we're nested
* eip } from the same ring)
* orig_eax }<- esi (copy src)
* - - - - - - - -
* fs }
* es }
* ds } SAVE_ALL state
* eax }
* : :
* ebx }<- esp
* ---------------- * ----------------
* eflags }
* cs } nested exception info
* eip }
* return address : (into xen_hypervisor_callback)
* *
* In order to deliver the nested exception properly, we need to shift * In order to deliver the nested exception properly, we need to discard the
* everything from the return addr up to the error code so it sits * nested exception frame such that when we handle the exception, we do it
* just under the outer exception info. This means that when we * in the context of the outer exception rather than starting a new one.
* handle the exception, we do it in the context of the outer
* exception rather than starting a new one.
* *
* The only caveat is that if the outer eax hasn't been restored yet * The only caveat is that if the outer eax hasn't been restored yet (i.e.
* (ie, it's still on stack), we need to insert its value into the * it's still on stack), we need to restore its value here.
* SAVE_ALL state before going on, since it's usermode state which we
* eventually need to restore.
*/ */
ENTRY(xen_iret_crit_fixup) ENTRY(xen_iret_crit_fixup)
pushl %ecx
/* /*
* Paranoia: Make sure we're really coming from kernel space. * Paranoia: Make sure we're really coming from kernel space.
* One could imagine a case where userspace jumps into the * One could imagine a case where userspace jumps into the
...@@ -176,32 +164,26 @@ ENTRY(xen_iret_crit_fixup) ...@@ -176,32 +164,26 @@ ENTRY(xen_iret_crit_fixup)
* jump instruction itself, not the destination, but some * jump instruction itself, not the destination, but some
* virtual environments get this wrong. * virtual environments get this wrong.
*/ */
movl PT_CS(%esp), %ecx movl 3*4(%esp), %ecx /* nested CS */
andl $SEGMENT_RPL_MASK, %ecx andl $SEGMENT_RPL_MASK, %ecx
cmpl $USER_RPL, %ecx cmpl $USER_RPL, %ecx
popl %ecx
je 2f je 2f
lea PT_ORIG_EAX(%esp), %esi
lea PT_EFLAGS(%esp), %edi
/* /*
* If eip is before iret_restore_end then stack * If eip is before iret_restore_end then stack
* hasn't been restored yet. * hasn't been restored yet.
*/ */
cmp $iret_restore_end, %eax cmpl $iret_restore_end, 1*4(%esp)
jae 1f jae 1f
movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */ movl 4*4(%esp), %eax /* load outer EAX */
movl %eax, PT_EAX(%esp) ret $4*4 /* discard nested EIP, CS, and EFLAGS as
* well as the just restored EAX */
lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */ 1:
ret $3*4 /* discard nested EIP, CS, and EFLAGS */
/* set up the copy */
1: std
mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */
rep movsl
cld
lea 4(%edi), %esp /* point esp to new frame */
2: jmp xen_do_upcall
2:
ret
END(xen_iret_crit_fixup)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.