Commit e75c3c3a authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Return VM-Fail from vCPU-run assembly via standard ABI reg

...to prepare for making the assembly sub-routine callable from C code.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 77df5495
@@ -87,7 +87,7 @@ ENDPROC(vmx_vmexit)
  * @launched:	%true if the VMCS has been launched
  *
  * Returns:
- *	%RBX is 0 on VM-Exit, 1 on VM-Fail
+ *	0 on VM-Exit, 1 on VM-Fail
  */
 ENTRY(__vmx_vcpu_run)
 	push %_ASM_BP
@@ -163,17 +163,17 @@ ENTRY(__vmx_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
-	/* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
-	xor %ebx, %ebx
+	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
+	xor %eax, %eax
 
 	/*
-	 * Clear all general purpose registers except RSP and RBX to prevent
+	 * Clear all general purpose registers except RSP and RAX to prevent
 	 * speculative use of the guest's values, even those that are reloaded
 	 * via the stack. In theory, an L1 cache miss when restoring registers
 	 * could lead to speculative execution with the guest's values.
 	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
-	 * free. RSP and RBX are exempt as RSP is restored by hardware during
-	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
+	 * free. RSP and RAX are exempt as RSP is restored by hardware during
+	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
 	 */
 1:
 #ifdef CONFIG_X86_64
@@ -186,7 +186,7 @@ ENTRY(__vmx_vcpu_run)
 	xor %r14d, %r14d
 	xor %r15d, %r15d
 #endif
-	xor %eax, %eax
+	xor %ebx, %ebx
 	xor %ecx, %ecx
 	xor %edx, %edx
 	xor %esi, %esi
@@ -199,6 +199,6 @@ ENTRY(__vmx_vcpu_run)
 	ret
 
 	/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
-2:	mov $1, %ebx
+2:	mov $1, %eax
 	jmp 1b
 ENDPROC(__vmx_vcpu_run)

@@ -6446,20 +6446,20 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	asm(
 		"call __vmx_vcpu_run \n\t"
-	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
+	      : ASM_CALL_CONSTRAINT, "=a"(vmx->fail),
 #ifdef CONFIG_X86_64
 		"=D"((int){0}), "=S"((int){0}), "=d"((int){0})
 	      : "D"(vmx), "S"(&vcpu->arch.regs), "d"(vmx->loaded_vmcs->launched)
 #else
-		"=a"((int){0}), "=d"((int){0}), "=c"((int){0})
+		"=d"((int){0}), "=c"((int){0})
 	      : "a"(vmx), "d"(&vcpu->arch.regs), "c"(vmx->loaded_vmcs->launched)
 #endif
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
-		, "rax", "rcx"
+		, "rbx", "rcx"
 		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
 #else
-		, "edi", "esi"
+		, "ebx", "edi", "esi"
 #endif
 	);
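With the result in RAX/EAX, __vmx_vcpu_run hands back its VM-Fail indicator in the register the calling convention already reserves for return values, which is what makes the follow-up conversion to a plain C call possible. A minimal sketch of what such a caller could look like, assuming a hypothetical C declaration for the assembly routine (the prototype and the cast below are illustrative and not part of this commit):

	/*
	 * Hypothetical declaration for the assembly entry point: because the
	 * result now comes back in RAX/EAX, the routine follows the standard
	 * ABI and can be declared like any other C function returning a value.
	 */
	bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);

	/* The open-coded asm("call __vmx_vcpu_run") above could then reduce to: */
	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				   vmx->loaded_vmcs->launched);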