Commit b9635930 authored by Sean Christopherson

KVM: selftests: Provide error code as a KVM_ASM_SAFE() output

Provide the error code on a fault in KVM_ASM_SAFE(), e.g. to allow tests
to assert that #PF generates the correct error code without needing to
manually install a #PF handler.  Use r10 as the scratch register for the
error code, as it's already clobbered by the asm blob (loaded with the
RIP of the to-be-executed instruction).  Deliberately load the output
"error_code" even in the non-faulting path so that error_code is always
initialized with deterministic data (the aforementioned RIP), i.e. to
ensure a selftest won't end up with uninitialized consumption regardless
of how KVM_ASM_SAFE() is used.

Don't clear r10 in the non-faulting case; instead, load the error code with
the RIP (see above).  The error code is valid if and only if an exception
occurs, and '0' isn't necessarily a better "invalid" value, e.g. '0'
could result in false passes for a buggy test.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20221102184654.282799-9-dmatlack@google.com
parent f2e5b53b
@@ -786,6 +786,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
  *
  * REGISTER OUTPUTS:
  * r9  = exception vector (non-zero)
+ * r10 = error code
  */
 #define KVM_ASM_SAFE(insn) \
 	"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t" \
@@ -794,29 +795,43 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 	"1: " insn "\n\t" \
 	"xor %%r9, %%r9\n\t" \
 	"2:\n\t" \
-	"mov %%r9b, %[vector]\n\t"
+	"mov %%r9b, %[vector]\n\t" \
+	"mov %%r10, %[error_code]\n\t"
 
-#define KVM_ASM_SAFE_OUTPUTS(v)	[vector] "=qm"(v)
+#define KVM_ASM_SAFE_OUTPUTS(v, ec)	[vector] "=qm"(v), [error_code] "=rm"(ec)
 #define KVM_ASM_SAFE_CLOBBERS	"r9", "r10", "r11"
 
-#define kvm_asm_safe(insn, inputs...) \
-({ \
-	uint8_t vector; \
-	\
-	asm volatile(KVM_ASM_SAFE(insn) \
-		     : KVM_ASM_SAFE_OUTPUTS(vector) \
-		     : inputs \
-		     : KVM_ASM_SAFE_CLOBBERS); \
-	vector; \
+#define kvm_asm_safe(insn, inputs...) \
+({ \
+	uint64_t ign_error_code; \
+	uint8_t vector; \
+	\
+	asm volatile(KVM_ASM_SAFE(insn) \
+		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
+		     : inputs \
+		     : KVM_ASM_SAFE_CLOBBERS); \
+	vector; \
+})
+
+#define kvm_asm_safe_ec(insn, error_code, inputs...) \
+({ \
+	uint8_t vector; \
+	\
+	asm volatile(KVM_ASM_SAFE(insn) \
+		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
+		     : inputs \
+		     : KVM_ASM_SAFE_CLOBBERS); \
+	vector; \
 })
 
 static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val)
 {
+	uint64_t error_code;
 	uint8_t vector;
 	uint32_t a, d;
 
 	asm volatile(KVM_ASM_SAFE("rdmsr")
-		     : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector)
+		     : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector, error_code)
 		     : "c"(msr)
 		     : KVM_ASM_SAFE_CLOBBERS);
@@ -1068,6 +1068,7 @@ static bool kvm_fixup_exception(struct ex_regs *regs)
 	regs->rip = regs->r11;
 	regs->r9 = regs->vector;
+	regs->r10 = regs->error_code;
 	return true;
 }
@@ -18,6 +18,7 @@
 static inline uint8_t hypercall(u64 control, vm_vaddr_t input_address,
 				vm_vaddr_t output_address, uint64_t *hv_status)
 {
+	uint64_t error_code;
 	uint8_t vector;
 
 	/* Note both the hypercall and the "asm safe" clobber r9-r11. */
@@ -25,7 +26,7 @@ static inline uint8_t hypercall(u64 control, vm_vaddr_t input_address,
 			     KVM_ASM_SAFE("vmcall")
 		     : "=a" (*hv_status),
 		       "+c" (control), "+d" (input_address),
-		       KVM_ASM_SAFE_OUTPUTS(vector)
+		       KVM_ASM_SAFE_OUTPUTS(vector, error_code)
 		     : [output_address] "r"(output_address),
 		       "a" (-EFAULT)
 		     : "cc", "memory", "r8", KVM_ASM_SAFE_CLOBBERS);
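
For illustration only (not part of this commit), the sketch below shows how a guest-side test might consume the new kvm_asm_safe_ec() to assert both the vector and the #PF error code.  The helper name, the read-only GVA, the written value, and the expected error-code value are assumptions for this sketch and depend on how the test builds its page tables; PF_VECTOR and GUEST_ASSERT_EQ() come from the usual selftests headers (x86_64/processor.h and ucall_common.h).

/*
 * Hypothetical guest routine: write to a GVA the test has mapped read-only
 * and verify the resulting #PF vector and error code.
 */
static void guest_test_ro_write(uint64_t ro_gva)
{
	uint64_t error_code;
	uint8_t vector;

	/*
	 * The insn string is spliced into KVM_ASM_SAFE(); the extra operands
	 * are forwarded as asm inputs.
	 */
	vector = kvm_asm_safe_ec("mov %[val], (%[gva])", error_code,
				 [gva] "r"(ro_gva), [val] "r"(0xdeadbeefull));

	/*
	 * Expect P=1 | W/R=1 (0x3) for a CPL0 write to a present, read-only
	 * page with CR0.WP=1 (assumed setup for this sketch).
	 */
	GUEST_ASSERT_EQ(vector, PF_VECTOR);
	GUEST_ASSERT_EQ(error_code, 0x3);
}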