Commit c1118b36 authored by Paolo Bonzini

x86: kvm: use alternatives for VMCALL vs. VMMCALL if kernel text is read-only

On x86_64, kernel text mappings are mapped read-only with CONFIG_DEBUG_RODATA.
In that case, KVM will fail to patch VMCALL instructions to VMMCALL
as required on AMD processors.

The failure mode is currently a divide-by-zero exception, which obviously
is a KVM bug that has to be fixed.  However, picking the right instruction
between VMCALL and VMMCALL will be faster and will help if you cannot upgrade
the hypervisor.
Reported-by: Chris Webb <chris@arachsys.com>
Tested-by: Chris Webb <chris@arachsys.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 81760dcc
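For context, the KVM_HYPERCALL macro changed below is consumed by the inline-asm hypercall helpers in the same header. A simplified sketch, close to the existing kvm_hypercall0 helper in kvm_para.h (condensed here, so treat the exact operands as approximate):

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

	/* Emits the three-byte VMCALL/VMMCALL sequence selected at boot. */
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

Because these instruction bytes live in kernel text, the alternatives framework can rewrite them during early boot, before CONFIG_DEBUG_RODATA makes the text mapping read-only.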
@@ -202,6 +202,7 @@
 #define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
+#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
@@ -2,6 +2,7 @@
 #define _ASM_X86_KVM_PARA_H
 
 #include <asm/processor.h>
+#include <asm/alternative.h>
 #include <uapi/asm/kvm_para.h>
 
 extern void kvmclock_init(void);
@@ -16,10 +17,15 @@ static inline bool kvm_check_and_clear_guest_paused(void)
 }
 #endif /* CONFIG_KVM_GUEST */
 
-/* This instruction is vmcall. On non-VT architectures, it will generate a
- * trap that we will then rewrite to the appropriate instruction.
+#ifdef CONFIG_DEBUG_RODATA
+#define KVM_HYPERCALL \
+	ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
+#else
+/* On AMD processors, vmcall will generate a trap that we will
+ * then rewrite to the appropriate instruction.
  */
 #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
+#endif
 
 /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
  * instruction. The hypervisor may replace it with something else but only the
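For readability: the two .byte sequences are the encodings of vmcall (0f 01 c1) and vmmcall (0f 01 d9). Written with mnemonics, the new CONFIG_DEBUG_RODATA definition is roughly equivalent to the sketch below; the header keeps the raw bytes, presumably so the build does not depend on assembler support for these mnemonics, and the macro name here is illustrative only.

/* Illustrative equivalent of the patched KVM_HYPERCALL definition above;
 * not the literal header text, which spells the instructions as bytes. */
#define KVM_HYPERCALL_BY_MNEMONIC \
	ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)

At early boot, before the text mapping is marked read-only, apply_alternatives() checks X86_FEATURE_VMMCALL and, where it is set, rewrites the vmcall bytes to vmmcall in place, so the guest no longer has to rely on KVM patching the instruction at fault time.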
@@ -525,6 +525,13 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 	}
 #endif
 
+	/*
+	 * This is only needed to tell the kernel whether to use VMCALL
+	 * and VMMCALL. VMMCALL is never executed except under virt, so
+	 * we can set it unconditionally.
+	 */
+	set_cpu_cap(c, X86_FEATURE_VMMCALL);
+
 	/* F16h erratum 793, CVE-2013-6885 */
 	if (c->x86 == 0x16 && c->x86_model <= 0xf)
 		msr_set_bit(MSR_AMD64_LS_CFG, 15);
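Since the bit is set unconditionally for every AMD CPU, it behaves like any other synthetic feature flag and is already visible by the time alternatives are applied. A hypothetical consumer, illustration only and not part of this commit, could query it in the usual way:

#include <asm/cpufeature.h>

/* Hypothetical helper, not from this commit: reports whether the
 * hypercall alternative will be (or was) patched to vmmcall. */
static inline bool kvm_prefers_vmmcall(void)
{
	return boot_cpu_has(X86_FEATURE_VMMCALL);
}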