Commit 2ba4493a authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Explicitly check for valid guest state for !unrestricted guest

Call guest_state_valid() directly instead of querying emulation_required
when checking if L1 is attempting VM-Enter with invalid guest state.
If emulate_invalid_guest_state is false, KVM will fixup segment regs to
avoid emulation and will never set emulation_required, i.e. KVM will
incorrectly miss the associated consistency checks because the nested
path stuffs segments directly into vmcs02.

Opportunistically add Consistency Check tracing to make future debug
suck a little less.

Fixes: 2bb8cafe ("KVM: vVMX: signal failure for nested VMEntry if emulation_required")
Fixes: 3184a995 ("KVM: nVMX: fix vmentry failure code when L2 state would require emulation")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923184452.980-4-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b89d5ad0
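
For reference, the failure mode is easy to demonstrate outside the kernel. Below is a minimal userspace sketch (stub functions, not KVM code) showing that gating the nested consistency check on emulation_required() makes it dead code whenever the emulate_invalid_guest_state module param is off, while querying guest state directly does not:

/* Minimal userspace sketch of the bug; names mirror KVM but are stubs. */
#include <stdbool.h>
#include <stdio.h>

/* Module param: commonly off in production. */
static bool emulate_invalid_guest_state = false;

/* Stand-in for the real segment/mode checks; pretend L1 stuffed
 * invalid state into vmcs12. */
static bool guest_state_valid(void)
{
	return false;
}

static bool emulation_required(void)
{
	return emulate_invalid_guest_state && !guest_state_valid();
}

int main(void)
{
	/* Old nested check: gated on emulation_required(), which can
	 * never be true when the module param is off, so the VM-Enter
	 * consistency check is silently skipped. */
	printf("old check fails VM-Enter: %d\n", emulation_required());

	/* New nested check: queries guest state directly, so invalid
	 * state fails VM-Enter regardless of the module param. */
	printf("new check fails VM-Enter: %d\n", !guest_state_valid());
	return 0;
}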
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2576,7 +2576,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	 * which means L1 attempted VMEntry to L2 with invalid state.
 	 * Fail the VMEntry.
 	 */
-	if (vmx->emulation_required) {
+	if (CC(!vmx_guest_state_valid(vcpu))) {
 		*entry_failure_code = ENTRY_FAIL_DEFAULT;
 		return -EINVAL;
 	}
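
CC() is the consistency-check tracing macro nested.c already uses for other VM-Enter checks: it evaluates the expression and, on failure, emits the kvm_nested_vmenter_failed tracepoint with the stringified check. A simplified, self-contained sketch of the idea (printf stands in for the tracepoint; the real macro lives in arch/x86/kvm/vmx/nested.c and uses the same GNU C statement-expression form):

#include <stdbool.h>
#include <stdio.h>

#define CC(consistency_check)						\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		printf("nested VM-Enter failed: %s\n",			\
		       #consistency_check);				\
	failed;								\
})

int main(void)
{
	int bad_state = 1;

	/* Stringification means the trace names the exact failed check,
	 * which is what makes future debug suck a little less. */
	if (CC(bad_state != 0))
		return -1;
	return 0;
}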
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -337,7 +337,6 @@ static const struct kernel_param_ops vmentry_l1d_flush_ops = {
 };
 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
 
-static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
 static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
 							   u32 msr, int type);
@@ -1340,7 +1339,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 
 static bool emulation_required(struct kvm_vcpu *vcpu)
 {
-	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
+	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
 }
 
 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -3402,11 +3401,8 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
  * not.
  * We assume that registers are always usable
  */
-static bool guest_state_valid(struct kvm_vcpu *vcpu)
+bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
 {
-	if (is_unrestricted_guest(vcpu))
-		return true;
-
 	/* real mode guest state checks */
 	if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
 		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -321,6 +321,7 @@ void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
 		   int root_level);
+
 void update_exception_bitmap(struct kvm_vcpu *vcpu);
 void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
@@ -472,6 +473,12 @@ static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
 			SECONDARY_EXEC_UNRESTRICTED_GUEST));
 }
 
+bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
+static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
+{
+	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
+}
+
 void dump_vmcs(void);
 
 #endif /* __KVM_X86_VMX_H */
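
The resulting split is the usual fast-path/slow-path idiom: the inline vmx_guest_state_valid() keeps the cheap unrestricted-guest short circuit (which, per is_unrestricted_guest() above, already accounts for guest mode and vmcs02's secondary controls), while __vmx_guest_state_valid() unconditionally runs the expensive segment and mode checks. A stand-alone sketch of the pattern (toy types, not the kernel implementation):

#include <stdbool.h>

struct vcpu { bool unrestricted; };	/* toy stand-in for struct kvm_vcpu */

/* Slow path: always evaluates guest state. */
bool __guest_state_valid(struct vcpu *v)
{
	(void)v;
	/* real/protected-mode segment checks would go here */
	return true;
}

/* Fast path: unrestricted guests are valid by definition, so the
 * expensive checks are skipped for them. */
static inline bool guest_state_valid(struct vcpu *v)
{
	return v->unrestricted || __guest_state_valid(v);
}

Because prepare_vmcs02() calls the wrapper, the full checks now run exactly when L2 is not an unrestricted guest, independent of whether L0 would ever need to emulate.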