Commit d881e6f6 authored by Avi Kivity

KVM: VMX: Return correct CPL during transition to protected mode

In protected mode, the CPL is defined as the lower two bits of CS, as set by
the last far jump.  But during the transition to protected mode, there is no
last far jump, so we need to return zero (the inherited real mode CPL).

Fix by reading CPL from the cache during the transition.  This isn't 100%
correct since we don't set the CPL cache on a far jump, but since protected
mode transition will always jump to a segment with RPL=0, it will always
work.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent e676505a
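
For context, the "normal CPL calculation" the message refers to takes the low two bits (the RPL) of the CS selector once the guest runs in protected mode; in real mode the CPL is simply 0, and in virtual-8086 mode it is 3. The standalone sketch below illustrates that rule and the fallback to a cached value while the mode transition leaves the segment state inconsistent. All names (demo_vcpu, demo_get_cpl) are invented for the example; this is not the kernel's __vmx_get_cpl().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented, self-contained stand-in for the guest state KVM tracks. */
struct demo_vcpu {
	bool protected_mode;      /* CR0.PE as the guest sees it           */
	bool v8086;               /* EFLAGS.VM: virtual-8086 mode          */
	uint16_t cs_selector;     /* current CS selector                   */
	bool in_mode_transition;  /* segment state temporarily inconsistent */
	int cpl_cache;            /* CPL computed while state was valid    */
};

static int demo_get_cpl(const struct demo_vcpu *v)
{
	/* During the real->protected transition, CS may still carry a
	 * real-mode selector with nonzero low bits, so trust the cache. */
	if (v->in_mode_transition)
		return v->cpl_cache;

	if (!v->protected_mode)
		return 0;                /* real mode runs at CPL 0         */
	if (v->v8086)
		return 3;                /* virtual-8086 code runs at CPL 3 */

	return v->cs_selector & 3;       /* protected mode: RPL bits of CS */
}

int main(void)
{
	struct demo_vcpu v = {
		.protected_mode = true,
		.cs_selector = 0x10,     /* RPL = 0 after the far jump      */
		.in_mode_transition = false,
	};
	printf("CPL = %d\n", demo_get_cpl(&v));  /* prints "CPL = 0"       */
	return 0;
}
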
@@ -3175,11 +3175,22 @@ static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations
+	 * fail; use the cache instead.
+	 */
+	if (unlikely(vmx->emulation_required && emulate_invalid_guest_state)) {
+		return vmx->cpl;
+	}
+
 	if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
 		__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
-		to_vmx(vcpu)->cpl = __vmx_get_cpl(vcpu);
+		vmx->cpl = __vmx_get_cpl(vcpu);
 	}
-	return to_vmx(vcpu)->cpl;
+
+	return vmx->cpl;
 }
...
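
A note on the cache reused above: VCPU_EXREG_CPL is a bit in vcpu->arch.regs_avail that marks vmx->cpl as current, so the getter only recomputes the CPL when the bit is clear; the bit is presumably cleared elsewhere in vmx.c whenever guest segment state is rewritten. The fragment below is a minimal, generic sketch of that lazy-caching pattern with invented names (lazy_cpl, lazy_get_cpl), not the kernel code.

#include <stdbool.h>

/* Invented names: a value cache guarded by a single "available" flag,
 * mirroring the role of the VCPU_EXREG_CPL bit in regs_avail. */
struct lazy_cpl {
	bool available;   /* plays the role of the regs_avail bit */
	int value;        /* plays the role of vmx->cpl           */
};

int recompute_cpl(void)
{
	return 0;         /* placeholder for __vmx_get_cpl()      */
}

int lazy_get_cpl(struct lazy_cpl *c)
{
	if (!c->available) {              /* miss: recompute once          */
		c->available = true;
		c->value = recompute_cpl();
	}
	return c->value;                  /* hit: reuse the cached CPL     */
}

void lazy_invalidate_cpl(struct lazy_cpl *c)
{
	c->available = false;             /* e.g. after CS is rewritten    */
}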