Commit 58cb628d authored by Jan Kiszka, committed by Paolo Bonzini

KVM: x86: Validate guest writes to MSR_IA32_APICBASE

Check for invalid state transitions on guest-initiated updates of
MSR_IA32_APICBASE. This addresses both enabling of the x2APIC when it is
not supported and all invalid transitions described in SDM section
10.12.5. It also checks that no reserved bits are set in APICBASE by the
guest.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
[Use cpuid_maxphyaddr instead of guest_cpuid_get_phys_bits. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b3af1e88
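
For context, SDM section 10.12.5 permits only these mode transitions via the EN (bit 11) and EXTD (bit 10) flags of APICBASE: disabled -> xAPIC, xAPIC -> disabled, xAPIC -> x2APIC, and x2APIC -> disabled; EXTD=1 with EN=0 is never a valid state. A minimal user-space sketch of the transition check this commit adds (constants are redefined locally for illustration; the kernel takes them from its MSR headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_APICBASE_ENABLE (1ULL << 11)	/* EN */
#define X2APIC_ENABLE            (1ULL << 10)	/* EXTD */

/* Mirrors the old_state/new_state comparison in kvm_set_apic_base(). */
static bool apic_mode_transition_valid(uint64_t old_base, uint64_t new_base)
{
	uint64_t old_state = old_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
	uint64_t new_state = new_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);

	if (new_state == X2APIC_ENABLE)
		return false;	/* EXTD=1, EN=0 is never a valid state */
	if (new_state == MSR_IA32_APICBASE_ENABLE &&
	    old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
		return false;	/* x2APIC -> xAPIC must pass through disabled */
	if (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
	    old_state == 0)
		return false;	/* disabled -> x2APIC must pass through xAPIC */
	return true;
}

int main(void)
{
	/* xAPIC -> x2APIC is legal (prints 1); disabled -> x2APIC is not (prints 0). */
	printf("%d\n", apic_mode_transition_valid(MSR_IA32_APICBASE_ENABLE,
		       MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE));
	printf("%d\n", apic_mode_transition_valid(0,
		       MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE));
	return 0;
}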
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -72,4 +72,12 @@ static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
 	return best && (best->ecx & bit(X86_FEATURE_PCID));
 }
 
+static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+}
+
 #endif
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -65,7 +65,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 		struct kvm_lapic_state *s);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4392,7 +4392,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 msr;
+	struct msr_data apic_base_msr;
 
 	vmx->rmode.vm86_active = 0;
@@ -4400,10 +4400,11 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	kvm_set_cr8(&vmx->vcpu, 0);
 
-	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&vmx->vcpu))
-		msr |= MSR_IA32_APICBASE_BSP;
-	kvm_set_apic_base(&vmx->vcpu, msr);
+		apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
+	apic_base_msr.host_initiated = true;
+	kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
 
 	vmx_segment_cache_clear(vmx);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,10 +257,26 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	/* TODO: reserve bits check */
-	kvm_lapic_set_base(vcpu, data);
+	u64 old_state = vcpu->arch.apic_base &
+		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+	u64 new_state = msr_info->data &
+		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
+		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+
+	if (!msr_info->host_initiated &&
+	    ((msr_info->data & reserved_bits) != 0 ||
+	     new_state == X2APIC_ENABLE ||
+	     (new_state == MSR_IA32_APICBASE_ENABLE &&
+	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
+	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
+	      old_state == 0)))
+		return 1;
+
+	kvm_lapic_set_base(vcpu, msr_info->data);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
@@ -2009,8 +2025,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case 0x200 ... 0x2ff:
 		return set_msr_mtrr(vcpu, msr, data);
 	case MSR_IA32_APICBASE:
-		kvm_set_apic_base(vcpu, data);
-		break;
+		return kvm_set_apic_base(vcpu, msr_info);
 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
 		return kvm_x2apic_msr_write(vcpu, msr, data);
 	case MSR_IA32_TSCDEADLINE:
@@ -6412,6 +6427,7 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
+	struct msr_data apic_base_msr;
 	int mmu_reset_needed = 0;
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
@@ -6435,7 +6451,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
 
-	kvm_set_apic_base(vcpu, sregs->apic_base);
+	apic_base_msr.data = sregs->apic_base;
+	apic_base_msr.host_initiated = true;
+	kvm_set_apic_base(vcpu, &apic_base_msr);
 
 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
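
The reserved_bits mask in kvm_set_apic_base() above can be verified by hand. A standalone sketch, assuming a guest with MAXPHYADDR = 36 and no x2APIC CPUID bit (both hypothetical values; the kernel queries cpuid_maxphyaddr() and guest_cpuid_has_x2apic() instead):

#include <stdint.h>
#include <stdio.h>

#define X2APIC_ENABLE (1ULL << 10)

int main(void)
{
	int maxphyaddr = 36;	/* hypothetical; cpuid_maxphyaddr() in the kernel */
	int has_x2apic = 0;	/* hypothetical; guest_cpuid_has_x2apic() in the kernel */

	uint64_t reserved_bits =
		((~0ULL) << maxphyaddr) |		/* bits above MAXPHYADDR */
		0x2ff |					/* bits 0-7 and 9; bit 8 is BSP */
		(has_x2apic ? 0 : X2APIC_ENABLE);	/* bit 10 if x2APIC unsupported */

	/* Prints 0xfffffff0000006ff: everything is reserved except BSP (8),
	 * EN (11) and the APIC base address bits 12..35. */
	printf("%#llx\n", (unsigned long long)reserved_bits);
	return 0;
}

A guest write touching any of these bits now makes kvm_set_apic_base() return 1, which kvm_set_msr_common() propagates so the WRMSR emulation path can turn it into a #GP for the guest.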