Commit 767d3d43 authored by Andy Honig, committed by Ben Hutchings

KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797)

commit 0b79459b upstream.

There is a potential use-after-free issue in the handling of
MSR_KVM_SYSTEM_TIME (a sketch of how the guest programs this MSR
follows below).  If the guest specifies a GPA in movable or removable
memory, such as a frame buffer, KVM might continue to write to that
address even after the memory is removed via
KVM_SET_USER_MEMORY_REGION.  KVM pins the page in memory, so this is
unlikely to cause a problem on its own, but if the user space
component re-purposes the memory previously used for the guest, the
guest will be able to corrupt that memory.

Tested: against the kvmclock unit test
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
[bwh: Backported to 3.2:
 - Adjust context
 - We do not implement the PVCLOCK_GUEST_STOPPED flag]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent b7c5ee6d
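For context, here is a minimal guest-side sketch of how kvmclock is enabled. It is not part of this patch; it is loosely modeled on arch/x86/kernel/kvmclock.c, and the function name kvmclock_enable() is illustrative. The guest writes a guest-physical address, with bit 0 as the enable flag, into MSR_KVM_SYSTEM_TIME; from then on KVM writes a struct pvclock_vcpu_time_info at that GPA on every clock update, which is why a stale host-side mapping is exploitable.

#include <asm/msr.h>           /* wrmsrl() */
#include <asm/page.h>          /* __pa() */
#include <asm/kvm_para.h>      /* MSR_KVM_SYSTEM_TIME */
#include <asm/pvclock-abi.h>   /* struct pvclock_vcpu_time_info */

/* The shared time structure must be 32-byte aligned (see the check
 * in kvm_set_msr_common() below) so it cannot straddle a page. */
static struct pvclock_vcpu_time_info hv_clock __aligned(32);

static void kvmclock_enable(void)
{
	/* Bit 0 of the MSR value turns the clock on; the remaining
	 * bits carry the guest-physical address of hv_clock. */
	wrmsrl(MSR_KVM_SYSTEM_TIME, __pa(&hv_clock) | 1);
}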
arch/x86/include/asm/kvm_host.h
@@ -393,8 +393,8 @@ struct kvm_vcpu_arch {
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
 	unsigned int hw_tsc_khz;
-	unsigned int time_offset;
-	struct page *time_page;
+	struct gfn_to_hva_cache pv_time;
+	bool pv_time_enabled;

 	struct {
 		u64 msr_val;

arch/x86/kvm/x86.c
@@ -1105,7 +1105,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
 	unsigned long flags;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
-	void *shared_kaddr;
 	unsigned long this_tsc_khz;
 	s64 kernel_ns, max_kernel_ns;
 	u64 tsc_timestamp;
@@ -1141,7 +1140,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	local_irq_restore(flags);

-	if (!vcpu->time_page)
+	if (!vcpu->pv_time_enabled)
 		return 0;

 	/*
@@ -1199,14 +1198,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 */
 	vcpu->hv_clock.version += 2;

-	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
-
-	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-	       sizeof(vcpu->hv_clock));
-
-	kunmap_atomic(shared_kaddr, KM_USER0);
-
-	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+			       &vcpu->hv_clock,
+			       sizeof(vcpu->hv_clock));
+
 	return 0;
 }
@@ -1496,10 +1490,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.time_page) {
-		kvm_release_page_dirty(vcpu->arch.time_page);
-		vcpu->arch.time_page = NULL;
-	}
+	vcpu->arch.pv_time_enabled = false;
 }

 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1591,6 +1582,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		break;
 	case MSR_KVM_SYSTEM_TIME_NEW:
 	case MSR_KVM_SYSTEM_TIME: {
+		u64 gpa_offset;
 		kvmclock_reset(vcpu);

 		vcpu->arch.time = data;
@@ -1600,21 +1592,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (!(data & 1))
 			break;

-		/* ...but clean it before doing the actual write */
-		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+		gpa_offset = data & ~(PAGE_MASK | 1);

 		/* Check that the address is 32-byte aligned. */
-		if (vcpu->arch.time_offset &
-				(sizeof(struct pvclock_vcpu_time_info) - 1))
+		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
 			break;

-		vcpu->arch.time_page =
-				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
-		if (is_error_page(vcpu->arch.time_page)) {
-			kvm_release_page_clean(vcpu->arch.time_page);
-			vcpu->arch.time_page = NULL;
-		}
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		     &vcpu->arch.pv_time, data & ~1ULL))
+			vcpu->arch.pv_time_enabled = false;
+		else
+			vcpu->arch.pv_time_enabled = true;
+
 		break;
 	}
 	case MSR_KVM_ASYNC_PF_EN:
@@ -6554,6 +6542,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;

+	vcpu->arch.pv_time_enabled = false;
 	kvm_async_pf_hash_reset(vcpu);

 	return 0;
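The core of the fix is the pattern visible in the hunks above: instead of pinning the page once with gfn_to_page() and writing through a kernel mapping indefinitely, the GPA is registered in a gfn_to_hva_cache when the MSR is written, and every update goes through kvm_write_guest_cached(), which revalidates the cached translation against the current memslot layout. Below is a condensed sketch of that pattern using the 3.2-era signatures shown in this patch; kvm_gfn_to_hva_cache_init() and kvm_write_guest_cached() are the real KVM helpers from virt/kvm/kvm_main.c, but the wrapper functions are illustrative only.

struct gfn_to_hva_cache cache;

/* Called once, when the guest programs the MSR: translate the GPA and
 * remember both the host virtual address and the memslot generation. */
int clock_enable(struct kvm *kvm, gpa_t gpa)
{
	return kvm_gfn_to_hva_cache_init(kvm, &cache, gpa);
}

/* Called on every clock update: kvm_write_guest_cached() checks the
 * memslot generation first, so if userspace has remapped guest memory
 * via KVM_SET_USER_MEMORY_REGION the write is re-resolved against the
 * new layout (or fails with -EFAULT) instead of landing in re-purposed
 * host memory. */
int clock_update(struct kvm *kvm, struct pvclock_vcpu_time_info *hv)
{
	return kvm_write_guest_cached(kvm, &cache, hv, sizeof(*hv));
}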