Commit 9eec50b8 authored by Andrey Smetanin, committed by Paolo Bonzini

kvm/x86: Hyper-V HV_X64_MSR_VP_RUNTIME support

The HV_X64_MSR_VP_RUNTIME MSR is used by the guest to read
"the time the virtual processor consumes running guest code,
and the time the associated logical processor spends running
hypervisor code on behalf of that guest."

This time is calculated by task_cputime_adjusted() for the vcpu task.

This is necessary to support loading winhv.sys in the guest, which in
turn is required for Windows VMBus support.
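
For illustration only (not part of this patch): a Linux guest running
with Hyper-V enlightenments could read the accumulated runtime roughly
as below, using the existing rdmsrl() helper and the
HV_X64_MSR_VP_RUNTIME definition this patch adds; the vp_runtime_ns()
wrapper is a hypothetical name.

    #include <asm/msr.h>       /* rdmsrl() */
    #include <asm/hyperv.h>    /* HV_X64_MSR_VP_RUNTIME */

    static u64 vp_runtime_ns(void)
    {
            u64 runtime_100ns;

            /* Each unit accounts 100ns of guest time plus hypervisor
             * time spent on behalf of this virtual processor. */
            rdmsrl(HV_X64_MSR_VP_RUNTIME, runtime_100ns);
            return runtime_100ns * 100;
    }
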
Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 11c4b1ca
@@ -374,6 +374,7 @@ struct kvm_mtrr {
 /* Hyper-V per vcpu emulation context */
 struct kvm_vcpu_hv {
 	u64 hv_vapic;
+	s64 runtime_offset;
 };
 
 struct kvm_vcpu_arch {
...
@@ -156,6 +156,9 @@
 /* MSR used to reset the guest OS. */
 #define HV_X64_MSR_RESET			0x40000003
 
+/* MSR used to provide vcpu runtime in 100ns units */
+#define HV_X64_MSR_VP_RUNTIME			0x40000010
+
 /* MSR used to read the per-partition time reference counter */
 #define HV_X64_MSR_TIME_REF_COUNT		0x40000020
...
@@ -178,7 +178,16 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 	return 0;
 }
 
-static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+/* Calculate cpu time spent by current task in 100ns units */
+static u64 current_task_runtime_100ns(void)
+{
+	cputime_t utime, stime;
+
+	task_cputime_adjusted(current, &utime, &stime);
+
+	return div_u64(cputime_to_nsecs(utime + stime), 100);
+}
+
+static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
 	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
@@ -212,6 +221,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
 	case HV_X64_MSR_TPR:
 		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+	case HV_X64_MSR_VP_RUNTIME:
+		if (!host)
+			return 1;
+		hv->runtime_offset = data - current_task_runtime_100ns();
+		break;
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
 			    msr, data);
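
The !host check above rejects guest-initiated writes; only userspace
may set this MSR (host-initiated writes via the KVM_SET_MSRS ioctl,
e.g. when restoring state after migration). A sketch of such a restore
from a VMM follows; restore_vp_runtime(), vcpu_fd and
saved_runtime_100ns are hypothetical names, while KVM_SET_MSRS and the
kvm_msrs/kvm_msr_entry structures are the existing KVM userspace API.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <asm/hyperv.h>    /* HV_X64_MSR_VP_RUNTIME */

    static int restore_vp_runtime(int vcpu_fd, __u64 saved_runtime_100ns)
    {
            struct {
                    struct kvm_msrs hdr;
                    struct kvm_msr_entry entry;
            } msrs = {
                    .hdr   = { .nmsrs = 1 },
                    .entry = { .index = HV_X64_MSR_VP_RUNTIME,
                               .data  = saved_runtime_100ns },
            };

            /* Host-initiated write: KVM computes runtime_offset so that
             * an immediate guest read returns saved_runtime_100ns. */
            return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
    }
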
@@ -287,6 +301,9 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case HV_X64_MSR_APIC_ASSIST_PAGE:
 		data = hv->hv_vapic;
 		break;
+	case HV_X64_MSR_VP_RUNTIME:
+		data = current_task_runtime_100ns() + hv->runtime_offset;
+		break;
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
 		return 1;
@@ -305,7 +322,7 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 		mutex_unlock(&vcpu->kvm->lock);
 		return r;
 	} else
-		return kvm_hv_set_msr(vcpu, msr, data);
+		return kvm_hv_set_msr(vcpu, msr, data, host);
 }
 
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
...
@@ -954,6 +954,7 @@ static u32 emulated_msrs[] = {
 	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
 	HV_X64_MSR_RESET,
 	HV_X64_MSR_VP_INDEX,
+	HV_X64_MSR_VP_RUNTIME,
 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
 	MSR_KVM_PV_EOI_EN,
...
@@ -444,6 +444,7 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	*ut = p->utime;
 	*st = p->stime;
 }
+EXPORT_SYMBOL_GPL(task_cputime_adjusted);
 
 void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
@@ -652,6 +653,7 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	task_cputime(p, &cputime.utime, &cputime.stime);
 	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
+EXPORT_SYMBOL_GPL(task_cputime_adjusted);
 
 void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
...
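
Taken together, the write and read paths keep a simple invariant: a
host write of value V makes an immediate guest read return roughly V,
after which the value advances with the cputime of the vcpu task. A
minimal guest-side smoke test of the resulting monotonicity is
sketched below; check_vp_runtime_monotonic() and the loop bound are
arbitrary, hypothetical choices.

    #include <linux/bug.h>     /* WARN_ON() */
    #include <asm/msr.h>
    #include <asm/hyperv.h>

    static void check_vp_runtime_monotonic(void)
    {
            u64 before, after;
            volatile unsigned long spin;

            rdmsrl(HV_X64_MSR_VP_RUNTIME, before);
            for (spin = 0; spin < 100000000UL; spin++)
                    ;       /* burn guest cycles so the runtime advances */
            rdmsrl(HV_X64_MSR_VP_RUNTIME, after);

            /* The counter is in 100ns units and must not go backwards. */
            WARN_ON(after < before);
    }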