Commit 1f4b34f8 authored by Andrey Smetanin, committed by Paolo Bonzini

kvm/x86: Hyper-V SynIC timers

Per the Hyper-V specification (and as required by Hyper-V-aware guests),
SynIC provides 4 per-vCPU timers.  Each timer is programmed via a pair
of MSRs (config and count), and signals expiration by delivering a
message of a special format to the configured SynIC message slot and
triggering the corresponding synthetic interrupt.
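
(Illustrative, not part of this patch: a guest might arm SynIC timer 0
roughly as follows.  The MSR indices follow the Hyper-V spec, the count
is in 100ns units, and the SINT number and period here are arbitrary.)

	/* Hypothetical guest-side sketch: arm SynIC timer 0 as a periodic
	 * timer firing every 10ms, delivered via synthetic interrupt SINT3. */
	#define HV_X64_MSR_STIMER0_CONFIG	0x400000B0
	#define HV_X64_MSR_STIMER0_COUNT	0x400000B1
	#define STIMER_ENABLE			(1ULL << 0)
	#define STIMER_PERIODIC			(1ULL << 1)

	static void guest_arm_stimer0(void)
	{
		u64 config = STIMER_ENABLE | STIMER_PERIODIC |
			     ((u64)3 << 16);		/* SINT3 in config bits 19:16 */

		wrmsrl(HV_X64_MSR_STIMER0_COUNT, 100000);	/* 10ms in 100ns units */
		wrmsrl(HV_X64_MSR_STIMER0_CONFIG, config);
	}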

Note: as implemented by this patch, all periodic timers are "lazy"
(i.e. if the vCPU isn't scheduled for longer than the timer period,
the intervening timer events are lost), regardless of the corresponding
configuration MSR.  If deemed necessary, a "catch up" mode (in which
the timer period is shortened until the timer catches up) will be
implemented later.

Changes v2:
* Use remainder to calculate periodic timer expiration time
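
(Illustrative sketch of the remainder-based rearm mentioned above;
div64_u64_rem() is from linux/math64.h, while the helper name and the
100ns time base are assumptions, not the verbatim patch.)

	/* Hypothetical helper: advance a periodic timer's expiration past
	 * "now" while keeping its phase, instead of replaying missed ticks.
	 * All values are in the same 100ns time-reference units. */
	static u64 stimer_next_exp_time(u64 exp_time, u64 period, u64 now)
	{
		u64 remainder;

		if (now < exp_time)
			return exp_time;	/* not expired yet */

		div64_u64_rem(now - exp_time, period, &remainder);
		return now + (period - remainder);
	}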
Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
CC: Gleb Natapov <gleb@kernel.org>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: "K. Y. Srinivasan" <kys@microsoft.com>
CC: Haiyang Zhang <haiyangz@microsoft.com>
CC: Vitaly Kuznetsov <vkuznets@redhat.com>
CC: Roman Kagan <rkagan@virtuozzo.com>
CC: Denis V. Lunev <den@openvz.org>
CC: qemu-devel@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 765eaa0f
...@@ -379,6 +379,17 @@ struct kvm_mtrr {
	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	u64 config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};
/* Hyper-V synthetic interrupt controller (SynIC)*/
struct kvm_vcpu_hv_synic {
	u64 version;
...@@ -398,6 +409,8 @@ struct kvm_vcpu_hv {
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
};

struct kvm_vcpu_arch {
...
...@@ -355,4 +355,10 @@ struct hv_timer_message_payload {
	__u64 delivery_time; /* When the message was delivered */
};
#define HV_STIMER_ENABLE (1ULL << 0)
#define HV_STIMER_PERIODIC (1ULL << 1)
#define HV_STIMER_LAZY (1ULL << 2)
#define HV_STIMER_AUTOENABLE (1ULL << 3)
#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F)
#endif
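
(Illustrative use of the config bits above; the helper name and the
clear-on-bad-SINT policy are assumptions, not necessarily what the
collapsed implementation does.)

	/* Hypothetical host-side check on a guest write to STIMERx_CONFIG:
	 * an enabled timer routed to SINT 0 can never deliver its message,
	 * so drop the enable bit rather than arm a dead timer. */
	static u64 stimer_sanitize_config(u64 config)
	{
		if ((config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
			config &= ~HV_STIMER_ENABLE;
		return config;
	}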
This diff is collapsed.
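
(The timer implementation itself sits in the collapsed diff; below is
an assumed shape of kvm_hv_process_stimers() for orientation, not the
verbatim patch.  stimer_expiration() is a hypothetical stand-in for the
actual expiry handling.)

	/* Hypothetical: runs on the vcpu_enter_guest() path when
	 * KVM_REQ_HV_STIMER is pending (see the x86.c hunk below). */
	void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
	{
		struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
		int i;

		for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
			if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap))
				stimer_expiration(&hv_vcpu->stimer[i]);
	}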
...@@ -59,5 +59,29 @@ void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
							int timer_index)
{
	return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index];
}

static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu_to_vcpu(hv_vcpu);
}

static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
#endif
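
(Illustrative use of the helpers above: an hrtimer callback can recover
the owning vCPU from a bare stimer pointer, because stimer -
stimer->index points back at stimer[0], from which stimer_to_vcpu()'s
container_of() finds the enclosing kvm_vcpu_hv.  The callback name is
an assumption.)

	/* Hypothetical hrtimer callback: mark this timer pending and ask
	 * the vCPU to process it before the next guest entry. */
	static enum hrtimer_restart stimer_timer_cb(struct hrtimer *timer)
	{
		struct kvm_vcpu_hv_stimer *stimer;
		struct kvm_vcpu *vcpu;

		stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
		vcpu = stimer_to_vcpu(stimer);

		set_bit(stimer->index, vcpu->arch.hyperv.stimer_pending_bitmap);
		kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
		return HRTIMER_NORESTART;
	}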
...@@ -967,6 +967,7 @@ static u32 emulated_msrs[] = {
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,
...@@ -2199,6 +2200,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
		return kvm_hv_set_msr_common(vcpu, msr, data,
					     msr_info->host_initiated);
	case MSR_IA32_BBL_CR_CTL3:
...@@ -2403,6 +2405,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
		return kvm_hv_get_msr_common(vcpu,
					     msr_info->index, &msr_info->data);
		break;
...@@ -6489,6 +6492,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
			kvm_hv_process_stimers(vcpu);
	}

	/*
...@@ -7649,6 +7654,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kvm_hv_vcpu_uninit(vcpu);
	kvm_pmu_destroy(vcpu);
	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
...@@ -8043,6 +8049,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
	    kvm_cpu_has_interrupt(vcpu))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

	return false;
}
...
...@@ -144,6 +144,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_IOAPIC_EOI_EXIT 28
#define KVM_REQ_HV_RESET 29
#define KVM_REQ_HV_EXIT 30
#define KVM_REQ_HV_STIMER 31

#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
...