Commit 72bbf935 authored by Ladi Prosek, committed by Paolo Bonzini

KVM: hyperv: define VP assist page helpers

The state related to the VP assist page is still managed by the LAPIC
code in the pv_eoi field.
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 970c0d4b
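
The two helpers added below are not called anywhere yet in this patch. The sketch that follows is hypothetical and not part of the commit; it only illustrates the calling convention a later consumer (for example, code handling Hyper-V enlightenments in nested virtualization) might follow. The helper names and struct hv_vp_assist_page come from the patch itself; the wrapper function is made up for illustration.

/* Hypothetical caller -- illustration only, not part of this patch. */
static bool example_peek_vp_assist_page(struct kvm_vcpu *vcpu)
{
	struct hv_vp_assist_page assist_page;

	/*
	 * Cheap query: the guest has turned on HV_X64_MSR_VP_ASSIST_PAGE
	 * and the pv_eoi backing that stores its gpa is marked enabled.
	 */
	if (!kvm_hv_assist_page_enabled(vcpu))
		return false;

	/*
	 * Cached read of the whole page from guest memory; repeats the
	 * enabled check internally and returns false on a read failure.
	 */
	if (!kvm_hv_get_assist_page(vcpu, &assist_page))
		return false;

	/* assist_page now holds a snapshot the caller can inspect. */
	return true;
}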
arch/x86/kvm/hyperv.c

@@ -693,6 +693,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
 		stimer_cleanup(&hv_vcpu->stimer[i]);
 }
 
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
+		return false;
+	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
+}
+EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
+
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+			    struct hv_vp_assist_page *assist_page)
+{
+	if (!kvm_hv_assist_page_enabled(vcpu))
+		return false;
+	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+				      assist_page, sizeof(*assist_page));
+}
+EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
+
 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
 {
 	struct hv_message *msg = &stimer->msg;
@@ -1078,7 +1096,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 
 		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
 			hv_vcpu->hv_vapic = data;
-			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
+			if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
 				return 1;
 			break;
 		}
@@ -1091,7 +1109,8 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 		hv_vcpu->hv_vapic = data;
 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 		if (kvm_lapic_enable_pv_eoi(vcpu,
-					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
+					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
+					    sizeof(struct hv_vp_assist_page)))
 			return 1;
 		break;
 	}
arch/x86/kvm/hyperv.h

@@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
 
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+			    struct hv_vp_assist_page *assist_page);
+
 static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
 							int timer_index)
 {
arch/x86/kvm/lapic.c

@@ -2644,7 +2644,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
 	return 0;
 }
 
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
 {
 	u64 addr = data & ~KVM_MSR_ENABLED;
 	if (!IS_ALIGNED(addr, 4))
@@ -2654,7 +2654,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-					 addr, sizeof(u8));
+					 addr, len);
 }
 
 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
arch/x86/kvm/lapic.h

@@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 	return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
 }
 
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
 void kvm_lapic_init(void);
 void kvm_lapic_exit(void);
arch/x86/kvm/x86.c

@@ -2477,7 +2477,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 
 	case MSR_KVM_PV_EOI_EN:
-		if (kvm_lapic_enable_pv_eoi(vcpu, data))
+		if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
 			return 1;
 		break;
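
A note on the new length argument (an explanatory reading, not text from the commit): kvm_gfn_to_hva_cache_init() sets up the pv_eoi.data cache for a range of len bytes, and kvm_read_guest_cached() in kvm_hv_get_assist_page() later reads sizeof(*assist_page) bytes through that same cache. The cache therefore has to be initialized for the full VP assist page when the Hyper-V MSR path enables it, while the legacy MSR_KVM_PV_EOI_EN path keeps the single byte it has always used. A minimal sketch of the two initializations side by side; the wrapper names are hypothetical, the callees are the real functions touched by this patch:

/* Illustration only -- the wrappers are hypothetical, the callees are real. */
static int example_enable_pv_eoi_byte(struct kvm_vcpu *vcpu, u64 data)
{
	/* KVM PV EOI: the cache only ever backs a single status byte. */
	return kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8));
}

static int example_enable_vp_assist_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	/* Hyper-V: the same cache must cover the whole VP assist page. */
	return kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
				       sizeof(struct hv_vp_assist_page));
}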