Commit ef3f3980 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: hyper-v: Rename vcpu_to_hv_vcpu() to to_hv_vcpu()

vcpu_to_hv_vcpu()'s argument is almost always 'vcpu' so there's
no need to have an additional prefix. Also, this makes the code
more consistent with vmx/svm where to_vmx()/to_svm() are being
used.

No functional change intended.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-5-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cb5b9161
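For reference, the naming convention this rename follows is the one already used by the VMX/SVM accessors. The sketch below is illustrative only, not verbatim kernel source: the to_hv_vcpu() body matches the definition visible in the hyperv.h hunk further down, while the to_vmx() body is a simplified approximation of the helper in the vmx code, with struct contents omitted.

/* Illustrative sketch of the accessor naming pattern, not verbatim kernel
 * source. to_vmx()/to_svm() recover the vendor-specific vcpu wrapper that
 * embeds the generic struct kvm_vcpu; to_hv_vcpu() simply points at the
 * Hyper-V state stored inside the arch-specific part of the vcpu.
 */
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.hyperv;
}

With the rename, call sites read to_hv_vcpu(vcpu)->vp_index instead of vcpu_to_hv_vcpu(vcpu)->vp_index, mirroring the existing to_vmx(vcpu)/to_svm(vcpu) style.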
@@ -142,10 +142,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
 		return NULL;
 
 	vcpu = kvm_get_vcpu(kvm, vpidx);
-	if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
+	if (vcpu && to_hv_vcpu(vcpu)->vp_index == vpidx)
 		return vcpu;
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
+		if (to_hv_vcpu(vcpu)->vp_index == vpidx)
 			return vcpu;
 	return NULL;
 }
@@ -166,7 +166,7 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
-	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	struct kvm_vcpu_hv_stimer *stimer;
 	int gsi, idx;
@@ -317,7 +317,7 @@ static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 		return 1;
 
 	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
-				    vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data);
+				    to_hv_vcpu(vcpu)->vp_index, msr, data);
 	switch (msr) {
 	case HV_X64_MSR_SYNDBG_CONTROL:
 		syndbg->control.control = data;
@@ -379,7 +379,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 	}
 
 	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
-				    vcpu_to_hv_vcpu(vcpu)->vp_index, msr,
+				    to_hv_vcpu(vcpu)->vp_index, msr,
 				    *pdata);
 
 	return 0;
@@ -538,7 +538,7 @@ static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
 
 	set_bit(stimer->index,
-		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
+		to_hv_vcpu(vcpu)->stimer_pending_bitmap);
 	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
 	if (vcpu_kick)
 		kvm_vcpu_kick(vcpu);
@@ -553,7 +553,7 @@ static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
 
 	hrtimer_cancel(&stimer->timer);
 	clear_bit(stimer->index,
-		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
+		  to_hv_vcpu(vcpu)->stimer_pending_bitmap);
 	stimer->msg_pending = false;
 	stimer->exp_time = 0;
 }
@@ -802,7 +802,7 @@ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
 
 void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	struct kvm_vcpu_hv_stimer *stimer;
 	u64 time_now, exp_time;
 	int i;
@@ -832,7 +832,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
 
 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
@@ -883,7 +883,7 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
 
 void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	int i;
 
 	synic_init(&hv_vcpu->synic);
@@ -895,7 +895,7 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
 	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
 }
@@ -1501,7 +1501,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
 
 	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
+		if (test_bit(to_hv_vcpu(vcpu)->vp_index,
 			     (unsigned long *)vp_bitmap))
 			__set_bit(i, vcpu_bitmap);
 	}
...
@@ -50,7 +50,7 @@
 /* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
 #define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)
 
-static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu)
+static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
 {
 	return &vcpu->arch.hyperv;
 }
@@ -100,7 +100,7 @@ bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
 static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
 							int timer_index)
 {
-	return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index];
+	return &to_hv_vcpu(vcpu)->stimer[timer_index];
 }
 
 static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)