Commit c54cdf14, authored by Liang Chen, committed by Radim Krčmář

KVM: x86: optimize steal time calculation

Since accumulate_steal_time is now only called in record_steal_time, it
doesn't quite make sense to put the delta calculation in a separate
function. The function could be called thousands of times before guest
enables the steal time MSR (though the compiler may optimize out this
function call). And after it's enabled, the MSR enable bit is tested twice
every time. Removing the accumulate_steal_time function also avoids the
necessity of having the accum_steal field.
Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
Signed-off-by: Gavin Guo <gavin.guo@canonical.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 05b1159e
...@@ -562,7 +562,6 @@ struct kvm_vcpu_arch { ...@@ -562,7 +562,6 @@ struct kvm_vcpu_arch {
struct { struct {
u64 msr_val; u64 msr_val;
u64 last_steal; u64 last_steal;
u64 accum_steal;
struct gfn_to_hva_cache stime; struct gfn_to_hva_cache stime;
struct kvm_steal_time steal; struct kvm_steal_time steal;
} st; } st;
......
...@@ -2002,22 +2002,8 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu) ...@@ -2002,22 +2002,8 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
vcpu->arch.pv_time_enabled = false; vcpu->arch.pv_time_enabled = false;
} }
/*
 * Snapshot the steal time accrued since the last call.
 *
 * Computes the increase in current->sched_info.run_delay (time this
 * task spent waiting on a runqueue) since the previous snapshot, and
 * stores it — overwriting, not adding to — vcpu->arch.st.accum_steal.
 * Does nothing unless the guest has enabled the steal-time MSR
 * (KVM_MSR_ENABLED bit set in st.msr_val).
 *
 * NOTE(review): this is the function the patch removes; its body is
 * folded directly into record_steal_time so the MSR-enabled bit is
 * tested only once and the accum_steal field becomes unnecessary.
 */
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
{
u64 delta;

if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;

/* Delta of host run_delay since the last snapshot == new steal time. */
delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
vcpu->arch.st.accum_steal = delta;
}
static void record_steal_time(struct kvm_vcpu *vcpu) static void record_steal_time(struct kvm_vcpu *vcpu)
{ {
accumulate_steal_time(vcpu);
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return; return;
...@@ -2025,9 +2011,10 @@ static void record_steal_time(struct kvm_vcpu *vcpu) ...@@ -2025,9 +2011,10 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
return; return;
vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; vcpu->arch.st.steal.steal += current->sched_info.run_delay -
vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
vcpu->arch.st.steal.version += 2; vcpu->arch.st.steal.version += 2;
vcpu->arch.st.accum_steal = 0;
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment