Commit 0cb9c3c8 authored by Marc Zyngier

KVM: arm64: PMU: Add counter_index_to_*reg() helpers

In order to reduce the boilerplate code, add two helpers that map a
counter index to the corresponding counter register (resp. the event
type register) in the vcpu register file.
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-8-maz@kernel.org
parent 0f1e172b
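
For illustration, here is a minimal standalone sketch of the two helpers and the index-to-register mapping they encode. The enum values below are hypothetical stand-ins for the vcpu sysreg file layout, not the kernel's actual encodings; only ARMV8_PMU_CYCLE_IDX (31, the cycle counter index) matches the real definition.

/*
 * Standalone sketch of the helpers added by this commit.
 * Register indices are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_CYCLE_IDX	31	/* cycle counter index, as in the kernel */

enum {
	PMEVCNTR0_EL0	= 0,	/* first event counter register (stand-in) */
	PMCCNTR_EL0	= 32,	/* cycle counter register (stand-in) */
	PMEVTYPER0_EL0	= 33,	/* first event type register (stand-in) */
	PMCCFILTR_EL0	= 65,	/* cycle counter filter register (stand-in) */
};

static uint32_t counter_index_to_reg(uint64_t idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static uint32_t counter_index_to_evtreg(uint64_t idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

int main(void)
{
	/* Event counter 3 maps to its own counter/event type registers... */
	printf("idx 3:  reg=%u evtreg=%u\n",
	       counter_index_to_reg(3), counter_index_to_evtreg(3));
	/* ...while the cycle counter maps to PMCCNTR_EL0/PMCCFILTR_EL0 */
	printf("idx 31: reg=%u evtreg=%u\n",
	       counter_index_to_reg(31), counter_index_to_evtreg(31));
	return 0;
}

With the mapping centralized, every open-coded "(idx == ARMV8_PMU_CYCLE_IDX) ? ... : ... + idx" ternary in the diff below collapses to a single helper call.
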
@@ -77,6 +77,16 @@ static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
 	return container_of(vcpu_arch, struct kvm_vcpu, arch);
 }
 
+static u32 counter_index_to_reg(u64 idx)
+{
+	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
+}
+
+static u32 counter_index_to_evtreg(u64 idx)
+{
+	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
+}
+
 /**
  * kvm_pmu_get_counter_value - get PMU counter value
  * @vcpu: The vcpu pointer
@@ -91,8 +101,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return 0;
 
-	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
+	reg = counter_index_to_reg(select_idx);
 	counter = __vcpu_sys_reg(vcpu, reg);
 
 	/*
@@ -122,8 +131,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-		? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+	reg = counter_index_to_reg(select_idx);
 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
 
 	/* Recreate the perf event to reflect the updated sample_period */
@@ -158,10 +166,7 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 
 	val = kvm_pmu_get_counter_value(vcpu, pmc->idx);
 
-	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		reg = PMCCNTR_EL0;
-	else
-		reg = PMEVCNTR0_EL0 + pmc->idx;
+	reg = counter_index_to_reg(pmc->idx);
 
 	__vcpu_sys_reg(vcpu, reg) = val;
 
@@ -404,16 +409,16 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 		u64 type, reg;
 
 		/* Filter on event type */
-		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
 		type &= kvm_pmu_event_mask(vcpu->kvm);
 		if (type != event)
 			continue;
 
 		/* Increment this counter */
-		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
 		if (!kvm_pmu_idx_is_64bit(vcpu, i))
 			reg = lower_32_bits(reg);
-		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
 
 		/* No overflow? move on */
 		if (kvm_pmu_idx_has_64bit_overflow(vcpu, i) ? reg : lower_32_bits(reg))
@@ -549,8 +554,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 	struct perf_event_attr attr;
 	u64 eventsel, counter, reg, data;
 
-	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
+	reg = counter_index_to_evtreg(select_idx);
 	data = __vcpu_sys_reg(vcpu, reg);
 
 	kvm_pmu_stop_counter(vcpu, pmc);
@@ -632,8 +636,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |= kvm_pmu_event_mask(vcpu->kvm);
 
-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-		? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
+	reg = counter_index_to_evtreg(select_idx);
 	__vcpu_sys_reg(vcpu, reg) = data & mask;
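
As an aside, here is a minimal sketch of the 32-bit wrap behaviour visible in the kvm_pmu_counter_increment() hunk above. lower_32_bits() mirrors the kernel macro of the same name; the per-counter width decision (kvm_pmu_idx_is_64bit() in the kernel, which depends on the counter type and PMCR configuration) is modeled here as a plain flag.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's lower_32_bits(): keep only bits [31:0] */
static inline uint32_t lower_32_bits(uint64_t v)
{
	return (uint32_t)v;
}

/* Increment a counter value, wrapping at 32 bits unless it is 64-bit wide */
static uint64_t increment_counter(uint64_t reg, bool is_64bit)
{
	reg++;
	if (!is_64bit)
		reg = lower_32_bits(reg);
	return reg;
}

int main(void)
{
	uint64_t at_edge = 0xffffffffULL;	/* counter at the 32-bit boundary */

	/* A 32-bit counter wraps to 0 (which is what triggers an overflow) */
	printf("32-bit: 0x%llx\n",
	       (unsigned long long)increment_counter(at_edge, false));

	/* A 64-bit counter carries into the upper half instead */
	printf("64-bit: 0x%llx\n",
	       (unsigned long long)increment_counter(at_edge, true));
	return 0;
}
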