Commit d1947bc4 authored by Andrew Murray, committed by Marc Zyngier

arm64: arm_pmu: Add !VHE support for exclude_host/exclude_guest attributes

Add support for the :G and :H attributes in perf by handling the
exclude_host/exclude_guest event attributes.
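For context, the :G and :H modifiers roughly translate into the exclude bits of struct perf_event_attr; a minimal sketch of a guest-only event as this driver would see it (the mapping is assumed from the perf tool's usual behaviour, not taken from this patch):

        /*
         * Sketch: ":G" is commonly mapped to exclude_host = 1,
         * ":H" to exclude_guest = 1.
         */
        struct perf_event_attr attr = {
                .type         = PERF_TYPE_HARDWARE,
                .config       = PERF_COUNT_HW_CPU_CYCLES,
                .exclude_host = 1,      /* count only while a guest runs (:G) */
        };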

We notify KVM of the counters that we wish to be enabled or disabled on
guest entry/exit, and therefore defer starting or stopping those events
based on their event attributes.
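As a rough illustration of what "deferred" means here (the actual helper, kvm_pmu_counter_deferred(), comes from the parent commit and may differ), a counter is left for the EL2 switch code to start whenever the host is excluded on a !VHE system:

        /*
         * Sketch only: on a non-VHE host, events that exclude the host are
         * not enabled by the PMU driver; the hyp switch code turns them on
         * and off around guest entry/exit instead.
         */
        #define kvm_pmu_counter_deferred(attr)                  \
                ({                                              \
                        !has_vhe() && (attr)->exclude_host;     \
                })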

With !VHE we switch the counters between host and guest at EL2. By
filtering out EL2 for exclude_host, we can prevent counters from counting
host events around guest entry/exit when :G is used. When !exclude_hv is
used there is a small blackout window at guest entry/exit where host
events are not captured.
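To illustrate the EL2 side described above (this is not part of this patch, and the function name below is made up), the non-VHE hyp switch code could flip the deferred counters on guest entry by writing the host/guest bitmaps, tracked via kvm_set_pmu_events()/kvm_clr_pmu_events(), to the PMU count-enable set/clear registers:

        /*
         * Illustrative sketch -- the real switch code lives in the companion
         * KVM patches. events_host/events_guest are the bitmaps of counters
         * that must only run while the host/guest is executing.
         */
        static void __hyp_text __pmu_switch_to_guest(struct kvm_pmu_events *pmu)
        {
                if (pmu->events_host)
                        write_sysreg(pmu->events_host, pmcntenclr_el0);
                if (pmu->events_guest)
                        write_sysreg(pmu->events_guest, pmcntenset_el0);
        }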
Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent eb41238c
@@ -26,6 +26,7 @@
 
 #include <linux/acpi.h>
 #include <linux/clocksource.h>
+#include <linux/kvm_host.h>
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
@@ -528,11 +529,21 @@ static inline int armv8pmu_enable_counter(int idx)
 
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
+        struct perf_event_attr *attr = &event->attr;
         int idx = event->hw.idx;
+        u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
-        armv8pmu_enable_counter(idx);
         if (armv8pmu_event_is_chained(event))
-                armv8pmu_enable_counter(idx - 1);
+                counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+        kvm_set_pmu_events(counter_bits, attr);
+
+        /* We rely on the hypervisor switch code to enable guest counters */
+        if (!kvm_pmu_counter_deferred(attr)) {
+                armv8pmu_enable_counter(idx);
+                if (armv8pmu_event_is_chained(event))
+                        armv8pmu_enable_counter(idx - 1);
+        }
 }
 
 static inline int armv8pmu_disable_counter(int idx)
@@ -545,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx)
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
+        struct perf_event_attr *attr = &event->attr;
         int idx = hwc->idx;
+        u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
         if (armv8pmu_event_is_chained(event))
-                armv8pmu_disable_counter(idx - 1);
-        armv8pmu_disable_counter(idx);
+                counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+        kvm_clr_pmu_events(counter_bits);
+
+        /* We rely on the hypervisor switch code to disable guest counters */
+        if (!kvm_pmu_counter_deferred(attr)) {
+                if (armv8pmu_event_is_chained(event))
+                        armv8pmu_disable_counter(idx - 1);
+                armv8pmu_disable_counter(idx);
+        }
 }
 
 static inline int armv8pmu_enable_intens(int idx)
@@ -829,11 +850,16 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
                 if (!attr->exclude_kernel)
                         config_base |= ARMV8_PMU_INCLUDE_EL2;
         } else {
-                if (attr->exclude_kernel)
-                        config_base |= ARMV8_PMU_EXCLUDE_EL1;
-                if (!attr->exclude_hv)
+                if (!attr->exclude_hv && !attr->exclude_host)
                         config_base |= ARMV8_PMU_INCLUDE_EL2;
         }
+
+        /*
+         * Filter out !VHE kernels and guest kernels
+         */
+        if (attr->exclude_kernel)
+                config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
         if (attr->exclude_user)
                 config_base |= ARMV8_PMU_EXCLUDE_EL0;
@@ -863,6 +889,9 @@ static void armv8pmu_reset(void *info)
                 armv8pmu_disable_intens(idx);
         }
 
+        /* Clear the counters we flip at guest entry/exit */
+        kvm_clr_pmu_events(U32_MAX);
+
         /*
          * Initialize & Reset PMNC. Request overflow interrupt for
          * 64 bit cycle counter but cheat in armv8pmu_write_counter().