Commit f5132b01 authored by Gleb Natapov, committed by Avi Kivity

KVM: Expose a version 2 architectural PMU to guests

Use perf_events to emulate an architectural PMU, version 2.

Based on PMU version 1 emulation by Avi Kivity.

[avi: adjust for cpuid.c]
[jan: fix anonymous field initialization for older gcc]
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 89342082
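For context: the "architectural PMU" is the model-independent performance
monitoring interface Intel enumerates through CPUID leaf 0xA; version 2 is
the level that adds the fixed-function counters and the
IA32_PERF_GLOBAL_CTRL/STATUS/OVF_CTRL MSRs. A minimal guest-side probe (not
part of this patch; assumes guest kernel code) shows what this commit makes
visible:

	/* Decode CPUID.0AH the way a guest sees the emulated PMU. */
	#include <linux/kernel.h>
	#include <asm/processor.h>	/* cpuid() */

	static void probe_arch_pmu(void)
	{
		unsigned int eax, ebx, ecx, edx;

		cpuid(0x0a, &eax, &ebx, &ecx, &edx);

		pr_info("PMU version:     %u\n", eax & 0xff);
		pr_info("GP counters:     %u\n", (eax >> 8) & 0xff);
		pr_info("GP counter bits: %u\n", (eax >> 16) & 0xff);
		if ((eax & 0xff) >= 2)	/* v2: fixed counters present */
			pr_info("fixed counters:  %u\n", edx & 0x1f);
	}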
arch/x86/include/asm/kvm_host.h

@@ -16,10 +16,12 @@
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
 #include <linux/cpumask.h>
+#include <linux/irq_work.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
+#include <linux/perf_event.h>
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
@@ -291,6 +293,37 @@ struct kvm_mmu {
 	u64 pdptrs[4]; /* pae */
 };
 
+enum pmc_type {
+	KVM_PMC_GP = 0,
+	KVM_PMC_FIXED,
+};
+
+struct kvm_pmc {
+	enum pmc_type type;
+	u8 idx;
+	u64 counter;
+	u64 eventsel;
+	struct perf_event *perf_event;
+	struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+	unsigned nr_arch_gp_counters;
+	unsigned nr_arch_fixed_counters;
+	unsigned available_event_types;
+	u64 fixed_ctr_ctrl;
+	u64 global_ctrl;
+	u64 global_status;
+	u64 global_ovf_ctrl;
+	u64 counter_bitmask[2];
+	u64 global_ctrl_mask;
+	u8 version;
+	struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
+	struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+	struct irq_work irq_work;
+	u64 reprogram_pmi;
+};
+
 struct kvm_vcpu_arch {
 	/*
 	 * rip and regs accesses must go through
@@ -424,6 +457,8 @@ struct kvm_vcpu_arch {
 	unsigned access;
 	gfn_t mmio_gfn;
 
+	struct kvm_pmu pmu;
+
 	/* used for guest single stepping over the given code position */
 	unsigned long singlestep_rip;
@@ -891,4 +926,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
+int kvm_is_in_guest(void);
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+
 #endif /* _ASM_X86_KVM_HOST_H */
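The irq_work member and the KVM_REQ_PMU/KVM_REQ_PMI pair exist because perf
delivers counter overflow in NMI context, where a guest PMI cannot be
injected directly. A plausible shape of the overflow callback, purely as a
sketch (the helper name and details are assumptions; the real body is in the
collapsed pmu.c diff below):

	static void kvm_perf_overflow(struct perf_event *perf_event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs)
	{
		struct kvm_pmc *pmc = perf_event->overflow_handler_context;
		struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

		/* Record the overflow in the guest-visible status MSR. */
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		/* NMI-safe: just sets a request bit for vcpu_enter_guest(). */
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
		/* The irq_work callback kicks the vcpu from a sane context. */
		irq_work_queue(&pmu->irq_work);
	}

vcpu_enter_guest() then consumes the request and calls kvm_deliver_pmi(), as
one of the x86.c hunks further down shows.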
arch/x86/kvm/Kconfig

@@ -35,6 +35,7 @@ config KVM
 	select KVM_MMIO
 	select TASKSTATS
 	select TASK_DELAY_ACCT
+	select PERF_EVENTS
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions. You will need a fairly recent
arch/x86/kvm/Makefile

@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(addprefix ../../../virt/kvm/, async_pf.o)
 
 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-			   i8254.o timer.o cpuid.o
+			   i8254.o timer.o cpuid.o pmu.o
 
 kvm-intel-y		+= vmx.o
 kvm-amd-y		+= svm.o
arch/x86/kvm/cpuid.c

@@ -45,6 +45,8 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu)
 		else
 			apic->lapic_timer.timer_mode_mask = 1 << 17;
 	}
+
+	kvm_pmu_cpuid_update(vcpu);
 }
 
 static int is_efer_nx(void)
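Hooking kvm_pmu_cpuid_update() here means the virtual PMU resizes itself
whenever userspace changes the guest's CPUID. The body is in the collapsed
pmu.c diff, but it plausibly does something like the following with CPUID
leaf 0xA (a sketch under that assumption, not the committed code):

	void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
	{
		struct kvm_pmu *pmu = &vcpu->arch.pmu;
		struct kvm_cpuid_entry2 *entry;

		pmu->version = 0;
		pmu->nr_arch_gp_counters = 0;
		pmu->nr_arch_fixed_counters = 0;

		entry = kvm_find_cpuid_entry(vcpu, 0x0a, 0);
		if (!entry)
			return;		/* no leaf 0xA: no PMU for this guest */

		pmu->version = entry->eax & 0xff;
		/* Clamp what userspace advertised to what KVM can back. */
		pmu->nr_arch_gp_counters = min_t(unsigned,
				(entry->eax >> 8) & 0xff, X86_PMC_MAX_GENERIC);
		/* Wraparound mask derived from the advertised bit width. */
		pmu->counter_bitmask[KVM_PMC_GP] =
				((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
		if (pmu->version > 1)
			pmu->nr_arch_fixed_counters = min_t(unsigned,
					entry->edx & 0x1f, X86_PMC_MAX_FIXED);
	}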
arch/x86/kvm/pmu.c (new file — diff collapsed in this view)
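The collapsed file carries the actual emulation. Per the commit message, its
core job is translating guest PERFEVTSELx programming into host perf_events.
A sketch of that central step (helper name and field choices are
assumptions; only the perf API calls are known-good), reusing the overflow
callback sketched earlier:

	static void reprogram_counter(struct kvm_pmc *pmc, u64 eventsel)
	{
		struct perf_event *event;
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_RAW,
			.size		= sizeof(attr),
			/* Pass event select + unit mask etc. through raw. */
			.config		= eventsel & X86_RAW_EVENT_MASK,
			.exclude_user	= !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			.exclude_kernel	= !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
		};

		if (pmc->perf_event) {	/* drop any previous mapping */
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
		if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE))
			return;

		/* cpu == -1, task == current: follow the vcpu thread. */
		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_perf_overflow, pmc);
		if (!IS_ERR(event))
			pmc->perf_event = event;
	}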
arch/x86/kvm/x86.c

@@ -1602,8 +1602,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	 * which we perfectly emulate ;-). Any other value should be at least
	 * reported, some guests depend on them.
	 */
-	case MSR_P6_EVNTSEL0:
-	case MSR_P6_EVNTSEL1:
 	case MSR_K7_EVNTSEL0:
 	case MSR_K7_EVNTSEL1:
 	case MSR_K7_EVNTSEL2:
@@ -1615,8 +1613,6 @@
	/* at least RHEL 4 unconditionally writes to the perfctr registers,
	 * so we ignore writes to make it happy.
	 */
-	case MSR_P6_PERFCTR0:
-	case MSR_P6_PERFCTR1:
 	case MSR_K7_PERFCTR0:
 	case MSR_K7_PERFCTR1:
 	case MSR_K7_PERFCTR2:
@@ -1653,6 +1649,8 @@
 	default:
 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 			return xen_hvm_config(vcpu, data);
+		if (kvm_pmu_msr(vcpu, msr))
+			return kvm_pmu_set_msr(vcpu, msr, data);
 		if (!ignore_msrs) {
 			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
 				msr, data);
@@ -1815,10 +1813,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_K8_SYSCFG:
 	case MSR_K7_HWCR:
 	case MSR_VM_HSAVE_PA:
-	case MSR_P6_PERFCTR0:
-	case MSR_P6_PERFCTR1:
-	case MSR_P6_EVNTSEL0:
-	case MSR_P6_EVNTSEL1:
 	case MSR_K7_EVNTSEL0:
 	case MSR_K7_PERFCTR0:
 	case MSR_K8_INT_PENDING_MSG:
@@ -1929,6 +1923,8 @@
 		data = 0xbe702111;
 		break;
 	default:
+		if (kvm_pmu_msr(vcpu, msr))
+			return kvm_pmu_get_msr(vcpu, msr, pdata);
 		if (!ignore_msrs) {
 			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
 			return 1;
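The net effect of these four hunks: the P6-style event select and counter
MSRs stop being silently swallowed and instead route to pmu.c, which gives
them real semantics. A guest-side example of what now works (not in this
patch; standard guest kernel code counting retired instructions on GP
counter 0 of a version-2 PMU):

	#include <asm/msr.h>

	#define EVTSEL_EN	(1ULL << 22)
	#define EVTSEL_OS	(1ULL << 17)
	#define EVTSEL_USR	(1ULL << 16)
	#define INST_RETIRED	0x00c0	/* architectural event, umask 0 */

	static u64 count_instructions(void)
	{
		u64 count;

		wrmsrl(MSR_IA32_PERFCTR0, 0);		/* 0xc1 */
		wrmsrl(MSR_P6_EVNTSEL0,			/* 0x186 */
		       INST_RETIRED | EVTSEL_USR | EVTSEL_OS | EVTSEL_EN);
		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 1);	/* 0x38f, v2 only */

		/* ... workload to measure ... */

		rdmsrl(MSR_IA32_PERFCTR0, count);
		return count;
	}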
@@ -4650,7 +4646,7 @@ static void kvm_timer_init(void)
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
 
-static int kvm_is_in_guest(void)
+int kvm_is_in_guest(void)
 {
 	return __this_cpu_read(current_vcpu) != NULL;
 }
@@ -5114,6 +5110,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		process_nmi(vcpu);
 		req_immediate_exit =
 			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
+		if (kvm_check_request(KVM_REQ_PMU, vcpu))
+			kvm_handle_pmu_event(vcpu);
+		if (kvm_check_request(KVM_REQ_PMI, vcpu))
+			kvm_deliver_pmi(vcpu);
 	}
 
 	r = kvm_mmu_reload(vcpu);
@@ -5850,6 +5850,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_async_pf_hash_reset(vcpu);
 	vcpu->arch.apf.halted = false;
 
+	kvm_pmu_reset(vcpu);
+
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
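Calling kvm_pmu_reset() here mirrors what real hardware does on reset: all
counters stop and the PMU MSRs read as zero. A rough sketch of the implied
semantics, assuming the structure above (the committed body is in the
collapsed pmu.c; fixed counters would be handled the same way):

	void kvm_pmu_reset(struct kvm_vcpu *vcpu)
	{
		struct kvm_pmu *pmu = &vcpu->arch.pmu;
		int i;

		irq_work_sync(&pmu->irq_work);	/* no PMI kick in flight */
		for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
			struct kvm_pmc *pmc = &pmu->gp_counters[i];

			if (pmc->perf_event) {
				perf_event_release_kernel(pmc->perf_event);
				pmc->perf_event = NULL;
			}
			pmc->counter = pmc->eventsel = 0;
		}
		pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
			pmu->global_ovf_ctrl = 0;
	}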
@@ -5934,6 +5936,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		goto fail_free_mce_banks;
 
 	kvm_async_pf_hash_reset(vcpu);
+	kvm_pmu_init(vcpu);
 
 	return 0;
 fail_free_mce_banks:
@@ -5952,6 +5955,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 	int idx;
 
+	kvm_pmu_destroy(vcpu);
 	kfree(vcpu->arch.mce_banks);
 	kvm_free_lapic(vcpu);
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
include/linux/kvm_host.h

@@ -52,6 +52,8 @@
 #define KVM_REQ_STEAL_UPDATE      13
 #define KVM_REQ_NMI               14
 #define KVM_REQ_IMMEDIATE_EXIT    15
+#define KVM_REQ_PMU               16
+#define KVM_REQ_PMI               17
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0