Commit 6912ac32 authored by Wei Huang, committed by Paolo Bonzini

KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs

This patch enables AMD guest VMs to access (read/write) the PMU-related MSRs
PERFCTR[0..3] and EVNTSEL[0..3], routing them through the generic vPMU
handlers instead of the old ignore-and-warn stubs.
Reviewed-by: Joerg Roedel <jroedel@suse.de>
Tested-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Wei Huang <wei@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ca724305
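
For context, these are the legacy AMD K7 performance-monitoring MSRs (EVNTSEL0..3 select an event, PERFCTR0..3 hold the counts). A minimal, hypothetical guest-side sketch of the access pattern this patch makes work; the wrmsr/rdmsr wrappers and the event encoding are illustrative, not part of the patch itself:

#include <stdint.h>

#define MSR_K7_EVNTSEL0	0xc0010000	/* event select 0 */
#define MSR_K7_PERFCTR0	0xc0010004	/* counter 0 */

/* Ring-0 MSR accessors; assumed to run inside the guest at CPL0. */
static inline void wrmsr(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr" :: "c"(msr),
		     "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

/* Count retired instructions (AMD event 0xc0) in user and kernel mode. */
static uint64_t count_instructions(void)
{
	wrmsr(MSR_K7_PERFCTR0, 0);
	wrmsr(MSR_K7_EVNTSEL0, 0xc0 | (3u << 16) | (1u << 22)); /* USR|OS|EN */
	/* ... run the workload being measured ... */
	wrmsr(MSR_K7_EVNTSEL0, 0);	/* stop the counter */
	return rdmsr(MSR_K7_PERFCTR0);
}

Before this change the EVNTSEL writes were silently dropped (with at most a vcpu_unimpl warning) and the counter always read back 0; with it, the accesses reach the vPMU code.
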
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2202,36 +2202,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
 		return set_msr_mce(vcpu, msr, data);
 
-	/* Performance counters are not protected by a CPUID bit,
-	 * so we should check all of them in the generic path for the sake of
-	 * cross vendor migration.
-	 * Writing a zero into the event select MSRs disables them,
-	 * which we perfectly emulate ;-). Any other value should be at least
-	 * reported, some guests depend on them.
-	 */
-	case MSR_K7_EVNTSEL0:
-	case MSR_K7_EVNTSEL1:
-	case MSR_K7_EVNTSEL2:
-	case MSR_K7_EVNTSEL3:
-		if (data != 0)
-			vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
-				"0x%x data 0x%llx\n", msr, data);
-		break;
-	/* at least RHEL 4 unconditionally writes to the perfctr registers,
-	 * so we ignore writes to make it happy.
-	 */
-	case MSR_K7_PERFCTR0:
-	case MSR_K7_PERFCTR1:
-	case MSR_K7_PERFCTR2:
-	case MSR_K7_PERFCTR3:
-		vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
-			"0x%x data 0x%llx\n", msr, data);
-		break;
-	case MSR_P6_PERFCTR0:
-	case MSR_P6_PERFCTR1:
-		pr = true;
-	case MSR_P6_EVNTSEL0:
-	case MSR_P6_EVNTSEL1:
+	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
+	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
+		pr = true; /* fall through */
+	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
+	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
 		if (kvm_pmu_is_valid_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 
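
Two things are worth noting in the rewritten switch above: the "case LO ... HI:" arms use GCC's case-range extension (standard practice in kernel code), and the PERFCTR arms deliberately fall through into the EVNTSEL arms after setting pr, so all four ranges share the vPMU dispatch; pr is consumed a few lines below this hunk (not shown) when deciding whether to warn about writes the vPMU rejects. A standalone sketch of that control flow, with hypothetical names, compilable with GCC:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for contiguous MSR indices. */
enum { CTR0, CTR1, CTR2, CTR3, SEL0, SEL1, SEL2, SEL3 };

static void handle_write(int msr, unsigned long data)
{
	bool pr = false;

	switch (msr) {
	case CTR0 ... CTR3:	/* GCC case-range extension */
		pr = true;	/* mark as a counter write... */
		/* fall through ...and share the dispatch below */
	case SEL0 ... SEL3:
		printf("msr %d data %#lx (%s)\n", msr, data,
		       pr ? "counter" : "event select");
		break;
	}
}

int main(void)
{
	handle_write(CTR2, 42);	/* counter arm: pr == true */
	handle_write(SEL1, 0);	/* event-select arm only: pr == false */
	return 0;
}
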
@@ -2418,24 +2393,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_K8_SYSCFG:
 	case MSR_K7_HWCR:
 	case MSR_VM_HSAVE_PA:
-	case MSR_K7_EVNTSEL0:
-	case MSR_K7_EVNTSEL1:
-	case MSR_K7_EVNTSEL2:
-	case MSR_K7_EVNTSEL3:
-	case MSR_K7_PERFCTR0:
-	case MSR_K7_PERFCTR1:
-	case MSR_K7_PERFCTR2:
-	case MSR_K7_PERFCTR3:
 	case MSR_K8_INT_PENDING_MSG:
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
 	case MSR_AMD64_BU_CFG2:
 		msr_info->data = 0;
 		break;
-	case MSR_P6_PERFCTR0:
-	case MSR_P6_PERFCTR1:
-	case MSR_P6_EVNTSEL0:
-	case MSR_P6_EVNTSEL1:
+	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
+	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
+	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
+	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
 		msr_info->data = 0;
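
The read side matters beyond the guest itself: userspace saves and restores MSRs across migration with the KVM_GET_MSRS/KVM_SET_MSRS vcpu ioctls, which for these ranges now reach the vPMU rather than always returning 0. A hedged sketch of a single-MSR read from a VMM (error handling trimmed; vcpu_fd is assumed to come from the usual KVM_CREATE_VCPU flow):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_K7_PERFCTR0 0xc0010004

/* Fetch one MSR value from a vCPU; returns 0 on success. */
static int get_one_msr(int vcpu_fd, unsigned int index, unsigned long long *val)
{
	struct kvm_msrs *msrs;
	int ret = -1;

	msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
	if (!msrs)
		return -1;

	msrs->nmsrs = 1;
	msrs->entries[0].index = index;

	/* KVM_GET_MSRS returns the number of MSRs successfully read. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, msrs) == 1) {
		*val = msrs->entries[0].data;
		ret = 0;
	}

	free(msrs);
	return ret;
}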