Commit 2d7921c4 authored by Arbel Moshe, committed by Paolo Bonzini

KVM: x86: Add support for VMware backdoor Pseudo-PMCs

VMware exposes the following Pseudo PMCs:
0x10000: Physical host TSC
0x10001: Elapsed real time in ns
0x10002: Elapsed apparent time in ns

For more info refer to:
https://www.vmware.com/files/pdf/techpaper/Timekeeping-In-VirtualMachines.pdf

VMware allows access to these Pseudo-PMCs even when read via RDPMC
in Ring3 and CR4.PCE=0. Therefore, this commit modifies the x86 emulator
to allow access to these PMCs in this situation. In addition,
emulation of these PMCs was added to kvm_pmu_rdpmc().
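
For illustration only (not part of this patch): once the kvm
enable_vmware_backdoor knob is set, a guest could read these pseudo-PMCs
from Ring3 with a plain RDPMC. The user-space wrapper below is a
hypothetical sketch of such a guest-side reader:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: RDPMC takes the counter index in ECX and
 * returns the 64-bit counter value in EDX:EAX.
 */
static inline uint64_t rdpmc(uint32_t idx)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("host TSC:           %llu\n", (unsigned long long)rdpmc(0x10000));
	printf("real time (ns):     %llu\n", (unsigned long long)rdpmc(0x10001));
	printf("apparent time (ns): %llu\n", (unsigned long long)rdpmc(0x10002));
	return 0;
}

Without this patch, or with the backdoor disabled, the same RDPMC from
Ring3 with CR4.PCE=0 would take a #GP instead.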
Signed-off-by: Arbel Moshe <arbel.moshe@oracle.com>
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9718420e
@@ -30,6 +30,7 @@
 #include "x86.h"
 #include "tss.h"
 #include "mmu.h"
+#include "pmu.h"
 
 /*
  * Operand types
@@ -4293,6 +4294,13 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
 
+	/*
+	 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
+	 * in Ring3 and CR4.PCE=0.
+	 */
+	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
+		return X86EMUL_CONTINUE;
+
 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
 	    ctxt->ops->check_pmc(ctxt, rcx))
 		return emulate_gp(ctxt, 0);
@@ -244,12 +244,49 @@ int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
 }
 
+bool is_vmware_backdoor_pmc(u32 pmc_idx)
+{
+	switch (pmc_idx) {
+	case VMWARE_BACKDOOR_PMC_HOST_TSC:
+	case VMWARE_BACKDOOR_PMC_REAL_TIME:
+	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
+		return true;
+	}
+	return false;
+}
+
+static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
+{
+	u64 ctr_val;
+
+	switch (idx) {
+	case VMWARE_BACKDOOR_PMC_HOST_TSC:
+		ctr_val = rdtsc();
+		break;
+	case VMWARE_BACKDOOR_PMC_REAL_TIME:
+		ctr_val = ktime_get_boot_ns();
+		break;
+	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
+		ctr_val = ktime_get_boot_ns() +
+			vcpu->kvm->arch.kvmclock_offset;
+		break;
+	default:
+		return 1;
+	}
+
+	*data = ctr_val;
+	return 0;
+}
+
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
 	bool fast_mode = idx & (1u << 31);
 	struct kvm_pmc *pmc;
 	u64 ctr_val;
 
+	if (is_vmware_backdoor_pmc(idx))
+		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
+
 	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
 	if (!pmc)
 		return 1;
@@ -9,6 +9,10 @@
 /* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
 #define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
 
+#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
+#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
+#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002
+
 struct kvm_event_hw_type_mapping {
 	u8 eventsel;
 	u8 unit_mask;
@@ -114,6 +118,8 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
 
+bool is_vmware_backdoor_pmc(u32 pmc_idx);
+
 extern struct kvm_pmu_ops intel_pmu_ops;
 extern struct kvm_pmu_ops amd_pmu_ops;
 #endif /* __KVM_X86_PMU_H */
@@ -5932,9 +5932,8 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 
 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
 {
-	if (ctxt->opcode_len != 1)
-		return false;
-
-	switch (ctxt->b) {
+	switch (ctxt->opcode_len) {
+	case 1:
+		switch (ctxt->b) {
 	case 0xe4:	/* IN */
 	case 0xe5:
@@ -5950,6 +5949,14 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
 	case 0x6f:
 		return true;
 	}
+		break;
+	case 2:
+		switch (ctxt->b) {
+		case 0x33:	/* RDPMC */
+			return true;
+		}
+		break;
+	}
 
 	return false;
 }