Commit 2b3eaf81 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Use accessors for GPRs outside of dedicated caching logic

... now that there is no overhead when using dedicated accessors.

Opportunistically remove a bogus "FIXME" in handle_rdmsr() regarding
the upper 32 bits of RAX and RDX.  Zeroing the upper 32 bits is
architecturally correct as 32-bit writes in 64-bit mode unconditionally
clear the upper 32 bits.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent de3cd117
...@@ -4764,7 +4764,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) ...@@ -4764,7 +4764,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12) struct vmcs12 *vmcs12)
{ {
u32 index = vcpu->arch.regs[VCPU_REGS_RCX]; u32 index = kvm_rcx_read(vcpu);
u64 address; u64 address;
bool accessed_dirty; bool accessed_dirty;
struct kvm_mmu *mmu = vcpu->arch.walk_mmu; struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
...@@ -4810,7 +4810,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu) ...@@ -4810,7 +4810,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12; struct vmcs12 *vmcs12;
u32 function = vcpu->arch.regs[VCPU_REGS_RAX]; u32 function = kvm_rax_read(vcpu);
/* /*
* VMFUNC is only supported for nested guests, but we always enable the * VMFUNC is only supported for nested guests, but we always enable the
...@@ -4896,7 +4896,7 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, ...@@ -4896,7 +4896,7 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12, u32 exit_reason) struct vmcs12 *vmcs12, u32 exit_reason)
{ {
u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; u32 msr_index = kvm_rcx_read(vcpu);
gpa_t bitmap; gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
......
...@@ -4831,7 +4831,7 @@ static int handle_cpuid(struct kvm_vcpu *vcpu) ...@@ -4831,7 +4831,7 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
static int handle_rdmsr(struct kvm_vcpu *vcpu) static int handle_rdmsr(struct kvm_vcpu *vcpu)
{ {
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u32 ecx = kvm_rcx_read(vcpu);
struct msr_data msr_info; struct msr_data msr_info;
msr_info.index = ecx; msr_info.index = ecx;
...@@ -4844,18 +4844,16 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu) ...@@ -4844,18 +4844,16 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
trace_kvm_msr_read(ecx, msr_info.data); trace_kvm_msr_read(ecx, msr_info.data);
/* FIXME: handling of bits 32:63 of rax, rdx */ kvm_rax_write(vcpu, msr_info.data & -1u);
vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; kvm_rdx_write(vcpu, (msr_info.data >> 32) & -1u);
vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
return kvm_skip_emulated_instruction(vcpu); return kvm_skip_emulated_instruction(vcpu);
} }
static int handle_wrmsr(struct kvm_vcpu *vcpu) static int handle_wrmsr(struct kvm_vcpu *vcpu)
{ {
struct msr_data msr; struct msr_data msr;
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u32 ecx = kvm_rcx_read(vcpu);
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) u64 data = kvm_read_edx_eax(vcpu);
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
msr.data = data; msr.data = data;
msr.index = ecx; msr.index = ecx;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment