Commit 668f198f authored by David Kaplan, committed by Marcelo Tosatti

KVM: SVM: use kvm_register_write()/read()

KVM has nice wrappers to access the register values; clean up a few places
that should use them but currently do not.
Signed-off-by: David Kaplan <david.kaplan@amd.com>
[forward port and testing]
Signed-off-by: Joel Schopp <joel.schopp@amd.com>
Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent affb8172
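For context, the wrappers in question live in arch/x86/kvm/kvm_cache_regs.h. A rough sketch of how they looked around the time of this commit (exact types and the ops indirection vary across kernel versions):

    static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
                                                  enum kvm_reg reg)
    {
            /* Lazily fetch the register into the cache if not yet available. */
            if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                    kvm_x86_ops->cache_reg(vcpu, reg);

            return vcpu->arch.regs[reg];
    }

    static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                          enum kvm_reg reg,
                                          unsigned long val)
    {
            vcpu->arch.regs[reg] = val;
            /* Mark the register available and dirty so it gets written back. */
            __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
            __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
    }

This is why the wrappers are preferable to poking vcpu->arch.regs[] directly: a raw read can return a stale value if the register has not been cached yet, and a raw write bypasses the dirty tracking.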
arch/x86/kvm/svm.c
@@ -2757,11 +2757,11 @@ static int invlpga_interception(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 
-	trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
-			  vcpu->arch.regs[VCPU_REGS_RAX]);
+	trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
+			  kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
 
 	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
-	kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
+	kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
@@ -2770,7 +2770,7 @@ static int invlpga_interception(struct vcpu_svm *svm)
 
 static int skinit_interception(struct vcpu_svm *svm)
 {
-	trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
+	trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
 
 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
@@ -3133,7 +3133,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
 static int rdmsr_interception(struct vcpu_svm *svm)
 {
-	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
 	u64 data;
 
 	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
@@ -3142,8 +3142,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
 	} else {
 		trace_kvm_msr_read(ecx, data);
 
-		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
-		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
+		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
+		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
 		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
@@ -3246,9 +3246,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 static int wrmsr_interception(struct vcpu_svm *svm)
 {
 	struct msr_data msr;
-	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
-	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
-		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+	u64 data = kvm_read_edx_eax(&svm->vcpu);
 
 	msr.data = data;
 	msr.index = ecx;
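The last hunk also replaces the open-coded edx:eax merge with kvm_read_edx_eax(), a pre-existing helper from the same header. A rough sketch of its era-appropriate form (the exact wording may differ by kernel version):

    static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
    {
            /* WRMSR takes its 64-bit payload split across edx:eax. */
            return (u64)kvm_register_read(vcpu, VCPU_REGS_RAX)
                    | ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32);
    }

Using the helper both shortens wrmsr_interception() and routes the reads through the register cache, which is the whole point of the patch.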