Commit e9c16c78 authored by Paolo Bonzini

KVM: x86: use direct accessors for RIP and RSP

Use specific inline functions for RIP and RSP instead of
going through kvm_register_read and kvm_register_write,
which are quite a mouthful.  kvm_rsp_read and kvm_rsp_write
did not exist, so add them.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2b3eaf81
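
At each call site the change is mechanical: reads and writes of RSP and RIP through the generic, enum-indexed accessors become calls to the dedicated inline wrappers. A minimal sketch of the call-site pattern follows; the helper function is hypothetical, while the accessors themselves are the ones defined or already present in kvm_cache_regs.h (see the first hunk below).

/*
 * Illustrative only: restore_rip_rsp() is a hypothetical caller used to
 * show the before/after pattern; it is not part of this commit.
 */
static void restore_rip_rsp(struct kvm_vcpu *vcpu,
			    unsigned long rip, unsigned long rsp)
{
	/* Before: generic accessors indexed by the register enum. */
	kvm_register_write(vcpu, VCPU_REGS_RSP, rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, rip);

	/* After: dedicated inline accessors from kvm_cache_regs.h. */
	kvm_rsp_write(vcpu, rsp);
	kvm_rip_write(vcpu, rip);
}
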
@@ -65,6 +65,16 @@ static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
 	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
 }
 
+static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
+{
+	return kvm_register_read(vcpu, VCPU_REGS_RSP);
+}
+
+static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
+}
+
 static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 {
 	might_sleep();  /* on svm */
@@ -3389,8 +3389,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
 	}
 	kvm_rax_write(&svm->vcpu, hsave->save.rax);
-	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
-	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
+	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
+	kvm_rip_write(&svm->vcpu, hsave->save.rip);
 	svm->vmcb->save.dr7 = 0;
 	svm->vmcb->save.cpl = 0;
 	svm->vmcb->control.exit_int_info = 0;
@@ -3497,8 +3497,8 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
 	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
-	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
-	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
+	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
 	/* In case we don't even reach vcpu_run, the fields are not updated */
 	svm->vmcb->save.rax = nested_vmcb->save.rax;
@@ -2372,8 +2372,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
-	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
-	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
+	kvm_rip_write(vcpu, vmcs12->guest_rip);
 	return 0;
 }
@@ -3401,8 +3401,8 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
 	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
-	vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-	vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
+	vmcs12->guest_rsp = kvm_rsp_read(vcpu);
+	vmcs12->guest_rip = kvm_rip_read(vcpu);
 	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
 	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
@@ -3585,8 +3585,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
 	vmx_set_efer(vcpu, vcpu->arch.efer);
-	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
-	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
+	kvm_rsp_write(vcpu, vmcs12->host_rsp);
+	kvm_rip_write(vcpu, vmcs12->host_rip);
 	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
 	vmx_set_interrupt_shadow(vcpu, 0);
@@ -8290,7 +8290,7 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->rdx = kvm_rdx_read(vcpu);
 	regs->rsi = kvm_rsi_read(vcpu);
 	regs->rdi = kvm_rdi_read(vcpu);
-	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+	regs->rsp = kvm_rsp_read(vcpu);
 	regs->rbp = kvm_rbp_read(vcpu);
 #ifdef CONFIG_X86_64
 	regs->r8 = kvm_r8_read(vcpu);
@@ -8326,7 +8326,7 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	kvm_rdx_write(vcpu, regs->rdx);
 	kvm_rsi_write(vcpu, regs->rsi);
 	kvm_rdi_write(vcpu, regs->rdi);
-	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
+	kvm_rsp_write(vcpu, regs->rsp);
 	kvm_rbp_write(vcpu, regs->rbp);
 #ifdef CONFIG_X86_64
 	kvm_r8_write(vcpu, regs->r8);