Commit db8fcefa authored by Andre Przywara, committed by Avi Kivity

KVM: move complete_insn_gp() into x86.c

Move the complete_insn_gp() helper function out of the VMX-specific code
into the generic x86 code so that SVM can use it as well.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent eea1cff9
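
With kvm_complete_insn_gp() exported from x86.c, an SVM intercept handler can finish an emulated CR access the same way the VMX handle_cr() path below does: inject #GP on failure, otherwise skip the emulated instruction. The sketch below is illustrative only and is not part of this commit; the handler name and the way the source register is obtained are assumptions, while kvm_register_read(), kvm_set_cr0() and kvm_complete_insn_gp() are the interfaces that appear in the diff.

#include <linux/kvm_host.h>

/*
 * Illustrative sketch (not from this commit): a possible SVM-side
 * CR0 write intercept that reuses the newly exported helper.
 */
static int svm_cr0_write_interception(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);
	int err = kvm_set_cr0(vcpu, val);

	/* Inject #GP on failure, otherwise advance RIP past the MOV to CR0. */
	kvm_complete_insn_gp(vcpu, err);
	return 1;
}
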
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -828,4 +828,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
 #endif /* _ASM_X86_KVM_HOST_H */
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3147,14 +3147,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
-static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
-{
-	if (err)
-		kvm_inject_gp(vcpu, 0);
-	else
-		skip_emulated_instruction(vcpu);
-}
-
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
@@ -3172,21 +3164,21 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	switch (cr) {
 	case 0:
 		err = kvm_set_cr0(vcpu, val);
-		complete_insn_gp(vcpu, err);
+		kvm_complete_insn_gp(vcpu, err);
 		return 1;
 	case 3:
 		err = kvm_set_cr3(vcpu, val);
-		complete_insn_gp(vcpu, err);
+		kvm_complete_insn_gp(vcpu, err);
 		return 1;
 	case 4:
 		err = kvm_set_cr4(vcpu, val);
-		complete_insn_gp(vcpu, err);
+		kvm_complete_insn_gp(vcpu, err);
 		return 1;
 	case 8: {
 		u8 cr8_prev = kvm_get_cr8(vcpu);
 		u8 cr8 = kvm_register_read(vcpu, reg);
 		err = kvm_set_cr8(vcpu, cr8);
-		complete_insn_gp(vcpu, err);
+		kvm_complete_insn_gp(vcpu, err);
 		if (irqchip_in_kernel(vcpu->kvm))
 			return 1;
 		if (cr8_prev <= cr8)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -334,6 +334,15 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+{
+	if (err)
+		kvm_inject_gp(vcpu, 0);
+	else
+		kvm_x86_ops->skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
+
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
 	++vcpu->stat.pf_guest;
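
Note that the generic copy dispatches through kvm_x86_ops->skip_emulated_instruction() rather than calling VMX's file-local skip_emulated_instruction() directly, so whichever vendor module is loaded supplies its own way of advancing the guest RIP past the emulated instruction.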