Commit 6affcbed authored by Kyle Huey, committed by Paolo Bonzini

KVM: x86: Add kvm_skip_emulated_instruction and use it.

kvm_skip_emulated_instruction calls both
kvm_x86_ops->skip_emulated_instruction and kvm_vcpu_check_singlestep,
skipping the emulated instruction and generating a trap if necessary.
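
For orientation, here is a minimal sketch of the resulting calling convention for exit
handlers. The handler name is hypothetical; the helper itself is defined in the diff
below and returns 1 to resume the guest and 0 to exit to userspace:

	/* Hypothetical exit handler: skip the instruction and propagate the
	 * helper's result (1 = EMULATE_DONE, resume the guest; 0 = exit to
	 * userspace, e.g. for a KVM_GUESTDBG_SINGLESTEP debug exit). */
	static int example_interception(struct kvm_vcpu *vcpu)
	{
		/* ... emulate the instruction's side effects here ... */
		return kvm_skip_emulated_instruction(vcpu);
	}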

Replacing skip_emulated_instruction calls with
kvm_skip_emulated_instruction is straightforward, except for:

- ICEBP, which is already inside a trap, so avoid triggering another trap.
- Instructions that can trigger exits to userspace, such as the IO insns,
  MOVs to CR8, and HALT. If kvm_skip_emulated_instruction does trigger a
  KVM_GUESTDBG_SINGLESTEP exit, and the handling code for
  IN/OUT/MOV CR8/HALT also triggers an exit to userspace, the latter will
  take precedence. The singlestep will be triggered again on the next
  instruction, which is the current behavior (see the sketch after this list).
- Task switch instructions which would require additional handling (e.g.
  the task switch bit) and are instead left alone.
- Cases where VMLAUNCH/VMRESUME do not proceed to the next instruction,
  which do not trigger singlestep traps as mentioned previously.
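
A sketch of the precedence rule from the second bullet; the handler name is
hypothetical and the pattern mirrors the kvm_emulate_halt change in the diff
below. The skip result is combined with the handler's own result via &&, so a
userspace exit (0) requested by the handler wins, and a squashed single-step
trap simply fires again on the next instruction:

	/* Hypothetical handler for an instruction that can itself exit to
	 * userspace (IN/OUT, MOV CR8, HLT-style). */
	static int example_halt_interception(struct kvm_vcpu *vcpu)
	{
		int ret = kvm_skip_emulated_instruction(vcpu);

		/*
		 * A 0 return from kvm_vcpu_halt() (exit to userspace) takes
		 * precedence over a pending single-step KVM_EXIT_DEBUG; the
		 * single-step trap re-fires on the next instruction.
		 */
		return kvm_vcpu_halt(vcpu) && ret;
	}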
Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent eb277562
@@ -1368,7 +1368,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 int kvm_is_in_guest(void);
...
@@ -890,7 +890,6 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
 	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
 	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
@@ -3151,8 +3151,7 @@ static int skinit_interception(struct vcpu_svm *svm)
 static int wbinvd_interception(struct vcpu_svm *svm)
 {
-	kvm_emulate_wbinvd(&svm->vcpu);
-	return 1;
+	return kvm_emulate_wbinvd(&svm->vcpu);
 }
 
 static int xsetbv_interception(struct vcpu_svm *svm)
@@ -3275,9 +3274,7 @@ static int rdpmc_interception(struct vcpu_svm *svm)
 		return emulate_on_interception(svm);
 
 	err = kvm_rdpmc(&svm->vcpu);
-	kvm_complete_insn_gp(&svm->vcpu, err);
-
-	return 1;
+	return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
@@ -3374,9 +3371,7 @@ static int cr_interception(struct vcpu_svm *svm)
 		}
 		kvm_register_write(&svm->vcpu, reg, val);
 	}
-	kvm_complete_insn_gp(&svm->vcpu, err);
-
-	return 1;
+	return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static int dr_interception(struct vcpu_svm *svm)
...
(One file's diff in this commit is collapsed and not shown here.)
@@ -425,12 +425,14 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
 	if (err)
 		kvm_inject_gp(vcpu, 0);
 	else
-		kvm_x86_ops->skip_emulated_instruction(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+
+	return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
@@ -4813,8 +4815,8 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	return kvm_emulate_wbinvd_noskip(vcpu);
+	kvm_emulate_wbinvd_noskip(vcpu);
+	return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -5430,6 +5432,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
 	}
 }
 
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+	int r = EMULATE_DONE;
+
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+	return r == EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+
 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 {
 	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
@@ -6007,8 +6020,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	return kvm_vcpu_halt(vcpu);
+	int ret = kvm_skip_emulated_instruction(vcpu);
+	/*
+	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+	 * KVM_EXIT_DEBUG here.
+	 */
+	return kvm_vcpu_halt(vcpu) && ret;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
@@ -6039,9 +6056,9 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
 	unsigned long nr, a0, a1, a2, a3, ret;
-	int op_64_bit, r = 1;
+	int op_64_bit, r;
 
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	r = kvm_skip_emulated_instruction(vcpu);
 
 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
 		return kvm_hv_hypercall(vcpu);
...