Commit 8a76d7f2 authored by Joerg Roedel, committed by Avi Kivity

KVM: x86: Add x86 callback for intercept check

This patch adds a callback into kvm_x86_ops so that svm and
vmx code can do intercept checks on emulated instructions.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 8ea7d6ae
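
Both backend implementations added below are stubs that unconditionally return X86EMUL_CONTINUE; the real checks arrive in follow-up patches. As context only, here is a minimal sketch of the shape such a backend check could take. It is not part of this patch: nested_intercepted() is a hypothetical helper, and X86EMUL_INTERCEPTED is assumed to be provided by the intercept framework this series introduces.

/* Illustrative sketch only -- not from this patch. */
static int example_check_intercept(struct kvm_vcpu *vcpu,
                                   struct x86_instruction_info *info,
                                   enum x86_intercept_stage stage)
{
        /*
         * A backend would consult the L1 hypervisor's intercept
         * configuration for info->intercept at the given stage and,
         * if the instruction is intercepted, cause a nested exit
         * instead of letting emulation continue.
         */
        if (nested_intercepted(vcpu, info->intercept, stage)) /* hypothetical */
                return X86EMUL_INTERCEPTED;

        return X86EMUL_CONTINUE;
}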
@@ -25,6 +25,24 @@ struct x86_exception {
 	u64 address; /* cr2 or nested page fault gpa */
 };
 
+/*
+ * This struct is used to carry enough information from the instruction
+ * decoder to main KVM so that a decision can be made whether the
+ * instruction needs to be intercepted or not.
+ */
+struct x86_instruction_info {
+	u8  intercept;    /* which intercept */
+	u8  rep_prefix;   /* rep prefix? */
+	u8  modrm_mod;    /* mod part of modrm */
+	u8  modrm_reg;    /* index of register used */
+	u8  modrm_rm;     /* rm part of modrm */
+	u64 src_val;      /* value of source operand */
+	u8  src_bytes;    /* size of source operand */
+	u8  dst_bytes;    /* size of destination operand */
+	u8  ad_bytes;     /* size of src/dst address */
+	u64 next_rip;     /* rip following the instruction */
+};
+
 /*
  * x86_emulate_ops:
  *
@@ -163,8 +181,8 @@ struct x86_emulate_ops {
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
 	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
-	int (*intercept)(struct x86_emulate_ctxt *ctxt,
-			 enum x86_intercept intercept,
+	int (*intercept)(struct kvm_vcpu *vcpu,
+			 struct x86_instruction_info *info,
 			 enum x86_intercept_stage stage);
 };
...
@@ -505,6 +505,8 @@ struct kvm_vcpu_stat {
 	u32 nmi_injections;
 };
 
+struct x86_instruction_info;
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void); /* __init */
 	int (*disabled_by_bios)(void);    /* __init */
@@ -592,6 +594,11 @@ struct kvm_x86_ops {
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+	int (*check_intercept)(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
...
@@ -408,6 +408,26 @@ struct gprefix {
 	(_eip) += (_size); \
 })
 
+static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
+				    enum x86_intercept intercept,
+				    enum x86_intercept_stage stage)
+{
+	struct x86_instruction_info info = {
+		.intercept  = intercept,
+		.rep_prefix = ctxt->decode.rep_prefix,
+		.modrm_mod  = ctxt->decode.modrm_mod,
+		.modrm_reg  = ctxt->decode.modrm_reg,
+		.modrm_rm   = ctxt->decode.modrm_rm,
+		.src_val    = ctxt->decode.src.val64,
+		.src_bytes  = ctxt->decode.src.bytes,
+		.dst_bytes  = ctxt->decode.dst.bytes,
+		.ad_bytes   = ctxt->decode.ad_bytes,
+		.next_rip   = ctxt->eip,
+	};
+
+	return ctxt->ops->intercept(ctxt->vcpu, &info, stage);
+}
+
 static inline unsigned long ad_mask(struct decode_cache *c)
 {
 	return (1UL << (c->ad_bytes << 3)) - 1;
@@ -3132,8 +3152,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if (unlikely(ctxt->guest_mode) && c->intercept) {
-		rc = ops->intercept(ctxt, c->intercept,
-				    X86_ICPT_PRE_EXCEPT);
+		rc = emulator_check_intercept(ctxt, c->intercept,
+					      X86_ICPT_PRE_EXCEPT);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}
@@ -3158,8 +3178,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if (unlikely(ctxt->guest_mode) && c->intercept) {
-		rc = ops->intercept(ctxt, c->intercept,
-				    X86_ICPT_POST_EXCEPT);
+		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_POST_EXCEPT);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}
@@ -3203,8 +3223,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 special_insn:
 
 	if (unlikely(ctxt->guest_mode) && c->intercept) {
-		rc = ops->intercept(ctxt, c->intercept,
-				    X86_ICPT_POST_MEMACCESS);
+		rc = emulator_check_intercept(ctxt, c->intercept,
+					      X86_ICPT_POST_MEMACCESS);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 	}
...
@@ -3868,6 +3868,13 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 	update_cr0_intercept(svm);
 }
 
+static int svm_check_intercept(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage)
+{
+	return X86EMUL_CONTINUE;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -3953,6 +3960,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
+
+	.check_intercept = svm_check_intercept,
 };
 
 static int __init svm_init(void)
...
@@ -4409,6 +4409,13 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 }
 
+static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage)
+{
+	return X86EMUL_CONTINUE;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -4494,6 +4501,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
+
+	.check_intercept = vmx_check_intercept,
 };
 
 static int __init vmx_init(void)
...
@@ -4297,11 +4297,11 @@ static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
 	preempt_enable();
 }
 
-static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
-			      enum x86_intercept intercept,
+static int emulator_intercept(struct kvm_vcpu *vcpu,
+			      struct x86_instruction_info *info,
 			      enum x86_intercept_stage stage)
 {
-	return X86EMUL_CONTINUE;
+	return kvm_x86_ops->check_intercept(vcpu, info, stage);
 }
 
 static struct x86_emulate_ops emulate_ops = {
...
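
Taken together, the dispatch chain after this patch is: x86_emulate_insn() calls emulator_check_intercept(), which packs the decode cache into a struct x86_instruction_info on the stack (next_rip is ctxt->eip, which the fetch macros shown above have already advanced past the instruction) and invokes ctxt->ops->intercept; that op is emulator_intercept() in x86.c, which now forwards to kvm_x86_ops->check_intercept, landing in svm_check_intercept() or vmx_check_intercept(). Both backend callbacks are stubs at this point and always return X86EMUL_CONTINUE.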