Commit 64d60670 authored by Paolo Bonzini

KVM: x86: stubs for SMM support

This patch adds the interface between x86.c and the emulator: the
SMBASE register, a new emulator flag, the RSM instruction.  It also
adds a new request bit that will be used by the KVM_SMI ioctl.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f077825a
...@@ -193,6 +193,8 @@ struct x86_emulate_ops { ...@@ -193,6 +193,8 @@ struct x86_emulate_ops {
int (*cpl)(struct x86_emulate_ctxt *ctxt); int (*cpl)(struct x86_emulate_ctxt *ctxt);
int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc); int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
...@@ -264,6 +266,8 @@ enum x86emul_mode { ...@@ -264,6 +266,8 @@ enum x86emul_mode {
/* These match some of the HF_* flags defined in kvm_host.h */ /* These match some of the HF_* flags defined in kvm_host.h */
#define X86EMUL_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */ #define X86EMUL_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
#define X86EMUL_SMM_MASK (1 << 6)
#define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7)
struct x86_emulate_ctxt { struct x86_emulate_ctxt {
const struct x86_emulate_ops *ops; const struct x86_emulate_ops *ops;
......
...@@ -368,6 +368,7 @@ struct kvm_vcpu_arch { ...@@ -368,6 +368,7 @@ struct kvm_vcpu_arch {
int32_t apic_arb_prio; int32_t apic_arb_prio;
int mp_state; int mp_state;
u64 ia32_misc_enable_msr; u64 ia32_misc_enable_msr;
u64 smbase;
bool tpr_access_reporting; bool tpr_access_reporting;
u64 ia32_xss; u64 ia32_xss;
......
...@@ -2259,6 +2259,14 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt) ...@@ -2259,6 +2259,14 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
return rc; return rc;
} }
/*
 * Stub for the RSM instruction: raise #UD when executed outside of
 * System Management Mode; real SMM state restore is not implemented
 * yet, so inside SMM the instruction is reported as unhandleable.
 */
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	if (!(ctxt->emul_flags & X86EMUL_SMM_MASK))
		return emulate_ud(ctxt);

	return X86EMUL_UNHANDLEABLE;
}
static void static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
struct desc_struct *cs, struct desc_struct *ss) struct desc_struct *cs, struct desc_struct *ss)
...@@ -4197,7 +4205,7 @@ static const struct opcode twobyte_table[256] = { ...@@ -4197,7 +4205,7 @@ static const struct opcode twobyte_table[256] = {
F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
/* 0xA8 - 0xAF */ /* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
DI(ImplicitOps, rsm), II(No64 | EmulateOnUD | ImplicitOps, em_rsm, rsm),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
......
...@@ -808,7 +808,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, ...@@ -808,7 +808,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
break; break;
case APIC_DM_SMI: case APIC_DM_SMI:
apic_debug("Ignoring guest SMI\n"); result = 1;
kvm_make_request(KVM_REQ_SMI, vcpu);
kvm_vcpu_kick(vcpu);
break; break;
case APIC_DM_NMI: case APIC_DM_NMI:
......
...@@ -3394,6 +3394,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { ...@@ -3394,6 +3394,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_MWAIT] = mwait_interception, [SVM_EXIT_MWAIT] = mwait_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception, [SVM_EXIT_XSETBV] = xsetbv_interception,
[SVM_EXIT_NPF] = pf_interception, [SVM_EXIT_NPF] = pf_interception,
[SVM_EXIT_RSM] = emulate_on_interception,
}; };
static void dump_vmcb(struct kvm_vcpu *vcpu) static void dump_vmcb(struct kvm_vcpu *vcpu)
......
...@@ -954,6 +954,7 @@ static u32 emulated_msrs[] = { ...@@ -954,6 +954,7 @@ static u32 emulated_msrs[] = {
MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE,
MSR_IA32_MCG_STATUS, MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL, MSR_IA32_MCG_CTL,
MSR_IA32_SMBASE,
}; };
static unsigned num_emulated_msrs; static unsigned num_emulated_msrs;
...@@ -2282,6 +2283,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -2282,6 +2283,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_MISC_ENABLE: case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data; vcpu->arch.ia32_misc_enable_msr = data;
break; break;
case MSR_IA32_SMBASE:
if (!msr_info->host_initiated)
return 1;
vcpu->arch.smbase = data;
break;
case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data; vcpu->kvm->arch.wall_clock = data;
...@@ -2679,6 +2685,11 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -2679,6 +2685,11 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_MISC_ENABLE: case MSR_IA32_MISC_ENABLE:
msr_info->data = vcpu->arch.ia32_misc_enable_msr; msr_info->data = vcpu->arch.ia32_misc_enable_msr;
break; break;
case MSR_IA32_SMBASE:
if (!msr_info->host_initiated)
return 1;
msr_info->data = vcpu->arch.smbase;
break;
case MSR_IA32_PERF_STATUS: case MSR_IA32_PERF_STATUS:
/* TSC increment by tick */ /* TSC increment by tick */
msr_info->data = 1000ULL; msr_info->data = 1000ULL;
...@@ -3103,6 +3114,8 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) ...@@ -3103,6 +3114,8 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
{ {
kvm_make_request(KVM_REQ_SMI, vcpu);
return 0; return 0;
} }
...@@ -5129,6 +5142,20 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, ...@@ -5129,6 +5142,20 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
return kvm_set_msr(emul_to_vcpu(ctxt), &msr); return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
} }
static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
return vcpu->arch.smbase;
}
/* Emulator callback: write the vcpu's SMBASE register. */
static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	emul_to_vcpu(ctxt)->arch.smbase = smbase;
}
static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
u32 pmc) u32 pmc)
{ {
...@@ -5214,6 +5241,8 @@ static const struct x86_emulate_ops emulate_ops = { ...@@ -5214,6 +5241,8 @@ static const struct x86_emulate_ops emulate_ops = {
.cpl = emulator_get_cpl, .cpl = emulator_get_cpl,
.get_dr = emulator_get_dr, .get_dr = emulator_get_dr,
.set_dr = emulator_set_dr, .set_dr = emulator_set_dr,
.get_smbase = emulator_get_smbase,
.set_smbase = emulator_set_smbase,
.set_msr = emulator_set_msr, .set_msr = emulator_set_msr,
.get_msr = emulator_get_msr, .get_msr = emulator_get_msr,
.check_pmc = emulator_check_pmc, .check_pmc = emulator_check_pmc,
...@@ -5276,6 +5305,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) ...@@ -5276,6 +5305,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
cs_db ? X86EMUL_MODE_PROT32 : cs_db ? X86EMUL_MODE_PROT32 :
X86EMUL_MODE_PROT16; X86EMUL_MODE_PROT16;
BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
ctxt->emul_flags = vcpu->arch.hflags; ctxt->emul_flags = vcpu->arch.hflags;
init_decode_cache(ctxt); init_decode_cache(ctxt);
...@@ -5445,9 +5476,24 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, ...@@ -5445,9 +5476,24 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
static int complete_emulated_mmio(struct kvm_vcpu *vcpu); static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu); static int complete_emulated_pio(struct kvm_vcpu *vcpu);
void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags) static void kvm_smm_changed(struct kvm_vcpu *vcpu)
{ {
if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
if (unlikely(vcpu->arch.smi_pending)) {
kvm_make_request(KVM_REQ_SMI, vcpu);
vcpu->arch.smi_pending = 0;
}
}
}
static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
{
unsigned changed = vcpu->arch.hflags ^ emul_flags;
vcpu->arch.hflags = emul_flags; vcpu->arch.hflags = emul_flags;
if (changed & HF_SMM_MASK)
kvm_smm_changed(vcpu);
} }
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
...@@ -6341,6 +6387,16 @@ static void process_nmi(struct kvm_vcpu *vcpu) ...@@ -6341,6 +6387,16 @@ static void process_nmi(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu);
} }
/*
 * Process a pending KVM_REQ_SMI.  Real SMM entry is not emulated yet:
 * an SMI arriving while the vcpu is already in SMM is latched in
 * smi_pending; otherwise it is dropped with a one-time debug message.
 */
static void process_smi(struct kvm_vcpu *vcpu)
{
	if (!is_smm(vcpu)) {
		printk_once(KERN_DEBUG "Ignoring guest SMI\n");
		return;
	}

	vcpu->arch.smi_pending = true;
}
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{ {
u64 eoi_exit_bitmap[4]; u64 eoi_exit_bitmap[4];
...@@ -6449,6 +6505,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -6449,6 +6505,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
} }
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
record_steal_time(vcpu); record_steal_time(vcpu);
if (kvm_check_request(KVM_REQ_SMI, vcpu))
process_smi(vcpu);
if (kvm_check_request(KVM_REQ_NMI, vcpu)) if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu); process_nmi(vcpu);
if (kvm_check_request(KVM_REQ_PMU, vcpu)) if (kvm_check_request(KVM_REQ_PMU, vcpu))
...@@ -7363,8 +7421,10 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) ...@@ -7363,8 +7421,10 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_async_pf_hash_reset(vcpu); kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false; vcpu->arch.apf.halted = false;
if (!init_event) if (!init_event) {
kvm_pmu_reset(vcpu); kvm_pmu_reset(vcpu);
vcpu->arch.smbase = 0x30000;
}
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
vcpu->arch.regs_avail = ~0; vcpu->arch.regs_avail = ~0;
......
...@@ -134,6 +134,7 @@ static inline bool is_error_page(struct page *page) ...@@ -134,6 +134,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_ENABLE_IBS 23 #define KVM_REQ_ENABLE_IBS 23
#define KVM_REQ_DISABLE_IBS 24 #define KVM_REQ_DISABLE_IBS 24
#define KVM_REQ_APIC_PAGE_RELOAD 25 #define KVM_REQ_APIC_PAGE_RELOAD 25
#define KVM_REQ_SMI 26
#define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment