Commit 35d3d4a1 authored by Avi Kivity's avatar Avi Kivity

KVM: x86 emulator: simplify exception generation

Immediately after we generate an exception, we want a X86EMUL_PROPAGATE_FAULT
constant, so return it from the generation functions.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent db297e3d
...@@ -466,33 +466,33 @@ static ulong linear(struct x86_emulate_ctxt *ctxt, ...@@ -466,33 +466,33 @@ static ulong linear(struct x86_emulate_ctxt *ctxt,
return la; return la;
} }
static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
u32 error, bool valid) u32 error, bool valid)
{ {
ctxt->exception.vector = vec; ctxt->exception.vector = vec;
ctxt->exception.error_code = error; ctxt->exception.error_code = error;
ctxt->exception.error_code_valid = valid; ctxt->exception.error_code_valid = valid;
return X86EMUL_PROPAGATE_FAULT;
} }
static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err) static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{ {
emulate_exception(ctxt, GP_VECTOR, err, true); return emulate_exception(ctxt, GP_VECTOR, err, true);
} }
static void emulate_ud(struct x86_emulate_ctxt *ctxt) static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{ {
emulate_exception(ctxt, UD_VECTOR, 0, false); return emulate_exception(ctxt, UD_VECTOR, 0, false);
} }
static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err) static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{ {
emulate_exception(ctxt, TS_VECTOR, err, true); return emulate_exception(ctxt, TS_VECTOR, err, true);
} }
static int emulate_de(struct x86_emulate_ctxt *ctxt) static int emulate_de(struct x86_emulate_ctxt *ctxt)
{ {
emulate_exception(ctxt, DE_VECTOR, 0, false); return emulate_exception(ctxt, DE_VECTOR, 0, false);
return X86EMUL_PROPAGATE_FAULT;
} }
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
...@@ -898,10 +898,8 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, ...@@ -898,10 +898,8 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
get_descriptor_table_ptr(ctxt, ops, selector, &dt); get_descriptor_table_ptr(ctxt, ops, selector, &dt);
if (dt.size < index * 8 + 7) { if (dt.size < index * 8 + 7)
emulate_gp(ctxt, selector & 0xfffc); return emulate_gp(ctxt, selector & 0xfffc);
return X86EMUL_PROPAGATE_FAULT;
}
addr = dt.address + index * 8; addr = dt.address + index * 8;
ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
&ctxt->exception); &ctxt->exception);
...@@ -921,10 +919,8 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, ...@@ -921,10 +919,8 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
get_descriptor_table_ptr(ctxt, ops, selector, &dt); get_descriptor_table_ptr(ctxt, ops, selector, &dt);
if (dt.size < index * 8 + 7) { if (dt.size < index * 8 + 7)
emulate_gp(ctxt, selector & 0xfffc); return emulate_gp(ctxt, selector & 0xfffc);
return X86EMUL_PROPAGATE_FAULT;
}
addr = dt.address + index * 8; addr = dt.address + index * 8;
ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
...@@ -1165,10 +1161,8 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, ...@@ -1165,10 +1161,8 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
change_mask |= EFLG_IF; change_mask |= EFLG_IF;
break; break;
case X86EMUL_MODE_VM86: case X86EMUL_MODE_VM86:
if (iopl < 3) { if (iopl < 3)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
change_mask |= EFLG_IF; change_mask |= EFLG_IF;
break; break;
default: /* real mode */ default: /* real mode */
...@@ -1347,10 +1341,8 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt, ...@@ -1347,10 +1341,8 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
if (rc != X86EMUL_CONTINUE) if (rc != X86EMUL_CONTINUE)
return rc; return rc;
if (temp_eip & ~0xffff) { if (temp_eip & ~0xffff)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
rc = emulate_pop(ctxt, ops, &cs, c->op_bytes); rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
...@@ -1601,10 +1593,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1601,10 +1593,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
/* syscall is not available in real mode */ /* syscall is not available in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL || if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86) { ctxt->mode == X86EMUL_MODE_VM86)
emulate_ud(ctxt); return emulate_ud(ctxt);
return X86EMUL_PROPAGATE_FAULT;
}
setup_syscalls_segments(ctxt, ops, &cs, &ss); setup_syscalls_segments(ctxt, ops, &cs, &ss);
...@@ -1655,34 +1645,26 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1655,34 +1645,26 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
u16 cs_sel, ss_sel; u16 cs_sel, ss_sel;
/* inject #GP if in real mode */ /* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL) { if (ctxt->mode == X86EMUL_MODE_REAL)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
/* XXX sysenter/sysexit have not been tested in 64bit mode. /* XXX sysenter/sysexit have not been tested in 64bit mode.
* Therefore, we inject an #UD. * Therefore, we inject an #UD.
*/ */
if (ctxt->mode == X86EMUL_MODE_PROT64) { if (ctxt->mode == X86EMUL_MODE_PROT64)
emulate_ud(ctxt); return emulate_ud(ctxt);
return X86EMUL_PROPAGATE_FAULT;
}
setup_syscalls_segments(ctxt, ops, &cs, &ss); setup_syscalls_segments(ctxt, ops, &cs, &ss);
ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data); ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
switch (ctxt->mode) { switch (ctxt->mode) {
case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT32:
if ((msr_data & 0xfffc) == 0x0) { if ((msr_data & 0xfffc) == 0x0)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
break; break;
case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT64:
if (msr_data == 0x0) { if (msr_data == 0x0)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
break; break;
} }
...@@ -1722,10 +1704,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1722,10 +1704,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
/* inject #GP if in real mode or Virtual 8086 mode */ /* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL || if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86) { ctxt->mode == X86EMUL_MODE_VM86)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
setup_syscalls_segments(ctxt, ops, &cs, &ss); setup_syscalls_segments(ctxt, ops, &cs, &ss);
...@@ -1740,18 +1720,14 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1740,18 +1720,14 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
switch (usermode) { switch (usermode) {
case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT32:
cs_sel = (u16)(msr_data + 16); cs_sel = (u16)(msr_data + 16);
if ((msr_data & 0xfffc) == 0x0) { if ((msr_data & 0xfffc) == 0x0)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
ss_sel = (u16)(msr_data + 24); ss_sel = (u16)(msr_data + 24);
break; break;
case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32); cs_sel = (u16)(msr_data + 32);
if (msr_data == 0x0) { if (msr_data == 0x0)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
ss_sel = cs_sel + 8; ss_sel = cs_sel + 8;
cs.d = 0; cs.d = 0;
cs.l = 1; cs.l = 1;
...@@ -1982,10 +1958,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, ...@@ -1982,10 +1958,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c = &ctxt->decode; struct decode_cache *c = &ctxt->decode;
int ret; int ret;
if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) { if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
c->eip = tss->eip; c->eip = tss->eip;
ctxt->eflags = tss->eflags | 2; ctxt->eflags = tss->eflags | 2;
c->regs[VCPU_REGS_RAX] = tss->eax; c->regs[VCPU_REGS_RAX] = tss->eax;
...@@ -2107,10 +2081,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, ...@@ -2107,10 +2081,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
if (reason != TASK_SWITCH_IRET) { if (reason != TASK_SWITCH_IRET) {
if ((tss_selector & 3) > next_tss_desc.dpl || if ((tss_selector & 3) > next_tss_desc.dpl ||
ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) { ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
} }
desc_limit = desc_limit_scaled(&next_tss_desc); desc_limit = desc_limit_scaled(&next_tss_desc);
...@@ -2331,10 +2303,8 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt) ...@@ -2331,10 +2303,8 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
struct decode_cache *c = &ctxt->decode; struct decode_cache *c = &ctxt->decode;
u64 tsc = 0; u64 tsc = 0;
if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) { if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
return X86EMUL_PROPAGATE_FAULT;
}
ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc); ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
c->regs[VCPU_REGS_RAX] = (u32)tsc; c->regs[VCPU_REGS_RAX] = (u32)tsc;
c->regs[VCPU_REGS_RDX] = tsc >> 32; c->regs[VCPU_REGS_RDX] = tsc >> 32;
...@@ -2979,28 +2949,24 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -2979,28 +2949,24 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
ctxt->decode.mem_read.pos = 0; ctxt->decode.mem_read.pos = 0;
if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
emulate_ud(ctxt); rc = emulate_ud(ctxt);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} }
/* LOCK prefix is allowed only with some instructions */ /* LOCK prefix is allowed only with some instructions */
if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) { if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
emulate_ud(ctxt); rc = emulate_ud(ctxt);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} }
if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) { if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
emulate_ud(ctxt); rc = emulate_ud(ctxt);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} }
/* Privileged instruction can be executed only in CPL=0 */ /* Privileged instruction can be executed only in CPL=0 */
if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) { if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
emulate_gp(ctxt, 0); rc = emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} }
...@@ -3178,8 +3144,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -3178,8 +3144,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
break; break;
case 0x8c: /* mov r/m, sreg */ case 0x8c: /* mov r/m, sreg */
if (c->modrm_reg > VCPU_SREG_GS) { if (c->modrm_reg > VCPU_SREG_GS) {
emulate_ud(ctxt); rc = emulate_ud(ctxt);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} }
c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu); c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
...@@ -3194,8 +3159,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -3194,8 +3159,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
if (c->modrm_reg == VCPU_SREG_CS || if (c->modrm_reg == VCPU_SREG_CS ||
c->modrm_reg > VCPU_SREG_GS) { c->modrm_reg > VCPU_SREG_GS) {
emulate_ud(ctxt); rc = emulate_ud(ctxt);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} }
...@@ -3327,8 +3291,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -3327,8 +3291,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
do_io_in: do_io_in:
c->dst.bytes = min(c->dst.bytes, 4u); c->dst.bytes = min(c->dst.bytes, 4u);
if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
emulate_gp(ctxt, 0); rc = emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} }
if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
...@@ -3342,8 +3305,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -3342,8 +3305,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
c->src.bytes = min(c->src.bytes, 4u); c->src.bytes = min(c->src.bytes, 4u);
if (!emulator_io_permited(ctxt, ops, c->dst.val, if (!emulator_io_permited(ctxt, ops, c->dst.val,
c->src.bytes)) { c->src.bytes)) {
emulate_gp(ctxt, 0); rc = emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} }
ops->pio_out_emulated(c->src.bytes, c->dst.val, ops->pio_out_emulated(c->src.bytes, c->dst.val,
...@@ -3368,16 +3330,14 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -3368,16 +3330,14 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
break; break;
case 0xfa: /* cli */ case 0xfa: /* cli */
if (emulator_bad_iopl(ctxt, ops)) { if (emulator_bad_iopl(ctxt, ops)) {
emulate_gp(ctxt, 0); rc = emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} else } else
ctxt->eflags &= ~X86_EFLAGS_IF; ctxt->eflags &= ~X86_EFLAGS_IF;
break; break;
case 0xfb: /* sti */ case 0xfb: /* sti */
if (emulator_bad_iopl(ctxt, ops)) { if (emulator_bad_iopl(ctxt, ops)) {
emulate_gp(ctxt, 0); rc = emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done; goto done;
} else { } else {
ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment