Commit 3dc4bc4f authored by Nadav Amit, committed by Paolo Bonzini

KVM: x86: JMP/CALL using call- or task-gate causes exception

The KVM emulator does not emulate JMP and CALL instructions that target a call
gate or a task gate.  This patch does not try to implement these scenarios, as
they are presumably rare; instead, it returns an X86EMUL_UNHANDLEABLE error in
such cases rather than generating an exception.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 16bebefe
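
For illustration, the guest-side control transfer that now hits the unhandleable
path is a far JMP (or CALL) whose selector names a call-gate descriptor.  A gate
is a system descriptor (S = 0), so __load_segment_descriptor() reaches the
"can't load system descriptor into segment selector" check with
transfer == X86_TRANSFER_CALL_JMP and bails out with X86EMUL_UNHANDLEABLE
instead of injecting an exception.  The sketch below is not part of the commit;
the struct and function names are invented for the example.

/*
 * Hypothetical guest snippet: an indirect far JMP through a call-gate
 * selector.  gate_sel is assumed to index a call-gate (system) descriptor;
 * the 32-bit offset is ignored for gate-based transfers.
 */
struct far_jmp_ptr {
	unsigned int offset;		/* ignored when selector names a gate */
	unsigned short selector;	/* call-gate selector (S bit clear) */
} __attribute__((packed));

static void jmp_via_call_gate(unsigned short gate_sel)
{
	struct far_jmp_ptr p = { .offset = 0, .selector = gate_sel };

	/* ljmpl *m16:32 - the far-JMP form the emulator may have to decode */
	asm volatile("ljmpl *%0" : : "m" (p));
}

When KVM emulates such an instruction, em_jmp_far() now propagates
X86EMUL_UNHANDLEABLE to its caller rather than raising a spurious #GP in the
guest; actually following the gate to its target code segment is left
unimplemented for this presumably rare case.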
@@ -263,6 +263,13 @@ struct instr_dual {
 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 #define EFLG_RESERVED_ONE_MASK 2
 
+enum x86_transfer_type {
+	X86_TRANSFER_NONE,
+	X86_TRANSFER_CALL_JMP,
+	X86_TRANSFER_RET,
+	X86_TRANSFER_TASK_SWITCH,
+};
+
 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
 {
 	if (!(ctxt->regs_valid & (1 << nr))) {
@@ -1472,7 +1479,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 /* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				     u16 selector, int seg, u8 cpl,
-				     bool in_task_switch,
+				     enum x86_transfer_type transfer,
 				     struct desc_struct *desc)
 {
 	struct desc_struct seg_desc, old_desc;
@@ -1526,11 +1533,15 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		return ret;
 
 	err_code = selector & 0xfffc;
-	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
+	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
+							   GP_VECTOR;
 
 	/* can't load system descriptor into segment selector */
-	if (seg <= VCPU_SREG_GS && !seg_desc.s)
+	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
+		if (transfer == X86_TRANSFER_CALL_JMP)
+			return X86EMUL_UNHANDLEABLE;
 		goto exception;
+	}
 
 	if (!seg_desc.p) {
 		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
@@ -1628,7 +1639,8 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		u16 selector, int seg)
 {
 	u8 cpl = ctxt->ops->cpl(ctxt);
-	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
+	return __load_segment_descriptor(ctxt, selector, seg, cpl,
+					 X86_TRANSFER_NONE, NULL);
 }
 
 static void write_register_operand(struct operand *op)
@@ -2040,7 +2052,8 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 
 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
 
-	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
+	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
+				       X86_TRANSFER_CALL_JMP,
 				       &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -2129,7 +2142,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 	/* Outer-privilege level return is not implemented */
 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
 		return X86EMUL_UNHANDLEABLE;
-	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
+	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
+				       X86_TRANSFER_RET,
 				       &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -2566,23 +2580,23 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	 * Now load segment descriptors. If fault happens at this stage
 	 * it is handled in a context of new task
 	 */
 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
@@ -2704,31 +2718,31 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * Now load segment descriptors. If fault happens at this stage
 	 * it is handled in a context of new task
 	 */
 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
-					cpl, true, NULL);
+					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
@@ -3010,8 +3024,8 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
 
 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
 
-	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
-				       &new_desc);
+	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
+				       X86_TRANSFER_CALL_JMP, &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;