Commit 2356aaeb authored by Paolo Bonzini

KVM: x86: use new CS.RPL as CPL during task switch

During task switch, all of CS.DPL, CS.RPL, SS.DPL must match (in addition
to all the other requirements) and will be the new CPL.  So far this
worked by carefully setting the CS selector and flag before doing the
task switch; setting CS.selector will already change the CPL.

However, this will not work once we get the CPL from SS.DPL, because
then you will have to set the full segment descriptor cache to change
the CPL.  ctxt->ops->cpl(ctxt) will then return the old CPL during the
task switch, and the check that SS.DPL == CPL will fail.

Temporarily assume that the CPL comes from CS.RPL during task switch
to a protected-mode task.  This is the same approach used in QEMU's
emulation code, which (until version 2.0) manually tracks the CPL.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent afa538f0
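
The rule the patch adopts is easy to state: during the task switch, the new CPL is 3 if the incoming task runs in VM86 mode, otherwise it is the RPL of the incoming CS selector (selector & 3), rather than whatever ctxt->ops->cpl() still reports for the outgoing task. A minimal standalone sketch of that choice (plain C with a hypothetical helper name, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define X86_EFLAGS_VM (1u << 17)       /* VM86 flag in EFLAGS */

/* Derive the CPL of the incoming task from its saved EFLAGS and CS selector. */
static uint8_t task_switch_cpl(uint32_t new_eflags, uint16_t new_cs)
{
        if (new_eflags & X86_EFLAGS_VM)
                return 3;              /* VM86 tasks always run at CPL 3 */
        return new_cs & 3;             /* protected mode: CPL = CS.RPL */
}

int main(void)
{
        /* hypothetical TSS values, just for demonstration */
        printf("%u\n", (unsigned)task_switch_cpl(0x00000202, 0x0010)); /* kernel CS: 0 */
        printf("%u\n", (unsigned)task_switch_cpl(0x00000202, 0x0023)); /* user CS:   3 */
        printf("%u\n", (unsigned)task_switch_cpl(0x00020202, 0x0000)); /* VM86:      3 */
        return 0;
}

In the patch below this value is computed once per TSS load and threaded through the new __load_segment_descriptor() helper, so the segment checks no longer depend on the old CPL.
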
@@ -1410,11 +1410,11 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 }
 
 /* Does not support long mode */
-static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-                                   u16 selector, int seg)
+static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+                                     u16 selector, int seg, u8 cpl)
 {
         struct desc_struct seg_desc, old_desc;
-        u8 dpl, rpl, cpl;
+        u8 dpl, rpl;
         unsigned err_vec = GP_VECTOR;
         u32 err_code = 0;
         bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
@@ -1442,7 +1442,6 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
         }
 
         rpl = selector & 3;
-        cpl = ctxt->ops->cpl(ctxt);
 
         /* NULL selector is not valid for TR, CS and SS (except for long mode) */
         if ((seg == VCPU_SREG_CS
@@ -1544,6 +1543,13 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
         return X86EMUL_PROPAGATE_FAULT;
 }
 
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+                                   u16 selector, int seg)
+{
+        u8 cpl = ctxt->ops->cpl(ctxt);
+        return __load_segment_descriptor(ctxt, selector, seg, cpl);
+}
+
 static void write_register_operand(struct operand *op)
 {
         /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
@@ -2405,6 +2411,7 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
                          struct tss_segment_16 *tss)
 {
         int ret;
+        u8 cpl;
 
         ctxt->_eip = tss->ip;
         ctxt->eflags = tss->flag | 2;
@@ -2427,23 +2434,25 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
         set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
         set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
 
+        cpl = tss->cs & 3;
+
         /*
          * Now load segment descriptors. If fault happens at this stage
          * it is handled in a context of new task
          */
-        ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
+        ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
+        ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
+        ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
+        ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
+        ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
 
@@ -2521,6 +2530,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
                          struct tss_segment_32 *tss)
 {
         int ret;
+        u8 cpl;
 
         if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
                 return emulate_gp(ctxt, 0);
@@ -2539,7 +2549,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 
         /*
          * SDM says that segment selectors are loaded before segment
-         * descriptors
+         * descriptors. This is important because CPL checks will
+         * use CS.RPL.
          */
         set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
         set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
@@ -2553,43 +2564,38 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
          * If we're switching between Protected Mode and VM86, we need to make
          * sure to update the mode before loading the segment descriptors so
          * that the selectors are interpreted correctly.
-         *
-         * Need to get rflags to the vcpu struct immediately because it
-         * influences the CPL which is checked at least when loading the segment
-         * descriptors and when pushing an error code to the new kernel stack.
-         *
-         * TODO Introduce a separate ctxt->ops->set_cpl callback
          */
-        if (ctxt->eflags & X86_EFLAGS_VM)
+        if (ctxt->eflags & X86_EFLAGS_VM) {
                 ctxt->mode = X86EMUL_MODE_VM86;
-        else
+                cpl = 3;
+        } else {
                 ctxt->mode = X86EMUL_MODE_PROT32;
-
-        ctxt->ops->set_rflags(ctxt, ctxt->eflags);
+                cpl = tss->cs & 3;
+        }
 
         /*
          * Now load segment descriptors. If fault happenes at this stage
          * it is handled in a context of new task
          */
-        ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+        ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
+        ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
+        ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
+        ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
+        ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
+        ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
-        ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
+        ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
         if (ret != X86EMUL_CONTINUE)
                 return ret;
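
For context on the failure mode described in the commit message: when SS is loaded, both its RPL and its descriptor's DPL must equal the CPL performing the load. A rough sketch of that check, with hypothetical names (not the kernel's actual __load_segment_descriptor), showing why it must see the incoming task's CPL rather than the stale one from ctxt->ops->cpl():

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* SS is only loadable when SS.RPL and SS.DPL both equal the current CPL. */
static bool ss_load_ok(uint16_t ss_selector, uint8_t ss_dpl, uint8_t cpl)
{
        uint8_t rpl = ss_selector & 3;

        return rpl == cpl && ss_dpl == cpl;
}

int main(void)
{
        /* Incoming user task: SS selector 0x2b (RPL=3), descriptor DPL=3. */
        assert(ss_load_ok(0x002b, 3, 3));   /* passes with the new CPL (CS.RPL = 3) */
        assert(!ss_load_ok(0x002b, 3, 0));  /* would fault if checked against the old CPL 0 */
        return 0;
}
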