Commit 7b105ca2 authored by Takuya Yoshikawa, committed by Avi Kivity

KVM: x86 emulator: Stop passing ctxt->ops as arg of emul functions

Dereference it in the actual users.

This not only cleans up the emulator but also makes it easy to convert
the old emulation functions to the new em_xxx() form later.

Note: Remove some inline keywords to let the compiler decide inlining.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent ef5d75cc
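The whole patch applies one mechanical pattern: drop the struct x86_emulate_ops *ops parameter, let each helper reach the callback table through ctxt->ops, and cache it in a local ops variable only in functions that use it repeatedly. The sketch below illustrates that pattern in isolation; the struct layouts and the helper names (seg_base_old, seg_base_new, fs_base_if_cpl0) are simplified stand-ins for illustration, not the real KVM definitions.

/* Simplified stand-in types; the real definitions live in the KVM
 * emulator headers and are much larger. */
struct x86_emulate_ctxt;

struct x86_emulate_ops {
	unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
						 int seg);
	int (*cpl)(struct x86_emulate_ctxt *ctxt);
};

struct x86_emulate_ctxt {
	struct x86_emulate_ops *ops;
	int mode;
};

/* Before: every caller had to pass ops even though it is always ctxt->ops. */
static unsigned long seg_base_old(struct x86_emulate_ctxt *ctxt,
				  struct x86_emulate_ops *ops, int seg)
{
	return ops->get_cached_segment_base(ctxt, seg);
}

/* After: the helper dereferences ctxt->ops itself, so its signature
 * matches the em_xxx(ctxt) convention used by the opcode table. */
static unsigned long seg_base_new(struct x86_emulate_ctxt *ctxt, int seg)
{
	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

/* Helpers that hit the callback table repeatedly cache it once at the
 * top, as the syscall and task-switch paths do in the patch below. */
static unsigned long fs_base_if_cpl0(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;

	if (ops->cpl(ctxt) != 0)
		return 0;
	return ops->get_cached_segment_base(ctxt, 4 /* VCPU_SREG_FS */);
}

With the extra parameter gone, these helpers can later be renamed and wired straight into the em_xxx() opcode handler table, which is the follow-up conversion the commit message refers to.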
@@ -373,6 +373,5 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code);
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
-		     struct x86_emulate_ops *ops, int irq);
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
@@ -475,13 +475,12 @@ static void set_seg_override(struct decode_cache *c, int seg)
	c->seg_override = seg;
 }
-static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
-			      struct x86_emulate_ops *ops, int seg)
+static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 {
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;
-	return ops->get_cached_segment_base(ctxt, seg);
+	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 }
 static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
@@ -570,7 +569,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
	u16 sel;
	unsigned cpl, rpl;
-	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		break;
@@ -1052,7 +1051,6 @@ static void fetch_bit_operand(struct decode_cache *c)
 }
 static int read_emulated(struct x86_emulate_ctxt *ctxt,
-			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
 {
	int rc;
@@ -1064,8 +1062,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
		if (mc->pos < mc->end)
			goto read_cached;
-		rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
-					&ctxt->exception);
+		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
+					      &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;
@@ -1090,7 +1088,7 @@ static int segmented_read(struct x86_emulate_ctxt *ctxt,
	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
-	return read_emulated(ctxt, ctxt->ops, linear, data, size);
+	return read_emulated(ctxt, linear, data, size);
 }
 static int segmented_write(struct x86_emulate_ctxt *ctxt,
@@ -1124,7 +1122,6 @@ static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
 }
 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
-			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
 {
@@ -1143,7 +1140,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
-		if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n))
+		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}
@@ -1154,9 +1151,10 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 }
 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
-				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
+
	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;
@@ -1173,48 +1171,42 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 /* allowed just for 8 bytes segments */
 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
 {
	struct desc_ptr dt;
	u16 index = selector >> 3;
-	int ret;
	ulong addr;
-	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+	get_descriptor_table_ptr(ctxt, selector, &dt);
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
-	addr = dt.address + index * 8;
-	ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
-	return ret;
+	addr = dt.address + index * 8;
+	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+				   &ctxt->exception);
 }
 /* allowed just for 8 bytes segments */
 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
 {
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
-	int ret;
-	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+	get_descriptor_table_ptr(ctxt, selector, &dt);
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
	addr = dt.address + index * 8;
-	ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
-	return ret;
+	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+				    &ctxt->exception);
 }
 /* Does not support long mode */
 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
 {
	struct desc_struct seg_desc;
@@ -1249,7 +1241,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
	if (null_selector) /* for NULL selector skip all following checks */
		goto load;
-	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
@@ -1267,7 +1259,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
	rpl = selector & 3;
	dpl = seg_desc.dpl;
-	cpl = ops->cpl(ctxt);
+	cpl = ctxt->ops->cpl(ctxt);
	switch (seg) {
	case VCPU_SREG_SS:
@@ -1318,12 +1310,12 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
-		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
 load:
-	ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
+	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	return X86EMUL_CONTINUE;
 exception:
	emulate_exception(ctxt, err_vec, err_code, true);
@@ -1424,13 +1416,12 @@ static int em_pop(struct x86_emulate_ctxt *ctxt)
 }
 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
-			struct x86_emulate_ops *ops,
-			void *dest, int len)
+			void *dest, int len)
 {
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	int cpl = ops->cpl(ctxt);
+	int cpl = ctxt->ops->cpl(ctxt);
	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
@@ -1471,11 +1462,10 @@ static int em_popf(struct x86_emulate_ctxt *ctxt)
	c->dst.type = OP_REG;
	c->dst.addr.reg = &ctxt->eflags;
	c->dst.bytes = c->op_bytes;
-	return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
+	return emulate_popf(ctxt, &c->dst.val, c->op_bytes);
 }
-static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops, int seg)
+static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
 {
	struct decode_cache *c = &ctxt->decode;
@@ -1484,8 +1474,7 @@ static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
	return em_push(ctxt);
 }
-static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
-			    struct x86_emulate_ops *ops, int seg)
+static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
 {
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
@@ -1495,7 +1484,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
	if (rc != X86EMUL_CONTINUE)
		return rc;
-	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
+	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
 }
@@ -1549,10 +1538,10 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
	return rc;
 }
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
-		     struct x86_emulate_ops *ops, int irq)
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
 {
	struct decode_cache *c = &ctxt->decode;
+	struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
@@ -1590,7 +1579,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
	if (rc != X86EMUL_CONTINUE)
		return rc;
-	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;
@@ -1599,12 +1588,11 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
	return rc;
 }
-static int emulate_int(struct x86_emulate_ctxt *ctxt,
-		       struct x86_emulate_ops *ops, int irq)
+static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
 {
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
-		return emulate_int_real(ctxt, ops, irq);
+		return emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
@@ -1615,8 +1603,7 @@ static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
	}
 }
-static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
 {
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
@@ -1648,7 +1635,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
	if (rc != X86EMUL_CONTINUE)
		return rc;
-	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;
@@ -1669,12 +1656,11 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
	return rc;
 }
-static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
-			       struct x86_emulate_ops* ops)
+static int emulate_iret(struct x86_emulate_ctxt *ctxt)
 {
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
-		return emulate_iret_real(ctxt, ops);
+		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
@@ -1693,7 +1679,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
-	rc = load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;
@@ -1830,8 +1816,7 @@ static int em_grp9(struct x86_emulate_ctxt *ctxt)
	return X86EMUL_CONTINUE;
 }
-static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
-			   struct x86_emulate_ops *ops)
+static int emulate_ret_far(struct x86_emulate_ctxt *ctxt)
 {
	struct decode_cache *c = &ctxt->decode;
	int rc;
@@ -1845,12 +1830,11 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt)
	rc = emulate_pop(ctxt, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
-	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	return rc;
 }
-static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops, int seg)
+static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
 {
	struct decode_cache *c = &ctxt->decode;
	unsigned short sel;
@@ -1858,7 +1842,7 @@ static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
-	rc = load_segment_descriptor(ctxt, ops, sel, seg);
+	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;
@@ -1866,15 +1850,14 @@ static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
	return rc;
 }
-static inline void
+static void
 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
-			struct x86_emulate_ops *ops, struct desc_struct *cs,
-			struct desc_struct *ss)
+			struct desc_struct *cs, struct desc_struct *ss)
 {
	u16 selector;
	memset(cs, 0, sizeof(struct desc_struct));
-	ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
+	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));
	cs->l = 0; /* will be adjusted later */
@@ -1897,10 +1880,10 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
	ss->p = 1;
 }
-static int
-emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int emulate_syscall(struct x86_emulate_ctxt *ctxt)
 {
	struct decode_cache *c = &ctxt->decode;
+	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
@@ -1912,7 +1895,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
		return emulate_ud(ctxt);
	ops->get_msr(ctxt, MSR_EFER, &efer);
-	setup_syscalls_segments(ctxt, ops, &cs, &ss);
+	setup_syscalls_segments(ctxt, &cs, &ss);
	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
@@ -1950,16 +1933,16 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
	return X86EMUL_CONTINUE;
 }
-static int
-emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int emulate_sysenter(struct x86_emulate_ctxt *ctxt)
 {
	struct decode_cache *c = &ctxt->decode;
+	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;
-	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);
@@ -1970,7 +1953,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);
-	setup_syscalls_segments(ctxt, ops, &cs, &ss);
+	setup_syscalls_segments(ctxt, &cs, &ss);
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
@@ -2006,10 +1989,10 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
	return X86EMUL_CONTINUE;
 }
-static int
-emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int emulate_sysexit(struct x86_emulate_ctxt *ctxt)
 {
	struct decode_cache *c = &ctxt->decode;
+	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
@@ -2020,7 +2003,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);
-	setup_syscalls_segments(ctxt, ops, &cs, &ss);
+	setup_syscalls_segments(ctxt, &cs, &ss);
	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
@@ -2058,8 +2041,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
	return X86EMUL_CONTINUE;
 }
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
-			      struct x86_emulate_ops *ops)
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
 {
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
@@ -2067,13 +2049,13 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	return ops->cpl(ctxt) > iopl;
+	return ctxt->ops->cpl(ctxt) > iopl;
 }
 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
-					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
@@ -2104,14 +2086,13 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 }
 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
-				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
 {
	if (ctxt->perm_ok)
		return true;
-	if (emulator_bad_iopl(ctxt, ops))
-		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
+	if (emulator_bad_iopl(ctxt))
+		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;
	ctxt->perm_ok = true;
@@ -2120,7 +2101,6 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
 }
 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
 {
	struct decode_cache *c = &ctxt->decode;
@@ -2144,7 +2124,6 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
 }
 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
-				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
 {
	struct decode_cache *c = &ctxt->decode;
@@ -2175,19 +2154,19 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
	 * Now load segment descriptors. If fault happenes at this stage
	 * it is handled in a context of new task
	 */
-	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
@@ -2195,10 +2174,10 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 }
 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
-			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
@@ -2209,7 +2188,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
		/* FIXME: need to provide precise fault address */
		return ret;
-	save_state_to_tss16(ctxt, ops, &tss_seg);
+	save_state_to_tss16(ctxt, &tss_seg);
	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
@@ -2235,16 +2214,15 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
		return ret;
	}
-	return load_state_from_tss16(ctxt, ops, &tss_seg);
+	return load_state_from_tss16(ctxt, &tss_seg);
 }
 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
 {
	struct decode_cache *c = &ctxt->decode;
-	tss->cr3 = ops->get_cr(ctxt, 3);
+	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
@@ -2266,13 +2244,12 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
 }
 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
-				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
 {
	struct decode_cache *c = &ctxt->decode;
	int ret;
-	if (ops->set_cr(ctxt, 3, tss->cr3))
+	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
@@ -2301,25 +2278,25 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
	 * Now load segment descriptors. If fault happenes at this stage
	 * it is handled in a context of new task
	 */
-	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
@@ -2327,10 +2304,10 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 }
 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
-			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
@@ -2341,7 +2318,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
		/* FIXME: need to provide precise fault address */
		return ret;
-	save_state_to_tss32(ctxt, ops, &tss_seg);
+	save_state_to_tss32(ctxt, &tss_seg);
	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
@@ -2367,14 +2344,14 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
		return ret;
	}
-	return load_state_from_tss32(ctxt, ops, &tss_seg);
+	return load_state_from_tss32(ctxt, &tss_seg);
 }
 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
-				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
@@ -2384,10 +2361,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
	/* FIXME: old_tss_base == ~0 ? */
-	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
-	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
@@ -2409,8 +2386,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
-		write_segment_descriptor(ctxt, ops, old_tss_sel,
-					 &curr_tss_desc);
+		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}
	if (reason == TASK_SWITCH_IRET)
@@ -2422,10 +2398,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
		old_tss_sel = 0xffff;
	if (next_tss_desc.type & 8)
-		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
+		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
-		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
+		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
@@ -2435,8 +2411,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
-		write_segment_descriptor(ctxt, ops, tss_selector,
-					 &next_tss_desc);
+		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}
	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
@@ -2458,14 +2433,13 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc;
	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;
-	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+	rc = emulator_do_task_switch(ctxt, tss_selector, reason,
				     has_error_code, error_code);
	if (rc == X86EMUL_CONTINUE)
@@ -2535,7 +2509,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
	old_eip = c->eip;
	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
-	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
+	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;
	c->eip = 0;
@@ -2973,7 +2947,7 @@ static int check_perm_in(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	c->dst.bytes = min(c->dst.bytes, 4u);
-	if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes))
+	if (!emulator_io_permited(ctxt, c->src.val, c->dst.bytes))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
@@ -2984,7 +2958,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	c->src.bytes = min(c->src.bytes, 4u);
-	if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes))
+	if (!emulator_io_permited(ctxt, c->dst.val, c->src.bytes))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
@@ -3724,8 +3698,7 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
	return false;
 }
-int
-x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
@@ -3854,25 +3827,25 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
	switch (c->b) {
	case 0x06: /* push es */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
		break;
	case 0x07: /* pop es */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
		break;
	case 0x0e: /* push cs */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
		break;
	case 0x16: /* push ss */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
		break;
	case 0x17: /* pop ss */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
		break;
	case 0x1e: /* push ds */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
		break;
	case 0x1f: /* pop ds */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
@@ -3938,7 +3911,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
-		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
+		rc = load_segment_descriptor(ctxt, sel, c->modrm_reg);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
@@ -3969,13 +3942,13 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
		rc = em_pop(ctxt);
		break;
	case 0xc4: /* les */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
		break;
	case 0xc5: /* lds */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
		break;
	case 0xcb: /* ret far */
-		rc = emulate_ret_far(ctxt, ops);
+		rc = emulate_ret_far(ctxt);
		break;
	case 0xcc: /* int3 */
		irq = 3;
@@ -3983,7 +3956,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
	case 0xcd: /* int n */
		irq = c->src.val;
	do_interrupt:
-		rc = emulate_int(ctxt, ops, irq);
+		rc = emulate_int(ctxt, irq);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & EFLG_OF) {
@@ -3992,7 +3965,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
		}
		break;
	case 0xcf: /* iret */
-		rc = emulate_iret(ctxt, ops);
+		rc = emulate_iret(ctxt);
		break;
	case 0xd0 ... 0xd1: /* Grp2 */
		rc = em_grp2(ctxt);
@@ -4037,7 +4010,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
	do_io_in:
-		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
+		if (!pio_in_emulated(ctxt, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
@@ -4065,14 +4038,14 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa: /* cli */
-		if (emulator_bad_iopl(ctxt, ops)) {
+		if (emulator_bad_iopl(ctxt)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb: /* sti */
-		if (emulator_bad_iopl(ctxt, ops)) {
+		if (emulator_bad_iopl(ctxt)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else {
@@ -4154,7 +4127,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 twobyte_insn:
	switch (c->b) {
	case 0x05: /* syscall */
-		rc = emulate_syscall(ctxt, ops);
+		rc = emulate_syscall(ctxt);
		break;
	case 0x06:
		rc = em_clts(ctxt);
@@ -4216,10 +4189,10 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
		rc = X86EMUL_CONTINUE;
		break;
	case 0x34: /* sysenter */
-		rc = emulate_sysenter(ctxt, ops);
+		rc = emulate_sysenter(ctxt);
		break;
	case 0x35: /* sysexit */
-		rc = emulate_sysexit(ctxt, ops);
+		rc = emulate_sysexit(ctxt);
		break;
	case 0x40 ... 0x4f: /* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
@@ -4234,10 +4207,10 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
	case 0xa0: /* push fs */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
		break;
	case 0xa1: /* pop fs */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
		break;
	case 0xa3:
	bt: /* bt */
@@ -4251,10 +4224,10 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8: /* push gs */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
		break;
	case 0xa9: /* pop gs */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
		break;
	case 0xab:
	bts: /* bts */
@@ -4284,17 +4257,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
		}
		break;
	case 0xb2: /* lss */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
		break;
	case 0xb3:
	btr: /* btr */
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb4: /* lfs */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
		break;
	case 0xb5: /* lgs */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		c->dst.bytes = c->op_bytes;
@@ -4513,7 +4513,7 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
	vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
	vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip +
								 inc_eip;
-	ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
+	ret = emulate_int_real(&vcpu->arch.emulate_ctxt, irq);
	if (ret != X86EMUL_CONTINUE)
		return EMULATE_FAIL;