Commit 5fdbf976 authored by Marcelo Tosatti, committed by Avi Kivity

KVM: x86: accessors for guest registers

As suggested by Avi, introduce accessors to read/write guest registers.
This simplifies the ->cache_regs/->decache_regs interface, and improves
register caching, which is important for VMX, where the cost of
vmcs_read/vmcs_write is significant.

[avi: fix warnings]
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent ca60dfbb
arch/x86/kvm/kvm_cache_regs.h (new file):

#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

#endif
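
The accessors replace the old bulk cache_regs()/decache_regs() bracketing with per-register, on-demand faulting: the first read of a stale register pulls it in through ->cache_reg() exactly once, and a write only marks it dirty for the backend to flush before the next guest entry. A minimal usage sketch (the handler and its insn_len parameter are hypothetical, not part of this commit):

/* Hypothetical exit handler illustrating the accessor protocol: the
 * first kvm_rip_read() invokes ->cache_reg() and caches the value in
 * vcpu->arch.regs[]; kvm_rip_write() sets the regs_dirty bit so the
 * vendor code writes RIP back before reentering the guest. */
static int example_skip_insn(struct kvm_vcpu *vcpu, int insn_len)
{
	unsigned long rip = kvm_rip_read(vcpu);

	kvm_rip_write(vcpu, rip + insn_len);
	return 1;
}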
arch/x86/kvm/lapic.c:

@@ -32,6 +32,7 @@
 #include <asm/current.h>
 #include <asm/apicdef.h>
 #include <asm/atomic.h>
+#include "kvm_cache_regs.h"
 #include "irq.h"
 #define PRId64 "d"
@@ -558,8 +559,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write)
 	struct kvm_run *run = vcpu->run;

 	set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
-	kvm_x86_ops->cache_regs(vcpu);
-	run->tpr_access.rip = vcpu->arch.rip;
+	run->tpr_access.rip = kvm_rip_read(vcpu);
 	run->tpr_access.is_write = write;
 }
...
arch/x86/kvm/svm.c:

@@ -18,6 +18,7 @@
 #include "kvm_svm.h"
 #include "irq.h"
 #include "mmu.h"
+#include "kvm_cache_regs.h"

 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -236,13 +237,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
 	}
-	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
-		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
-		       __func__,
-		       svm->vmcb->save.rip,
-		       svm->next_rip);
+	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
+		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
+		       __func__, kvm_rip_read(vcpu), svm->next_rip);

-	vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
+	kvm_rip_write(vcpu, svm->next_rip);
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 	vcpu->arch.interrupt_window_open = 1;
@@ -581,6 +580,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	save->dr7 = 0x400;
 	save->rflags = 2;
 	save->rip = 0x0000fff0;
+	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

 	/*
 	 * cr0 val on cpu init should be 0x60000010, we enable cpu
@@ -615,10 +615,12 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 	init_vmcb(svm);

 	if (vcpu->vcpu_id != 0) {
-		svm->vmcb->save.rip = 0;
+		kvm_rip_write(vcpu, 0);
 		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
 		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
 	}
+	vcpu->arch.regs_avail = ~0;
+	vcpu->arch.regs_dirty = ~0;

 	return 0;
 }
@@ -721,23 +723,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	rdtscll(vcpu->arch.host_tsc);
 }

-static void svm_cache_regs(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->arch.rip = svm->vmcb->save.rip;
-}
-
-static void svm_decache_regs(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-	svm->vmcb->save.rip = vcpu->arch.rip;
-}
-
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
 	return to_svm(vcpu)->vmcb->save.rflags;
@@ -1139,14 +1124,14 @@ static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 1;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
 	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_halt(&svm->vcpu);
 }

 static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 3;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
 	kvm_emulate_hypercall(&svm->vcpu);
 	return 1;
@@ -1178,7 +1163,7 @@ static int task_switch_interception(struct vcpu_svm *svm,
 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	svm->next_rip = svm->vmcb->save.rip + 2;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	kvm_emulate_cpuid(&svm->vcpu);
 	return 1;
 }
@@ -1273,9 +1258,9 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
 			    (u32)(data >> 32), handler);

-		svm->vmcb->save.rax = data & 0xffffffff;
+		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
 		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
-		svm->next_rip = svm->vmcb->save.rip + 2;
+		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
 	return 1;
@@ -1359,13 +1344,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
-	u64 data = (svm->vmcb->save.rax & -1u)
+	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
 		    handler);

-	svm->next_rip = svm->vmcb->save.rip + 2;
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else
@@ -1723,6 +1708,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u16 gs_selector;
 	u16 ldt_selector;

+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+
 	pre_svm_run(svm);

 	sync_lapic_to_cr8(vcpu);
@@ -1858,6 +1847,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		load_db_regs(svm->host_db_regs);

 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

 	write_dr6(svm->host_dr6);
 	write_dr7(svm->host_dr7);
@@ -1977,8 +1969,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_gdt = svm_set_gdt,
 	.get_dr = svm_get_dr,
 	.set_dr = svm_set_dr,
-	.cache_regs = svm_cache_regs,
-	.decache_regs = svm_decache_regs,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
...
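
As the hunks above show, SVM keeps eager semantics: RAX, RSP and RIP live in the VMCB, so svm_vcpu_run() now copies them explicitly in both directions around the guest run, and svm_vcpu_reset() sets regs_avail and regs_dirty to ~0, so kvm_register_read() never needs an SVM ->cache_reg() backend. A condensed sketch of the round trip (the wrapper function is illustrative only, not part of this commit):

/* Condensed from the svm_vcpu_run() hunks above: the three
 * VMCB-resident registers are synced eagerly; all other GPRs already
 * live in vcpu->arch.regs[]. */
static void example_svm_sync_regs(struct vcpu_svm *svm, struct kvm_vcpu *vcpu)
{
	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];	/* before VMRUN */
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/* ... guest runs, #VMEXIT ... */

	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;	/* after exit */
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
}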
arch/x86/kvm/vmx.c:

@@ -26,6 +26,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
+#include "kvm_cache_regs.h"

 #include <asm/io.h>
 #include <asm/desc.h>
@@ -715,9 +716,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	unsigned long rip;
 	u32 interruptibility;

-	rip = vmcs_readl(GUEST_RIP);
+	rip = kvm_rip_read(vcpu);
 	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-	vmcs_writel(GUEST_RIP, rip);
+	kvm_rip_write(vcpu, rip);

 	/*
 	 * We emulated an instruction, so temporary interrupt blocking
@@ -947,24 +948,19 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	return ret;
 }

-/*
- * Sync the rsp and rip registers into the vcpu structure. This allows
- * registers to be accessed by indexing vcpu->arch.regs.
- */
-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
-	vcpu->arch.rip = vmcs_readl(GUEST_RIP);
-}
-
-/*
- * Syncs rsp and rip back into the vmcs. Should be called after possible
- * modification.
- */
-static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
-	vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-	vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+	switch (reg) {
+	case VCPU_REGS_RSP:
+		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+		break;
+	case VCPU_REGS_RIP:
+		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
+		break;
+	default:
+		break;
+	}
 }

 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -2019,6 +2015,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	u64 msr;
 	int ret;

+	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
 	down_read(&vcpu->kvm->slots_lock);
 	if (!init_rmode(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
@@ -2072,10 +2069,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)

 	vmcs_writel(GUEST_RFLAGS, 0x02);
 	if (vmx->vcpu.vcpu_id == 0)
-		vmcs_writel(GUEST_RIP, 0xfff0);
+		kvm_rip_write(vcpu, 0xfff0);
 	else
-		vmcs_writel(GUEST_RIP, 0);
-	vmcs_writel(GUEST_RSP, 0);
+		kvm_rip_write(vcpu, 0);
+	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);

 	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
 	vmcs_writel(GUEST_DR7, 0x400);
@@ -2139,11 +2136,11 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 	if (vcpu->arch.rmode.active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = irq;
-		vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
+		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
 		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 			     irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
-		vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
+		kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
 		return;
 	}
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2288,7 +2285,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}

 	error_code = 0;
-	rip = vmcs_readl(GUEST_RIP);
+	rip = kvm_rip_read(vcpu);
 	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
 		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
 	if (is_page_fault(intr_info)) {
@@ -2386,27 +2383,25 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	reg = (exit_qualification >> 8) & 15;
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
-		KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
-			    (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
+		KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
+			    (u32)kvm_register_read(vcpu, reg),
+			    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
+			    handler);
 		switch (cr) {
 		case 0:
-			vcpu_load_rsp_rip(vcpu);
-			kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 3:
-			vcpu_load_rsp_rip(vcpu);
-			kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 4:
-			vcpu_load_rsp_rip(vcpu);
-			kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
-			vcpu_load_rsp_rip(vcpu);
-			kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
 			skip_emulated_instruction(vcpu);
 			if (irqchip_in_kernel(vcpu->kvm))
 				return 1;
@@ -2415,7 +2410,6 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		};
 		break;
 	case 2: /* clts */
-		vcpu_load_rsp_rip(vcpu);
 		vmx_fpu_deactivate(vcpu);
 		vcpu->arch.cr0 &= ~X86_CR0_TS;
 		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
@@ -2426,21 +2420,17 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
-			vcpu_load_rsp_rip(vcpu);
-			vcpu->arch.regs[reg] = vcpu->arch.cr3;
-			vcpu_put_rsp_rip(vcpu);
+			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
 			KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
-				    (u32)vcpu->arch.regs[reg],
-				    (u32)((u64)vcpu->arch.regs[reg] >> 32),
+				    (u32)kvm_register_read(vcpu, reg),
+				    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
 				    handler);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
-			vcpu_load_rsp_rip(vcpu);
-			vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
-			vcpu_put_rsp_rip(vcpu);
+			kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
 			KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
-				    (u32)vcpu->arch.regs[reg], handler);
+				    (u32)kvm_register_read(vcpu, reg), handler);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		}
@@ -2472,7 +2462,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	dr = exit_qualification & 7;
 	reg = (exit_qualification >> 8) & 15;
-	vcpu_load_rsp_rip(vcpu);
 	if (exit_qualification & 16) {
 		/* mov from dr */
 		switch (dr) {
@@ -2485,12 +2474,11 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		default:
 			val = 0;
 		}
-		vcpu->arch.regs[reg] = val;
+		kvm_register_write(vcpu, reg, val);
 		KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	} else {
 		/* mov to dr */
 	}
-	vcpu_put_rsp_rip(vcpu);
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
@@ -2735,8 +2723,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 vectoring_info = vmx->idt_vectoring_info;

-	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
-		    (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
+		    (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);

 	/* Access CR3 don't cause VMExit in paging mode, so we need
 	 * to sync with guest real CR3. */
@@ -2922,9 +2910,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 static void fixup_rmode_irq(struct vcpu_vmx *vmx)
 {
 	vmx->rmode.irq.pending = 0;
-	if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
+	if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
 		return;
-	vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
+	kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
 	if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
 		vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
 		vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
@@ -2941,6 +2929,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 intr_info;

+	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
 	/*
 	 * Loading guest fpu may have cleared host cr0.ts
 	 */
@@ -3061,6 +3054,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 	      );

+	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+	vcpu->arch.regs_dirty = 0;
+
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 	if (vmx->rmode.irq.pending)
 		fixup_rmode_irq(vmx);
@@ -3224,8 +3220,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
 	.set_gdt = vmx_set_gdt,
-	.cache_regs = vcpu_load_rsp_rip,
-	.decache_regs = vcpu_put_rsp_rip,
+	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
...
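
On VMX the caching pays off directly: before entry only a dirty RSP or RIP is flushed with vmcs_writel(), after every exit regs_avail is reset so that just the first RSP/RIP read pays for a vmcs_readl() through vmx_cache_reg(), and exits that never touch those registers incur no GPR-related VMCS accesses at all. A hypothetical handler showing the effect (not part of this commit):

/* Hypothetical VMX exit handler: the GPRs used by the emulation are
 * already in vcpu->arch.regs[], so GPR-related VMCS traffic shrinks to
 * one GUEST_RIP read (inside the first kvm_rip_read()) plus one
 * deferred GUEST_RIP write-back before the next guest entry. */
static int example_handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_emulate_cpuid(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}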
(the diff for one additional file is collapsed in this view)
arch/x86/kvm/x86_emulate.c:

@@ -26,6 +26,7 @@
 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
 #else
 #include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
 #define DPRINTF(x...) do {} while (0)
 #endif
 #include <linux/module.h>
@@ -839,7 +840,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	/* Shadow copy of register state. Committed on successful emulation. */
 	memset(c, 0, sizeof(struct decode_cache));
-	c->eip = ctxt->vcpu->arch.rip;
+	c->eip = kvm_rip_read(ctxt->vcpu);
 	ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
 	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
@@ -1267,7 +1268,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	if (c->rep_prefix && (c->d & String)) {
 		/* All REP prefixes have the same first termination condition */
 		if (c->regs[VCPU_REGS_RCX] == 0) {
-			ctxt->vcpu->arch.rip = c->eip;
+			kvm_rip_write(ctxt->vcpu, c->eip);
 			goto done;
 		}
 		/* The second termination condition only applies for REPE
@@ -1281,17 +1282,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		    (c->b == 0xae) || (c->b == 0xaf)) {
 			if ((c->rep_prefix == REPE_PREFIX) &&
 			    ((ctxt->eflags & EFLG_ZF) == 0)) {
-				ctxt->vcpu->arch.rip = c->eip;
+				kvm_rip_write(ctxt->vcpu, c->eip);
 				goto done;
 			}
 			if ((c->rep_prefix == REPNE_PREFIX) &&
 			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
-				ctxt->vcpu->arch.rip = c->eip;
+				kvm_rip_write(ctxt->vcpu, c->eip);
 				goto done;
 			}
 		}
 		c->regs[VCPU_REGS_RCX]--;
-		c->eip = ctxt->vcpu->arch.rip;
+		c->eip = kvm_rip_read(ctxt->vcpu);
 	}

 	if (c->src.type == OP_MEM) {
@@ -1768,7 +1769,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	/* Commit shadow register state. */
 	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
-	ctxt->vcpu->arch.rip = c->eip;
+	kvm_rip_write(ctxt->vcpu, c->eip);

 done:
 	if (rc == X86EMUL_UNHANDLEABLE) {
@@ -1793,7 +1794,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 			goto done;
 		/* Let the processor re-execute the fixed hypercall */
-		c->eip = ctxt->vcpu->arch.rip;
+		c->eip = kvm_rip_read(ctxt->vcpu);
 		/* Disable writeback. */
 		c->dst.type = OP_NONE;
 		break;
@@ -1889,7 +1890,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
 			kvm_inject_gp(ctxt->vcpu, 0);
-			c->eip = ctxt->vcpu->arch.rip;
+			c->eip = kvm_rip_read(ctxt->vcpu);
 		}
 		rc = X86EMUL_CONTINUE;
 		c->dst.type = OP_NONE;
@@ -1899,7 +1900,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
 			kvm_inject_gp(ctxt->vcpu, 0);
-			c->eip = ctxt->vcpu->arch.rip;
+			c->eip = kvm_rip_read(ctxt->vcpu);
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
 			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
...
include/asm-x86/kvm_host.h:

@@ -89,7 +89,7 @@ extern struct list_head vm_list;
 struct kvm_vcpu;
 struct kvm;

-enum {
+enum kvm_reg {
 	VCPU_REGS_RAX = 0,
 	VCPU_REGS_RCX = 1,
 	VCPU_REGS_RDX = 2,
@@ -108,6 +108,7 @@ enum {
 	VCPU_REGS_R14 = 14,
 	VCPU_REGS_R15 = 15,
 #endif
+	VCPU_REGS_RIP,
 	NR_VCPU_REGS
 };
@@ -219,8 +220,13 @@ struct kvm_vcpu_arch {
 	int interrupt_window_open;
 	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
 	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
-	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
-	unsigned long rip; /* needs vcpu_load_rsp_rip() */
+	/*
+	 * rip and regs accesses must go through
+	 * kvm_{register,rip}_{read,write} functions.
+	 */
+	unsigned long regs[NR_VCPU_REGS];
+	u32 regs_avail;
+	u32 regs_dirty;

 	unsigned long cr0;
 	unsigned long cr2;
@@ -414,8 +420,7 @@ struct kvm_x86_ops {
 	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
 	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 		       int *exception);
-	void (*cache_regs)(struct kvm_vcpu *vcpu);
-	void (*decache_regs)(struct kvm_vcpu *vcpu);
+	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
...
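
The semantics of the two new masks follow from the accessors: regs_avail bit N means vcpu->arch.regs[N] mirrors the hardware state, regs_dirty bit N means it was modified and must be flushed before the next guest entry; a register write sets both, while a vendor ->cache_reg() sets only regs_avail. A hypothetical helper making the dirty test explicit (not part of this commit):

/* Hypothetical helper: the u32 masks are accessed through the same
 * (unsigned long *) casts the accessors use. */
static inline bool kvm_register_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}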