Commit d7ee039e authored by Sean Christopherson, committed by Paolo Bonzini

KVM: vmx: move struct host_state usage to struct loaded_vmcs

Make host_state a property of a loaded_vmcs so that it can be
used as a cache of the VMCS fields, e.g. to lazily VMWRITE the
corresponding VMCS field.  Treating host_state as a cache does
not work if it is not VMCS-specific, as the cache would become
incoherent when switching between vmcs01 and vmcs02.
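
This is the check-before-VMWRITE pattern the per-VMCS cache enables,
sketched here from the vmx_vcpu_run() hunk below (identifiers as in
the patch):

	cr3 = __get_current_cr3_fast();
	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
		/* VMWRITE only when the value differs from what was
		 * last written to *this* VMCS; because the cache lives
		 * in the loaded_vmcs, a vmcs01 <-> vmcs02 switch cannot
		 * leave it describing fields of a different VMCS.
		 */
		vmcs_writel(HOST_CR3, cr3);
		vmx->loaded_vmcs->host_state.cr3 = cr3;
	}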

Move vmcs_host_cr3 and vmcs_host_cr4 into host_state.

Explicitly zero out host_state when allocating a new VMCS for a
loaded_vmcs.  Unlike the pre-existing vmcs_host_cr{3,4} usage,
the segment information is not guaranteed to be (re)initialized
when running a new nested VMCS, e.g. HOST_FS_BASE is not written
in vmx_set_constant_host_state().
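
Concretely, as in the alloc_loaded_vmcs() hunk below:

	/* Zero the cache up front so that a freshly allocated VMCS
	 * never inherits segment state cached for a previous VMCS.
	 */
	memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));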
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e920de85
@@ -218,6 +218,21 @@ struct vmcs {
        char data[0];
 };
 
+/*
+ * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
+ * and whose values change infrequently, but are not constant.  I.e. this is
+ * used as a write-through cache of the corresponding VMCS fields.
+ */
+struct vmcs_host_state {
+        unsigned long cr3;      /* May not match real cr3 */
+        unsigned long cr4;      /* May not match real cr4 */
+
+        u16           fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+        u16           ds_sel, es_sel;
+#endif
+};
+
 /*
  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
@@ -229,14 +244,13 @@ struct loaded_vmcs {
        int cpu;
        bool launched;
        bool nmi_known_unmasked;
-       unsigned long vmcs_host_cr3;    /* May not match real cr3 */
-       unsigned long vmcs_host_cr4;    /* May not match real cr4 */
        /* Support for vnmi-less CPUs */
        int soft_vnmi_blocked;
        ktime_t entry_time;
        s64 vnmi_blocked_time;
        unsigned long *msr_bitmap;
        struct list_head loaded_vmcss_on_cpu_link;
+       struct vmcs_host_state host_state;
 };
 
 struct shared_msr_entry {
@@ -819,12 +833,6 @@ struct vcpu_vmx {
                struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
        } msr_autoload;
 
-       struct {
-               u16 fs_sel, gs_sel, ldt_sel;
-#ifdef CONFIG_X86_64
-               u16 ds_sel, es_sel;
-#endif
-       } host_state;
        struct {
                int vm86_active;
                ulong save_rflags;
@@ -2662,6 +2670,7 @@ static unsigned long segment_base(u16 selector)
 static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct vmcs_host_state *host_state;
 #ifdef CONFIG_X86_64
        int cpu = raw_smp_processor_id();
 #endif
@@ -2673,16 +2682,17 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
                return;
 
        vmx->loaded_cpu_state = vmx->loaded_vmcs;
+       host_state = &vmx->loaded_cpu_state->host_state;
 
        /*
         * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
         * allow segment selectors with cpl > 0 or ti == 1.
         */
-       vmx->host_state.ldt_sel = kvm_read_ldt();
+       host_state->ldt_sel = kvm_read_ldt();
 
 #ifdef CONFIG_X86_64
-       savesegment(ds, vmx->host_state.ds_sel);
-       savesegment(es, vmx->host_state.es_sel);
+       savesegment(ds, host_state->ds_sel);
+       savesegment(es, host_state->es_sel);
 
        gs_base = cpu_kernelmode_gs_base(cpu);
        if (likely(is_64bit_mm(current->mm))) {
@@ -2707,12 +2717,12 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
        gs_base = segment_base(gs_sel);
 #endif
 
-       vmx->host_state.fs_sel = fs_sel;
+       host_state->fs_sel = fs_sel;
        if (!(fs_sel & 7))
                vmcs_write16(HOST_FS_SELECTOR, fs_sel);
        else
                vmcs_write16(HOST_FS_SELECTOR, 0);
-       vmx->host_state.gs_sel = gs_sel;
+       host_state->gs_sel = gs_sel;
        if (!(gs_sel & 7))
                vmcs_write16(HOST_GS_SELECTOR, gs_sel);
        else
@@ -2729,10 +2739,13 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 
 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 {
+       struct vmcs_host_state *host_state;
+
        if (!vmx->loaded_cpu_state)
                return;
 
        WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
+       host_state = &vmx->loaded_cpu_state->host_state;
 
        ++vmx->vcpu.stat.host_state_reload;
        vmx->loaded_cpu_state = NULL;
@@ -2741,20 +2754,20 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
        if (is_long_mode(&vmx->vcpu))
                rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
-       if (vmx->host_state.ldt_sel || (vmx->host_state.gs_sel & 7)) {
-               kvm_load_ldt(vmx->host_state.ldt_sel);
+       if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
+               kvm_load_ldt(host_state->ldt_sel);
 #ifdef CONFIG_X86_64
-               load_gs_index(vmx->host_state.gs_sel);
+               load_gs_index(host_state->gs_sel);
 #else
-               loadsegment(gs, vmx->host_state.gs_sel);
+               loadsegment(gs, host_state->gs_sel);
 #endif
        }
-       if (vmx->host_state.fs_sel & 7)
-               loadsegment(fs, vmx->host_state.fs_sel);
+       if (host_state->fs_sel & 7)
+               loadsegment(fs, host_state->fs_sel);
 #ifdef CONFIG_X86_64
-       if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
-               loadsegment(ds, vmx->host_state.ds_sel);
-               loadsegment(es, vmx->host_state.es_sel);
+       if (unlikely(host_state->ds_sel | host_state->es_sel)) {
+               loadsegment(ds, host_state->ds_sel);
+               loadsegment(es, host_state->es_sel);
        }
 #endif
        invalidate_tss_limit();
@@ -4574,6 +4587,9 @@ static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
                        evmcs->hv_enlightenments_control.msr_bitmap = 1;
                }
        }
+
+       memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
+
        return 0;
 
 out_vmcs:
@@ -6080,12 +6096,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
         */
        cr3 = __read_cr3();
        vmcs_writel(HOST_CR3, cr3);     /* 22.2.3  FIXME: shadow tables */
-       vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
+       vmx->loaded_vmcs->host_state.cr3 = cr3;
 
        /* Save the most likely value for this task's CR4 in the VMCS. */
        cr4 = cr4_read_shadow();
        vmcs_writel(HOST_CR4, cr4);     /* 22.2.3, 22.2.5 */
-       vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
+       vmx->loaded_vmcs->host_state.cr4 = cr4;
 
        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
@@ -10356,15 +10372,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
        cr3 = __get_current_cr3_fast();
-       if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
+       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
                vmcs_writel(HOST_CR3, cr3);
-               vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
+               vmx->loaded_vmcs->host_state.cr3 = cr3;
        }
 
        cr4 = cr4_read_shadow();
-       if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
+       if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
                vmcs_writel(HOST_CR4, cr4);
-               vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
+               vmx->loaded_vmcs->host_state.cr4 = cr4;
        }
 
        /* When single-stepping over STI and MOV SS, we must clear the