Commit b464f57e authored by Paolo Bonzini

KVM: VMX: simplify vmx_prepare_switch_to_{guest,host}

vmx->loaded_cpu_state can only be NULL or equal to vmx->loaded_vmcs,
so change it to a bool.  Because the direction of the bool is
now the opposite of vmx->guest_msrs_dirty, change the direction of
vmx->guest_msrs_dirty so that they match.

Finally, do not imply that MSRs have to be reloaded when
vmx->guest_state_loaded is false; instead, set vmx->guest_msrs_ready
to false explicitly in vmx_prepare_switch_to_host.

Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4d6c9892
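
To make the new two-flag protocol easier to follow outside the full sources, the sketch below models it as a small standalone C program. All names here (struct vmx_model, prepare_switch_to_guest, prepare_switch_to_host, setup_msrs) are simplified, hypothetical stand-ins for the real KVM structures and functions; only the flag logic mirrors the patch.

/*
 * Minimal model of the two bools after this patch. Everything except
 * the flag transitions is elided or replaced with comments.
 */
#include <stdbool.h>

struct vmx_model {
	bool guest_state_loaded;	/* host state saved, guest values live */
	bool guest_msrs_ready;		/* shared MSRs hold guest values */
};

static void prepare_switch_to_guest(struct vmx_model *vmx)
{
	if (!vmx->guest_msrs_ready) {
		vmx->guest_msrs_ready = true;
		/* ...kvm_set_shared_msr() loop runs here... */
	}
	if (vmx->guest_state_loaded)
		return;			/* segment/base state already saved */
	/* ...save host FS/GS state into loaded_vmcs->host_state... */
	vmx->guest_state_loaded = true;
}

static void prepare_switch_to_host(struct vmx_model *vmx)
{
	if (!vmx->guest_state_loaded)
		return;
	/* ...restore host state from loaded_vmcs->host_state... */
	vmx->guest_state_loaded = false;
	vmx->guest_msrs_ready = false;	/* explicit, per this patch */
}

static void setup_msrs(struct vmx_model *vmx)
{
	/* Changing the MSR list invalidates only the loaded MSR values. */
	vmx->guest_msrs_ready = false;
}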
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1057,20 +1057,18 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	 * when guest state is loaded. This happens when guest transitions
 	 * to/from long-mode by setting MSR_EFER.LMA.
 	 */
-	if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
-		vmx->guest_msrs_dirty = false;
+	if (!vmx->guest_msrs_ready) {
+		vmx->guest_msrs_ready = true;
 		for (i = 0; i < vmx->save_nmsrs; ++i)
 			kvm_set_shared_msr(vmx->guest_msrs[i].index,
 					   vmx->guest_msrs[i].data,
 					   vmx->guest_msrs[i].mask);
 	}
 
-	if (vmx->loaded_cpu_state)
+	if (vmx->guest_state_loaded)
 		return;
 
-	vmx->loaded_cpu_state = vmx->loaded_vmcs;
-	host_state = &vmx->loaded_cpu_state->host_state;
+	host_state = &vmx->loaded_vmcs->host_state;
 
 	/*
 	 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
@@ -1126,20 +1124,20 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		vmcs_writel(HOST_GS_BASE, gs_base);
 		host_state->gs_base = gs_base;
 	}
+
+	vmx->guest_state_loaded = true;
 }
 
 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 {
 	struct vmcs_host_state *host_state;
 
-	if (!vmx->loaded_cpu_state)
+	if (!vmx->guest_state_loaded)
 		return;
 
-	WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
-	host_state = &vmx->loaded_cpu_state->host_state;
+	host_state = &vmx->loaded_vmcs->host_state;
 
 	++vmx->vcpu.stat.host_state_reload;
-	vmx->loaded_cpu_state = NULL;
 
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
@@ -1165,13 +1163,15 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
 	load_fixmap_gdt(raw_smp_processor_id());
+	vmx->guest_state_loaded = false;
+	vmx->guest_msrs_ready = false;
 }
 
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
 	preempt_disable();
-	if (vmx->loaded_cpu_state)
+	if (vmx->guest_state_loaded)
 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 	preempt_enable();
 	return vmx->msr_guest_kernel_gs_base;
@@ -1180,7 +1180,7 @@ static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
 	preempt_disable();
-	if (vmx->loaded_cpu_state)
+	if (vmx->guest_state_loaded)
 		wrmsrl(MSR_KERNEL_GS_BASE, data);
 	preempt_enable();
 	vmx->msr_guest_kernel_gs_base = data;
@@ -1583,7 +1583,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		move_msr_up(vmx, index, save_nmsrs++);
 
 	vmx->save_nmsrs = save_nmsrs;
-	vmx->guest_msrs_dirty = true;
+	vmx->guest_msrs_ready = false;
 
 	if (cpu_has_vmx_msr_bitmap())
 		vmx_update_msr_bitmap(&vmx->vcpu);
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -187,13 +187,23 @@ struct vcpu_vmx {
 	struct kvm_vcpu       vcpu;
 	u8                    fail;
 	u8                    msr_bitmap_mode;
+
+	/*
+	 * If true, host state has been stored in vmx->loaded_vmcs for
+	 * the CPU registers that only need to be switched when transitioning
+	 * to/from the kernel, and the registers have been loaded with guest
+	 * values. If false, host state is loaded in the CPU registers
+	 * and vmx->loaded_vmcs->host_state is invalid.
+	 */
+	bool                  guest_state_loaded;
+
 	u32                   exit_intr_info;
 	u32                   idt_vectoring_info;
 	ulong                 rflags;
 	struct shared_msr_entry *guest_msrs;
 	int                   nmsrs;
 	int                   save_nmsrs;
-	bool                  guest_msrs_dirty;
+	bool                  guest_msrs_ready;
 #ifdef CONFIG_X86_64
 	u64                   msr_host_kernel_gs_base;
 	u64                   msr_guest_kernel_gs_base;
@@ -208,14 +218,10 @@ struct vcpu_vmx {
 	/*
 	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
 	 * non-nested (L1) guest, it always points to vmcs01. For a nested
-	 * guest (L2), it points to a different VMCS. loaded_cpu_state points
-	 * to the VMCS whose state is loaded into the CPU registers that only
-	 * need to be switched when transitioning to/from the kernel; a NULL
-	 * value indicates that host state is loaded.
+	 * guest (L2), it points to a different VMCS.
 	 */
 	struct loaded_vmcs    vmcs01;
 	struct loaded_vmcs   *loaded_vmcs;
-	struct loaded_vmcs   *loaded_cpu_state;
 	struct msr_autoload {
 		struct vmx_msrs guest;
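
A hypothetical walkthrough of the flag lifecycle, using the model sketched above (together the two snippets form a compilable program). It exercises the state the old code could not express: host segment state still saved (guest_state_loaded == true) while the shared MSRs need re-setting (guest_msrs_ready == false).

#include <assert.h>

int main(void)
{
	struct vmx_model vmx = { false, false };

	prepare_switch_to_guest(&vmx);	/* vcpu_load / vmentry path */
	assert(vmx.guest_state_loaded && vmx.guest_msrs_ready);

	setup_msrs(&vmx);		/* e.g. guest toggles MSR_EFER.LMA */
	assert(vmx.guest_state_loaded && !vmx.guest_msrs_ready);

	prepare_switch_to_guest(&vmx);	/* only the MSRs are re-set */
	assert(vmx.guest_msrs_ready);

	prepare_switch_to_host(&vmx);	/* vcpu_put / return to userspace */
	assert(!vmx.guest_state_loaded && !vmx.guest_msrs_ready);
	return 0;
}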