Commit 110312c8 authored by Avi Kivity

KVM: VMX: Optimize atomic EFER load

When NX is enabled on the host but not on the guest, we use the entry/exit
MSR load facility, which is slow.  Optimize it to use the dedicated entry/exit
EFER load controls instead, which is ~1200 cycles faster.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 07c116d2
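
For context, the sketch below models the choice this patch introduces in add_atomic_switch_msr(): when the CPU advertises the "load IA32_EFER" VM-entry/VM-exit controls, EFER is swapped through dedicated VMCS fields instead of the generic MSR autoload list. This is a stand-alone, user-space illustration, not kernel code: the vmcs_* helpers, the autoload fallback, and the values passed in main() are stubs invented for the example; only the field and control names mirror the real change in the diff that follows.

/* User-space sketch only: vmcs_* helpers and the autoload fallback are stubs. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_EFER                0xc0000080u
#define VM_ENTRY_LOAD_IA32_EFER 0x00008000u   /* VM-entry control bit 15 */
#define VM_EXIT_LOAD_IA32_EFER  0x00200000u   /* VM-exit control bit 21 */

static bool cpu_has_load_ia32_efer;           /* probed once during setup */

/* Stand-ins for the real VMCS accessors used in the diff. */
static void vmcs_write64(const char *field, uint64_t val)
{
	printf("VMCS %-18s <- %#llx\n", field, (unsigned long long)val);
}

static void vmcs_set_bits(const char *field, uint32_t bits)
{
	printf("VMCS %-18s |= %#x\n", field, bits);
}

/* Stand-in for the slower entry/exit MSR-load list path. */
static void add_to_autoload_list(unsigned msr, uint64_t guest, uint64_t host)
{
	printf("autoload list: MSR %#x guest=%#llx host=%#llx\n",
	       msr, (unsigned long long)guest, (unsigned long long)host);
}

/* Mirrors the shape of add_atomic_switch_msr() after the patch. */
static void add_atomic_switch_msr(unsigned msr, uint64_t guest_val, uint64_t host_val)
{
	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
		/* Fast path: the CPU loads EFER itself on VM entry/exit. */
		vmcs_write64("GUEST_IA32_EFER", guest_val);
		vmcs_write64("HOST_IA32_EFER", host_val);
		vmcs_set_bits("VM_ENTRY_CONTROLS", VM_ENTRY_LOAD_IA32_EFER);
		vmcs_set_bits("VM_EXIT_CONTROLS", VM_EXIT_LOAD_IA32_EFER);
		return;
	}
	/* Slow path: fall back to the generic MSR autoload machinery. */
	add_to_autoload_list(msr, guest_val, host_val);
}

int main(void)
{
	cpu_has_load_ia32_efer = true;  /* pretend setup_vmcs_config() found support */
	/* Example values: NX set in the host EFER, clear in the guest EFER. */
	add_atomic_switch_msr(MSR_EFER, 0xd01 & ~(1ull << 11), 0xd01);
	return 0;
}
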
@@ -191,6 +191,8 @@ static unsigned long *vmx_io_bitmap_b;
 static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
+static bool cpu_has_load_ia32_efer;
+
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -664,6 +666,12 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
+	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+		return;
+	}
+
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
@@ -683,6 +691,14 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
+	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+		vmcs_write64(GUEST_IA32_EFER, guest_val);
+		vmcs_write64(HOST_IA32_EFER, host_val);
+		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+		return;
+	}
+
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
@@ -1418,6 +1434,14 @@ static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
 	return 0;
 }
 
+static __init bool allow_1_setting(u32 msr, u32 ctl)
+{
+	u32 vmx_msr_low, vmx_msr_high;
+
+	rdmsr(msr, vmx_msr_low, vmx_msr_high);
+	return vmx_msr_high & ctl;
+}
+
 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 {
 	u32 vmx_msr_low, vmx_msr_high;
@@ -1532,6 +1556,12 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	vmcs_conf->vmexit_ctrl = _vmexit_control;
 	vmcs_conf->vmentry_ctrl = _vmentry_control;
 
+	cpu_has_load_ia32_efer =
+		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+				VM_ENTRY_LOAD_IA32_EFER)
+		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+				   VM_EXIT_LOAD_IA32_EFER);
+
 	return 0;
 }