Commit 26bb0981 authored by Avi Kivity

KVM: VMX: Use shared msr infrastructure

Instead of reloading syscall MSRs on every preemption, use the new shared
msr infrastructure to reload them at the last possible minute (just before
exit to userspace).

Improves vcpu/idle/vcpu switches by about 2000 cycles (when EFER needs to be
reloaded as well).

[jan: fix slot index missing indirection]
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 18863bdd
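
For context: the shared msr infrastructure added by the parent commit (18863bdd) writes guest MSR values eagerly but restores host values lazily, from a user-return notifier that fires just before the thread goes back to userspace. The toy program below sketches that pattern. The names and two-argument shapes of kvm_define_shared_msr() and kvm_set_shared_msr() match the calls in the diff; the data structures, the fake MSR array, and on_user_return() are illustrative assumptions, not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SHARED_MSRS 4	/* illustrative capacity */

/* Pretend MSR storage so the sketch runs in userspace; the kernel
 * would issue rdmsrl()/wrmsrl() instead. Keyed by slot for brevity. */
static uint64_t hw_msr[NR_SHARED_MSRS];

struct shared_msr {
	uint32_t msr;	/* MSR number registered for this slot */
	uint64_t host;	/* host value, to be restored lazily */
	uint64_t curr;	/* value currently loaded in the MSR */
};

static struct shared_msr shared_msrs[NR_SHARED_MSRS];
static bool notifier_armed;	/* user-return hook registered? */

/* Module init: bind a slot to an MSR and capture the host value. */
void kvm_define_shared_msr(unsigned slot, uint32_t msr)
{
	shared_msrs[slot].msr = msr;
	shared_msrs[slot].host = hw_msr[slot];	/* rdmsrl() in the kernel */
	shared_msrs[slot].curr = shared_msrs[slot].host;
}

/* Before guest entry: load the guest value and arm the lazy restore. */
void kvm_set_shared_msr(unsigned slot, uint64_t value)
{
	struct shared_msr *m = &shared_msrs[slot];

	if (value == m->curr)
		return;				/* already loaded: skip */
	hw_msr[slot] = m->curr = value;		/* wrmsrl() in the kernel */
	if (!notifier_armed)
		notifier_armed = true;		/* the kernel registers a
						 * user-return notifier here */
}

/* Fires only on return to userspace, not on every preemption; this
 * deferral is where the ~2000 cycles from the changelog are saved. */
void on_user_return(void)
{
	unsigned i;

	for (i = 0; i < NR_SHARED_MSRS; ++i) {
		struct shared_msr *m = &shared_msrs[i];

		if (m->curr != m->host)
			hw_msr[i] = m->curr = m->host;
	}
	notifier_armed = false;
}

int main(void)
{
	hw_msr[0] = 0xd01;			/* pretend host EFER */
	kvm_define_shared_msr(0, 0xc0000080);	/* slot 0 = MSR_EFER */
	kvm_set_shared_msr(0, 0x501);		/* guest value, one write */
	kvm_set_shared_msr(0, 0x501);		/* unchanged: no write */
	on_user_return();			/* host value written back */
	printf("MSR ends up at %#llx\n", (unsigned long long)hw_msr[0]);
	return 0;
}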
@@ -86,6 +86,11 @@ struct vmcs {
 	char data[0];
 };
 
+struct shared_msr_entry {
+	unsigned index;
+	u64 data;
+};
+
 struct vcpu_vmx {
 	struct kvm_vcpu vcpu;
 	struct list_head local_vcpus_link;
@@ -93,8 +98,7 @@ struct vcpu_vmx {
 	int launched;
 	u8 fail;
 	u32 idt_vectoring_info;
-	struct kvm_msr_entry *guest_msrs;
-	struct kvm_msr_entry *host_msrs;
+	struct shared_msr_entry *guest_msrs;
 	int nmsrs;
 	int save_nmsrs;
 	int msr_offset_efer;
@@ -108,7 +112,6 @@ struct vcpu_vmx {
 		u16 fs_sel, gs_sel, ldt_sel;
 		int gs_ldt_reload_needed;
 		int fs_reload_needed;
-		int guest_efer_loaded;
 	} host_state;
 	struct {
 		int vm86_active;
@@ -195,6 +198,8 @@ static struct kvm_vmx_segment_field {
 	VMX_SEGMENT_FIELD(LDTR),
 };
 
+static u64 host_efer;
+
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
 /*
@@ -209,22 +214,6 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static void load_msrs(struct kvm_msr_entry *e, int n)
-{
-	int i;
-
-	for (i = 0; i < n; ++i)
-		wrmsrl(e[i].index, e[i].data);
-}
-
-static void save_msrs(struct kvm_msr_entry *e, int n)
-{
-	int i;
-
-	for (i = 0; i < n; ++i)
-		rdmsrl(e[i].index, e[i].data);
-}
-
 static inline int is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -373,7 +362,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 	int i;
 
 	for (i = 0; i < vmx->nmsrs; ++i)
-		if (vmx->guest_msrs[i].index == msr)
+		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
 			return i;
 	return -1;
 }
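
(Aside on the hunk above: this is the "[jan: fix slot index missing indirection]" fix from the changelog. With the shared msr scheme, guest_msrs[i].index stores a slot into vmx_msr_index[], not the MSR number itself, so a lookup by MSR number must dereference the table. A userspace toy illustrating the broken and the fixed lookup follows; the table contents are illustrative, though 0xc0000080 and friends are the real architectural MSR numbers.)

#include <stdint.h>
#include <stdio.h>

/* Slot-to-MSR table, in the role of vmx_msr_index[]; contents illustrative. */
static const uint32_t vmx_msr_index[] = {
	0xc0000084,	/* MSR_SYSCALL_MASK */
	0xc0000082,	/* MSR_LSTAR */
	0xc0000080,	/* MSR_EFER */
};

struct shared_msr_entry {
	unsigned index;		/* slot into vmx_msr_index[], NOT an MSR number */
	uint64_t data;		/* guest value for that MSR */
};

/* Broken (pre-fix): compares a small slot index against an MSR number. */
static int find_slot_broken(const struct shared_msr_entry *e, int n, uint32_t msr)
{
	int i;

	for (i = 0; i < n; ++i)
		if (e[i].index == msr)
			return i;
	return -1;
}

/* Fixed: translate the slot to an MSR number through the table first. */
static int find_slot_fixed(const struct shared_msr_entry *e, int n, uint32_t msr)
{
	int i;

	for (i = 0; i < n; ++i)
		if (vmx_msr_index[e[i].index] == msr)
			return i;
	return -1;
}

int main(void)
{
	struct shared_msr_entry msrs[] = {
		{ 2, 0 },	/* slot 2 -> MSR_EFER */
		{ 0, 0 },	/* slot 0 -> MSR_SYSCALL_MASK */
	};

	printf("broken: %d, fixed: %d\n",
	       find_slot_broken(msrs, 2, 0xc0000080),
	       find_slot_fixed(msrs, 2, 0xc0000080));	/* prints -1 and 0 */
	return 0;
}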
@@ -404,7 +393,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
 		  : : "a" (&operand), "c" (ext) : "cc", "memory");
 }
 
-static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
@@ -595,17 +584,15 @@ static void reload_tss(void)
 	load_TR_desc();
 }
 
-static void load_transition_efer(struct vcpu_vmx *vmx)
+static bool update_transition_efer(struct vcpu_vmx *vmx)
 {
 	int efer_offset = vmx->msr_offset_efer;
-	u64 host_efer;
 	u64 guest_efer;
 	u64 ignore_bits;
 
 	if (efer_offset < 0)
-		return;
-	host_efer = vmx->host_msrs[efer_offset].data;
-	guest_efer = vmx->guest_msrs[efer_offset].data;
+		return false;
+	guest_efer = vmx->vcpu.arch.shadow_efer;
 
 	/*
 	 * NX is emulated; LMA and LME handled by hardware; SCE meaninless
@@ -619,26 +606,18 @@ static void load_transition_efer(struct vcpu_vmx *vmx)
 	ignore_bits &= ~(u64)EFER_SCE;
 #endif
 	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
-		return;
+		return false;
 
-	vmx->host_state.guest_efer_loaded = 1;
 	guest_efer &= ~ignore_bits;
 	guest_efer |= host_efer & ignore_bits;
-	wrmsrl(MSR_EFER, guest_efer);
-	vmx->vcpu.stat.efer_reload++;
-}
-
-static void reload_host_efer(struct vcpu_vmx *vmx)
-{
-	if (vmx->host_state.guest_efer_loaded) {
-		vmx->host_state.guest_efer_loaded = 0;
-		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
-	}
+	vmx->guest_msrs[efer_offset].data = guest_efer;
+	return true;
 }
 
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int i;
 
 	if (vmx->host_state.loaded)
 		return;
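
(Aside on update_transition_efer() above: it first checks whether guest and host EFER differ in any bit that actually requires an MSR switch, then blends the value to load: bits in ignore_bits come from the host, since hardware or emulation already handles them, and the rest from the guest. Below is a standalone toy example of the check and the blend, with invented values; the EFER bit positions are architectural.)

#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ULL << 0)	/* syscall enable */
#define EFER_LME (1ULL << 8)	/* long mode enable */
#define EFER_LMA (1ULL << 10)	/* long mode active */
#define EFER_NX  (1ULL << 11)	/* no-execute enable */

int main(void)
{
	/* Invented values: 64-bit host, long-mode guest that cleared SCE. */
	uint64_t host_efer  = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
	uint64_t guest_efer = EFER_LME | EFER_LMA | EFER_NX;

	/* NX is emulated, LMA/LME handled by hardware: ignore them. */
	uint64_t ignore_bits = EFER_NX | EFER_LMA | EFER_LME;

	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits)) {
		puts("no EFER switch needed");	/* not taken: SCE differs */
		return 0;
	}

	/* Blend: ignored bits from the host, everything else from the guest. */
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	printf("EFER value to load for the guest: %#llx\n",
	       (unsigned long long)guest_efer);	/* 0xd00: SCE stays off */
	return 0;
}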
@@ -680,8 +659,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 	}
 #endif
-	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-	load_transition_efer(vmx);
+	for (i = 0; i < vmx->save_nmsrs; ++i)
+		kvm_set_shared_msr(vmx->guest_msrs[i].index,
+				   vmx->guest_msrs[i].data);
 }
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -709,9 +689,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 		local_irq_restore(flags);
 	}
 	reload_tss();
-	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
-	reload_host_efer(vmx);
 #ifdef CONFIG_X86_64
 	if (is_long_mode(&vmx->vcpu)) {
 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
@@ -908,19 +885,14 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
-#ifdef CONFIG_X86_64
 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 {
-	struct kvm_msr_entry tmp;
+	struct shared_msr_entry tmp;
 
 	tmp = vmx->guest_msrs[to];
 	vmx->guest_msrs[to] = vmx->guest_msrs[from];
 	vmx->guest_msrs[from] = tmp;
-	tmp = vmx->host_msrs[to];
-	vmx->host_msrs[to] = vmx->host_msrs[from];
-	vmx->host_msrs[from] = tmp;
 }
-#endif
 
 /*
  * Set up the vmcs to automatically save and restore system
@@ -929,15 +901,13 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
-	int save_nmsrs;
+	int save_nmsrs, index;
 	unsigned long *msr_bitmap;
 
 	vmx_load_host_state(vmx);
 	save_nmsrs = 0;
 #ifdef CONFIG_X86_64
 	if (is_long_mode(&vmx->vcpu)) {
-		int index;
-
 		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
 		if (index >= 0)
 			move_msr_up(vmx, index, save_nmsrs++);
@@ -956,9 +926,11 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 			move_msr_up(vmx, index, save_nmsrs++);
 	}
 #endif
-	vmx->save_nmsrs = save_nmsrs;
+	vmx->msr_offset_efer = index = __find_msr_index(vmx, MSR_EFER);
+	if (index >= 0 && update_transition_efer(vmx))
+		move_msr_up(vmx, index, save_nmsrs++);
 
-	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
+	vmx->save_nmsrs = save_nmsrs;
 
 	if (cpu_has_vmx_msr_bitmap()) {
 		if (is_long_mode(&vmx->vcpu))
@@ -1000,7 +972,7 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
 	u64 data;
-	struct kvm_msr_entry *msr;
+	struct shared_msr_entry *msr;
 
 	if (!pdata) {
 		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
@@ -1019,9 +991,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 		vmx_load_host_state(to_vmx(vcpu));
 		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
 		break;
+#endif
 	case MSR_EFER:
 		return kvm_get_msr_common(vcpu, msr_index, pdata);
-#endif
 	case MSR_IA32_TSC:
 		data = guest_read_tsc();
 		break;
@@ -1035,6 +1007,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 		data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	default:
+		vmx_load_host_state(to_vmx(vcpu));
 		msr = find_msr_entry(to_vmx(vcpu), msr_index);
 		if (msr) {
 			vmx_load_host_state(to_vmx(vcpu));
@@ -1056,7 +1029,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_msr_entry *msr;
+	struct shared_msr_entry *msr;
 	u64 host_tsc;
 	int ret = 0;
@@ -1565,7 +1538,10 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+
+	if (!msr)
+		return;
 
 	/*
 	 * Force kernel_gs_base reloading before EFER changes, as control
@@ -2417,10 +2393,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		if (wrmsr_safe(index, data_low, data_high) < 0)
 			continue;
 		data = data_low | ((u64)data_high << 32);
-		vmx->host_msrs[j].index = index;
-		vmx->host_msrs[j].reserved = 0;
-		vmx->host_msrs[j].data = data;
-		vmx->guest_msrs[j] = vmx->host_msrs[j];
+		vmx->guest_msrs[j].index = i;
+		vmx->guest_msrs[j].data = 0;
 		++vmx->nmsrs;
 	}
@@ -3821,7 +3795,6 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
 	spin_unlock(&vmx_vpid_lock);
 	vmx_free_vmcs(vcpu);
-	kfree(vmx->host_msrs);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
@@ -3848,10 +3821,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		goto uninit_vcpu;
 	}
 
-	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vmx->host_msrs)
-		goto free_guest_msrs;
-
 	vmx->vmcs = alloc_vmcs();
 	if (!vmx->vmcs)
 		goto free_msrs;
@@ -3882,8 +3851,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 free_vmcs:
 	free_vmcs(vmx->vmcs);
 free_msrs:
-	kfree(vmx->host_msrs);
-free_guest_msrs:
 	kfree(vmx->guest_msrs);
 uninit_vcpu:
 	kvm_vcpu_uninit(&vmx->vcpu);
@@ -4033,7 +4000,12 @@ static struct kvm_x86_ops vmx_x86_ops = {
 static int __init vmx_init(void)
 {
-	int r;
+	int r, i;
+
+	rdmsrl_safe(MSR_EFER, &host_efer);
+
+	for (i = 0; i < NR_VMX_MSR; ++i)
+		kvm_define_shared_msr(i, vmx_msr_index[i]);
 
 	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_io_bitmap_a)