Commit a486b0af authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.34' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.34' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Fix TSS size check for 16-bit tasks
  KVM: Add missing srcu_read_lock() for kvm_mmu_notifier_release()
  KVM: Increase NR_IOBUS_DEVS limit to 200
  KVM: fix the handling of dirty bitmaps to avoid overflows
  KVM: MMU: fix kvm_mmu_zap_page() and its calling path
  KVM: VMX: Save/restore rflags.vm correctly in real mode
  KVM: allow bit 10 to be cleared in MSR_IA32_MC4_CTL
  KVM: Don't spam kernel log when injecting exceptions due to bad cr writes
  KVM: SVM: Fix memory leaks that happen when svm_create_vcpu() fails
  KVM: take srcu lock before call to complete_pio()
parents 1519ae4d e8861cfe
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
         struct kvm_memory_slot *memslot;
         int r, i;
-        long n, base;
+        long base;
+        unsigned long n;
         unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
                         offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
         if (!memslot->dirty_bitmap)
                 goto out;

-        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+        n = kvm_dirty_bitmap_bytes(memslot);
         base = memslot->base_gfn / BITS_PER_LONG;

         for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                 struct kvm_dirty_log *log)
 {
         int r;
-        int n;
+        unsigned long n;
         struct kvm_memory_slot *memslot;
         int is_dirty = 0;

@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         if (is_dirty) {
                 kvm_flush_remote_tlbs(kvm);
                 memslot = &kvm->memslots->memslots[log->slot];
-                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+                n = kvm_dirty_bitmap_bytes(memslot);
                 memset(memslot->dirty_bitmap, 0, n);
         }
         r = 0;
...
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         struct kvm_vcpu *vcpu;
         ulong ga, ga_end;
         int is_dirty = 0;
-        int r, n;
+        int r;
+        unsigned long n;

         mutex_lock(&kvm->slots_lock);

@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                 kvm_for_each_vcpu(n, vcpu, kvm)
                         kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

-                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+                n = kvm_dirty_bitmap_bytes(memslot);
                 memset(memslot->dirty_bitmap, 0, n);
         }
...
@@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
                 for_each_sp(pages, sp, parents, i) {
                         kvm_mmu_zap_page(kvm, sp);
                         mmu_pages_clear_parents(&parents);
-                        zapped++;
                 }
+                zapped += pages.nr;
                 kvm_mmu_pages_init(parent, &parents, &pages);
         }

@@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
          */

         if (used_pages > kvm_nr_mmu_pages) {
-                while (used_pages > kvm_nr_mmu_pages) {
+                while (used_pages > kvm_nr_mmu_pages &&
+                        !list_empty(&kvm->arch.active_mmu_pages)) {
                         struct kvm_mmu_page *page;

                         page = container_of(kvm->arch.active_mmu_pages.prev,
                                             struct kvm_mmu_page, link);
-                        kvm_mmu_zap_page(kvm, page);
+                        used_pages -= kvm_mmu_zap_page(kvm, page);
                         used_pages--;
                 }
+                kvm_nr_mmu_pages = used_pages;
                 kvm->arch.n_free_mmu_pages = 0;
         }
         else
@@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
                     && !sp->role.invalid) {
                         pgprintk("%s: zap %lx %x\n",
                                  __func__, gfn, sp->role.word);
-                        kvm_mmu_zap_page(kvm, sp);
+                        if (kvm_mmu_zap_page(kvm, sp))
+                                nn = bucket->first;
                 }
         }
 }
...
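Editorial note: the mmu.c hunks above make the shrink path subtract however many shadow pages kvm_mmu_zap_page() reports it actually zapped, and stop as soon as the active list runs dry. The following is a standalone userspace sketch of that accounting pattern only; the function names and counts are made up for illustration and are not kernel code.

/* Standalone sketch, not kernel code: the loop subtracts the number of
 * entries the zap step reports it removed (which may be more than one)
 * and also stops once the list is empty, so the counter cannot be driven
 * past what the list really held.
 */
#include <stdio.h>

static int zap_one(int *list_len)
{
        /* pretend the zap also takes out up to two dependants */
        int removed = (*list_len >= 3) ? 3 : *list_len;
        *list_len -= removed;
        return removed;
}

int main(void)
{
        int list_len = 10;      /* entries actually on the list */
        int used = 10;          /* separate bookkeeping counter */
        int target = 4;

        while (used > target && list_len > 0)
                used -= zap_one(&list_len);

        printf("used=%d list_len=%d\n", used, list_len);
        return 0;
}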
@@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
         if (err)
                 goto free_svm;

+        err = -ENOMEM;
         page = alloc_page(GFP_KERNEL);
-        if (!page) {
-                err = -ENOMEM;
+        if (!page)
                 goto uninit;
-        }

-        err = -ENOMEM;
         msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
         if (!msrpm_pages)
-                goto uninit;
+                goto free_page1;

         nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
         if (!nested_msrpm_pages)
-                goto uninit;
-
-        svm->msrpm = page_address(msrpm_pages);
-        svm_vcpu_init_msrpm(svm->msrpm);
+                goto free_page2;

         hsave_page = alloc_page(GFP_KERNEL);
         if (!hsave_page)
-                goto uninit;
+                goto free_page3;
+
         svm->nested.hsave = page_address(hsave_page);

+        svm->msrpm = page_address(msrpm_pages);
+        svm_vcpu_init_msrpm(svm->msrpm);
+
         svm->nested.msrpm = page_address(nested_msrpm_pages);

         svm->vmcb = page_address(page);
@@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)

         return &svm->vcpu;

+free_page3:
+        __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+        __free_page(page);
 uninit:
         kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
...
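Editorial note: the svm.c change replaces the blanket "goto uninit" on every allocation failure with labels that free only what was already allocated, in reverse order. Below is a generic standalone sketch of that labeled-unwind shape, using hypothetical resources rather than the KVM/SVM structures.

/* Standalone sketch of the labeled-unwind pattern (hypothetical resources,
 * not kernel code): each allocation that fails jumps to a label that frees
 * exactly what was already set up, in reverse order of acquisition.
 */
#include <stdlib.h>

struct ctx { void *a, *b, *c; };

static struct ctx *ctx_create(void)
{
        struct ctx *ctx = malloc(sizeof(*ctx));
        if (!ctx)
                return NULL;

        ctx->a = malloc(64);
        if (!ctx->a)
                goto free_ctx;

        ctx->b = malloc(64);
        if (!ctx->b)
                goto free_a;

        ctx->c = malloc(64);
        if (!ctx->c)
                goto free_b;

        return ctx;

free_b:
        free(ctx->b);
free_a:
        free(ctx->a);
free_ctx:
        free(ctx);
        return NULL;
}

int main(void)
{
        struct ctx *ctx = ctx_create();

        if (ctx) {
                free(ctx->c);
                free(ctx->b);
                free(ctx->a);
                free(ctx);
        }
        return 0;
}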
@@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
 /*
  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
  * ple_gap: upper bound on the amount of time between two successive
@@ -131,7 +133,7 @@ struct vcpu_vmx {
         } host_state;
         struct {
                 int vm86_active;
-                u8 save_iopl;
+                ulong save_rflags;
                 struct kvm_save_segment {
                         u16 selector;
                         unsigned long base;
@@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)

 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-        unsigned long rflags;
+        unsigned long rflags, save_rflags;

         rflags = vmcs_readl(GUEST_RFLAGS);
-        if (to_vmx(vcpu)->rmode.vm86_active)
-                rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+        if (to_vmx(vcpu)->rmode.vm86_active) {
+                rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+                save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+                rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+        }
         return rflags;
 }

 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-        if (to_vmx(vcpu)->rmode.vm86_active)
+        if (to_vmx(vcpu)->rmode.vm86_active) {
+                to_vmx(vcpu)->rmode.save_rflags = rflags;
                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+        }
         vmcs_writel(GUEST_RFLAGS, rflags);
 }

@@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
         vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);

         flags = vmcs_readl(GUEST_RFLAGS);
-        flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-        flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+        flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+        flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
         vmcs_writel(GUEST_RFLAGS, flags);

         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

         flags = vmcs_readl(GUEST_RFLAGS);
-        vmx->rmode.save_iopl
-                = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+        vmx->rmode.save_rflags = flags;

         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
...
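Editorial note: in the vmx.c hunks the whole rflags value is now saved before IOPL and VM are forced on for real-mode (vm86) emulation, and reads splice those two fields back from the saved copy using the new RMODE_GUEST_OWNED_EFLAGS_BITS mask. Below is a small standalone sketch of that mask arithmetic; the two flag constants are the architectural bit positions, the sample values are made up.

/* Standalone sketch of the splice done by vmx_get_rflags() above: bits
 * covered by the mask come from the live (hardware) value, while IOPL and
 * VM come from the copy saved when real-mode emulation was entered.
 */
#include <stdio.h>

#define X86_EFLAGS_IOPL 0x3000UL        /* bits 12-13 */
#define X86_EFLAGS_VM   0x20000UL       /* bit 17 */
#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

int main(void)
{
        /* guest's own rflags before entering vm86 emulation: IOPL=3, VM=0 */
        unsigned long saved_rflags = 0x0202UL | X86_EFLAGS_IOPL;
        /* what the hardware holds while emulating: IOPL/VM forced on */
        unsigned long hw_rflags = 0x0297UL | X86_EFLAGS_IOPL | X86_EFLAGS_VM;

        unsigned long guest_view =
                (hw_rflags & RMODE_GUEST_OWNED_EFLAGS_BITS) |
                (saved_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS);

        printf("guest sees rflags = %#lx (IOPL/VM taken from the saved copy)\n",
               guest_view);
        return 0;
}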
@@ -433,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 #ifdef CONFIG_X86_64
         if (cr0 & 0xffffffff00000000UL) {
-                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-                       cr0, kvm_read_cr0(vcpu));
                 kvm_inject_gp(vcpu, 0);
                 return;
         }
@@ -443,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         cr0 &= ~CR0_RESERVED_BITS;

         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                 kvm_inject_gp(vcpu, 0);
                 return;
         }

         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-                       "and a clear PE flag\n");
                 kvm_inject_gp(vcpu, 0);
                 return;
         }
@@ -461,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                         int cs_db, cs_l;

                         if (!is_pae(vcpu)) {
-                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
-                                       "in long mode while PAE is disabled\n");
                                 kvm_inject_gp(vcpu, 0);
                                 return;
                         }
                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                         if (cs_l) {
-                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
-                                       "in long mode while CS.L == 1\n");
                                 kvm_inject_gp(vcpu, 0);
                                 return;
@@ -477,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                 } else
 #endif
                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-                               "reserved bits\n");
                         kvm_inject_gp(vcpu, 0);
                         return;
                 }
@@ -505,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

         if (cr4 & CR4_RESERVED_BITS) {
-                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                 kvm_inject_gp(vcpu, 0);
                 return;
         }

         if (is_long_mode(vcpu)) {
                 if (!(cr4 & X86_CR4_PAE)) {
-                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-                               "in long mode\n");
                         kvm_inject_gp(vcpu, 0);
                         return;
                 }
         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                    && ((cr4 ^ old_cr4) & pdptr_bits)
                    && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                 kvm_inject_gp(vcpu, 0);
                 return;
         }

         if (cr4 & X86_CR4_VMXE) {
-                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                 kvm_inject_gp(vcpu, 0);
                 return;
         }
@@ -547,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
         if (is_long_mode(vcpu)) {
                 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                         kvm_inject_gp(vcpu, 0);
                         return;
                 }
         } else {
                 if (is_pae(vcpu)) {
                         if (cr3 & CR3_PAE_RESERVED_BITS) {
-                                printk(KERN_DEBUG
-                                       "set_cr3: #GP, reserved bits\n");
                                 kvm_inject_gp(vcpu, 0);
                                 return;
                         }
                         if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-                                       "reserved bits\n");
                                 kvm_inject_gp(vcpu, 0);
                                 return;
                         }
@@ -593,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
         if (cr8 & CR8_RESERVED_BITS) {
-                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                 kvm_inject_gp(vcpu, 0);
                 return;
         }
@@ -649,15 +627,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
         if (efer & efer_reserved_bits) {
-                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-                       efer);
                 kvm_inject_gp(vcpu, 0);
                 return;
         }

         if (is_paging(vcpu)
             && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                 kvm_inject_gp(vcpu, 0);
                 return;
         }
@@ -667,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-                        printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
                         kvm_inject_gp(vcpu, 0);
                         return;
                 }
@@ -678,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-                        printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                         kvm_inject_gp(vcpu, 0);
                         return;
                 }
@@ -967,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                 if (msr >= MSR_IA32_MC0_CTL &&
                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                         u32 offset = msr - MSR_IA32_MC0_CTL;
-                        /* only 0 or all 1s can be written to IA32_MCi_CTL */
+                        /* only 0 or all 1s can be written to IA32_MCi_CTL
+                         * some Linux kernels though clear bit 10 in bank 4 to
+                         * workaround a BIOS/GART TBL issue on AMD K8s, ignore
+                         * this to avoid an uncatched #GP in the guest
+                         */
                         if ((offset & 0x3) == 0 &&
-                            data != 0 && data != ~(u64)0)
+                            data != 0 && (data | (1 << 10)) != ~(u64)0)
                                 return -1;
                         vcpu->arch.mce_banks[offset] = data;
                         break;
@@ -2635,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                       struct kvm_dirty_log *log)
 {
-        int r, n, i;
+        int r, i;
         struct kvm_memory_slot *memslot;
+        unsigned long n;
         unsigned long is_dirty = 0;
         unsigned long *dirty_bitmap = NULL;
@@ -2651,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         if (!memslot->dirty_bitmap)
                 goto out;

-        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+        n = kvm_dirty_bitmap_bytes(memslot);

         r = -ENOMEM;
         dirty_bitmap = vmalloc(n);
@@ -4483,7 +4461,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 kvm_set_cr8(vcpu, kvm_run->cr8);

         if (vcpu->arch.pio.cur_count) {
+                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                 r = complete_pio(vcpu);
+                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                 if (r)
                         goto out;
         }
@@ -5146,6 +5126,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
         int ret = 0;
         u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
         u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+        u32 desc_limit;

         old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
@@ -5168,7 +5149,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
                 }
         }

-        if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+        desc_limit = get_desc_limit(&nseg_desc);
+        if (!nseg_desc.p ||
+            ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+             desc_limit < 0x2b)) {
                 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
                 return 1;
         }
...
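Editorial note: among the x86.c hunks, the task-switch fix sizes its TSS limit check by descriptor type: a 32-bit TSS (type bit 3 set) still needs a limit of at least 0x67, while a 16-bit TSS only needs 0x2b. Below is a standalone sketch of the new predicate with a few example inputs; the helper name and the sample values are illustrative.

/* Standalone sketch of the new limit check in kvm_task_switch(): the
 * required TSS size depends on whether the descriptor describes a 32-bit
 * task (type bit 3 set) or a 16-bit one.
 */
#include <stdbool.h>
#include <stdio.h>

static bool tss_limit_invalid(unsigned int type, unsigned int desc_limit)
{
        return (desc_limit < 0x67 && (type & 8)) || desc_limit < 0x2b;
}

int main(void)
{
        /* 16-bit TSS (type 0x1/0x3): 0x2b is enough; the old blanket 0x67 check rejected it */
        printf("16-bit TSS, limit 0x2b -> %s\n",
               tss_limit_invalid(0x3, 0x2b) ? "fault" : "ok");
        /* 32-bit TSS (type 0x9/0xb): still needs at least 0x67 */
        printf("32-bit TSS, limit 0x2b -> %s\n",
               tss_limit_invalid(0xb, 0x2b) ? "fault" : "ok");
        printf("32-bit TSS, limit 0x67 -> %s\n",
               tss_limit_invalid(0xb, 0x67) ? "fault" : "ok");
        return 0;
}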
@@ -54,7 +54,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
  */
 struct kvm_io_bus {
         int dev_count;
-#define NR_IOBUS_DEVS 6
+#define NR_IOBUS_DEVS 200
         struct kvm_io_device *devs[NR_IOBUS_DEVS];
 };

@@ -119,6 +119,11 @@ struct kvm_memory_slot {
         int user_alloc;
 };

+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
         u32 gsi;
         u32 type;
...
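Editorial note: the kvm_dirty_bitmap_bytes() helper added above centralizes the bitmap-size calculation the arch callers used to repeat, and those callers now keep the result in an unsigned long rather than an int. Below is a userspace sketch of the same arithmetic on a 64-bit host; the slot size is hypothetical and only illustrates what a 32-bit counter would lose.

/* Standalone userspace sketch (not kernel code) of the helper added above:
 * one dirty bit per guest page, rounded up to whole longs, expressed in
 * bytes.  Assumes a 64-bit unsigned long; the slot size is made up.
 */
#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
        return ALIGN(npages, BITS_PER_LONG) / 8;
}

int main(void)
{
        unsigned long npages = 1UL << 36;       /* hypothetical, very large slot */
        unsigned long n = dirty_bitmap_bytes(npages);

        printf("bitmap bytes as unsigned long: %lu\n", n);
        printf("same value kept in 32 bits:    %u\n", (unsigned int)n);
        return 0;
}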
@@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                      struct mm_struct *mm)
 {
         struct kvm *kvm = mmu_notifier_to_kvm(mn);
+        int idx;
+
+        idx = srcu_read_lock(&kvm->srcu);
         kvm_arch_flush_shadow(kvm);
+        srcu_read_unlock(&kvm->srcu, idx);
 }

 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
@@ -648,7 +652,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         /* Allocate page dirty bitmap if needed */
         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+                unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

                 new.dirty_bitmap = vmalloc(dirty_bytes);
                 if (!new.dirty_bitmap)
@@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
         struct kvm_memory_slot *memslot;
         int r, i;
-        int n;
+        unsigned long n;
         unsigned long any = 0;

         r = -EINVAL;
@@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
         if (!memslot->dirty_bitmap)
                 goto out;

-        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+        n = kvm_dirty_bitmap_bytes(memslot);

         for (i = 0; !any && i < n/sizeof(long); ++i)
                 any = memslot->dirty_bitmap[i];
@@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
         memslot = gfn_to_memslot_unaliased(kvm, gfn);
         if (memslot && memslot->dirty_bitmap) {
                 unsigned long rel_gfn = gfn - memslot->base_gfn;
+                unsigned long *p = memslot->dirty_bitmap +
+                                        rel_gfn / BITS_PER_LONG;
+                int offset = rel_gfn % BITS_PER_LONG;

                 /* avoid RMW */
-                if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-                        generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+                if (!generic_test_le_bit(offset, p))
+                        generic___set_le_bit(offset, p);
         }
 }
...
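Editorial note: mark_page_dirty() in the last hunk now locates the containing long first and passes only the small bit offset to the bitmap helpers, rather than handing them the raw page offset within the slot, which also keeps the bit index given to those helpers small for very large slots. The sketch below only checks that the two addressings set the same bit; the bitmap size and page number are made up, and native (not little-endian) bit helpers are used for simplicity.

/* Standalone sketch: word-plus-offset addressing, as mark_page_dirty() now
 * does, sets the same bit as indexing the whole bitmap with the flat page
 * number.  Example values only; not kernel code.
 */
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void set_bit_flat(unsigned long nr, unsigned long *bitmap)
{
        bitmap[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
        unsigned long a[4] = { 0 }, b[4] = { 0 };
        unsigned long rel_gfn = 131;    /* made-up page offset within the slot */

        /* old style: flat bit number into the whole bitmap */
        set_bit_flat(rel_gfn, a);

        /* new style: locate the containing long, then use a small offset */
        unsigned long *p = b + rel_gfn / BITS_PER_LONG;
        int offset = rel_gfn % BITS_PER_LONG;
        *p |= 1UL << offset;

        printf("same bitmap contents: %s\n",
               memcmp(a, b, sizeof(a)) == 0 ? "yes" : "no");
        return 0;
}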