Commit 104452f0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Avi Kivity:
 "Fixing a scheduling-while-atomic bug in the ppc code, and a bug which
  allowed pci bridges to be assigned to guests."

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Drop locks around call to kvmppc_pin_guest_page
  KVM: Fix PCI header check on device assignment
parents 08d49c46 081f323b
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -268,16 +268,38 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 	return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+	struct kvm *kvm = vcpu->kvm;
 	void *va;
 	unsigned long nb;
+	unsigned long gpa;
 
-	vpap->update_pending = 0;
-	va = NULL;
-	if (vpap->next_gpa) {
-		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-		if (nb < vpap->len) {
+	/*
+	 * We need to pin the page pointed to by vpap->next_gpa,
+	 * but we can't call kvmppc_pin_guest_page under the lock
+	 * as it does get_user_pages() and down_read(). So we
+	 * have to drop the lock, pin the page, then get the lock
+	 * again and check that a new area didn't get registered
+	 * in the meantime.
+	 */
+	for (;;) {
+		gpa = vpap->next_gpa;
+		spin_unlock(&vcpu->arch.vpa_update_lock);
+		va = NULL;
+		nb = 0;
+		if (gpa)
+			va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		if (gpa == vpap->next_gpa)
+			break;
+		/* sigh... unpin that one and try again */
+		if (va)
+			kvmppc_unpin_guest_page(kvm, va);
+	}
+
+	vpap->update_pending = 0;
+	if (va && nb < vpap->len) {
 		/*
 		 * If it's now too short, it must be that userspace
 		 * has changed the mappings underlying guest memory,
@@ -286,7 +308,6 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 		kvmppc_unpin_guest_page(kvm, va);
 		va = NULL;
 	}
-	}
 	if (vpap->pinned_addr)
 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
 	vpap->pinned_addr = va;
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
-
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.vpa.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
 		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 	}
 	if (vcpu->arch.dtl.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
 		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
 		vcpu->arch.dtl_index = 0;
 	}
 	if (vcpu->arch.slb_shadow.update_pending)
-		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
 	long ret;
 	u64 now;
-	int ptid, i;
+	int ptid, i, need_vpa_update;
 
 	/* don't start if any threads have a signal pending */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+	need_vpa_update = 0;
+	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		if (signal_pending(vcpu->arch.run_task))
 			return 0;
+		need_vpa_update |= vcpu->arch.vpa.update_pending |
+			vcpu->arch.slb_shadow.update_pending |
+			vcpu->arch.dtl.update_pending;
+	}
+
+	/*
+	 * Initialize *vc, in particular vc->vcore_state, so we can
+	 * drop the vcore lock if necessary.
+	 */
+	vc->n_woken = 0;
+	vc->nap_count = 0;
+	vc->entry_exit_count = 0;
+	vc->vcore_state = VCORE_RUNNING;
+	vc->in_guest = 0;
+	vc->napping_threads = 0;
+
+	/*
+	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+	 * which can't be called with any spinlocks held.
+	 */
+	if (need_vpa_update) {
+		spin_unlock(&vc->lock);
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+			kvmppc_update_vpas(vcpu);
+		spin_lock(&vc->lock);
+	}
 
 	/*
 	 * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 		if (vcpu->arch.ceded)
 			vcpu->arch.ptid = ptid++;
 
-	vc->n_woken = 0;
-	vc->nap_count = 0;
-	vc->entry_exit_count = 0;
-	vc->vcore_state = VCORE_RUNNING;
 	vc->stolen_tb += mftb() - vc->preempt_tb;
-	vc->in_guest = 0;
 	vc->pcpu = smp_processor_id();
-	vc->napping_threads = 0;
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
-		if (vcpu->arch.vpa.update_pending ||
-		    vcpu->arch.slb_shadow.update_pending ||
-		    vcpu->arch.dtl.update_pending)
-			kvmppc_update_vpas(vcpu);
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
 	/* Grab any remaining hw threads so they can't go into the kernel */
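The heart of the first fix is the unlock/pin/relock/recheck loop above. A minimal userspace sketch of the same pattern, with a pthread mutex in place of the kernel spinlock and hypothetical pin_page()/unpin_page() stand-ins for kvmppc_pin_guest_page() (names and types are illustrative, not the kernel API):

#include <pthread.h>
#include <stddef.h>

/* Illustrative stand-ins, not the kernel API: a real "pin" may sleep,
 * which is exactly why it must not run while the lock is held. */
static void *pin_page(unsigned long gpa)
{
	return (void *)gpa;	/* pretend the pin yields a mapping */
}

static void unpin_page(void *va)
{
	(void)va;
}

struct vpa_stub {
	pthread_mutex_t lock;
	unsigned long next_gpa;	/* other threads may change this */
	void *pinned_addr;
};

/* Called with stub->lock held; returns with it held. */
static void update_pinned(struct vpa_stub *stub)
{
	unsigned long gpa;
	void *va;

	for (;;) {
		/* Snapshot the target, then drop the lock around the
		 * potentially sleeping pin operation. */
		gpa = stub->next_gpa;
		pthread_mutex_unlock(&stub->lock);

		va = gpa ? pin_page(gpa) : NULL;

		pthread_mutex_lock(&stub->lock);
		if (gpa == stub->next_gpa)
			break;	/* nothing changed while we were unlocked */

		/* A new area was registered in the meantime:
		 * undo the stale pin and try again. */
		if (va)
			unpin_page(va);
	}

	if (stub->pinned_addr)
		unpin_page(stub->pinned_addr);
	stub->pinned_addr = va;
}

The same discipline shows up in kvmppc_run_core() above: any state another thread might inspect (vc->vcore_state in particular) is made consistent before vc->lock is dropped around the vpa updates.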
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -635,7 +635,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	int r = 0, idx;
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
-	u8 header_type;
 
 	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
 		return -EINVAL;
@@ -668,8 +667,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	}
 
 	/* Don't allow bridges to be assigned */
-	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
 		r = -EPERM;
 		goto out_put;
 	}
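As for the second fix: PCI_HEADER_TYPE (0x0e) is the config-space offset of the header-type register, not a mask, so the old check masked a bridge's header byte (0x01) down to 0 and accepted it as a normal device. A standalone sketch (not kernel code; the device values are hypothetical) showing the difference:

#include <stdio.h>

/* Constants as defined in the kernel's pci_regs.h */
#define PCI_HEADER_TYPE		0x0e	/* config-space OFFSET, not a mask */
#define PCI_HEADER_TYPE_NORMAL	0
#define PCI_HEADER_TYPE_BRIDGE	1

int main(void)
{
	unsigned char header_type = PCI_HEADER_TYPE_BRIDGE;	/* raw byte of a bridge */
	unsigned char hdr_type = header_type & 0x7f;	/* what the PCI core caches in pci_dev */

	/* Old, buggy check: 0x01 & 0x0e == 0, so the bridge looks "normal". */
	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
		printf("old check rejects the bridge\n");
	else
		printf("old check wrongly accepts the bridge\n");	/* printed */

	/* Fixed check, as in the patch. */
	if (hdr_type != PCI_HEADER_TYPE_NORMAL)
		printf("new check rejects the bridge\n");	/* printed */

	return 0;
}

Using dev->hdr_type also avoids re-reading config space: the PCI core fills that field at probe time with the multifunction bit (0x80) already stripped.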