Commit 7e10b9a6 authored by Cédric Le Goater, committed by Paul Mackerras

KVM: PPC: Book3S HV: XIVE: Introduce a new mutex for the XIVE device

The XICS-on-XIVE KVM device needs to allocate XIVE event queues when a
priority is used by the OS. This is referred to as EQ provisioning and it
is done under the hood when:

  1. a CPU is hot-plugged in the VM
  2. the "set-xive" is called at VM startup
  3. sources are restored at VM restore

The kvm->lock mutex is used to protect the XIVE structures being
modified, but in some contexts kvm->lock is taken under vcpu->mutex,
which is not permitted by the KVM locking rules.
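The inversion can be pictured with a minimal sketch; the function name and the exact call chain below are illustrative assumptions, not the kernel code itself:

#include <linux/kvm_host.h>

/* A vCPU ioctl runs with vcpu->mutex already held by generic KVM code. */
static int xive_connect_vcpu_before(struct kvm_vcpu *vcpu)
{
        mutex_lock(&vcpu->kvm->lock);   /* kvm->lock acquired under vcpu->mutex */
        /* ... EQ provisioning and vCPU setup ... */
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}

The documented ordering is the opposite: kvm->lock is taken outside vcpu->mutex, so these paths need a lock that is private to the XIVE device.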

Introduce a new mutex 'lock' in the KVM XIVE devices so that they can
synchronize accesses to the XIVE device structures.
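A rough, self-contained sketch of the resulting pattern follows; the struct and function names are hypothetical, and only the lock and qmap fields plus the lockdep annotation mirror the actual change:

#include <linux/mutex.h>
#include <linux/lockdep.h>
#include <linux/types.h>

struct xive_dev {                       /* hypothetical stand-in for struct kvmppc_xive */
        struct mutex lock;              /* per-device lock introduced by this patch */
        u8 qmap;                        /* one bit per provisioned priority, as in xive->qmap */
};

/* Called with dev->lock held, like xive_check_provisioning(). */
static int check_provisioning(struct xive_dev *dev, u8 prio)
{
        lockdep_assert_held(&dev->lock);

        if (dev->qmap & (1 << prio))    /* already provisioned */
                return 0;

        /* ... allocate the event queues for this priority on each vCPU ... */
        dev->qmap |= 1 << prio;
        return 0;
}

Callers such as kvmppc_xive_set_xive() and xive_set_source() wrap the provisioning check in mutex_lock(&xive->lock)/mutex_unlock(&xive->lock), as the hunks below show, so the XIVE structures no longer rely on kvm->lock being taken from a vCPU context.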
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent e717d0ae
@@ -271,14 +271,14 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
         return rc;
 }
 
-/* Called with kvm_lock held */
+/* Called with xive->lock held */
 static int xive_check_provisioning(struct kvm *kvm, u8 prio)
 {
         struct kvmppc_xive *xive = kvm->arch.xive;
         struct kvm_vcpu *vcpu;
         int i, rc;
 
-        lockdep_assert_held(&kvm->lock);
+        lockdep_assert_held(&xive->lock);
 
         /* Already provisioned ? */
         if (xive->qmap & (1 << prio))
@@ -621,9 +621,12 @@ int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                  irq, server, priority);
 
         /* First, check provisioning of queues */
-        if (priority != MASKED)
+        if (priority != MASKED) {
+                mutex_lock(&xive->lock);
                 rc = xive_check_provisioning(xive->kvm,
                               xive_prio_from_guest(priority));
+                mutex_unlock(&xive->lock);
+        }
         if (rc) {
                 pr_devel(" provisioning failure %d !\n", rc);
                 return rc;
@@ -1199,7 +1202,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                 return -ENOMEM;
 
         /* We need to synchronize with queue provisioning */
-        mutex_lock(&vcpu->kvm->lock);
+        mutex_lock(&xive->lock);
         vcpu->arch.xive_vcpu = xc;
         xc->xive = xive;
         xc->vcpu = vcpu;
@@ -1283,7 +1286,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
         xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
 
 bail:
-        mutex_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&xive->lock);
         if (r) {
                 kvmppc_xive_cleanup_vcpu(vcpu);
                 return r;
@@ -1527,13 +1530,12 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
 struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
         struct kvmppc_xive *xive, int irq)
 {
-        struct kvm *kvm = xive->kvm;
         struct kvmppc_xive_src_block *sb;
         int i, bid;
 
         bid = irq >> KVMPPC_XICS_ICS_SHIFT;
 
-        mutex_lock(&kvm->lock);
+        mutex_lock(&xive->lock);
 
         /* block already exists - somebody else got here first */
         if (xive->src_blocks[bid])
@@ -1560,7 +1562,7 @@ struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
                 xive->max_sbid = bid;
 
 out:
-        mutex_unlock(&kvm->lock);
+        mutex_unlock(&xive->lock);
         return xive->src_blocks[bid];
 }
 
@@ -1670,9 +1672,9 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
         /* If we have a priority target the interrupt */
         if (act_prio != MASKED) {
                 /* First, check provisioning of queues */
-                mutex_lock(&xive->kvm->lock);
+                mutex_lock(&xive->lock);
                 rc = xive_check_provisioning(xive->kvm, act_prio);
-                mutex_unlock(&xive->kvm->lock);
+                mutex_unlock(&xive->lock);
 
                 /* Target interrupt */
                 if (rc == 0)
@@ -1963,6 +1965,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
         dev->private = xive;
         xive->dev = dev;
         xive->kvm = kvm;
+        mutex_init(&xive->lock);
 
         /* Already there ? */
         if (kvm->arch.xive)
@@ -141,6 +141,7 @@ struct kvmppc_xive {
         struct kvmppc_xive_ops *ops;
         struct address_space *mapping;
         struct mutex mapping_lock;
+        struct mutex lock;
 };
 
 #define KVMPPC_XIVE_Q_COUNT 8
@@ -114,7 +114,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
                 return -EINVAL;
         }
 
-        mutex_lock(&vcpu->kvm->lock);
+        mutex_lock(&xive->lock);
 
         if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
                 pr_devel("Duplicate !\n");
@@ -159,7 +159,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
 
         /* TODO: reset all queues to a clean state ? */
 bail:
-        mutex_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&xive->lock);
         if (rc)
                 kvmppc_xive_native_cleanup_vcpu(vcpu);
 
@@ -772,7 +772,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
 
         pr_devel("%s\n", __func__);
 
-        mutex_lock(&kvm->lock);
+        mutex_lock(&xive->lock);
 
         kvm_for_each_vcpu(i, vcpu, kvm) {
                 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -810,7 +810,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
                 }
         }
 
-        mutex_unlock(&kvm->lock);
+        mutex_unlock(&xive->lock);
 
         return 0;
 }
@@ -878,7 +878,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
 
         pr_devel("%s\n", __func__);
 
-        mutex_lock(&kvm->lock);
+        mutex_lock(&xive->lock);
         for (i = 0; i <= xive->max_sbid; i++) {
                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
 
@@ -892,7 +892,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
         kvm_for_each_vcpu(i, vcpu, kvm) {
                 kvmppc_xive_native_vcpu_eq_sync(vcpu);
         }
-        mutex_unlock(&kvm->lock);
+        mutex_unlock(&xive->lock);
 
         return 0;
 }
@@ -965,7 +965,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
 }
 
 /*
- * Called when device fd is closed
+ * Called when device fd is closed. kvm->lock is held.
  */
 static void kvmppc_xive_native_release(struct kvm_device *dev)
 {
@@ -1064,6 +1064,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
         xive->kvm = kvm;
         kvm->arch.xive = xive;
         mutex_init(&xive->mapping_lock);
+        mutex_init(&xive->lock);
 
         /*
          * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for