Commit 91194919 authored by Scott Wood, committed by Alexander Graf

kvm/ppc/mpic: Eliminate mmio_mapped

We no longer need to keep track of this now that MPIC destruction
always happens either during VM destruction (after MMIO has been
destroyed) or during a failed creation (before the fd has been exposed
to userspace, and thus before the MMIO region could have been
registered).
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 07f0a7bd
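
For context, the destruction ordering the message relies on can be pictured with the following simplified sketch of the VM teardown path. This is an illustration only, not verbatim kernel code: the helper name kvm_destroy_vm_sketch is made up for this sketch, while kvm_io_bus_destroy() and kvm_destroy_devices() are the real entry points. The point is that the MMIO buses are torn down before the per-device destroy callbacks run, so mpic_destroy() never has to unregister the MMIO region itself.

/*
 * Simplified sketch of the teardown ordering described above (not
 * verbatim kernel code): every I/O bus, including KVM_MMIO_BUS, is
 * destroyed before the emulated devices, so by the time mpic_destroy()
 * runs during VM destruction the MPIC's MMIO registration is gone.
 */
static void kvm_destroy_vm_sketch(struct kvm *kvm)
{
	int i;

	/* Tear down all I/O buses; this unregisters and frees the
	 * MPIC's kvm_io_device along with everything else on the bus. */
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);

	/* Only afterwards are the emulated devices destroyed, which
	 * invokes mpic_destroy() through kvm_device_ops->destroy. */
	kvm_destroy_devices(kvm);
}
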
@@ -190,7 +190,6 @@ struct openpic {
 	struct kvm_io_device mmio;
 	struct list_head mmio_regions;
 	atomic_t users;
-	bool mmio_mapped;
 	gpa_t reg_base;
 	spinlock_t lock;
@@ -1428,24 +1427,13 @@ static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
 	return ret;
 }
 
-static void kvm_mpic_dtor(struct kvm_io_device *this)
-{
-	struct openpic *opp = container_of(this, struct openpic, mmio);
-
-	opp->mmio_mapped = false;
-}
-
 static const struct kvm_io_device_ops mpic_mmio_ops = {
 	.read = kvm_mpic_read,
 	.write = kvm_mpic_write,
-	.destructor = kvm_mpic_dtor,
 };
 
 static void map_mmio(struct openpic *opp)
 {
-	BUG_ON(opp->mmio_mapped);
-	opp->mmio_mapped = true;
-
 	kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);
 
 	kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
@@ -1455,10 +1443,7 @@ static void map_mmio(struct openpic *opp)
 
 static void unmap_mmio(struct openpic *opp)
 {
-	if (opp->mmio_mapped) {
-		opp->mmio_mapped = false;
-		kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
-	}
+	kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
 }
 
 static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
@@ -1637,18 +1622,6 @@ static void mpic_destroy(struct kvm_device *dev)
 {
 	struct openpic *opp = dev->private;
 
-	if (opp->mmio_mapped) {
-		/*
-		 * Normally we get unmapped by kvm_io_bus_destroy(),
-		 * which happens before the VCPUs release their references.
-		 *
-		 * Thus, we should only get here if no VCPUs took a reference
-		 * to us in the first place.
-		 */
-		WARN_ON(opp->nb_cpus != 0);
-		unmap_mmio(opp);
-	}
-
 	dev->kvm->arch.mpic = NULL;
 	kfree(opp);
 }
...