Commit c298125f authored by Sasha Levin, committed by Avi Kivity

KVM: MMIO: Lock coalesced device when checking for available entry

Move the check for whether there are available ring entries to within the spinlock.
This allows the ring to work with a larger number of VCPUs and reduces
premature exits to userspace when many VCPUs are in use.

Cc: Avi Kivity <avi@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 22388a3c
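
The pattern this commit adopts — checking ring capacity under the same lock that
serializes writers, so a single free slot suffices and no per-writer headroom
(the old KVM_MAX_VCPUS margin) is needed — can be sketched in userspace. This is
a minimal illustration, not kernel code: the ring type and names (ring_t,
RING_MAX, ring_has_room, ring_write) are hypothetical, and a pthread mutex
stands in for dev->lock. RING_MAX is kept a power of two so the
unsigned-wraparound modulo in the free-slot computation is exact.

	/*
	 * Sketch only: capacity checked under the writers' lock,
	 * mirroring coalesced_mmio_has_room()/coalesced_mmio_write().
	 */
	#include <pthread.h>
	#include <stdio.h>

	#define RING_MAX 8u   /* stand-in for KVM_COALESCED_MMIO_MAX; power of two */

	typedef struct {
		unsigned first;               /* oldest used entry */
		unsigned last;                /* first free entry */
		unsigned long data[RING_MAX];
		pthread_mutex_t lock;         /* stand-in for dev->lock */
	} ring_t;

	/* One slot always stays unused so first == last means "empty". */
	static int ring_has_room(ring_t *r)
	{
		unsigned avail = (r->first - r->last - 1) % RING_MAX;
		return avail != 0;
	}

	static int ring_write(ring_t *r, unsigned long val)
	{
		pthread_mutex_lock(&r->lock);
		if (!ring_has_room(r)) {          /* checked under the lock */
			pthread_mutex_unlock(&r->lock);
			return -1;                /* caller takes the slow path */
		}
		r->data[r->last] = val;
		r->last = (r->last + 1) % RING_MAX;
		pthread_mutex_unlock(&r->lock);
		return 0;
	}

	int main(void)
	{
		ring_t r = { .lock = PTHREAD_MUTEX_INITIALIZER };

		/* No consumer advances 'first' here, so exactly
		 * RING_MAX - 1 writes succeed before the ring reports full. */
		for (int i = 0; i < 10; i++)
			printf("write %d -> %s\n", i,
			       ring_write(&r, (unsigned long)i) ? "full" : "ok");
		return 0;
	}
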
@@ -25,23 +25,8 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 				   gpa_t addr, int len)
 {
 	struct kvm_coalesced_mmio_zone *zone;
-	struct kvm_coalesced_mmio_ring *ring;
-	unsigned avail;
 	int i;
 
-	/* Are we able to batch it ? */
-
-	/* last is the first free entry
-	 * check if we don't meet the first used entry
-	 * there is always one unused entry in the buffer
-	 */
-	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < KVM_MAX_VCPUS) {
-		/* full */
-		return 0;
-	}
-
 	/* is it in a batchable area ? */
 	for (i = 0; i < dev->nb_zones; i++) {
@@ -58,16 +43,43 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 0;
 }
 
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+{
+	struct kvm_coalesced_mmio_ring *ring;
+	unsigned avail;
+
+	/* Are we able to batch it ? */
+
+	/* last is the first free entry
+	 * check if we don't meet the first used entry
+	 * there is always one unused entry in the buffer
+	 */
+	ring = dev->kvm->coalesced_mmio_ring;
+	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	if (avail == 0) {
+		/* full */
+		return 0;
+	}
+
+	return 1;
+}
+
 static int coalesced_mmio_write(struct kvm_io_device *this,
 				gpa_t addr, int len, const void *val)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->lock);
+
+	if (!coalesced_mmio_has_room(dev)) {
+		spin_unlock(&dev->lock);
+		return -EOPNOTSUPP;
+	}
+
 	/* copy data in first free entry of the ring */
 	ring->coalesced_mmio[ring->last].phys_addr = addr;
...
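
To see why the old pre-lock heuristic caused premature exits, here is an
illustrative calculation with assumed sizes — a 128-entry ring and a
KVM_MAX_VCPUS of 64, stand-ins rather than the actual kernel constants. With
the ring only half full, the old check already reports "full" and bounces the
write to userspace, while the new in-lock check keeps batching until no slot
remains:

	/* Illustrative only, not kernel code. */
	#include <stdio.h>

	#define RING_MAX  128u   /* stand-in for KVM_COALESCED_MMIO_MAX */
	#define MAX_VCPUS 64u    /* stand-in for KVM_MAX_VCPUS */

	int main(void)
	{
		unsigned first = 0;    /* no entries consumed yet */
		unsigned last = 64;    /* 64 entries written */
		unsigned avail = (first - last - 1) % RING_MAX;   /* 63 */

		printf("free slots: %u\n", avail);
		printf("old check:  %s\n", avail < MAX_VCPUS ?
		       "\"full\", exit to userspace" : "batch it");
		printf("new check:  %s\n", avail == 0 ?
		       "\"full\", exit to userspace" : "batch it");
		return 0;
	}
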