Commit 44401a20 authored by Maciej S. Szmigiero, committed by Paolo Bonzini

KVM: Optimize overlapping memslots check

Do a quick lookup for possibly overlapping gfns when creating or moving
a memslot instead of performing a linear scan of the whole memslot set.
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
[sean: tweaked params to avoid churn in future cleanup]
Reviewed-by: Sean Christopherson <seanjc@google.com>
Message-Id: <a4795e5c2f624754e9c0aab023ebda1966feb3e1.1638817641.git.maciej.szmigiero@oracle.com>
parent f4209439
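
For context on what the optimization buys: the removed code walked every memslot and open-coded the half-open interval test (the new range [base_gfn, base_gfn + npages) overlaps an existing slot unless it ends at or before that slot's start or begins at or after that slot's end), whereas the new kvm_check_memslot_overlap() helper only visits slots that kvm_for_each_memslot_in_gfn_range() already guarantees intersect [start, end), so encountering any slot with a different id means a collision. The standalone sketch below is not KVM code: the toy_slot struct, the function names, and the binary search over a sorted array are made-up stand-ins for KVM's gfn tree, used only to contrast the two shapes of the check.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy memslot: covers gfns [base_gfn, base_gfn + npages). */
struct toy_slot {
	int id;
	uint64_t base_gfn;
	uint64_t npages;
};

/* Old shape: linear scan over all slots, open-coding the overlap test. */
static bool overlap_linear(const struct toy_slot *slots, size_t n,
			   int id, uint64_t start, uint64_t end)
{
	for (size_t i = 0; i < n; i++) {
		if (slots[i].id == id)
			continue;
		if (start < slots[i].base_gfn + slots[i].npages &&
		    slots[i].base_gfn < end)
			return true;
	}
	return false;
}

/*
 * New shape: range query for slots intersecting [start, end), then only
 * compare ids.  Here the "range query" is a binary search over an array
 * sorted by base_gfn, standing in for KVM's gfn tree iterator.
 */
static bool overlap_range_query(const struct toy_slot *sorted, size_t n,
				int id, uint64_t start, uint64_t end)
{
	size_t lo = 0, hi = n;

	/* Find the first slot whose end lies above 'start'. */
	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (sorted[mid].base_gfn + sorted[mid].npages <= start)
			lo = mid + 1;
		else
			hi = mid;
	}
	/* Walk only the slots that can still intersect [start, end). */
	for (size_t i = lo; i < n && sorted[i].base_gfn < end; i++) {
		if (sorted[i].id != id)
			return true;
	}
	return false;
}

int main(void)
{
	/* Kept sorted by base_gfn, as the gfn tree keeps its slots. */
	const struct toy_slot slots[] = {
		{ .id = 0, .base_gfn = 0x000, .npages = 0x100 },
		{ .id = 1, .base_gfn = 0x200, .npages = 0x100 },
		{ .id = 2, .base_gfn = 0x400, .npages = 0x100 },
	};
	const size_t n = sizeof(slots) / sizeof(slots[0]);

	/* Moving slot 1 to [0x080, 0x180) collides with slot 0: prints 1 1 */
	printf("%d %d\n",
	       overlap_linear(slots, n, 1, 0x080, 0x180),
	       overlap_range_query(slots, n, 1, 0x080, 0x180));
	/* Moving slot 1 to [0x300, 0x400) collides with nothing: prints 0 0 */
	printf("%d %d\n",
	       overlap_linear(slots, n, 1, 0x300, 0x400),
	       overlap_range_query(slots, n, 1, 0x300, 0x400));
	return 0;
}

With the range-query shape, the cost of the check scales with the number of slots that actually intersect the requested range (usually zero or one) plus the lookup, rather than with the total number of memslots.
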
virt/kvm/kvm_main.c
@@ -1815,6 +1815,19 @@ static int kvm_set_memslot(struct kvm *kvm,
 	return 0;
 }
 
+static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
+				      gfn_t start, gfn_t end)
+{
+	struct kvm_memslot_iter iter;
+
+	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
+		if (iter.slot->id != id)
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -1826,8 +1839,9 @@ static int kvm_set_memslot(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
 			    const struct kvm_userspace_memory_region *mem)
 {
-	struct kvm_memory_slot *old, *tmp;
+	struct kvm_memory_slot *old;
 	struct kvm_memory_slot new;
+	struct kvm_memslots *slots;
 	enum kvm_mr_change change;
 	int as_id, id;
 	int r;
@@ -1856,11 +1870,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		return -EINVAL;
 
+	slots = __kvm_memslots(kvm, as_id);
+
 	/*
 	 * Note, the old memslot (and the pointer itself!) may be invalidated
 	 * and/or destroyed by kvm_set_memslot().
 	 */
-	old = id_to_memslot(__kvm_memslots(kvm, as_id), id);
+	old = id_to_memslot(slots, id);
 
 	if (!mem->memory_size) {
 		if (!old || !old->npages)
@@ -1909,18 +1925,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			return 0;
 	}
 
-	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
-		int bkt;
-
-		/* Check for overlaps */
-		kvm_for_each_memslot(tmp, bkt, __kvm_memslots(kvm, as_id)) {
-			if (tmp->id == id)
-				continue;
-			if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
-			      (new.base_gfn >= tmp->base_gfn + tmp->npages)))
-				return -EEXIST;
-		}
-	}
+	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
+	    kvm_check_memslot_overlap(slots, id, new.base_gfn,
+				      new.base_gfn + new.npages))
+		return -EEXIST;
 
 	return kvm_set_memslot(kvm, old, &new, change);
 }