Commit ce5f0215 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Use "new" memslot's address space ID instead of dedicated param

Now that the address space ID is stored in every slot, including fake
slots used for deletion, use the slot's as_id instead of passing in the
redundant information as a param to kvm_set_memslot().  This will greatly
simplify future memslot work by avoiding passing a large number of
variables around purely to honor @as_id.

Drop a comment in the DELETE path about new->as_id being provided purely
for debug, as that's now a lie.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <03189577be214ab8530a4b3a3ee3ed1c2f9e5815.1638817639.git.maciej.szmigiero@oracle.com>
parent 4e4d30cb
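
The refactoring pattern is easy to see in isolation. Below is a minimal stand-alone C sketch (simplified struct and function names, not the actual KVM code) of the idea: once the descriptor struct carries the address space ID itself, callees read it from the struct instead of taking it as a redundant extra parameter.

/*
 * Minimal sketch of the pattern this commit applies; names are
 * illustrative stand-ins, not the KVM API.
 */
#include <stdio.h>

struct memslot {
	int id;
	int as_id;	/* address space ID, now carried by every slot */
};

/* Before: the caller had to pass as_id alongside the slot. */
static void set_memslot_old(const struct memslot *slot, int as_id)
{
	printf("old: slot %d in address space %d\n", slot->id, as_id);
}

/* After: the slot itself is the single source of truth for as_id. */
static void set_memslot_new(const struct memslot *slot)
{
	printf("new: slot %d in address space %d\n", slot->id, slot->as_id);
}

int main(void)
{
	struct memslot slot = { .id = 3, .as_id = 1 };

	set_memslot_old(&slot, slot.as_id);	/* redundant parameter */
	set_memslot_new(&slot);			/* reads slot.as_id */
	return 0;
}

Dropping the parameter means later memslot rework does not have to thread as_id through every helper purely to keep it in sync with the slot.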
@@ -1551,7 +1551,7 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to,
 
 static int kvm_set_memslot(struct kvm *kvm,
 			   const struct kvm_userspace_memory_region *mem,
-			   struct kvm_memory_slot *new, int as_id,
+			   struct kvm_memory_slot *new,
 			   enum kvm_mr_change change)
 {
 	struct kvm_memory_slot *slot, old;
@@ -1574,7 +1574,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 	 */
 	mutex_lock(&kvm->slots_arch_lock);
 
-	slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
+	slots = kvm_dup_memslots(__kvm_memslots(kvm, new->as_id), change);
 	if (!slots) {
 		mutex_unlock(&kvm->slots_arch_lock);
 		return -ENOMEM;
@@ -1594,7 +1594,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * dropped by update_memslots anyway. We'll also revert to the
 		 * old memslots if preparing the new memory region fails.
 		 */
-		slots = install_new_memslots(kvm, as_id, slots);
+		slots = install_new_memslots(kvm, new->as_id, slots);
 
 		/* From this point no new shadow pages pointing to a deleted,
 		 * or moved, memslot will be created.
@@ -1616,7 +1616,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * to retrieve memslots *after* acquiring slots_arch_lock, thus
 		 * the active memslots are guaranteed to be fresh.
 		 */
-		kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, as_id));
+		kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, new->as_id));
 	}
 
 	/*
@@ -1633,7 +1633,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		WARN_ON_ONCE(change != KVM_MR_CREATE);
 		memset(&old, 0, sizeof(old));
 		old.id = new->id;
-		old.as_id = as_id;
+		old.as_id = new->as_id;
 	}
 
 	/* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
@@ -1644,7 +1644,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		goto out_slots;
 
 	update_memslots(slots, new, change);
-	slots = install_new_memslots(kvm, as_id, slots);
+	slots = install_new_memslots(kvm, new->as_id, slots);
 
 	/*
 	 * Update the total number of memslot pages before calling the arch
@@ -1666,7 +1666,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 
 out_slots:
 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
-		slots = install_new_memslots(kvm, as_id, slots);
+		slots = install_new_memslots(kvm, new->as_id, slots);
 	else
 		mutex_unlock(&kvm->slots_arch_lock);
 	kvfree(slots);
@@ -1738,13 +1738,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 		memset(&new, 0, sizeof(new));
 		new.id = id;
-		/*
-		 * This is only for debugging purpose; it should never be
-		 * referenced for a removed memslot.
-		 */
 		new.as_id = as_id;
 
-		return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
+		return kvm_set_memslot(kvm, mem, &new, KVM_MR_DELETE);
 	}
 
 	new.as_id = as_id;
@@ -1807,7 +1803,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		bitmap_set(new.dirty_bitmap, 0, new.npages);
 	}
 
-	r = kvm_set_memslot(kvm, mem, &new, as_id, change);
+	r = kvm_set_memslot(kvm, mem, &new, change);
 	if (r)
 		goto out_bitmap;