Commit 49c7754c authored by Gleb Natapov, committed by Avi Kivity

KVM: Add memory slot versioning and use it to provide fast guest write interface

Keep track of memslot changes by keeping a generation number in the
memslots structure. Provide a kvm_write_guest_cached() function that
skips the gfn_to_hva() translation when memslots have not changed since
the previous invocation.
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 56028d08
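
For orientation before the diff, here is a minimal usage sketch of the new
interface. This is an editor-added illustration, not code from the commit
(which introduces the interface without an in-tree caller); the shared_page
layout and the example_* function names are assumptions. A caller
initializes a gfn_to_hva_cache once for a fixed guest physical address and
then writes through the fast path; kvm_write_guest_cached() only
re-resolves the hva when the memslot generation has changed.

/*
 * Hypothetical caller sketch (illustrative only, not part of this
 * commit): struct shared_page and the example_* names are made up;
 * kvm_gfn_to_hva_cache_init() and kvm_write_guest_cached() are the
 * interfaces added by the diff below.
 */
struct shared_page {
	u64 seq;
	u64 payload;
};

static int example_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			gpa_t gpa)
{
	/* One-time slow path: walks the memslot array and records the
	 * current slots->generation in the cache. */
	return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa);
}

static int example_publish(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   struct shared_page *sp)
{
	/* Fast path: reuses the cached hva and skips gfn_to_hva() as
	 * long as no memslot update has bumped the generation. */
	return kvm_write_guest_cached(kvm, ghc, sp, sizeof(*sp));
}

Both memslot-update paths in the diff (__kvm_set_memory_region() and the
dirty-log bitmap swap) bump slots->generation, so a stale cache is detected
lazily on the next cached write instead of requiring explicit invalidation.
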
arch/x86/kvm/x86.c
@@ -3190,6 +3190,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		}
 
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+		slots->generation++;
 
 		old_slots = kvm->memslots;
 		rcu_assign_pointer(kvm->memslots, slots);
include/linux/kvm_host.h
@@ -199,6 +199,7 @@ struct kvm_irq_routing_table {};
 
 struct kvm_memslots {
 	int nmemslots;
+	u64 generation;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
 };
@@ -352,12 +353,18 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn);
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
include/linux/kvm_types.h
@@ -67,4 +67,11 @@ struct kvm_lapic_irq {
 	u32 dest_id;
 };
 
+struct gfn_to_hva_cache {
+	u64 generation;
+	gpa_t gpa;
+	unsigned long hva;
+	struct kvm_memory_slot *memslot;
+};
+
 #endif /* __KVM_TYPES_H__ */
virt/kvm/kvm_main.c
@@ -687,6 +687,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		if (mem->slot >= slots->nmemslots)
 			slots->nmemslots = mem->slot + 1;
+		slots->generation++;
 		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
 
 		old_memslots = kvm->memslots;
@@ -721,6 +722,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 	if (mem->slot >= slots->nmemslots)
 		slots->nmemslots = mem->slot + 1;
+	slots->generation++;
 
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
 	if (!npages) {
@@ -851,10 +853,10 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
+						gfn_t gfn)
 {
 	int i;
-	struct kvm_memslots *slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -865,6 +867,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 	}
 
 	return NULL;
 }
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
+}
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -927,12 +934,9 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
-static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 				     gfn_t *nr_pages)
 {
-	struct kvm_memory_slot *slot;
-
-	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
@@ -944,7 +948,7 @@ static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
 
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
-	return gfn_to_hva_many(kvm, gfn, NULL);
+	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
@@ -1054,7 +1058,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 	unsigned long addr;
 	gfn_t entry;
 
-	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
 	if (kvm_is_error_hva(addr))
 		return -1;
@@ -1238,6 +1242,47 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 	return 0;
 }
 
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int offset = offset_in_page(gpa);
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+
+	ghc->gpa = gpa;
+	ghc->generation = slots->generation;
+	ghc->memslot = __gfn_to_memslot(slots, gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+	if (!kvm_is_error_hva(ghc->hva))
+		ghc->hva += offset;
+	else
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int r;
+
+	if (slots->generation != ghc->generation)
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+
+	if (kvm_is_error_hva(ghc->hva))
+		return -EFAULT;
+
+	r = copy_to_user((void __user *)ghc->hva, data, len);
+	if (r)
+		return -EFAULT;
+	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
 	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
@@ -1263,11 +1308,9 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
-
-	memslot = gfn_to_memslot(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
@@ -1275,6 +1318,14 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	}
 }
 
+void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	memslot = gfn_to_memslot(kvm, gfn);
+	mark_page_dirty_in_slot(kvm, memslot, gfn);
+}
+
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */