Commit c3262257 authored by Bharata B Rao, committed by Paul Mackerras

KVM: PPC: Book3S HV: Handle memory plug/unplug to secure VM

Register the new memslot with UV during plug and unregister
the memslot during unplug. In addition, release all the
device pages during unplug.
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
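
For context (not part of this patch): "plug" and "unplug" are memslot creation
and deletion driven by the VMM through the standard KVM API. A minimal
userspace sketch of the ioctls that reach the KVM_MR_CREATE and KVM_MR_DELETE
cases handled below (helper names illustrative, error handling omitted):

/* A VMM plugs or unplugs guest memory with KVM_SET_USER_MEMORY_REGION.
 * Creating a slot arrives at kvmppc_core_commit_memory_region_hv() as
 * KVM_MR_CREATE; deleting it (memory_size == 0) arrives as KVM_MR_DELETE. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int plug_memslot(int vm_fd, __u32 slot, __u64 gpa, __u64 size,
                        void *host_mem)
{
        struct kvm_userspace_memory_region region = {
                .slot = slot,
                .guest_phys_addr = gpa,
                .memory_size = size,
                .userspace_addr = (__u64)(unsigned long)host_mem,
        };

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}

static int unplug_memslot(int vm_fd, __u32 slot, __u64 gpa)
{
        struct kvm_userspace_memory_region region = {
                .slot = slot,
                .guest_phys_addr = gpa,
                .memory_size = 0,       /* zero size deletes the slot */
        };

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}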
parent 008e359c
arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -19,6 +19,8 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+                             struct kvm *kvm);
 #else
 static inline int kvmppc_uvmem_init(void)
 {
@@ -64,5 +66,9 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
 {
         return -EFAULT;
 }
+
+static inline void
+kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+                        struct kvm *kvm) { }
 #endif /* CONFIG_PPC_UV */
 #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
arch/powerpc/include/asm/ultravisor-api.h
@@ -27,6 +27,7 @@
 #define UV_RETURN                      0xF11C
 #define UV_ESM                         0xF110
 #define UV_REGISTER_MEM_SLOT           0xF120
+#define UV_UNREGISTER_MEM_SLOT         0xF124
 #define UV_PAGE_IN                     0xF128
 #define UV_PAGE_OUT                    0xF12C
 #define UV_SHARE_PAGE                  0xF130
arch/powerpc/include/asm/ultravisor.h
@@ -67,6 +67,11 @@ static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
                             size, flags, slotid);
 }
 
+static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
+{
+        return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid);
+}
+
 static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
 {
         return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -1101,6 +1101,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
         unsigned long gpa;
         unsigned int shift;
 
+        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
+                kvmppc_uvmem_drop_pages(memslot, kvm);
+
         if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                 return;
arch/powerpc/kvm/book3s_hv.c
@@ -74,6 +74,7 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_book3s_uvmem.h>
+#include <asm/ultravisor.h>
 
 #include "book3s.h"
@@ -4515,6 +4516,29 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
         if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) &&
             ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES))
                 kvmppc_radix_flush_memslot(kvm, old);
+        /*
+         * If UV hasn't yet called H_SVM_INIT_START, don't register memslots.
+         */
+        if (!kvm->arch.secure_guest)
+                return;
+
+        switch (change) {
+        case KVM_MR_CREATE:
+                if (kvmppc_uvmem_slot_init(kvm, new))
+                        return;
+                uv_register_mem_slot(kvm->arch.lpid,
+                                     new->base_gfn << PAGE_SHIFT,
+                                     new->npages * PAGE_SIZE,
+                                     0, new->id);
+                break;
+        case KVM_MR_DELETE:
+                uv_unregister_mem_slot(kvm->arch.lpid, old->id);
+                kvmppc_uvmem_slot_free(kvm, old);
+                break;
+        default:
+                /* TODO: Handle KVM_MR_MOVE */
+                break;
+        }
 }
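
(Aside, not part of the patch: uv_register_mem_slot() takes the slot's
starting guest physical address and its length in bytes, hence the
conversions from memslot units above. As a worked example, with 64K pages
(PAGE_SHIFT = 16) a memslot with base_gfn = 0x100 and npages = 256 registers
start_gpa = 0x100 << 16 = 0x1000000 and size = 256 * 0x10000 = 0x1000000,
i.e. 16 MB of guest memory starting at guest physical address 16 MB.)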
arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -249,6 +249,43 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
         return H_SUCCESS;
 }
 
+/*
+ * Drop device pages that we maintain for the secure guest
+ *
+ * We first mark the pages to be skipped from UV_PAGE_OUT when there
+ * is HV side fault on these pages. Next we *get* these pages, forcing
+ * fault on them, do fault time migration to replace the device PTEs in
+ * QEMU page table with normal PTEs from newly allocated pages.
+ */
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+                             struct kvm *kvm)
+{
+        int i;
+        struct kvmppc_uvmem_page_pvt *pvt;
+        unsigned long pfn, uvmem_pfn;
+        unsigned long gfn = free->base_gfn;
+
+        for (i = free->npages; i; --i, ++gfn) {
+                struct page *uvmem_page;
+
+                mutex_lock(&kvm->arch.uvmem_lock);
+                if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
+                        mutex_unlock(&kvm->arch.uvmem_lock);
+                        continue;
+                }
+
+                uvmem_page = pfn_to_page(uvmem_pfn);
+                pvt = uvmem_page->zone_device_data;
+                pvt->skip_page_out = true;
+                mutex_unlock(&kvm->arch.uvmem_lock);
+
+                pfn = gfn_to_pfn(kvm, gfn);
+                if (is_error_noslot_pfn(pfn))
+                        continue;
+                kvm_release_pfn_clean(pfn);
+        }
+}
+
 /*
  * Get a free device PFN from the pool
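
A note on the mechanism described in the comment above (not part of the
patch): gfn_to_pfn() forces an HV-side fault on each device-private page, and
the fault-time migration path consults skip_page_out to decide whether the
page contents must first be pulled out of secure memory with UV_PAGE_OUT.
A self-contained toy model of that decision, with every name stubbed for
illustration (this is not the kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stub for the per-page state kept in page->zone_device_data. */
struct toy_pvt {
        bool skip_page_out;
};

/* Stub for the UV_PAGE_OUT ucall that copies contents out of secure memory. */
static int toy_uv_page_out(unsigned long gfn)
{
        printf("UV_PAGE_OUT: gfn %#lx contents preserved\n", gfn);
        return 0;
}

/* Stand-in for the fault-time migration of one device-private page. */
static int toy_fault_migrate(struct toy_pvt *pvt, unsigned long gfn)
{
        if (!pvt->skip_page_out)
                return toy_uv_page_out(gfn);    /* ordinary page-out path */
        printf("gfn %#lx dropped, UV_PAGE_OUT skipped\n", gfn);
        return 0;
}

int main(void)
{
        struct toy_pvt pvt = { .skip_page_out = false };

        toy_fault_migrate(&pvt, 0x100);         /* normal HV-side fault */

        /* kvmppc_uvmem_drop_pages() sets the flag, then forces the fault: */
        pvt.skip_page_out = true;
        toy_fault_migrate(&pvt, 0x100);

        return 0;
}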