Commit 81ab595d authored by Laurent Dufour, committed by Paul Mackerras

KVM: PPC: Book3S HV: Rework secure mem slot dropping

When a secure memslot is dropped, all the pages backed in the secure
device (that is, actually backed by secure memory managed by the
Ultravisor) should be paged out to normal pages. Previously, this was
achieved by triggering the page fault mechanism, which calls
kvmppc_svm_page_out() on each page.

This can't work when hot unplugging a memory slot because the memory
slot is flagged as invalid and gfn_to_pfn() then does not try to access
the page, so the page fault mechanism is not triggered.

Since the final goal is to call kvmppc_svm_page_out(), it is simpler to
call it directly instead of triggering that mechanism. This way,
kvmppc_uvmem_drop_pages() can be called even when hot unplugging a
memslot.

Since kvmppc_uvmem_drop_pages() already holds kvm->arch.uvmem_lock,
__kvmppc_svm_page_out() is called directly. As __kvmppc_svm_page_out()
needs the VMA pointer to migrate the pages, the VMA is fetched lazily,
so that find_vma() is not called for every page. In addition, the
mmap_sem is held in read mode during that time, not in write mode,
since the virtual memory layout is not impacted, and
kvm->arch.uvmem_lock prevents concurrent operations on the secure
device.
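
The lazy VMA lookup amounts to caching the last VMA found and calling
find_vma_intersection() again only once the walked address passes
vma->vm_end, all under mmap_read_lock(). A minimal sketch of that
pattern, detached from this patch (the helper walk_slot_range() and its
per-page callback are illustrative only, not part of the change):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mmap_lock.h>

/*
 * Illustrative only: walk [addr, end) one page at a time, reusing the
 * cached VMA until addr crosses vma->vm_end, under the mmap read lock.
 */
static int walk_slot_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long end,
			   int (*do_page)(struct vm_area_struct *vma,
					  unsigned long addr))
{
	struct vm_area_struct *vma = NULL;
	int ret = 0;

	mmap_read_lock(mm);
	for (; addr < end; addr += PAGE_SIZE) {
		/* Re-fetch the VMA only when leaving the cached one */
		if (!vma || addr >= vma->vm_end) {
			vma = find_vma_intersection(mm, addr, addr + 1);
			if (!vma) {
				ret = -EFAULT;
				break;
			}
		}
		ret = do_page(vma, addr);
		if (ret)
			break;
	}
	mmap_read_unlock(mm);
	return ret;
}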
Reviewed-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>
	[modified check on the VMA in kvmppc_uvmem_drop_pages]
Signed-off-by: Ram Pai <linuxram@us.ibm.com>
	[modified the changelog description]
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent f1b87ea8
@@ -594,35 +594,53 @@ static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
  * fault on them, do fault time migration to replace the device PTEs in
  * QEMU page table with normal PTEs from newly allocated pages.
  */
-void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
 			     struct kvm *kvm, bool skip_page_out)
 {
 	int i;
 	struct kvmppc_uvmem_page_pvt *pvt;
-	unsigned long pfn, uvmem_pfn;
-	unsigned long gfn = free->base_gfn;
+	struct page *uvmem_page;
+	struct vm_area_struct *vma = NULL;
+	unsigned long uvmem_pfn, gfn;
+	unsigned long addr;
+
+	mmap_read_lock(kvm->mm);
+
+	addr = slot->userspace_addr;
 
-	for (i = free->npages; i; --i, ++gfn) {
-		struct page *uvmem_page;
+	gfn = slot->base_gfn;
+	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {
+
+		/* Fetch the VMA if addr is not in the latest fetched one */
+		if (!vma || addr >= vma->vm_end) {
+			vma = find_vma_intersection(kvm->mm, addr, addr+1);
+			if (!vma) {
+				pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
+				break;
+			}
+		}
 
 		mutex_lock(&kvm->arch.uvmem_lock);
-		if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
+
+		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
+			uvmem_page = pfn_to_page(uvmem_pfn);
+			pvt = uvmem_page->zone_device_data;
+			pvt->skip_page_out = skip_page_out;
+			pvt->remove_gfn = true;
+
+			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
+						  PAGE_SHIFT, kvm, pvt->gpa))
+				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
+				       pvt->gpa, addr);
+		} else {
+			/* Remove the shared flag if any */
 			kvmppc_gfn_remove(gfn, kvm);
-			mutex_unlock(&kvm->arch.uvmem_lock);
-			continue;
 		}
 
-		uvmem_page = pfn_to_page(uvmem_pfn);
-		pvt = uvmem_page->zone_device_data;
-		pvt->skip_page_out = skip_page_out;
-		pvt->remove_gfn = true;
 		mutex_unlock(&kvm->arch.uvmem_lock);
-
-		pfn = gfn_to_pfn(kvm, gfn);
-		if (is_error_noslot_pfn(pfn))
-			continue;
-		kvm_release_pfn_clean(pfn);
 	}
+
+	mmap_read_unlock(kvm->mm);
 }
 
 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
...