Commit f41dff4e authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache

Rather than relying on the host to free the previously-donated pKVM
hypervisor VM pages explicitly on teardown, introduce a dedicated
teardown memcache which allows the host to reclaim guest memory
resources without having to keep track of all of the allocations made by
the pKVM hypervisor at EL2.
Tested-by: Vincent Donnefort <vdonnefort@google.com>
Co-developed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
[maz: dropped __maybe_unused from unmap_donated_memory_noclear()]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-21-will@kernel.org
parent 60dfe093
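The "memcache" in question is a stack of free pages threaded through the pages themselves: struct kvm_hyp_memcache holds only a head pointer and a page count, and each free page stores the physical address of the next, so the same list can be walked at EL1 and at EL2 regardless of how each maps memory. The helpers were added earlier in this series; the sketch below captures the idea and may differ in detail from the upstream definitions:

struct kvm_hyp_memcache {
	phys_addr_t head;	/* PA of the first page, 0 when empty */
	unsigned long nr_pages;
};

/* Push @virt onto the stack, stashing the old head in the page itself. */
static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc, void *virt,
				     phys_addr_t (*to_pa)(void *virt))
{
	phys_addr_t *p = virt;

	*p = mc->head;
	mc->head = to_pa(virt);
	mc->nr_pages++;
}

/* Pop the head page, converting its PA with the caller's helper. */
static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

Because the links are physical addresses, producer and consumer each pass their own address translation (hyp_virt_to_phys at EL2, the host's phys-to-virt conversion at EL1).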
@@ -176,12 +176,7 @@ typedef unsigned int pkvm_handle_t;
 
 struct kvm_protected_vm {
 	pkvm_handle_t handle;
-
-	struct {
-		void *pgd;
-		void *vm;
-		void *vcpus[KVM_MAX_VCPUS];
-	} hyp_donations;
+	struct kvm_hyp_memcache teardown_mc;
 };
 
 struct kvm_arch {
...
@@ -76,7 +76,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 int hyp_pin_shared_mem(void *from, void *to);
 void hyp_unpin_shared_mem(void *from, void *to);
 
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm);
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
 int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
 		    struct kvm_hyp_memcache *host_mc);
...
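For contrast, refill_memcache() in the context above moves pages in the opposite direction, topping an EL2 memcache up from one filled by the host. A simplified sketch built on the helpers above; note that the real helper must also transfer ownership of each admitted page from host to hypervisor (elided here), and hyp_phys_to_virt() is assumed from the nVHE memory headers:

int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc)
{
	while (mc->nr_pages < min_pages && host_mc->nr_pages) {
		void *page = pop_hyp_memcache(host_mc, hyp_phys_to_virt);

		/* Upstream donates the page host->hyp before reusing it. */
		push_hyp_memcache(mc, page, hyp_virt_to_phys);
	}

	return mc->nr_pages >= min_pages ? 0 : -ENOMEM;
}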
@@ -260,19 +260,24 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
 	return 0;
 }
 
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm)
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
 {
-	void *pgd = vm->pgt.pgd;
-	unsigned long nr_pages;
-
-	nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
+	void *addr;
 
+	/* Dump all pgtable pages in the hyp_pool */
 	guest_lock_component(vm);
 	kvm_pgtable_stage2_destroy(&vm->pgt);
 	vm->kvm.arch.mmu.pgd_phys = 0ULL;
 	guest_unlock_component(vm);
 
-	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(pgd), nr_pages));
+	/* Drain the hyp_pool into the memcache */
+	addr = hyp_alloc_pages(&vm->pool, 0);
+	while (addr) {
+		memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+		push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
+		addr = hyp_alloc_pages(&vm->pool, 0);
+	}
 }
 
 int __pkvm_prot_finalize(void)
...
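Two details of the drain loop are worth noting. First, hyp_alloc_pages(&vm->pool, 0) hands the pool's pages out one order-0 allocation at a time and returns NULL once the pool is empty, terminating the loop. Second, each page's EL2 vmemmap entry is scrubbed before ownership changes, so no stale allocator state survives for a PFN the hypervisor no longer owns. At the time of this series, struct hyp_page was roughly the following (a sketch; the exact field set may differ):

/* EL2's per-page metadata, indexed by PFN into the hyp vmemmap. */
struct hyp_page {
	unsigned short refcount;
	unsigned short order;
};

The page contents themselves are then reused to carry the memcache link: push_hyp_memcache() writes it before __pkvm_hyp_donate_host() hands the page back to the host, one page at a time, since the pool's pages are no longer a single contiguous pgd allocation.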
@@ -393,7 +393,7 @@ static void unmap_donated_memory(void *va, size_t size)
 	__unmap_donated_memory(va, size);
 }
 
-static void __maybe_unused unmap_donated_memory_noclear(void *va, size_t size)
+static void unmap_donated_memory_noclear(void *va, size_t size)
 {
 	if (!va)
 		return;
@@ -527,8 +527,21 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
 	return ret;
 }
 
+static void
+teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
+{
+	size = PAGE_ALIGN(size);
+	memset(addr, 0, size);
+
+	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
+		push_hyp_memcache(mc, start, hyp_virt_to_phys);
+
+	unmap_donated_memory_noclear(addr, size);
+}
+
 int __pkvm_teardown_vm(pkvm_handle_t handle)
 {
+	struct kvm_hyp_memcache *mc;
 	struct pkvm_hyp_vm *hyp_vm;
 	struct kvm *host_kvm;
 	unsigned int idx;
@@ -547,25 +560,27 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
 		goto err_unlock;
 	}
 
+	host_kvm = hyp_vm->host_kvm;
+
 	/* Ensure the VMID is clean before it can be reallocated */
 	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
 	remove_vm_table_entry(handle);
 	hyp_spin_unlock(&vm_table_lock);
 
 	/* Reclaim guest pages (including page-table pages) */
-	reclaim_guest_pages(hyp_vm);
+	mc = &host_kvm->arch.pkvm.teardown_mc;
+	reclaim_guest_pages(hyp_vm, mc);
 	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
 
-	/* Return the metadata pages to the host */
+	/* Push the metadata pages to the teardown memcache */
 	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
 		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
 
-		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
 	}
 
-	host_kvm = hyp_vm->host_kvm;
 	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
-	unmap_donated_memory(hyp_vm, vm_size);
+	teardown_donated_memory(mc, hyp_vm, vm_size);
 
 	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
 	return 0;
...
@@ -147,8 +147,6 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
 	handle = ret;
 
 	host_kvm->arch.pkvm.handle = handle;
-	host_kvm->arch.pkvm.hyp_donations.pgd = pgd;
-	host_kvm->arch.pkvm.hyp_donations.vm = hyp_vm;
 
 	/* Donate memory for the vcpus at hyp and initialize it. */
 	hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
@@ -167,12 +165,12 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
 			goto destroy_vm;
 		}
 
-		host_kvm->arch.pkvm.hyp_donations.vcpus[idx] = hyp_vcpu;
-
 		ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
 					hyp_vcpu);
-		if (ret)
+		if (ret) {
+			free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
 			goto destroy_vm;
+		}
 	}
 
 	return 0;
@@ -201,30 +199,13 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
 
 void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 {
-	unsigned long idx, nr_vcpus = host_kvm->created_vcpus;
-	size_t pgd_sz, hyp_vm_sz;
-
-	if (host_kvm->arch.pkvm.handle)
+	if (host_kvm->arch.pkvm.handle) {
 		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
 					  host_kvm->arch.pkvm.handle));
-
-	host_kvm->arch.pkvm.handle = 0;
-
-	for (idx = 0; idx < nr_vcpus; ++idx) {
-		void *hyp_vcpu = host_kvm->arch.pkvm.hyp_donations.vcpus[idx];
-
-		if (!hyp_vcpu)
-			break;
-
-		free_pages_exact(hyp_vcpu, PAGE_ALIGN(PKVM_HYP_VCPU_SIZE));
 	}
 
-	hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
-					size_mul(sizeof(void *), nr_vcpus)));
-	pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
-
-	free_pages_exact(host_kvm->arch.pkvm.hyp_donations.vm, hyp_vm_sz);
-	free_pages_exact(host_kvm->arch.pkvm.hyp_donations.pgd, pgd_sz);
+	host_kvm->arch.pkvm.handle = 0;
+	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
 }
 
 int pkvm_init_host_vm(struct kvm *host_kvm)
...
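free_hyp_memcache() comes from an earlier patch in this series. A plausible host-side shape for it, assuming the pop helper sketched above (the upstream version is structured around a generic walker, so treat this as illustrative):

static void hyp_mc_free_fn(void *addr, void *unused)
{
	free_page((unsigned long)addr);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc)
{
	/* Pop each reclaimed page and return it to the page allocator. */
	while (mc->nr_pages)
		hyp_mc_free_fn(pop_hyp_memcache(mc, phys_to_virt), NULL);
}

The point of the design is visible here: the host never needs to track how many pages EL2 allocated or where they came from; whatever the hypervisor pushed during teardown is exactly what gets freed.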