Commit ca9f4942 authored by Bharata B Rao, committed by Paul Mackerras

KVM: PPC: Book3S HV: Support for running secure guests

A pseries guest can be run as a secure guest on Ultravisor-enabled
POWER platforms. On such platforms, this driver is used to manage the
movement of guest pages between normal memory, which is managed by
the hypervisor (HV), and secure memory, which is managed by the
Ultravisor (UV).

HV is informed about the guest's transition to secure mode via hcalls:

H_SVM_INIT_START: Initiate securing a VM
H_SVM_INIT_DONE: Conclude securing a VM

As part of H_SVM_INIT_START, all existing memslots are registered
with the UV. The H_SVM_INIT_DONE call from the UV informs the HV that
the guest's transition to secure mode is complete.
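
For illustration, a simplified sketch of what the H_SVM_INIT_START
handler does (error unwinding is trimmed; the full version lives in
book3s_hv_uvmem.c, whose diff is not expanded below):

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;
	int ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		/* Track this slot's GFN range on the HV side ... */
		ret = kvmppc_uvmem_slot_init(kvm, memslot);
		if (ret)
			break;
		/* ... and register it with the Ultravisor. */
		ret = uv_register_mem_slot(kvm->arch.lpid,
					   memslot->base_gfn << PAGE_SHIFT,
					   memslot->npages * PAGE_SIZE,
					   0, memslot->id);
		if (ret < 0) {
			ret = H_PARAMETER;
			break;
		}
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
	return ret;
}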

These two states (transition to secure mode STARTED and transition
to secure mode COMPLETED) are recorded in kvm->arch.secure_guest.
Setting these states will cause the assembly code that enters the
guest to call the UV_RETURN ucall instead of trying to enter the
guest directly.
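
Rendered in C for illustration (the real check is made by the
assembly guest-entry code; the helper name below is made up for this
sketch):

/* Nonzero secure_guest means entry must go through the Ultravisor. */
static inline bool kvmhv_enter_via_uv(struct kvm *kvm)
{
	/* KVMPPC_SECURE_INIT_START and/or KVMPPC_SECURE_INIT_DONE */
	return kvm->arch.secure_guest != 0;
}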

Migration of pages between the normal and secure memory of a secure
guest is implemented by the H_SVM_PAGE_IN and H_SVM_PAGE_OUT hcalls:

H_SVM_PAGE_IN: Move the content of a normal page to a secure page
H_SVM_PAGE_OUT: Move the content of a secure page to a normal page

Private ZONE_DEVICE memory, equal in size to the secure memory
available in the platform for running secure guests, is created.
Whenever a page belonging to the guest becomes secure, a page from
this private device memory is used to represent and track that secure
page on the HV side. The movement of pages between normal and secure
memory is done via migrate_vma_pages() using UV_PAGE_IN and
UV_PAGE_OUT ucalls.
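
A condensed sketch of the page-in side of that flow, assuming
kvmppc_uvmem_get_page() is this patch's allocator for device-private
pages (the function name svm_page_in_sketch and the trimmed error
handling are this sketch's own):

static int svm_page_in_sketch(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end,
			      unsigned long gpa, struct kvm *kvm,
			      unsigned long page_shift)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma mig = {
		.vma = vma,
		.start = start,
		.end = end,
		.src = &src_pfn,
		.dst = &dst_pfn,
	};
	struct page *dpage;
	int ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

	ret = -1;
	if (!(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	/* Device-private page that stands in for the now-secure page */
	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage)
		goto out_finalize;

	/* Hand the page contents over to secure memory */
	ret = uv_page_in(kvm->arch.lpid,
			 (*mig.src >> MIGRATE_PFN_SHIFT) << page_shift,
			 gpa, 0, page_shift);

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}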

To prevent the device-private pages (which correspond to pages of
the secure guest) from participating in KSM merging, H_SVM_PAGE_IN
calls ksm_madvise(), which must be called with mmap_sem held for
writing. Hence kvmppc_svm_page_in() is called with mmap_sem held for
writing, and it downgrades to a read lock after calling ksm_madvise().
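
The resulting locking pattern, sketched with a hypothetical wrapper
name (svm_page_in_locked) and the migration step elided:

static unsigned long svm_page_in_locked(struct kvm *kvm,
					unsigned long start,
					unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long ret = H_PARAMETER;
	bool downgraded = false;

	down_write(&kvm->mm->mmap_sem);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (vma && vma->vm_start <= start && vma->vm_end >= end) {
		/* Needs the write lock: ksm_madvise() updates vm_flags */
		if (!ksm_madvise(vma, vma->vm_start, vma->vm_end,
				 MADV_UNMERGEABLE, &vma->vm_flags)) {
			downgrade_write(&kvm->mm->mmap_sem);
			downgraded = true;
			/* ... migrate the page under the read lock ... */
			ret = H_SUCCESS;
		}
	}
	if (downgraded)
		up_read(&kvm->mm->mmap_sem);
	else
		up_write(&kvm->mm->mmap_sem);
	return ret;
}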

[paulus@ozlabs.org - roll in patch "KVM: PPC: Book3S HV: Take write
 mmap_sem when calling ksm_madvise"]
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 33cf1707
arch/powerpc/include/asm/hvcall.h
@@ -342,6 +342,12 @@
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C
/* Platform-specific hcalls used by the Ultravisor */
#define H_SVM_PAGE_IN 0xEF00
#define H_SVM_PAGE_OUT 0xEF04
#define H_SVM_INIT_START 0xEF08
#define H_SVM_INIT_DONE 0xEF0C
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
#define H_SET_MODE_RESOURCE_SET_DAWR 2
...
arch/powerpc/include/asm/kvm_book3s_uvmem.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KVM_BOOK3S_UVMEM_H__
#define __ASM_KVM_BOOK3S_UVMEM_H__
#ifdef CONFIG_PPC_UV
int kvmppc_uvmem_init(void);
void kvmppc_uvmem_free(void);
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
void kvmppc_uvmem_slot_free(struct kvm *kvm,
const struct kvm_memory_slot *slot);
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm,
unsigned long gra,
unsigned long flags,
unsigned long page_shift);
unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
unsigned long gra,
unsigned long flags,
unsigned long page_shift);
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
#else
static inline int kvmppc_uvmem_init(void)
{
return 0;
}
static inline void kvmppc_uvmem_free(void) { }
static inline int
kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
return 0;
}
static inline void
kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { }
static inline unsigned long
kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra,
unsigned long flags, unsigned long page_shift)
{
return H_UNSUPPORTED;
}
static inline unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra,
unsigned long flags, unsigned long page_shift)
{
return H_UNSUPPORTED;
}
static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
return H_UNSUPPORTED;
}
static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
return H_UNSUPPORTED;
}
#endif /* CONFIG_PPC_UV */
#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
arch/powerpc/include/asm/kvm_host.h
@@ -275,6 +275,10 @@ struct kvm_hpt_info {
struct kvm_resize_hpt;
/* Flag values for kvm_arch.secure_guest */
#define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
#define KVMPPC_SECURE_INIT_DONE 0x2 /* H_SVM_INIT_DONE completed */
struct kvm_arch {
unsigned int lpid;
unsigned int smt_mode; /* # vcpus per virtual core */
@@ -330,6 +334,8 @@ struct kvm_arch {
#endif
struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
struct mutex uvmem_lock;
struct list_head uvmem_pfns;
struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
u64 l1_ptcr;
int max_nested_lpid;
...
arch/powerpc/include/asm/ultravisor-api.h
@@ -26,6 +26,9 @@
#define UV_WRITE_PATE 0xF104
#define UV_RETURN 0xF11C
#define UV_ESM 0xF110
#define UV_REGISTER_MEM_SLOT 0xF120
#define UV_PAGE_IN 0xF128
#define UV_PAGE_OUT 0xF12C
#define UV_SHARE_PAGE 0xF130
#define UV_UNSHARE_PAGE 0xF134
#define UV_UNSHARE_ALL_PAGES 0xF140
...
arch/powerpc/include/asm/ultravisor.h
@@ -46,4 +46,25 @@ static inline int uv_unshare_all_pages(void)
return ucall_norets(UV_UNSHARE_ALL_PAGES);
}
static inline int uv_page_in(u64 lpid, u64 src_ra, u64 dst_gpa, u64 flags,
u64 page_shift)
{
return ucall_norets(UV_PAGE_IN, lpid, src_ra, dst_gpa, flags,
page_shift);
}
static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags,
u64 page_shift)
{
return ucall_norets(UV_PAGE_OUT, lpid, dst_ra, src_gpa, flags,
page_shift);
}
static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
u64 flags, u64 slotid)
{
return ucall_norets(UV_REGISTER_MEM_SLOT, lpid, start_gpa,
size, flags, slotid);
}
#endif /* _ASM_POWERPC_ULTRAVISOR_H */
arch/powerpc/kvm/Makefile
@@ -71,6 +71,9 @@ kvm-hv-y += \
book3s_64_mmu_radix.o \
book3s_hv_nested.o
kvm-hv-$(CONFIG_PPC_UV) += \
book3s_hv_uvmem.o
kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm.o
...
arch/powerpc/kvm/book3s_hv.c
@@ -72,6 +72,8 @@
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
#include <asm/kvm_host.h>
#include <asm/kvm_book3s_uvmem.h>
#include "book3s.h" #include "book3s.h"
...@@ -1070,6 +1072,25 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) ...@@ -1070,6 +1072,25 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6)); kvmppc_get_gpr(vcpu, 6));
break; break;
case H_SVM_PAGE_IN:
ret = kvmppc_h_svm_page_in(vcpu->kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_PAGE_OUT:
ret = kvmppc_h_svm_page_out(vcpu->kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_INIT_START:
ret = kvmppc_h_svm_init_start(vcpu->kvm);
break;
case H_SVM_INIT_DONE:
ret = kvmppc_h_svm_init_done(vcpu->kvm);
break;
default:
return RESUME_HOST;
}
@@ -4767,6 +4788,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
char buf[32];
int ret;
mutex_init(&kvm->arch.uvmem_lock);
INIT_LIST_HEAD(&kvm->arch.uvmem_pfns);
mutex_init(&kvm->arch.mmu_setup_lock);
/* Allocate the guest's logical partition ID */
@@ -4938,6 +4961,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvm->arch.process_table = 0;
kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
}
kvmppc_free_lpid(kvm->arch.lpid);
kvmppc_free_pimap(kvm);
@@ -5528,11 +5552,16 @@ static int kvmppc_book3s_init_hv(void)
no_mixing_hpt_and_radix = true;
}
r = kvmppc_uvmem_init();
if (r < 0)
pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
return r;
}
static void kvmppc_book3s_exit_hv(void)
{
kvmppc_uvmem_free();
kvmppc_free_host_rm_ops();
if (kvmppc_radix_possible())
kvmppc_radix_exit();
...
(The diff for the new file arch/powerpc/kvm/book3s_hv_uvmem.c is collapsed.)