Commit 5304002d authored by Will Deacon, committed by Marc Zyngier

KVM: arm64: Rename 'host_kvm' to 'host_mmu'

In preparation for introducing VM and vCPU state at EL2, rename the
existing 'struct host_kvm' and its singleton 'host_kvm' instance to
'host_mmu' so as to avoid confusion between the structure tracking the
host stage-2 MMU state and the host instance of a 'struct kvm' for a
protected guest.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-12-will@kernel.org
parent 1c80002e
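
For context on the clash being avoided: pKVM keeps exactly one piece of host stage-2 state at EL2, while the per-VM hypervisor state this series goes on to introduce refers back to the host's 'struct kvm' for each protected guest. Below is a minimal sketch of that distinction, simplified from the kernel sources; the per-VM structure is illustrative of where the series is heading, not part of this patch:

/* The singleton renamed by this patch: host stage-2 MMU state at EL2. */
struct host_mmu {                          /* was: struct host_kvm */
        struct kvm_arch arch;              /* host stage-2 config (VTCR, MMU) */
        struct kvm_pgtable pgt;            /* host stage-2 page-table */
        struct kvm_pgtable_mm_ops mm_ops;  /* page-table memory callbacks */
        hyp_spinlock_t lock;               /* protects the host stage-2 */
};
extern struct host_mmu host_mmu;           /* was: host_kvm */

/*
 * Illustrative only: later patches track per-VM state at EL2 that
 * points back at the host's 'struct kvm' for a protected guest.
 * Next to such a pointer, a singleton named 'host_kvm' would be
 * ambiguous, hence the rename above.
 */
struct pkvm_hyp_vm_sketch {
        struct kvm *host_kvm;              /* the host's kvm for this guest */
        /* ... vCPU and memory state donated to EL2 ... */
};
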
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -44,13 +44,13 @@ static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
 	return prot & PKVM_PAGE_STATE_PROT_MASK;
 }
 
-struct host_kvm {
+struct host_mmu {
 	struct kvm_arch arch;
 	struct kvm_pgtable pgt;
 	struct kvm_pgtable_mm_ops mm_ops;
 	hyp_spinlock_t lock;
 };
-extern struct host_kvm host_kvm;
+extern struct host_mmu host_mmu;
 
 /* This corresponds to page-table locking order */
 enum pkvm_component_id {
@@ -76,7 +76,7 @@ void hyp_unpin_shared_mem(void *from, void *to);
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
 	else
 		write_sysreg(0, vttbr_el2);
 }
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -22,18 +22,18 @@
 #define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
 
 extern unsigned long hyp_nr_cpus;
-struct host_kvm host_kvm;
+struct host_mmu host_mmu;
 
 static struct hyp_pool host_s2_pool;
 
 static void host_lock_component(void)
 {
-	hyp_spin_lock(&host_kvm.lock);
+	hyp_spin_lock(&host_mmu.lock);
 }
 
 static void host_unlock_component(void)
 {
-	hyp_spin_unlock(&host_kvm.lock);
+	hyp_spin_unlock(&host_mmu.lock);
 }
 
 static void hyp_lock_component(void)
@@ -88,7 +88,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
 	if (ret)
 		return ret;
 
-	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
+	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
 		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
 		.zalloc_page = host_s2_zalloc_page,
 		.phys_to_virt = hyp_phys_to_virt,
@@ -109,7 +109,7 @@ static void prepare_host_vtcr(void)
 	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
 	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
 
-	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+	host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
 					  id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
@@ -117,25 +117,25 @@ static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot pr
 int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
-	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
 	int ret;
 
 	prepare_host_vtcr();
-	hyp_spin_lock_init(&host_kvm.lock);
-	mmu->arch = &host_kvm.arch;
+	hyp_spin_lock_init(&host_mmu.lock);
+	mmu->arch = &host_mmu.arch;
 	ret = prepare_s2_pool(pgt_pool_base);
 	if (ret)
 		return ret;
 
-	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
-					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
+	ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
+					&host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
 					host_stage2_force_pte_cb);
 	if (ret)
 		return ret;
 
-	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
-	mmu->pgt = &host_kvm.pgt;
+	mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
+	mmu->pgt = &host_mmu.pgt;
 	atomic64_set(&mmu->vmid.id, 0);
 
 	return 0;
@@ -143,19 +143,19 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
 
 int __pkvm_prot_finalize(void)
 {
-	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 
 	if (params->hcr_el2 & HCR_VM)
 		return -EPERM;
 
 	params->vttbr = kvm_get_vttbr(mmu);
-	params->vtcr = host_kvm.arch.vtcr;
+	params->vtcr = host_mmu.arch.vtcr;
 	params->hcr_el2 |= HCR_VM;
 	kvm_flush_dcache_to_poc(params, sizeof(*params));
 
 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
 
 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only
@@ -173,7 +173,7 @@ int __pkvm_prot_finalize(void)
 
 static int host_stage2_unmap_dev_all(void)
 {
-	struct kvm_pgtable *pgt = &host_kvm.pgt;
+	struct kvm_pgtable *pgt = &host_mmu.pgt;
 	struct memblock_region *reg;
 	u64 addr = 0;
 	int i, ret;
@@ -258,7 +258,7 @@ static bool range_is_memory(u64 start, u64 end)
 
 static inline int __host_stage2_idmap(u64 start, u64 end,
 				      enum kvm_pgtable_prot prot)
 {
-	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
+	return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
 				      prot, &host_s2_pool);
 }
@@ -271,7 +271,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
 #define host_stage2_try(fn, ...)				\
 	({							\
 		int __ret;					\
-		hyp_assert_lock_held(&host_kvm.lock);		\
+		hyp_assert_lock_held(&host_mmu.lock);		\
 		__ret = fn(__VA_ARGS__);			\
 		if (__ret == -ENOMEM) {				\
 			__ret = host_stage2_unmap_dev_all();	\
@@ -294,8 +294,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 	u32 level;
 	int ret;
 
-	hyp_assert_lock_held(&host_kvm.lock);
-	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
+	hyp_assert_lock_held(&host_mmu.lock);
+	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
 	if (ret)
 		return ret;
@@ -327,7 +327,7 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
 
 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
-	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
 			       addr, size, &host_s2_pool, owner_id);
 }
@@ -468,8 +468,8 @@ static int __host_check_page_state_range(u64 addr, u64 size,
 		.get_page_state	= host_get_page_state,
 	};
 
-	hyp_assert_lock_held(&host_kvm.lock);
-	return check_page_state_range(&host_kvm.pgt, addr, size, &d);
+	hyp_assert_lock_held(&host_mmu.lock);
+	return check_page_state_range(&host_mmu.pgt, addr, size, &d);
 }
 
 static int __host_set_page_state_range(u64 addr, u64 size,
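
One of the renamed sites above deserves a note: host_stage2_try() encodes pKVM's reclaim-and-retry policy for host stage-2 updates. When a page-table operation fails with -ENOMEM, the hypervisor unmaps all device (MMIO) ranges from the host stage-2, since the host can fault them back in lazily, and retries the operation once with the replenished pool. A standalone sketch of that pattern, using an illustrative function-pointer wrapper in place of the kernel's variadic macro:

/*
 * Sketch of the retry-on-ENOMEM pattern behind host_stage2_try().
 * The wrapper name and signature are illustrative, not the kernel's;
 * host_stage2_unmap_dev_all() and host_mmu.lock are the real symbols.
 */
static int stage2_op_with_reclaim(int (*op)(void *arg), void *arg)
{
	int ret;

	hyp_assert_lock_held(&host_mmu.lock);	/* caller holds the host lock */

	ret = op(arg);
	if (ret == -ENOMEM) {
		/*
		 * The stage-2 pool is exhausted: tear down device
		 * mappings to free page-table pages, then retry once.
		 */
		ret = host_stage2_unmap_dev_all();
		if (!ret)
			ret = op(arg);
	}
	return ret;
}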