Commit c0f00a18 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV Nested: Change nested guest lookup to use idr

This removes the fixed-size kvm->arch.nested_guests array.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220123120043.3586018-5-npiggin@gmail.com
parent 6ba2a292
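
The core trick in this patch is a two-phase insert: __prealloc_nested() reserves the lpid slot with a NULL entry via idr_alloc() under GFP_KERNEL while no spinlock is held (idr_alloc() may sleep), and __add_nested() later installs the real pointer under kvm->mmu_lock with idr_replace(), which cannot fail with -ENOMEM. A minimal sketch of that pattern outside the KVM code follows; the example_idr, example_lock, and example_install names are hypothetical stand-ins for kvm->arch.kvm_nested_guest_idr, kvm->mmu_lock, and the __prealloc_nested()/__add_nested() pair:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);        /* stands in for kvm_nested_guest_idr */
static DEFINE_SPINLOCK(example_lock);  /* stands in for kvm->mmu_lock */

static int example_install(int id, void *object)
{
	/* Phase 1: reserve the slot with a NULL entry; may sleep. */
	if (idr_alloc(&example_idr, NULL, id, id + 1, GFP_KERNEL) != id)
		return -ENOMEM;

	/* Phase 2: install the real pointer atomically under the lock. */
	spin_lock(&example_lock);
	WARN_ON(idr_replace(&example_idr, object, id));
	spin_unlock(&example_lock);

	return 0;
}
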
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -327,8 +327,7 @@ struct kvm_arch {
 	struct list_head uvmem_pfns;
 	struct mutex mmu_setup_lock;	/* nests inside vcpu mutexes */
 	u64 l1_ptcr;
-	int max_nested_lpid;
-	struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
+	struct idr kvm_nested_guest_idr;
 	/* This array can grow quite large, keep it at the end */
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 #endif
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -521,11 +521,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
 	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
 }
 
-void kvmhv_vm_nested_init(struct kvm *kvm)
-{
-	kvm->arch.max_nested_lpid = -1;
-}
-
 /*
  * Handle the H_SET_PARTITION_TABLE hcall.
  * r4 = guest real address of partition table + log_2(size) - 12
@@ -660,6 +655,35 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
 	kvmhv_set_nested_ptbl(gp);
 }
 
+void kvmhv_vm_nested_init(struct kvm *kvm)
+{
+	idr_init(&kvm->arch.kvm_nested_guest_idr);
+}
+
+static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
+{
+	return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
+static bool __prealloc_nested(struct kvm *kvm, int lpid)
+{
+	if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
+			NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
+		return false;
+	return true;
+}
+
+static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
+{
+	if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
+		WARN_ON(1);
+}
+
+static void __remove_nested(struct kvm *kvm, int lpid)
+{
+	idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
 static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
 {
 	struct kvm_nested_guest *gp;
@@ -720,13 +744,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
 	long ref;
 
 	spin_lock(&kvm->mmu_lock);
-	if (gp == kvm->arch.nested_guests[lpid]) {
-		kvm->arch.nested_guests[lpid] = NULL;
-		if (lpid == kvm->arch.max_nested_lpid) {
-			while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
-				;
-			kvm->arch.max_nested_lpid = lpid;
-		}
+	if (gp == __find_nested(kvm, lpid)) {
+		__remove_nested(kvm, lpid);
 		--gp->refcnt;
 	}
 	ref = gp->refcnt;
@@ -743,24 +762,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
  */
 void kvmhv_release_all_nested(struct kvm *kvm)
 {
-	int i;
+	int lpid;
 	struct kvm_nested_guest *gp;
 	struct kvm_nested_guest *freelist = NULL;
 	struct kvm_memory_slot *memslot;
 	int srcu_idx, bkt;
 
 	spin_lock(&kvm->mmu_lock);
-	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
-		gp = kvm->arch.nested_guests[i];
-		if (!gp)
-			continue;
-		kvm->arch.nested_guests[i] = NULL;
+	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+		__remove_nested(kvm, lpid);
 		if (--gp->refcnt == 0) {
 			gp->next = freelist;
 			freelist = gp;
 		}
 	}
-	kvm->arch.max_nested_lpid = -1;
+	idr_destroy(&kvm->arch.kvm_nested_guest_idr);
+	/* idr is empty and may be reused at this point */
 	spin_unlock(&kvm->mmu_lock);
 	while ((gp = freelist) != NULL) {
 		freelist = gp->next;
@@ -797,7 +814,7 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
 		return NULL;
 
 	spin_lock(&kvm->mmu_lock);
-	gp = kvm->arch.nested_guests[l1_lpid];
+	gp = __find_nested(kvm, l1_lpid);
 	if (gp)
 		++gp->refcnt;
 	spin_unlock(&kvm->mmu_lock);
@@ -808,17 +825,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
 	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
 	if (!newgp)
 		return NULL;
+
+	if (!__prealloc_nested(kvm, l1_lpid)) {
+		kvmhv_release_nested(newgp);
+		return NULL;
+	}
+
 	spin_lock(&kvm->mmu_lock);
-	if (kvm->arch.nested_guests[l1_lpid]) {
-		/* someone else beat us to it */
-		gp = kvm->arch.nested_guests[l1_lpid];
-	} else {
-		kvm->arch.nested_guests[l1_lpid] = newgp;
+	gp = __find_nested(kvm, l1_lpid);
+	if (!gp) {
+		__add_nested(kvm, l1_lpid, newgp);
 		++newgp->refcnt;
 		gp = newgp;
 		newgp = NULL;
-		if (l1_lpid > kvm->arch.max_nested_lpid)
-			kvm->arch.max_nested_lpid = l1_lpid;
 	}
 	++gp->refcnt;
 	spin_unlock(&kvm->mmu_lock);
@@ -841,20 +860,13 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp)
 		kvmhv_release_nested(gp);
 }
 
-static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
-{
-	if (lpid > kvm->arch.max_nested_lpid)
-		return NULL;
-	return kvm->arch.nested_guests[lpid];
-}
-
 pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
 				 unsigned long ea, unsigned *hshift)
 {
 	struct kvm_nested_guest *gp;
 	pte_t *pte;
 
-	gp = kvmhv_find_nested(kvm, lpid);
+	gp = __find_nested(kvm, lpid);
 	if (!gp)
 		return NULL;
@@ -960,7 +972,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
 	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
 	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
-	gp = kvmhv_find_nested(kvm, lpid);
+	gp = __find_nested(kvm, lpid);
 	if (!gp)
 		return;
@@ -1152,16 +1164,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *gp;
-	int i;
+	int lpid;
 
 	spin_lock(&kvm->mmu_lock);
-	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
-		gp = kvm->arch.nested_guests[i];
-		if (gp) {
-			spin_unlock(&kvm->mmu_lock);
-			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
-			spin_lock(&kvm->mmu_lock);
-		}
+	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+		spin_unlock(&kvm->mmu_lock);
+		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+		spin_lock(&kvm->mmu_lock);
 	}
 	spin_unlock(&kvm->mmu_lock);
 }
@@ -1313,7 +1322,7 @@ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
 	 * H_ENTER_NESTED call. Since we can't differentiate this case from
 	 * the invalid case, we ignore such flush requests and return success.
 	 */
-	if (!kvmhv_find_nested(vcpu->kvm, lpid))
+	if (!__find_nested(vcpu->kvm, lpid))
 		return H_SUCCESS;
 
 	/*
@@ -1657,15 +1666,12 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
 
 int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
 {
-	int ret = -1;
+	int ret = lpid + 1;
 
 	spin_lock(&kvm->mmu_lock);
-	while (++lpid <= kvm->arch.max_nested_lpid) {
-		if (kvm->arch.nested_guests[lpid]) {
-			ret = lpid;
-			break;
-		}
-	}
+	if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
+		ret = -1;
 	spin_unlock(&kvm->mmu_lock);
 
 	return ret;
 }
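
For reference, the rewritten kvmhv_nested_next_lpid() leans on idr_get_next(), which scans forward from *nextid to the first populated slot, updates *nextid in place, and returns NULL once the idr is exhausted. A minimal sketch of the same lookup, assuming a caller-supplied idr (the example_next_id name is hypothetical; the real function additionally takes kvm->mmu_lock around the scan):

#include <linux/idr.h>

/* Smallest allocated id strictly greater than @id, or -1 if none. */
static int example_next_id(struct idr *idr, int id)
{
	int next = id + 1;	/* start scanning just past @id */

	if (!idr_get_next(idr, &next))
		return -1;	/* no populated slot at or after @next */
	return next;		/* idr_get_next() updated @next in place */
}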