Commit 9fd4236f authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/kvm/book3s: Use find_kvm_host_pte in kvmppc_get_hpa

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-19-aneesh.kumar@linux.ibm.com
parent bda3deaa
@@ -878,8 +878,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 	return ret;
 }
 
-static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
-			  int writing, unsigned long *hpa,
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
+			  unsigned long gpa, int writing, unsigned long *hpa,
 			  struct kvm_memory_slot **memslot_p)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -898,7 +898,7 @@ static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
 	hva = __gfn_to_hva_memslot(memslot, gfn);
 
 	/* Try to find the host pte for that virtual address */
-	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
 	if (!ptep)
 		return H_TOO_HARD;
 	pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -933,16 +933,11 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
 	mmu_seq = kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
-	if (ret != H_SUCCESS)
-		return ret;
-
-	/* Check if we've been invalidated */
 	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
-		ret = H_TOO_HARD;
+
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
+	if (ret != H_SUCCESS)
 		goto out_unlock;
-	}
 
 	/* Zero the page */
 	for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
@@ -966,19 +961,14 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
 	mmu_seq = kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
-	if (ret != H_SUCCESS)
-		return ret;
-
-	ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
-	if (ret != H_SUCCESS)
-		return ret;
-
-	/* Check if we've been invalidated */
 	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
-		ret = H_TOO_HARD;
+
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
+	if (ret != H_SUCCESS)
+		goto out_unlock;
+
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL);
+	if (ret != H_SUCCESS)
 		goto out_unlock;
-	}
 
 	/* Copy the page */
 	memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
...
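The pattern at both call sites is now: sample kvm->mmu_notifier_seq, order the read with smp_rmb(), take kvm->mmu_lock, and only then look up and use the host pte. The separate "Check if we've been invalidated" block disappears because find_kvm_host_pte performs the mmu_notifier_retry() check itself, under the lock. A rough sketch of the helper's contract follows (paraphrased from the helper introduced earlier in this series, not its verbatim definition; the kvm->mm->pgd walk root is this sketch's assumption):

/* Kernel-context sketch; the caller must hold kvm->mmu_lock. */
static pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
				unsigned long ea, unsigned *hshift)
{
	/*
	 * Bail out if an MMU-notifier invalidation ran after mmu_seq
	 * was sampled; callers turn the NULL result into H_TOO_HARD.
	 */
	if (mmu_notifier_retry(kvm, mmu_seq))
		return NULL;

	/* Walk the host page table for this host virtual address. */
	return __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
}

Folding the retry check into the walk closes the window where a pte could be found and then invalidated before use: the lock is held from the lookup through the zeroing or copy, so a racing invalidation either lands before the lookup (which then fails with NULL) or waits on the lock.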