Commit e9edb17a authored by Will Deacon, committed by Marc Zyngier

KVM: arm64: Convert kvm_set_spte_hva() to generic page-table API

Convert kvm_set_spte_hva() to use kvm_pgtable_stage2_map() instead
of stage2_set_pte().
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20200911132529.19844-9-will@kernel.org
parent 02bbd374
@@ -1914,28 +1914,27 @@ int kvm_unmap_hva_range(struct kvm *kvm,
 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	pte_t *pte = (pte_t *)data;
+	kvm_pfn_t *pfn = (kvm_pfn_t *)data;
 
 	WARN_ON(size != PAGE_SIZE);
 	/*
-	 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
-	 * flag clear because MMU notifiers will have unmapped a huge PMD before
-	 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
-	 * therefore stage2_set_pte() never needs to clear out a huge PMD
-	 * through this calling path.
+	 * The MMU notifiers will have unmapped a huge PMD before calling
+	 * ->change_pte() (which in turn calls kvm_set_spte_hva()) and
+	 * therefore we never need to clear out a huge PMD through this
+	 * calling path and a memcache is not required.
 	 */
-	stage2_set_pte(&kvm->arch.mmu, NULL, gpa, pte, 0);
+	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE,
+			       __pfn_to_phys(*pfn), KVM_PGTABLE_PROT_R, NULL);
 	return 0;
 }
 
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	unsigned long end = hva + PAGE_SIZE;
 	kvm_pfn_t pfn = pte_pfn(pte);
-	pte_t stage2_pte;
 
-	if (!kvm->arch.mmu.pgd)
+	if (!kvm->arch.mmu.pgt)
 		return 0;
 
 	trace_kvm_set_spte_hva(hva);
@@ -1945,9 +1944,7 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	 * just like a translation fault and clean the cache to the PoC.
 	 */
 	clean_dcache_guest_page(pfn, PAGE_SIZE);
-	stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
-	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
 	return 0;
 }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment