Commit efaa5b93 authored by Marc Zyngier

KVM: arm64: Use TTL hint when invalidating stage-2 translations

Since we often have a precise idea of the level we're dealing with
when invalidating TLBs, we can provide it as a hint to our
invalidation helper.
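
For illustration, here is a rough sketch (not the kernel's actual macro) of how such a level hint can be folded into a TLBI operand on ARMv8.4-TTL hardware. It assumes 4KB pages and the architectural TTL field in bits [47:44] of the address argument; tlbi_encode_level() is a hypothetical helper name:

#include <stdint.h>

/* TTL hint field of a TLBI VA/IPA operand (ARMv8.4-TTL): bits [47:44]. */
#define TLBI_TTL_SHIFT  44
#define TLBI_TTL_MASK   (0xfULL << TLBI_TTL_SHIFT)
#define TLBI_TTL_TG_4K  1       /* TTL[3:2]: translation granule is 4KB */

/*
 * Hypothetical helper: fold a page-table level (1-3) into the
 * invalidation operand.  Level 0 means "no usable hint", which leaves
 * TTL as 0b0000 so the CPU must assume the entry could be cached at
 * any level, exactly as before this change.
 */
static inline uint64_t tlbi_encode_level(uint64_t arg, int level)
{
        uint64_t ttl;

        if (!level)
                return arg;

        ttl = ((uint64_t)TLBI_TTL_TG_4K << 2) | (level & 0x3);
        arg &= ~TLBI_TTL_MASK;
        arg |= ttl << TLBI_TTL_SHIFT;
        return arg;
}

With a non-zero hint the hardware knows which translation-table level held the entry being invalidated and can avoid touching walk-cache entries for the other levels; a hint of 0 keeps the previous conservative behaviour.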
Reviewed-by: James Morse <james.morse@arm.com>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent a0e50aa3
@@ -91,7 +91,8 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #endif
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+                                     int level);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
 extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
 
@@ -46,7 +46,8 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 	}
 }
 
-void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
+                              phys_addr_t ipa, int level)
 {
 	struct tlb_inv_context cxt;
 
@@ -62,7 +63,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
 	 * whole of Stage-1. Weep...
 	 */
 	ipa >>= 12;
-	__tlbi(ipas2e1is, ipa);
+	__tlbi_level(ipas2e1is, ipa, level);
 
 	/*
 	 * We have to ensure completion of the invalidation at Stage-2,
@@ -79,7 +79,8 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 	local_irq_restore(cxt->flags);
 }
 
-void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
+                              phys_addr_t ipa, int level)
 {
 	struct tlb_inv_context cxt;
 
@@ -94,7 +95,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
 	 * whole of Stage-1. Weep...
 	 */
 	ipa >>= 12;
-	__tlbi(ipas2e1is, ipa);
+	__tlbi_level(ipas2e1is, ipa, level);
 
 	/*
 	 * We have to ensure completion of the invalidation at Stage-2,
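
The S2_*_LEVEL constants used in the stage-2 page-table hunks below are defined outside this excerpt; presumably they map the software page-table levels onto the architectural TTL level numbers, with 0 reserved for "no usable hint", along these lines:

/* Plausible definitions, inferred from how the hints are used below. */
#define S2_NO_LEVEL_HINT        0
#define S2_PUD_LEVEL            1
#define S2_PMD_LEVEL            2
#define S2_PTE_LEVEL            3

Leaf teardown paths know exactly which level they are invalidating, while paths that tear down a whole table pass S2_NO_LEVEL_HINT, since cached entries for the freed range may exist at several levels.
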
@@ -58,9 +58,10 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
 }
 
-static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
+static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+                                   int level)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa);
+	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
 }
 
 /*
@@ -102,7 +103,7 @@ static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t
 		return;
 
 	pmd_clear(pmd);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 	put_page(virt_to_page(pmd));
 }
 
@@ -122,7 +123,7 @@ static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t
 		return;
 
 	stage2_pud_clear(kvm, pudp);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 	put_page(virt_to_page(pudp));
 }
 
@@ -163,7 +164,7 @@ static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr
 	struct kvm *kvm = mmu->kvm;
 	p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
 
 	stage2_pgd_clear(kvm, pgd);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_p4d_free(kvm, p4d_table);
 	put_page(virt_to_page(pgd));
 }
@@ -173,7 +174,7 @@ static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr
 	struct kvm *kvm = mmu->kvm;
 	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
 
 	stage2_p4d_clear(kvm, p4d);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_pud_free(kvm, pud_table);
 	put_page(virt_to_page(p4d));
 }
@@ -185,7 +186,7 @@ static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr
 	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
 	stage2_pud_clear(kvm, pud);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_pmd_free(kvm, pmd_table);
 	put_page(virt_to_page(pud));
 }
@@ -195,7 +196,7 @@ static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr
 	pte_t *pte_table = pte_offset_kernel(pmd, 0);
 
 	VM_BUG_ON(pmd_thp_or_huge(*pmd));
 	pmd_clear(pmd);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	free_page((unsigned long)pte_table);
 	put_page(virt_to_page(pmd));
 }
@@ -273,7 +274,7 @@ static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
 			pte_t old_pte = *pte;
 
 			kvm_set_pte(pte, __pte(0));
-			kvm_tlb_flush_vmid_ipa(mmu, addr);
+			kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
 
 			/* No need to invalidate the cache for device mappings */
 			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
@@ -302,7 +303,7 @@ static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
 				pmd_t old_pmd = *pmd;
 
 				pmd_clear(pmd);
-				kvm_tlb_flush_vmid_ipa(mmu, addr);
+				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 
 				kvm_flush_dcache_pmd(old_pmd);
@@ -332,7 +333,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
 			pud_t old_pud = *pud;
 
 			stage2_pud_clear(kvm, pud);
-			kvm_tlb_flush_vmid_ipa(mmu, addr);
+			kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 			kvm_flush_dcache_pud(old_pud);
 			put_page(virt_to_page(pud));
 		} else {
@@ -1260,7 +1261,7 @@ static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu,
 		 */
 		WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
 		pmd_clear(pmd);
-		kvm_tlb_flush_vmid_ipa(mmu, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 	} else {
 		get_page(virt_to_page(pmd));
 	}
@@ -1302,7 +1303,7 @@ static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu,
 		WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
 		stage2_pud_clear(kvm, pudp);
-		kvm_tlb_flush_vmid_ipa(mmu, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 	} else {
 		get_page(virt_to_page(pudp));
 	}
@@ -1451,7 +1452,7 @@ static int stage2_set_pte(struct kvm_s2_mmu *mmu,
 			return 0;
 
 		kvm_set_pte(pte, __pte(0));
-		kvm_tlb_flush_vmid_ipa(mmu, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
 	} else {
 		get_page(virt_to_page(pte));
 	}