Commit f60ca2f9 authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: Always zero invalid PTEs

kvm_set_invalid_pte() currently clears only bit 0 of a PTE, because
stage2_map_walk_table_post() needs to be able to follow the anchor
afterwards. In preparation for re-using bits [63:1] of invalid PTEs,
zero the PTE entirely instead, and cache the anchor's child pointer
upfront so the walker no longer has to recover it from the invalidated
entry.
Acked-by: Will Deacon <will@kernel.org>
Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-29-qperret@google.com
parent a14307f5
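The rename at the heart of the diff below swaps a helper that cleared only the valid bit for one that zeroes the entry outright. As a standalone illustration in plain C rather than kernel code (WRITE_ONCE() is elided, and the software-bits annotation at the end is purely hypothetical, sketching the kind of re-use of bits [63:1] this patch prepares for):

    #include <stdint.h>

    typedef uint64_t kvm_pte_t;

    #define KVM_PTE_VALID ((kvm_pte_t)1)    /* bit 0, as in pgtable.c */

    /* Old behaviour: clear only the valid bit; bits [63:1] survive. */
    static void kvm_set_invalid_pte(kvm_pte_t *ptep)
    {
    	kvm_pte_t pte = *ptep;
    	*ptep = pte & ~KVM_PTE_VALID;   /* kernel uses WRITE_ONCE() */
    }

    /* New behaviour: zero the whole entry. */
    static void kvm_clear_pte(kvm_pte_t *ptep)
    {
    	*ptep = 0;                      /* kernel uses WRITE_ONCE() */
    }

    /*
     * Once invalid PTEs are guaranteed zero, any non-zero invalid entry
     * can carry software state, e.g. (hypothetically) an ID packed into
     * bits [63:1] with bit 0 clear, so the entry stays invalid to hardware.
     */
    static kvm_pte_t kvm_invalid_pte_with_sw_bits(uint64_t id)
    {
    	return id << 1;
    }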
@@ -156,10 +156,9 @@ static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_op
 	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
 }
 
-static void kvm_set_invalid_pte(kvm_pte_t *ptep)
+static void kvm_clear_pte(kvm_pte_t *ptep)
 {
-	kvm_pte_t pte = *ptep;
-	WRITE_ONCE(*ptep, pte & ~KVM_PTE_VALID);
+	WRITE_ONCE(*ptep, 0);
 }
 
 static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
@@ -444,6 +443,7 @@ struct stage2_map_data {
 	kvm_pte_t			attr;
 
 	kvm_pte_t			*anchor;
+	kvm_pte_t			*childp;
 
 	struct kvm_s2_mmu		*mmu;
 	void				*memcache;
@@ -533,7 +533,7 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 	 * There's an existing different valid leaf entry, so perform
 	 * break-before-make.
 	 */
-	kvm_set_invalid_pte(ptep);
+	kvm_clear_pte(ptep);
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
 	mm_ops->put_page(ptep);
 }
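The sequence in this hunk is the classic break-before-make protocol the architecture requires when changing a live mapping. A sketch of the ordering, assuming the kernel-context types and helpers used in this file (stage2_bbm_replace() itself is a hypothetical wrapper, not a kernel function):

    /*
     * Hypothetical wrapper showing the break-before-make ordering used
     * at each call site; kvm_clear_pte(), kvm_call_hyp() and
     * __kvm_tlb_flush_vmid_ipa are the real names from this file.
     */
    static void stage2_bbm_replace(kvm_pte_t *ptep, kvm_pte_t new,
    			       struct kvm_s2_mmu *mmu, u64 addr, u32 level)
    {
    	/* 1. Break: make the entry invalid (after this patch, zero). */
    	kvm_clear_pte(ptep);
    	/* 2. Flush: no stale translation for this IPA may linger in a TLB. */
    	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
    	/* 3. Make: only now is it safe to publish a replacement entry. */
    	WRITE_ONCE(*ptep, new);
    }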
@@ -554,7 +554,8 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 	if (!kvm_block_mapping_supported(addr, end, data->phys, level))
 		return 0;
 
-	kvm_set_invalid_pte(ptep);
+	data->childp = kvm_pte_follow(*ptep, data->mm_ops);
+	kvm_clear_pte(ptep);
 
 	/*
 	 * Invalidate the whole stage-2, as we may have numerous leaf
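This hunk implements the "cache the anchor's child upfront" part of the commit message. Previously the anchor could still be followed after invalidation because its output-address bits survived; a fully zeroed PTE would instead decode to physical address 0. A condensed before/after sketch, assuming the types and helpers of this file (the two functions are illustrative, not kernel code):

    /* Before: table_post could still decode the invalidated anchor. */
    static kvm_pte_t *anchor_child_old(kvm_pte_t *ptep,
    				   struct stage2_map_data *data)
    {
    	kvm_set_invalid_pte(ptep);			/* PA bits intact */
    	return kvm_pte_follow(*ptep, data->mm_ops);	/* still works */
    }

    /* After: the child pointer is stashed before the entry is zeroed. */
    static kvm_pte_t *anchor_child_new(kvm_pte_t *ptep,
    				   struct stage2_map_data *data)
    {
    	data->childp = kvm_pte_follow(*ptep, data->mm_ops);
    	kvm_clear_pte(ptep);			/* *ptep == 0 from here on */
    	return data->childp;			/* recovered from the cache */
    }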
@@ -600,7 +601,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * will be mapped lazily.
 	 */
 	if (kvm_pte_valid(pte)) {
-		kvm_set_invalid_pte(ptep);
+		kvm_clear_pte(ptep);
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
 		mm_ops->put_page(ptep);
 	}
@@ -616,19 +617,24 @@ static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
 				      struct stage2_map_data *data)
 {
 	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+	kvm_pte_t *childp;
 	int ret = 0;
 
 	if (!data->anchor)
 		return 0;
 
-	mm_ops->put_page(kvm_pte_follow(*ptep, mm_ops));
-	mm_ops->put_page(ptep);
-
 	if (data->anchor == ptep) {
+		childp = data->childp;
 		data->anchor = NULL;
+		data->childp = NULL;
 		ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
+	} else {
+		childp = kvm_pte_follow(*ptep, mm_ops);
 	}
 
+	mm_ops->put_page(childp);
+	mm_ops->put_page(ptep);
+
 	return ret;
 }
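Note the reordering here as well: the two put_page() calls move below the anchor check, because the child page must be identified before any references are dropped. Condensed (variables as in the function above, for illustration only):

    if (data->anchor == ptep)
    	childp = data->childp;			/* anchor PTE is zero: use the cache */
    else
    	childp = kvm_pte_follow(*ptep, mm_ops);	/* table PTE still valid: follow it */

    mm_ops->put_page(childp);			/* previously the first statement */
    mm_ops->put_page(ptep);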
@@ -737,7 +743,7 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * block entry and rely on the remaining portions being faulted
 	 * back lazily.
 	 */
-	kvm_set_invalid_pte(ptep);
+	kvm_clear_pte(ptep);
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
 	mm_ops->put_page(ptep);