Commit 02f10845 authored by Ricardo Koller, committed by Oliver Upton

KVM: arm64: Add KVM_PGTABLE_WALK flags for skipping CMOs and BBM TLBIs

Add two flags to kvm_pgtable_visit_ctx, KVM_PGTABLE_WALK_SKIP_BBM_TLBI
and KVM_PGTABLE_WALK_SKIP_CMO, to indicate that the walk should perform
neither the TLB invalidations (TLBIs) required by break-before-make
(BBM) nor cache maintenance operations (CMOs). This will be used by a
future commit to create unlinked tables that are not accessible to the
HW page-table walker.
Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20230426172330.1439644-3-ricarkol@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent c14d08c5
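For orientation before the diff: the new bits are ordinary walker flags, so a caller opts in per walk. A minimal, hypothetical sketch of how a later caller might request such a walk; the callback and argument names are illustrative and do not appear in this patch:

```c
/* Hypothetical usage sketch, not part of this patch: a walk over
 * tables the HW walker cannot yet reach, so BBM TLB invalidation and
 * cache maintenance are unnecessary and can be skipped. */
struct kvm_pgtable_walker walker = {
	.cb	= stage2_map_walker,	/* illustrative callback */
	.arg	= &map_data,		/* illustrative argument */
	.flags	= KVM_PGTABLE_WALK_LEAF |
		  KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
		  KVM_PGTABLE_WALK_SKIP_CMO,
};
```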
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -195,6 +195,12 @@ typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
  *					with other software walkers.
  * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
  *					invoked from a fault handler.
+ * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
+ *					without Break-before-make's
+ *					TLB invalidation.
+ * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
+ *					without Cache maintenance
+ *					operations required.
  */
 enum kvm_pgtable_walk_flags {
 	KVM_PGTABLE_WALK_LEAF			= BIT(0),
@@ -202,6 +208,8 @@ enum kvm_pgtable_walk_flags {
 	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
 	KVM_PGTABLE_WALK_SHARED			= BIT(3),
 	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
+	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
+	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
 };

 struct kvm_pgtable_visit_ctx {
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -62,6 +62,16 @@ struct kvm_pgtable_walk_data {
 	u64				end;
 };

+static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
+{
+	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
+}
+
+static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
+{
+	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
+}
+
 static bool kvm_phys_is_valid(u64 phys)
 {
 	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
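The two new helpers are simple predicates over ctx->flags, wrapped in unlikely() because skipping is the rare case. A standalone sketch of the same pattern, using __builtin_expect (the GCC/Clang primitive behind the kernel's unlikely() macro); the flag names and values here are illustrative stand-ins, not kernel code:

```c
#include <stdbool.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Illustrative stand-ins for the kernel's walk-flag bits. */
enum {
	WALK_SKIP_BBM_TLBI	= 1u << 5,
	WALK_SKIP_CMO		= 1u << 6,
};

/* Same shape as the kernel helpers: test one bit, hint it is rare. */
static bool walk_skip_cmo(unsigned int flags)
{
	return unlikely(flags & WALK_SKIP_CMO);
}

int main(void)
{
	return walk_skip_cmo(WALK_SKIP_CMO) ? 0 : 1;
}
```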
@@ -741,14 +751,17 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
 	if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
 		return false;

-	/*
-	 * Perform the appropriate TLB invalidation based on the evicted pte
-	 * value (if any).
-	 */
-	if (kvm_pte_table(ctx->old, ctx->level))
-		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
-	else if (kvm_pte_valid(ctx->old))
-		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+	if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
+		/*
+		 * Perform the appropriate TLB invalidation based on the
+		 * evicted pte value (if any).
+		 */
+		if (kvm_pte_table(ctx->old, ctx->level))
+			kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+		else if (kvm_pte_valid(ctx->old))
+			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
+				     ctx->addr, ctx->level);
+	}

 	if (stage2_pte_is_counted(ctx->old))
 		mm_ops->put_page(ctx->ptep);
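The invalidation step being made conditional here is the middle leg of break-before-make: evicting a table entry forces a full-VMID flush (any leaf beneath it may be cached in the TLB), while evicting a single valid leaf needs only a per-IPA flush. A self-contained toy model of that decision, with an invented two-bit PTE encoding and printed stand-ins for the hypercalls, purely for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy PTE encoding for illustration only: bit 0 = valid, bit 1 = table. */
static bool pte_valid(unsigned long pte) { return pte & 1; }
static bool pte_table(unsigned long pte) { return pte & 2; }

enum { SKIP_BBM_TLBI = 1u << 5 };

/* Mirrors the decision in stage2_try_break_pte(): after "breaking" a
 * PTE, pick the invalidation scope from the evicted value, unless the
 * caller guaranteed the table was never visible to the HW walker. */
static void invalidate_after_break(unsigned long old, unsigned int flags)
{
	if (flags & SKIP_BBM_TLBI)
		return;	/* unlinked table: no stale TLB entries can exist */

	if (pte_table(old))
		puts("flush entire VMID");	/* unknown set of leaves below */
	else if (pte_valid(old))
		puts("flush this IPA only");	/* one cached translation */
}

int main(void)
{
	invalidate_after_break(0x3, 0);			/* table -> VMID flush */
	invalidate_after_break(0x1, 0);			/* leaf  -> IPA flush  */
	invalidate_after_break(0x1, SKIP_BBM_TLBI);	/* skipped entirely    */
	return 0;
}
```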
@@ -832,11 +845,13 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 		return -EAGAIN;

 	/* Perform CMOs before installation of the guest stage-2 PTE */
-	if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
+	if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
+	    stage2_pte_cacheable(pgt, new))
 		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
 					       granule);

-	if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
+	if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
+	    stage2_pte_executable(new))
 		mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

 	stage2_make_pte(ctx, new);
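This hunk prepends the same skip check to both cache maintenance operations: a D-cache clean+invalidate to the point of coherency for cacheable mappings, and an I-cache invalidate to the point of unification for executable ones. A self-contained toy model of those two guarded decisions, with invented attribute bits and printed stand-ins for the mm_ops callbacks, purely for illustration:

```c
#include <stdio.h>

enum { SKIP_CMO = 1u << 6 };

/* Toy attribute bits, illustrative only. */
enum { ATTR_CACHEABLE = 1u << 0, ATTR_EXEC = 1u << 1 };

/* Mirrors the two guarded CMOs in stage2_map_walker_try_leaf():
 * clean the D-cache for cacheable mappings and invalidate the
 * I-cache for executable ones, unless the walk opted out. */
static void maybe_do_cmos(unsigned int attrs, unsigned int flags)
{
	if (!(flags & SKIP_CMO) && (attrs & ATTR_CACHEABLE))
		puts("dcache clean+invalidate to PoC");
	if (!(flags & SKIP_CMO) && (attrs & ATTR_EXEC))
		puts("icache invalidate to PoU");
}

int main(void)
{
	maybe_do_cmos(ATTR_CACHEABLE | ATTR_EXEC, 0);		/* both CMOs */
	maybe_do_cmos(ATTR_CACHEABLE | ATTR_EXEC, SKIP_CMO);	/* neither   */
	return 0;
}
```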