Commit 58f3b0fc authored by Will Deacon, committed by Marc Zyngier

KVM: arm64: Support TLB invalidation in guest context

Typically, TLB invalidation of guest stage-2 mappings using nVHE is
performed by a hypercall originating from the host. For the invalidation
instruction to be effective, therefore, __tlb_switch_to_{guest,host}()
swizzle the active stage-2 context around the TLBI instruction.

With guest-to-host memory sharing and unsharing hypercalls
originating from the guest under pKVM, there is a need to support
both guest and host VMID invalidations issued from guest context.

Replace the __tlb_switch_to_{guest,host}() functions with a more general
{enter,exit}_vmid_context() implementation which supports being invoked
from guest context and acts as a no-op if the target context matches the
running context.
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-10-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 7cc1d214
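
Before the diff, here is a minimal, stand-alone sketch of the call pattern the new helpers establish: every TLBI helper brackets the invalidation with an enter/exit pair, and the enter step degenerates to a no-op when the requested stage-2 context is already the running one. This is a user-space mock for illustration only; all mock_* names are invented here, and the real hyp code in the diff below switches VTTBR_EL2 via __load_stage2()/__load_host_stage2() and additionally handles the ARM erratum 1319367 workaround.

/*
 * Illustrative mock, not kernel code: models the {enter,exit}_vmid_context()
 * contract described in the commit message. All mock_* names are invented.
 */
#include <stdio.h>

struct mock_s2_mmu { int vmid; };

struct mock_tlb_inv_context {
        struct mock_s2_mmu *mmu;        /* context to restore; NULL = no switch */
};

static struct mock_s2_mmu mock_host_mmu  = { .vmid = 0 };
static struct mock_s2_mmu mock_guest_mmu = { .vmid = 1 };

/* The stage-2 context currently loaded on this (mock) CPU. */
static struct mock_s2_mmu *running_mmu = &mock_host_mmu;

static void mock_enter_vmid_context(struct mock_s2_mmu *mmu,
                                    struct mock_tlb_inv_context *cxt)
{
        cxt->mmu = NULL;

        /* Already in the requested context: nothing to do. */
        if (mmu == running_mmu)
                return;

        /* Remember the running context, then switch to the target one. */
        cxt->mmu = running_mmu;
        running_mmu = mmu;
}

static void mock_exit_vmid_context(struct mock_tlb_inv_context *cxt)
{
        /* enter was a no-op, so exit is too. */
        if (!cxt->mmu)
                return;

        running_mmu = cxt->mmu;
}

/* Mirrors the shape of __kvm_tlb_flush_vmid(): enter, invalidate, exit. */
static void mock_tlb_flush_vmid(struct mock_s2_mmu *mmu)
{
        struct mock_tlb_inv_context cxt;

        mock_enter_vmid_context(mmu, &cxt);
        printf("TLBI VMALLS12E1IS with VMID %d loaded\n", running_mmu->vmid);
        mock_exit_vmid_context(&cxt);
}

int main(void)
{
        mock_tlb_flush_vmid(&mock_guest_mmu);   /* switch in, invalidate, switch back */
        mock_tlb_flush_vmid(&mock_host_mmu);    /* already running: enter/exit are no-ops */
        return 0;
}
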
@@ -11,13 +11,23 @@
 #include <nvhe/mem_protect.h>
 
 struct tlb_inv_context {
+	struct kvm_s2_mmu	*mmu;
 	u64			tcr;
+	u64			sctlr;
 };
 
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-				  struct tlb_inv_context *cxt,
-				  bool nsh)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+			       struct tlb_inv_context *cxt,
+			       bool nsh)
 {
+	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;
+
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+	cxt->mmu = NULL;
+
 	/*
 	 * We have two requirements:
 	 *
@@ -40,20 +50,55 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	else
 		dsb(ish);
 
+	/*
+	 * If we're already in the desired context, then there's nothing to do.
+	 */
+	if (vcpu) {
+		/*
+		 * We're in guest context. However, for this to work, this needs
+		 * to be called from within __kvm_vcpu_run(), which ensures that
+		 * __hyp_running_vcpu is set to the current guest vcpu.
+		 */
+		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
+			return;
+
+		cxt->mmu = vcpu->arch.hw_mmu;
+	} else {
+		/* We're in host context. */
+		if (mmu == host_s2_mmu)
+			return;
+
+		cxt->mmu = host_s2_mmu;
+	}
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
 
 		/*
 		 * For CPUs that are affected by ARM 1319367, we need to
-		 * avoid a host Stage-1 walk while we have the guest's
-		 * VMID set in the VTTBR in order to invalidate TLBs.
-		 * We're guaranteed that the S1 MMU is enabled, so we can
-		 * simply set the EPD bits to avoid any further TLB fill.
+		 * avoid a Stage-1 walk with the old VMID while we have
+		 * the new VMID set in the VTTBR in order to invalidate TLBs.
+		 * We're guaranteed that the host S1 MMU is enabled, so
+		 * we can simply set the EPD bits to avoid any further
+		 * TLB fill. For guests, we ensure that the S1 MMU is
+		 * temporarily enabled in the next context.
 		 */
 		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
 		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
 		write_sysreg_el1(val, SYS_TCR);
 		isb();
+
+		if (vcpu) {
+			val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+			if (!(val & SCTLR_ELx_M)) {
+				val |= SCTLR_ELx_M;
+				write_sysreg_el1(val, SYS_SCTLR);
+				isb();
+			}
+		} else {
+			/* The host S1 MMU is always enabled. */
+			cxt->sctlr = SCTLR_ELx_M;
+		}
 	}
 
 	/*
@@ -62,18 +107,40 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	 * ensuring that we always have an ISB, but not two ISBs back
 	 * to back.
 	 */
-	__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	if (vcpu)
+		__load_host_stage2();
+	else
+		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
 {
-	__load_host_stage2();
+	struct kvm_s2_mmu *mmu = cxt->mmu;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;
+
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+
+	if (!mmu)
+		return;
+
+	if (vcpu)
+		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	else
+		__load_host_stage2();
 
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-		/* Ensure write of the host VMID */
+		/* Ensure write of the old VMID */
 		isb();
-		/* Restore the host's TCR_EL1 */
-		write_sysreg_el1(cxt->tcr, SYS_TCR);
+
+		if (!(cxt->sctlr & SCTLR_ELx_M)) {
+			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+			isb();
+		}
+
+		write_sysreg_el1(cxt->tcr, SYS_TCR);
 	}
 }
@@ -84,7 +151,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -105,7 +172,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -114,7 +181,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, true);
+	enter_vmid_context(mmu, &cxt, true);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -135,7 +202,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -152,7 +219,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	start = round_down(start, stride);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
 
@@ -161,7 +228,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -169,13 +236,13 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
@@ -183,19 +250,19 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt, false);
+	enter_vmid_context(mmu, &cxt, false);
 
 	__tlbi(vmalle1);
 	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_vm_context(void)
 {
-	/* Same remark as in __tlb_switch_to_guest() */
+	/* Same remark as in enter_vmid_context() */
 	dsb(ish);
 	__tlbi(alle1is);
 	dsb(ish);