Commit 5eec0a91 authored by Marc Zyngier

arm64: KVM: Implement TLB handling

Implement the TLB handling as a direct translation of the assembly
code version.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
parent c13d1683
arch/arm64/kvm/hyp/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
 obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
arch/arm64/kvm/hyp/entry.S
@@ -147,6 +147,7 @@ ENTRY(__fpsimd_guest_restore)
 	add	x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
 	bl	__fpsimd_restore_state
+	// Skip restoring fpexc32 for AArch64 guests
 	mrs	x1, hcr_el2
 	tbnz	x1, #HCR_RW_SHIFT, 1f
 	ldr	x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]

arch/arm64/kvm/hyp/tlb.c (new file)
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "hyp.h"
void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
dsb(ishst);
/* Switch to requested VMID */
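	/* kvm arrives as a kernel VA; convert to its HYP alias before use */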
	kvm = kern_hyp_va(kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	isb();

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
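	/* TLBI IPAS2E1IS takes IPA[47:12] in Xt[35:0], hence the shift */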
	ipa >>= 12;
	asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
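	/* Invalidate all Stage-1 entries for the current VMID (Inner Shareable) */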
asm volatile("tlbi vmalle1is" : : );
dsb(ish);
isb();
write_sysreg(0, vttbr_el2);
}
void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
{
	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	isb();

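	/* Invalidate all Stage-1 and Stage-2 entries for this VMID (Inner Shareable) */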
asm volatile("tlbi vmalls12e1is" : : );
dsb(ish);
isb();
write_sysreg(0, vttbr_el2);
}
void __hyp_text __tlb_flush_vm_context(void)
{
	dsb(ishst);
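	/*
	 * Invalidate TLB entries for the EL1&0 regime across all VMIDs,
	 * then the whole instruction cache to PoU, both Inner Shareable.
	 */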
asm volatile("tlbi alle1is \n"
"ic ialluis ": : );
dsb(ish);
}
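
For context, a minimal, hypothetical caller-side sketch (not part of this commit): the host kernel cannot branch into __hyp_text code directly, so arm64 KVM of this era traps to EL2 via kvm_call_hyp(), passing the hyp function and its arguments. The wrapper name below is illustrative only; the real call sites live in the shared KVM MMU code, and the symbol actually exposed to callers may differ.

#include <asm/kvm_host.h>	/* struct kvm, kvm_call_hyp() */

/* Declaration assumed for the sketch; the commit itself adds no header */
extern void __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);

/* Hypothetical host-side wrapper: HVC into EL2 to run the flush there */
static void example_flush_guest_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__tlb_flush_vmid_ipa, kvm, ipa);
}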