Commit a15f6939 authored by Marc Zyngier, committed by Christoffer Dall

KVM: arm/arm64: Split dcache/icache flushing

As we're about to introduce opportunistic invalidation of the icache,
let's split dcache and icache flushing.
Acked-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent d6811986
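
The net effect of the patch, condensed from the diff below: the single helper that both cleaned the dcache and invalidated the icache is split into two independent helpers, so that the upcoming opportunistic-invalidation patch can skip the i-side work where it is not needed. Prototypes as they appear in virt/kvm/arm/mmu.c (bodies elided):

/* Before: one helper did both jobs on every stage-2 fault. */
static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
                                      unsigned long size);

/* After: d-side and i-side maintenance can be requested separately. */
static void clean_dcache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
                                    unsigned long size);
static void invalidate_icache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
                                         unsigned long size);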
arch/arm/include/asm/kvm_mmu.h

@@ -126,21 +126,12 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
         return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
-                                               kvm_pfn_t pfn,
-                                               unsigned long size)
+static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
+                                             kvm_pfn_t pfn,
+                                             unsigned long size)
 {
         /*
-         * If we are going to insert an instruction page and the icache is
-         * either VIPT or PIPT, there is a potential problem where the host
-         * (or another VM) may have used the same page as this guest, and we
-         * read incorrect data from the icache.  If we're using a PIPT cache,
-         * we can invalidate just that page, but if we are using a VIPT cache
-         * we need to invalidate the entire icache - damn shame - as written
-         * in the ARM ARM (DDI 0406C.b - Page B3-1393).
-         *
-         * VIVT caches are tagged using both the ASID and the VMID and doesn't
-         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+         * Clean the dcache to the Point of Coherency.
          *
          * We need to do this through a kernel mapping (using the
          * user-space mapping has proved to be the wrong
@@ -155,19 +146,52 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
 
                 kvm_flush_dcache_to_poc(va, PAGE_SIZE);
 
-                if (icache_is_pipt())
-                        __cpuc_coherent_user_range((unsigned long)va,
-                                                   (unsigned long)va + PAGE_SIZE);
-
                 size -= PAGE_SIZE;
                 pfn++;
 
                 kunmap_atomic(va);
         }
+}
 
-        if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
+static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
+                                                  kvm_pfn_t pfn,
+                                                  unsigned long size)
+{
+        /*
+         * If we are going to insert an instruction page and the icache is
+         * either VIPT or PIPT, there is a potential problem where the host
+         * (or another VM) may have used the same page as this guest, and we
+         * read incorrect data from the icache.  If we're using a PIPT cache,
+         * we can invalidate just that page, but if we are using a VIPT cache
+         * we need to invalidate the entire icache - damn shame - as written
+         * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+         *
+         * VIVT caches are tagged using both the ASID and the VMID and doesn't
+         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+         */
+
+        VM_BUG_ON(size & ~PAGE_MASK);
+
+        if (icache_is_vivt_asid_tagged())
+                return;
+
+        if (!icache_is_pipt()) {
                 /* any kind of VIPT cache */
                 __flush_icache_all();
+                return;
+        }
+
+        /* PIPT cache. As for the d-side, use a temporary kernel mapping. */
+        while (size) {
+                void *va = kmap_atomic_pfn(pfn);
+
+                __cpuc_coherent_user_range((unsigned long)va,
+                                           (unsigned long)va + PAGE_SIZE);
+
+                size -= PAGE_SIZE;
+                pfn++;
+
+                kunmap_atomic(va);
         }
 }
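The 32-bit helper above picks one policy per icache type. A standalone sketch of that decision logic, with the kernel's cachetype.h predicates stubbed as an enum so it can be read and compiled outside the kernel (illustrative only, not kernel code):

#include <stdio.h>

enum icache_type { ICACHE_VIVT_ASID_TAGGED, ICACHE_VIPT, ICACHE_PIPT };

static const char *icache_policy(enum icache_type t)
{
        switch (t) {
        case ICACHE_VIVT_ASID_TAGGED:
                /* lines are tagged with ASID and VMID: stale hits impossible */
                return "no maintenance needed";
        case ICACHE_VIPT:
                /* virtual aliases: per-page invalidation is not sufficient */
                return "invalidate the entire icache";
        case ICACHE_PIPT:
                /* physically indexed: the affected pages alone are enough */
                return "invalidate page by page via a temporary kernel mapping";
        }
        return "unknown";
}

int main(void)
{
        printf("PIPT: %s\n", icache_policy(ICACHE_PIPT));
        printf("VIPT: %s\n", icache_policy(ICACHE_VIPT));
        printf("VIVT: %s\n", icache_policy(ICACHE_VIVT_ASID_TAGGED));
        return 0;
}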
arch/arm64/include/asm/kvm_mmu.h

@@ -230,19 +230,26 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
         return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
-                                               kvm_pfn_t pfn,
-                                               unsigned long size)
+static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
+                                             kvm_pfn_t pfn,
+                                             unsigned long size)
 {
         void *va = page_address(pfn_to_page(pfn));
 
         kvm_flush_dcache_to_poc(va, size);
+}
 
+static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
+                                                  kvm_pfn_t pfn,
+                                                  unsigned long size)
+{
         if (icache_is_aliasing()) {
                 /* any kind of VIPT cache */
                 __flush_icache_all();
         } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
                 /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
+                void *va = page_address(pfn_to_page(pfn));
+
                 flush_icache_range((unsigned long)va,
                                    (unsigned long)va + size);
         }
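On arm64 there are three cases rather than two, because of the ARMv8.2 VPIPT ("VMID-aware PIPT") icache. A standalone sketch of the selection above, with icache_is_aliasing(), icache_is_vpipt() and is_kernel_in_hyp_mode() stubbed as plain booleans (illustrative only; the VPIPT rationale is spelled out in the comment in __kvm_tlb_flush_vmid_ipa):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the kernel predicates, which probe CTR_EL0 and the
 * current exception level */
static bool aliasing_vipt;      /* icache_is_aliasing() */
static bool vpipt;              /* icache_is_vpipt() */
static bool kernel_at_el2;      /* is_kernel_in_hyp_mode(), i.e. VHE */

static const char *arm64_icache_policy(void)
{
        if (aliasing_vipt)
                return "invalidate the whole icache";   /* any VIPT cache */
        if (kernel_at_el2 || !vpipt)
                return "invalidate the range by VA";    /* PIPT, or VPIPT at EL2 */
        /* VPIPT with the kernel at EL1: by-VA maintenance issued here would
         * not affect the guest's VMID-tagged lines, so the invalidation is
         * performed at EL2 instead (see __kvm_tlb_flush_vmid_ipa). */
        return "nothing to do here";
}

int main(void)
{
        vpipt = true;
        printf("VPIPT, host at EL1: %s\n", arm64_icache_policy());
        return 0;
}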
virt/kvm/arm/mmu.c

@@ -1257,10 +1257,16 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
         kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
-                                      unsigned long size)
+static void clean_dcache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
+                                    unsigned long size)
 {
-        __coherent_cache_guest_page(vcpu, pfn, size);
+        __clean_dcache_guest_page(vcpu, pfn, size);
+}
+
+static void invalidate_icache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
+                                         unsigned long size)
+{
+        __invalidate_icache_guest_page(vcpu, pfn, size);
 }
 
 static void kvm_send_hwpoison_signal(unsigned long address,

@@ -1391,7 +1397,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                         new_pmd = kvm_s2pmd_mkwrite(new_pmd);
                         kvm_set_pfn_dirty(pfn);
                 }
-                coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
+                clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
+                invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
+
                 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
         } else {
                 pte_t new_pte = pfn_pte(pfn, mem_type);

@@ -1401,7 +1409,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                         kvm_set_pfn_dirty(pfn);
                         mark_page_dirty(kvm, gfn);
                 }
-                coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
+                clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
+                invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
+
                 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
         }
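Both call sites in user_mem_abort() issue the d-side clean before the i-side invalidation, and the order matters: the clean pushes the newly faulted-in page contents out to the point of coherency first, so that i-side refills performed after the invalidation observe the new instructions rather than stale data. Condensed from the hunks above:

        /* clean first, then invalidate: i-side refills must see the new data */
        clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
        invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);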