Commit 17ab9d57 authored by Marc Zyngier's avatar Marc Zyngier Committed by Christoffer Dall

KVM: arm/arm64: Drop vcpu parameter from guest cache maintenance operations

The vcpu parameter isn't used for anything, and gets in the way of
further cleanups. Let's get rid of it.
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent 7a3796d2
...@@ -150,9 +150,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) ...@@ -150,9 +150,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101; return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
} }
static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu, static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
kvm_pfn_t pfn,
unsigned long size)
{ {
/* /*
* Clean the dcache to the Point of Coherency. * Clean the dcache to the Point of Coherency.
...@@ -177,8 +175,7 @@ static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu, ...@@ -177,8 +175,7 @@ static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
} }
} }
static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu, static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
kvm_pfn_t pfn,
unsigned long size) unsigned long size)
{ {
u32 iclsz; u32 iclsz;
......
...@@ -252,17 +252,14 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) ...@@ -252,17 +252,14 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
} }
static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu, static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
kvm_pfn_t pfn,
unsigned long size)
{ {
void *va = page_address(pfn_to_page(pfn)); void *va = page_address(pfn_to_page(pfn));
kvm_flush_dcache_to_poc(va, size); kvm_flush_dcache_to_poc(va, size);
} }
static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu, static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
kvm_pfn_t pfn,
unsigned long size) unsigned long size)
{ {
if (icache_is_aliasing()) { if (icache_is_aliasing()) {
......
...@@ -1276,16 +1276,14 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, ...@@ -1276,16 +1276,14 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
} }
static void clean_dcache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn, static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
unsigned long size)
{ {
__clean_dcache_guest_page(vcpu, pfn, size); __clean_dcache_guest_page(pfn, size);
} }
static void invalidate_icache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn, static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
unsigned long size)
{ {
__invalidate_icache_guest_page(vcpu, pfn, size); __invalidate_icache_guest_page(pfn, size);
} }
static void kvm_send_hwpoison_signal(unsigned long address, static void kvm_send_hwpoison_signal(unsigned long address,
...@@ -1421,11 +1419,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1421,11 +1419,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
} }
if (fault_status != FSC_PERM) if (fault_status != FSC_PERM)
clean_dcache_guest_page(vcpu, pfn, PMD_SIZE); clean_dcache_guest_page(pfn, PMD_SIZE);
if (exec_fault) { if (exec_fault) {
new_pmd = kvm_s2pmd_mkexec(new_pmd); new_pmd = kvm_s2pmd_mkexec(new_pmd);
invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE); invalidate_icache_guest_page(pfn, PMD_SIZE);
} else if (fault_status == FSC_PERM) { } else if (fault_status == FSC_PERM) {
/* Preserve execute if XN was already cleared */ /* Preserve execute if XN was already cleared */
if (stage2_is_exec(kvm, fault_ipa)) if (stage2_is_exec(kvm, fault_ipa))
...@@ -1443,11 +1441,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1443,11 +1441,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
} }
if (fault_status != FSC_PERM) if (fault_status != FSC_PERM)
clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE); clean_dcache_guest_page(pfn, PAGE_SIZE);
if (exec_fault) { if (exec_fault) {
new_pte = kvm_s2pte_mkexec(new_pte); new_pte = kvm_s2pte_mkexec(new_pte);
invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE); invalidate_icache_guest_page(pfn, PAGE_SIZE);
} else if (fault_status == FSC_PERM) { } else if (fault_status == FSC_PERM) {
/* Preserve execute if XN was already cleared */ /* Preserve execute if XN was already cleared */
if (stage2_is_exec(kvm, fault_ipa)) if (stage2_is_exec(kvm, fault_ipa))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment