Commit 748c0e31 authored by Lan Tianyu, committed by Paolo Bonzini

KVM: Make kvm_set_spte_hva() return int

Make kvm_set_spte_hva() return int so that the caller can check the
return value and decide whether a TLB flush is needed.
Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
Acked-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c3134ce2
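
The caller-side half of this change, the generic change_pte MMU notifier in
virt/kvm/kvm_main.c, is not visible in the hunks below. The sketch that
follows illustrates how the new return value is meant to be consumed; treat
it as an illustration of the intended pattern, not as part of this diff
(locking and sequence-count handling follow the existing notifier code):

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
                                        pte_t pte)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;

        /*
         * Flush remote TLBs only when the arch hook reports that a
         * shadow PTE was touched (sketch; with this patch alone every
         * implementation still returns 0).
         */
        if (kvm_set_spte_hva(kvm, address, pte))
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

Since every implementation below returns 0, this patch changes no behavior
by itself; the int return type only gives architectures a hook to request a
flush later.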
@@ -225,7 +225,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
...
@@ -360,7 +360,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
...
@@ -933,7 +933,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
...
@@ -551,7 +551,7 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                (pte_dirty(old_pte) && !pte_dirty(hva_pte));
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
        unsigned long end = hva + PAGE_SIZE;
        int ret;
@@ -559,6 +559,7 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
        ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
        if (ret)
                kvm_mips_callbacks->flush_shadow_all(kvm);
+       return 0;
 }
 
 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
...
@@ -72,7 +72,7 @@ extern int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 #define HPTEG_CACHE_NUM        (1 << 15)
 #define HPTEG_HASH_BITS_PTE    13
...
@@ -851,9 +851,10 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
        return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
        kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
+       return 0;
 }
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
...
@@ -757,10 +757,11 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
        return 0;
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
        /* The page will get remapped properly on its next fault */
        kvm_unmap_hva(kvm, hva);
+       return 0;
 }
 
 /*****************************************/
...
@@ -1512,7 +1512,7 @@ asmlinkage void kvm_spurious_fault(void);
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
...
@@ -1913,9 +1913,10 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
        return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
        kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+       return 0;
 }
 
 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
...
@@ -2049,14 +2049,14 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
        unsigned long end = hva + PAGE_SIZE;
        kvm_pfn_t pfn = pte_pfn(pte);
        pte_t stage2_pte;
 
        if (!kvm->arch.pgd)
-               return;
+               return 0;
 
        trace_kvm_set_spte_hva(hva);
@@ -2067,6 +2067,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
        clean_dcache_guest_page(pfn, PAGE_SIZE);
        stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+
+       return 0;
 }
 
 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
...