Commit 77c3913b authored by Liang Chen, committed by Paolo Bonzini

KVM: x86: directly use kvm_make_request again

A one-line wrapper around kvm_make_request is not particularly
useful. Replace kvm_mmu_flush_tlb() with kvm_make_request().
Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a70656b6
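
The change is mechanical: the removed helper (its definition is visible in the mmu.c hunk below) did nothing but set the per-vCPU TLB-flush request bit, so every caller can set that bit itself. A minimal before/after sketch of the pattern, not taken verbatim from any single call site:

    /* Before: local TLB flushes went through a one-line wrapper. */
    void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
    {
            kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
    }

            kvm_mmu_flush_tlb(vcpu);        /* at the call site */

    /* After: callers raise the request directly; the pending
     * KVM_REQ_TLB_FLUSH is serviced before the next guest entry. */
            kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
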
@@ -914,7 +914,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 int fx_init(struct kvm_vcpu *vcpu);
 
-void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        const u8 *new, int bytes);
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
...
@@ -1743,7 +1743,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                 return 1;
         }
 
-        kvm_mmu_flush_tlb(vcpu);
+        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
         return 0;
 }
@@ -1796,7 +1796,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
         if (flush)
-                kvm_mmu_flush_tlb(vcpu);
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
 struct mmu_page_path {
@@ -2530,7 +2530,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
               true, host_writable)) {
                 if (write_fault)
                         *emulate = 1;
-                kvm_mmu_flush_tlb(vcpu);
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
         }
 
         if (unlikely(is_mmio_spte(*sptep) && emulate))
@@ -3444,12 +3444,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
         context->nx = false;
 }
 
-void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
-{
-        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
 
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
 {
         mmu_free_roots(vcpu);
@@ -3964,7 +3958,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
         if (remote_flush)
                 kvm_flush_remote_tlbs(vcpu->kvm);
         else if (local_flush)
-                kvm_mmu_flush_tlb(vcpu);
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
@@ -4225,7 +4219,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
         vcpu->arch.mmu.invlpg(vcpu, gva);
-        kvm_mmu_flush_tlb(vcpu);
+        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
         ++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
...
@@ -6711,7 +6711,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
         switch (type) {
         case VMX_EPT_EXTENT_GLOBAL:
                 kvm_mmu_sync_roots(vcpu);
-                kvm_mmu_flush_tlb(vcpu);
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                 nested_vmx_succeed(vcpu);
                 break;
         default:
...
@@ -729,7 +729,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
         if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
                 kvm_mmu_sync_roots(vcpu);
-                kvm_mmu_flush_tlb(vcpu);
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                 return 0;
         }
...