Commit 8c8560b8 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Use KVM_REQ_TLB_FLUSH_CURRENT for MMU specific flushes

Flush only the current ASID/context when requesting a TLB flush due to a
change in the current vCPU's MMU to avoid blasting away TLB entries
associated with other ASIDs/contexts, e.g. entries cached for L1 when
a change in L2's MMU requires a flush.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-26-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent eeeb4f67
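
For context: KVM_REQ_TLB_FLUSH_CURRENT and the kvm_x86_ops.tlb_flush_current() hook are introduced earlier in this same series. A minimal sketch of how the two requests differ when serviced on the next VM-entry (paraphrased from the series, not quoted verbatim from x86.c, so the exact code may differ):

    /*
     * Sketch of request servicing in vcpu_enter_guest():
     * KVM_REQ_TLB_FLUSH flushes every ASID/context, while
     * KVM_REQ_TLB_FLUSH_CURRENT invalidates only the context
     * currently loaded on the vCPU, via the per-vendor
     * kvm_x86_ops.tlb_flush_current() hook seen in the diff below.
     */
    if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
        kvm_vcpu_flush_tlb_all(vcpu);
    if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
        kvm_vcpu_flush_tlb_current(vcpu);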
arch/x86/kvm/mmu/mmu.c

@@ -2309,7 +2309,7 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
 		return;
 
 	if (local_flush)
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 }
 
 #ifdef CONFIG_KVM_MMU_AUDIT
@@ -2516,11 +2516,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 				break;
 
 			WARN_ON(!list_empty(&invalid_list));
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 		}
 
 		if (sp->unsync_children)
-			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 
 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
@@ -3121,7 +3121,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
 		if (write_fault)
 			ret = RET_PF_EMULATE;
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 	}
 
 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
@@ -4310,7 +4310,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 			if (!skip_tlb_flush) {
 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-				kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+				kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 			}
 
 			/*
@@ -5179,7 +5179,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	kvm_mmu_load_pgd(vcpu);
-	kvm_x86_ops.tlb_flush_all(vcpu);
+	kvm_x86_ops.tlb_flush_current(vcpu);
 out:
 	return r;
 }
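
To make the payoff concrete, here is a self-contained toy model of the difference (user-space C; struct toy_tlb, flush_all() and flush_current() are made-up illustration names, not KVM code): flushing only the current ASID leaves entries cached for other contexts intact, e.g. L1's translations survive a flush triggered by a change in L2's MMU.

    /*
     * Toy user-space model, NOT kernel code: every name below is
     * hypothetical.  The "TLB" caches translations per ASID/context.
     */
    #include <stdio.h>
    #include <string.h>

    #define NR_ASIDS   4
    #define NR_ENTRIES 8

    struct toy_tlb {
        char valid[NR_ASIDS][NR_ENTRIES];   /* 1 = translation cached */
    };

    /* Analogue of a KVM_REQ_TLB_FLUSH-style flush: every ASID/context. */
    static void flush_all(struct toy_tlb *tlb)
    {
        memset(tlb->valid, 0, sizeof(tlb->valid));
    }

    /* Analogue of KVM_REQ_TLB_FLUSH_CURRENT: only the active context. */
    static void flush_current(struct toy_tlb *tlb, int cur_asid)
    {
        memset(tlb->valid[cur_asid], 0, sizeof(tlb->valid[cur_asid]));
    }

    int main(void)
    {
        struct toy_tlb tlb;
        int l1_asid = 1, l2_asid = 2;

        /* Pretend both L1 and L2 have warmed up the TLB. */
        memset(tlb.valid, 1, sizeof(tlb.valid));

        /* A change in L2's MMU needs only L2's context flushed... */
        flush_current(&tlb, l2_asid);

        /* ...so L1's cached entries survive: prints "L1=1 L2=0". */
        printf("L1=%d L2=%d\n", tlb.valid[l1_asid][0], tlb.valid[l2_asid][0]);

        /* A full flush would have blasted both: prints "L1=0 L2=0". */
        flush_all(&tlb);
        printf("L1=%d L2=%d\n", tlb.valid[l1_asid][0], tlb.valid[l2_asid][0]);
        return 0;
    }

Built with e.g. "cc toy_tlb.c && ./a.out", this shows L1's entry still valid after flush_current() on L2's ASID, while flush_all() drops both.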