Commit 7780938c authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: x86: Rename ->tlb_flush() to ->tlb_flush_all()

Rename ->tlb_flush() to ->tlb_flush_all() in preparation for adding a
new hook to flush only the current ASID/context.

Opportunistically replace the comment in vmx_flush_tlb() that explains
why it flushes all EPTP/VPID contexts with a comment explaining why it
unconditionally uses INVEPT when EPT is enabled.  I.e. rely on the "all"
part of the name to clarify why it does global INVEPT/INVVPID.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-23-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4a41e43c
...@@ -1103,7 +1103,7 @@ struct kvm_x86_ops { ...@@ -1103,7 +1103,7 @@ struct kvm_x86_ops {
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
void (*tlb_flush)(struct kvm_vcpu *vcpu); void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
int (*tlb_remote_flush)(struct kvm *kvm); int (*tlb_remote_flush)(struct kvm *kvm);
int (*tlb_remote_flush_with_range)(struct kvm *kvm, int (*tlb_remote_flush_with_range)(struct kvm *kvm,
struct kvm_tlb_range *range); struct kvm_tlb_range *range);
......
...@@ -5179,7 +5179,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) ...@@ -5179,7 +5179,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
if (r) if (r)
goto out; goto out;
kvm_mmu_load_pgd(vcpu); kvm_mmu_load_pgd(vcpu);
kvm_x86_ops.tlb_flush(vcpu); kvm_x86_ops.tlb_flush_all(vcpu);
out: out:
return r; return r;
} }
......
...@@ -3944,7 +3944,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { ...@@ -3944,7 +3944,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.get_rflags = svm_get_rflags, .get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags, .set_rflags = svm_set_rflags,
.tlb_flush = svm_flush_tlb, .tlb_flush_all = svm_flush_tlb,
.tlb_flush_gva = svm_flush_tlb_gva, .tlb_flush_gva = svm_flush_tlb_gva,
.tlb_flush_guest = svm_flush_tlb, .tlb_flush_guest = svm_flush_tlb,
......
...@@ -2838,18 +2838,16 @@ static void exit_lmode(struct kvm_vcpu *vcpu) ...@@ -2838,18 +2838,16 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
#endif #endif
static void vmx_flush_tlb(struct kvm_vcpu *vcpu) static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
/* /*
* Flush all EPTP/VPID contexts, as the TLB flush _may_ have been * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
* invoked via kvm_flush_remote_tlbs(). Flushing remote TLBs requires * the CPU is not required to invalidate guest-physical mappings on
* all contexts to be flushed, not just the active context. * VM-Entry, even if VPID is disabled. Guest-physical mappings are
* * associated with the root EPT structure and not any particular VPID
* Note, this also ensures a deferred TLB flush with VPID enabled and * (INVVPID also isn't required to invalidate guest-physical mappings).
* EPT disabled invalidates the "correct" VPID, by nuking both L1 and
* L2's VPIDs.
*/ */
if (enable_ept) { if (enable_ept) {
ept_sync_global(); ept_sync_global();
...@@ -7765,7 +7763,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { ...@@ -7765,7 +7763,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.get_rflags = vmx_get_rflags, .get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags, .set_rflags = vmx_set_rflags,
.tlb_flush = vmx_flush_tlb, .tlb_flush_all = vmx_flush_tlb_all,
.tlb_flush_gva = vmx_flush_tlb_gva, .tlb_flush_gva = vmx_flush_tlb_gva,
.tlb_flush_guest = vmx_flush_tlb_guest, .tlb_flush_guest = vmx_flush_tlb_guest,
......
...@@ -2690,10 +2690,10 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu) ...@@ -2690,10 +2690,10 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
vcpu->arch.time = 0; vcpu->arch.time = 0;
} }
static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
{ {
++vcpu->stat.tlb_flush; ++vcpu->stat.tlb_flush;
kvm_x86_ops.tlb_flush(vcpu); kvm_x86_ops.tlb_flush_all(vcpu);
} }
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
...@@ -8223,7 +8223,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -8223,7 +8223,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
kvm_mmu_load_pgd(vcpu); kvm_mmu_load_pgd(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvm_vcpu_flush_tlb(vcpu); kvm_vcpu_flush_tlb_all(vcpu);
if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu)) if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
kvm_vcpu_flush_tlb_guest(vcpu); kvm_vcpu_flush_tlb_guest(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment