Commit e64419d9 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move "flush guest's TLB" logic to separate kvm_x86_ops hook

Add a dedicated hook to handle flushing TLB entries on behalf of the
guest, i.e. for a paravirtualized TLB flush, and use it directly instead
of bouncing through kvm_vcpu_flush_tlb().
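
For reference, the helper being bypassed is a thin wrapper around the
existing ->tlb_flush() hook; roughly (a sketch of the pre-patch code in
x86.c, not verbatim):

	static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
	{
		/* Account the flush, then forward @invalidate_gpa to the vendor hook. */
		++vcpu->stat.tlb_flush;
		kvm_x86_ops.tlb_flush(vcpu, invalidate_gpa);
	}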

For VMX, change the effective implementation to never do
INVEPT and flush only the current context, i.e. to always flush via
INVVPID(SINGLE_CONTEXT).  The INVEPT performed by __vmx_flush_tlb() when
@invalidate_gpa=false and enable_vpid=0 is unnecessary, as it will only
flush guest-physical mappings; linear and combined mappings are flushed
by VM-Enter when VPID is disabled, and changes in the guest page tables
do not affect guest-physical mappings.
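
Roughly, the pre-patch flow was (a sketch; the exact code may differ):

	static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
					   bool invalidate_gpa)
	{
		/* With enable_vpid=0, this path is taken even for guest-only flushes. */
		if (enable_ept && (invalidate_gpa || !enable_vpid)) {
			if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
				return;
			ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu->root_hpa));
		} else {
			vpid_sync_context(vpid);
		}
	}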

When EPT and VPID are enabled, doing INVVPID is not required (by Intel's
architecture) to invalidate guest-physical mappings, i.e. TLB entries
that cache guest-physical mappings can live across INVVPID as the
mappings are associated with an EPTP, not a VPID.  The intent of
@invalidate_gpa is to inform vmx_flush_tlb() that it must "invalidate
gpa mappings", i.e. do INVEPT and not simply INVVPID.  Other than nested
VPID handling, which now calls vpid_sync_context() directly, the only
scenario where KVM can safely do INVVPID instead of INVEPT (when EPT is
enabled) is if KVM is flushing TLB entries from the guest's perspective,
i.e. is only required to invalidate linear mappings.
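
For context, vpid_sync_context() picks the narrowest supported INVVPID
variant and is a nop for vpid==0; roughly (a sketch, not verbatim):

	static inline void vpid_sync_context(int vpid)
	{
		if (cpu_has_vmx_invvpid_single())
			vpid_sync_vcpu_single(vpid);	/* returns early if vpid==0 */
		else if (vpid != 0)
			vpid_sync_vcpu_global();
	}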

For SVM, flushing TLB entries from the guest's perspective can be done
by flushing the current ASID, as changes to the guest's page tables are
associated only with the current ASID.
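
Concretely, svm_flush_tlb(), which the new hook reuses below with
@invalidate_gpa=false, only flushes the current ASID; roughly (a sketch,
not verbatim):

	static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* Flush by ASID if supported, else force a new ASID on the next VMRUN. */
		if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
			svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
		else
			svm->asid_generation--;
	}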

Adding a dedicated ->tlb_flush_guest() paves the way toward removing
@invalidate_gpa, which is a potentially dangerous control flag as its
meaning is not exactly crystal clear, even for those who are familiar
with the subtleties of what mappings Intel CPUs are/aren't allowed to
keep across various invalidation scenarios.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-15-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bc41d0c4
@@ -1114,6 +1114,12 @@ struct kvm_x86_ops {
 	 */
 	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
 
+	/*
+	 * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
+	 * does not need to flush GPA->HPA mappings.
+	 */
+	void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);
+
 	void (*run)(struct kvm_vcpu *vcpu);
 	int (*handle_exit)(struct kvm_vcpu *vcpu,
 		enum exit_fastpath_completion exit_fastpath);
...
@@ -3170,6 +3170,11 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
 	invlpga(gva, svm->vmcb->control.asid);
 }
 
+static void svm_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+	svm_flush_tlb(vcpu, false);
+}
+
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
 }
...
@@ -3939,6 +3944,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.tlb_flush = svm_flush_tlb,
 	.tlb_flush_gva = svm_flush_tlb_gva,
+	.tlb_flush_guest = svm_flush_tlb_guest,
 
 	.run = svm_vcpu_run,
 	.handle_exit = handle_exit,
...
@@ -2851,6 +2851,18 @@ static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
 	 */
 }
 
+static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
+	 * or a vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit
+	 * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
+	 * i.e. no explicit INVVPID is necessary.
+	 */
+	vpid_sync_context(to_vmx(vcpu)->vpid);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
...
@@ -7718,6 +7730,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.tlb_flush = vmx_flush_tlb,
 	.tlb_flush_gva = vmx_flush_tlb_gva,
+	.tlb_flush_guest = vmx_flush_tlb_guest,
 
 	.run = vmx_vcpu_run,
 	.handle_exit = vmx_handle_exit,
...
@@ -2719,7 +2719,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
 			       st->preempted & KVM_VCPU_FLUSH_TLB);
 	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
-		kvm_vcpu_flush_tlb(vcpu, false);
+		kvm_x86_ops.tlb_flush_guest(vcpu);
 
 	vcpu->arch.st.preempted = 0;