Commit a086f6a1 authored by Xiao Guangrong, committed by Marcelo Tosatti

Revert "KVM: Simplify kvm->tlbs_dirty handling"

This reverts commit 5befdc38.

Since a later patch will allow TLB flushes to be issued outside of
mmu-lock, tlbs_dirty must go back to being a counter rather than a
mmu-lock-protected bool.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 42bf549f
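
Once flushes can run without mmu-lock held, a plain bool is unsafe: a
flusher that clears tlbs_dirty can overwrite an indication that was set
concurrently after its flush, losing a needed TLB flush. The counter
restored by this revert avoids that, because the flusher snapshots the
count and only zeroes it if nothing was dirtied in between. Below is a
minimal user-space C sketch of that protocol; struct vm, mark_dirty()
and flush_remote_tlbs() are illustrative stand-ins, not kernel APIs,
and C11 atomics stand in for the kernel's smp_mb()/cmpxchg().

/*
 * Minimal user-space sketch of the counter-based tlbs_dirty protocol
 * that this revert restores.  struct vm, mark_dirty() and
 * flush_remote_tlbs() are illustrative names, not kernel APIs; C11
 * seq_cst atomics stand in for the kernel's smp_mb()/cmpxchg().
 */
#include <stdatomic.h>
#include <stdio.h>

struct vm {
	atomic_long tlbs_dirty;		/* analogue of kvm->tlbs_dirty */
};

/* A stale guest PTE was dropped: remote TLBs must be flushed before
 * the freed page may be reused.  Incrementing a counter (rather than
 * setting a bool) lets a concurrent flusher detect that it raced
 * with us. */
static void mark_dirty(struct vm *vm)
{
	atomic_fetch_add(&vm->tlbs_dirty, 1);	/* kvm->tlbs_dirty++ */
}

/* Analogue of kvm_flush_remote_tlbs(): snapshot the counter, flush,
 * then clear the counter only if it is unchanged.  If mark_dirty()
 * ran in between, the compare-exchange fails, the counter stays
 * nonzero, and the pending flush request is preserved. */
static void flush_remote_tlbs(struct vm *vm)
{
	long dirty_count = atomic_load(&vm->tlbs_dirty);

	/* ... IPI all vCPUs and flush their TLBs here ... */

	atomic_compare_exchange_strong(&vm->tlbs_dirty, &dirty_count, 0);
}

int main(void)
{
	struct vm vm = { .tlbs_dirty = 0 };

	mark_dirty(&vm);	/* counter: 0 -> 1 */
	flush_remote_tlbs(&vm);	/* counter: 1 -> 0, flush done */

	mark_dirty(&vm);	/* a racing increment like this survives */
	printf("tlbs_dirty = %ld\n", atomic_load(&vm.tlbs_dirty));
	return 0;
}
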
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -913,8 +913,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
  * used by guest then tlbs are not flushed, so guest is allowed to access the
  * freed pages.
- * We set tlbs_dirty to let the notifier know this change and delay the flush
- * until such a case actually happens.
+ * And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -943,7 +942,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			return -EINVAL;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			vcpu->kvm->tlbs_dirty = true;
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
@@ -958,7 +957,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		if (gfn != sp->gfns[i]) {
 			drop_spte(vcpu->kvm, &sp->spt[i]);
-			vcpu->kvm->tlbs_dirty = true;
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -411,9 +411,7 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
-	/* Protected by mmu_lock */
-	bool tlbs_dirty;
-
+	long tlbs_dirty;
 	struct list_head devices;
 };
 
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -186,9 +186,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
+	long dirty_count = kvm->tlbs_dirty;
+
+	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
-	kvm->tlbs_dirty = false;
+	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
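
In the restored kvm_flush_remote_tlbs() above, dirty_count snapshots
kvm->tlbs_dirty before the flush, and smp_mb() orders that read before
the KVM_REQ_TLB_FLUSH request is raised. If FNAME(sync_page) increments
the counter while the flush is in flight, the final cmpxchg() fails and
the counter stays nonzero, so the pending-flush indication is not lost
and a later flush will still observe it.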