Commit c3e5e415 authored by Lai Jiangshan, committed by Paolo Bonzini

KVM: X86: Change kvm_sync_page() to return true when remote flush is needed

Currently kvm_sync_page() returns true when there is any present spte,
but the return value is ignored by its callers.

Change kvm_sync_page() to return true when a remote flush is needed and
change mmu->sync_page() not to flush directly, so that flush requests
from multiple synced pages can be combined and the number of remote
flushes reduced.
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210918005636.3675-7-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 06152b2d
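
For illustration, a self-contained toy model of the flush-batching pattern this change enables (the names below, such as sync_page(), kvm_sync_page_model(), page_id and zapped, are simplified stand-ins rather than real KVM symbols; only the control flow mirrors the patched kvm_sync_page()/mmu_sync_children(): accumulate "flush needed" results across a batch of pages and issue a single remote TLB flush at the end instead of one per page):

/* Toy model of the flush-batching convention: <0 = zap, 0 = synced, >0 = flush. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for mmu->sync_page(): decide per page what the caller should do. */
static int sync_page(int page_id)
{
	if (page_id % 5 == 0)
		return -1;		/* unsyncable: caller should zap it */
	return page_id % 2;		/* odd pages changed: remote flush required */
}

/* Mirrors the new kvm_sync_page(): returns true when a remote flush is needed. */
static bool kvm_sync_page_model(int page_id, int *zapped)
{
	int ret = sync_page(page_id);

	if (ret < 0) {
		(*zapped)++;		/* stands in for kvm_mmu_prepare_zap_page() */
		return false;
	}
	return !!ret;
}

int main(void)
{
	bool flush = false;
	int zapped = 0;
	int page;

	/* Accumulate flush requests instead of flushing once per page. */
	for (page = 1; page <= 10; page++)
		flush |= kvm_sync_page_model(page, &zapped);

	/* One combined flush for the whole batch, like kvm_mmu_remote_flush_or_zap(). */
	if (flush)
		printf("one combined remote TLB flush for the whole batch\n");
	printf("zapped %d page(s)\n", zapped);
	return 0;
}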
arch/x86/kvm/mmu/mmu.c

@@ -1795,7 +1795,7 @@ static void mark_unsync(u64 *spte)
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
 			       struct kvm_mmu_page *sp)
 {
-	return 0;
+	return -1;
 }
 
 #define KVM_PAGE_ARRAY_NR 16
@@ -1909,12 +1909,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			  struct list_head *invalid_list)
 {
-	if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
+
+	if (ret < 0) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return false;
 	}
 
-	return true;
+	return !!ret;
 }
 
 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
@@ -2024,6 +2026,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 	struct mmu_page_path parents;
 	struct kvm_mmu_pages pages;
 	LIST_HEAD(invalid_list);
+	bool flush = false;
 
 	while (mmu_unsync_walk(parent, &pages)) {
 		bool protected = false;
@@ -2033,25 +2036,27 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 		if (protected) {
 			kvm_flush_remote_tlbs(vcpu->kvm);
+			flush = false;
 		}
 
 		for_each_sp(pages, sp, parents, i) {
 			kvm_unlink_unsync_page(vcpu->kvm, sp);
-			kvm_sync_page(vcpu, sp, &invalid_list);
+			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
 			mmu_pages_clear_parents(&parents);
 		}
 
 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
-			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false);
+			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
 			if (!can_yield) {
 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
 				return -EINTR;
 			}
 
 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
+			flush = false;
 		}
 	}
 
-	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false);
+	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
 	return 0;
 }
@@ -2135,6 +2140,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 				break;
 
 			WARN_ON(!list_empty(&invalid_list));
+			kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		__clear_sp_write_flooding_count(sp);
@@ -4191,7 +4197,7 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 }
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			   unsigned int access, int *nr_present)
+			   unsigned int access)
 {
 	if (unlikely(is_mmio_spte(*sptep))) {
 		if (gfn != get_mmio_spte_gfn(*sptep)) {
@@ -4199,7 +4205,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			return true;
 		}
 
-		(*nr_present)++;
 		mark_mmio_spte(vcpu, sptep, gfn, access);
 		return true;
 	}

arch/x86/kvm/mmu/paging_tmpl.h

@@ -1066,11 +1066,16 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
+ *
+ * Returns
+ * < 0: the sp should be zapped
+ *   0: the sp is synced and no tlb flushing is required
+ * > 0: the sp is synced and tlb flushing is required
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
-	int i, nr_present = 0;
+	int i;
 	bool host_writable;
 	gpa_t first_pte_gpa;
 	int set_spte_ret = 0;
@@ -1098,7 +1103,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	 */
 	if (WARN_ON_ONCE(sp->role.direct ||
 			 (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
-		return 0;
+		return -1;
 
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
@@ -1115,7 +1120,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 					       sizeof(pt_element_t)))
-			return 0;
+			return -1;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
 			set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
@@ -1127,8 +1132,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		pte_access &= FNAME(gpte_access)(gpte);
 		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
-				   &nr_present))
+		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
 			continue;
 
 		if (gfn != sp->gfns[i]) {
@@ -1137,8 +1141,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			continue;
 		}
 
-		nr_present++;
-
 		host_writable = sp->spt[i] & shadow_host_writable_mask;
 
 		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
@@ -1147,10 +1149,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 					 true, false, host_writable);
 	}
 
-	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-
-	return nr_present;
+	return set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH;
 }
 
 #undef pt_element_t