Commit 332b207d authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: optimize pte write path if don't have protected sp

Simply return from the kvm_mmu_pte_write path if no shadow page is
write-protected; then we can avoid walking all shadow pages and holding
the mmu-lock.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 96304217
...@@ -441,6 +441,7 @@ struct kvm_arch { ...@@ -441,6 +441,7 @@ struct kvm_arch {
unsigned int n_used_mmu_pages; unsigned int n_used_mmu_pages;
unsigned int n_requested_mmu_pages; unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages; unsigned int n_max_mmu_pages;
unsigned int indirect_shadow_pages;
atomic_t invlpg_counter; atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/* /*
......
...@@ -498,6 +498,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn) ...@@ -498,6 +498,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
linfo = lpage_info_slot(gfn, slot, i); linfo = lpage_info_slot(gfn, slot, i);
linfo->write_count += 1; linfo->write_count += 1;
} }
kvm->arch.indirect_shadow_pages++;
} }
static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
...@@ -513,6 +514,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) ...@@ -513,6 +514,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
linfo->write_count -= 1; linfo->write_count -= 1;
WARN_ON(linfo->write_count < 0); WARN_ON(linfo->write_count < 0);
} }
kvm->arch.indirect_shadow_pages--;
} }
static int has_wrprotected_page(struct kvm *kvm, static int has_wrprotected_page(struct kvm *kvm,
...@@ -3233,6 +3235,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, ...@@ -3233,6 +3235,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int level, npte, invlpg_counter, r, flooded = 0; int level, npte, invlpg_counter, r, flooded = 0;
bool remote_flush, local_flush, zap_page; bool remote_flush, local_flush, zap_page;
/*
* If we don't have indirect shadow pages, it means no page is
* write-protected, so we can exit simply.
*/
if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
return;
zap_page = remote_flush = local_flush = false; zap_page = remote_flush = local_flush = false;
offset = offset_in_page(gpa); offset = offset_in_page(gpa);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment