Commit 767d8d8d authored by Lai Jiangshan, committed by Paolo Bonzini

KVM: X86/MMU: Add sp_has_gptes()

Add sp_has_gptes(), which is currently equivalent to !sp->role.direct.

Shadow pages that have gptes need to be write-protected, accounted for, and
handled by kvm_mmu_pte_write().

Use sp_has_gptes() in those places to replace !sp->role.direct, and rename
for_each_gfn_indirect_valid_sp to for_each_gfn_valid_sp_with_gptes accordingly.
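
For illustration only (not part of this patch): the value of the helper is that
callers stop open-coding !sp->role.direct, so if a kind of shadow page without
gptes were ever added, only sp_has_gptes() would have to change. A minimal,
purely hypothetical sketch of such an extension point:

	static bool sp_has_gptes(struct kvm_mmu_page *sp)
	{
		if (sp->role.direct)
			return false;

		/*
		 * Hypothetical extension point: a new kind of shadow page
		 * without gptes would only need an extra check here;
		 * callers of sp_has_gptes() stay unchanged.
		 */

		return true;
	}

Callers keep using sp_has_gptes() and for_each_gfn_valid_sp_with_gptes() and
would automatically pick up any new rule.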
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Message-Id: <20220420131204.2850-2-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9f084f7c
@@ -1853,15 +1853,23 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
+static bool sp_has_gptes(struct kvm_mmu_page *sp)
+{
+	if (sp->role.direct)
+		return false;
+
+	return true;
+}
+
 #define for_each_valid_sp(_kvm, _sp, _list)				\
 	hlist_for_each_entry(_sp, _list, hash_link)			\
 		if (is_obsolete_sp((_kvm), (_sp))) {			\
 		} else
 
-#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)		\
+#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
 	for_each_valid_sp(_kvm, _sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
-		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
+		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
@@ -2109,7 +2117,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, sp_list);
-	if (!direct) {
+	if (sp_has_gptes(sp)) {
 		account_shadowed(vcpu->kvm, sp);
 		if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
@@ -2318,7 +2326,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 	/* Zapping children means active_mmu_pages has become unstable. */
 	list_unstable = *nr_zapped;
 
-	if (!sp->role.invalid && !sp->role.direct)
+	if (!sp->role.invalid && sp_has_gptes(sp))
 		unaccount_shadowed(kvm, sp);
 
 	if (sp->unsync)
@@ -2498,7 +2506,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	write_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
+	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2560,7 +2568,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
 	 * that case, KVM must complete emulation of the guest TLB flush before
 	 * allowing shadow pages to become unsync (writable by the guest).
 	 */
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
+	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
 		if (!can_unsync)
 			return -EPERM;
@@ -5298,7 +5306,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	++vcpu->kvm->stat.mmu_pte_write;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+	for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		    detect_write_flooding(sp)) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);