Commit 3ad93562 authored by Keqian Zhu, committed by Paolo Bonzini

KVM: x86: Support write protecting only large pages

Prepare for lazily write protecting large pages during dirty log tracking,
for which we will only need to write protect gfns at large-page
granularity.

No functional or performance change expected.
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Message-Id: <20210429034115.35560-2-zhukeqian1@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d8f5537a
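
For context, the new min_level argument lets a caller start write protection above the 4K level. Below is a minimal sketch of a hypothetical caller (not part of this patch; the helper name is made up) that would write protect only the 2M-and-larger mappings of a gfn:

/*
 * Hypothetical helper, not in this patch: write protect only the
 * large-page mappings of @gfn, leaving any 4K mappings writable.
 */
static void wrprot_gfn_large_only(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn)
{
	/* Starting at PG_LEVEL_2M skips the 4K rmap level entirely. */
	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_2M))
		kvm_flush_remote_tlbs(kvm);
}

Both existing callers updated by this patch pass PG_LEVEL_4K, which preserves the current behavior of protecting every mapping level.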
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1249,20 +1249,21 @@ int kvm_cpu_dirty_log_size(void)
 }
 
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn)
+				    struct kvm_memory_slot *slot, u64 gfn,
+				    int min_level)
 {
 	struct kvm_rmap_head *rmap_head;
 	int i;
 	bool write_protected = false;
 
-	for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+	for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmap_head = __gfn_to_rmap(gfn, i, slot);
 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
 	}
 
 	if (is_tdp_mmu_enabled(kvm))
 		write_protected |=
-			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
+			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
 
 	return write_protected;
 }
@@ -1272,7 +1273,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 	struct kvm_memory_slot *slot;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -128,7 +128,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn);
+				    struct kvm_memory_slot *slot, u64 gfn,
+				    int min_level);
 
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 					u64 start_gfn, u64 pages);
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -100,7 +100,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 
 	if (mode == KVM_PAGE_TRACK_WRITE)
-		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
 			kvm_flush_remote_tlbs(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1462,15 +1462,22 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
-			      gfn_t gfn)
+			      gfn_t gfn, int min_level)
 {
 	struct tdp_iter iter;
 	u64 new_spte;
 	bool spte_set = false;
 
+	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
+
 	rcu_read_lock();
 
-	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
+	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+				   min_level, gfn, gfn + 1) {
+		if (!is_shadow_present_pte(iter.old_spte) ||
+		    !is_last_spte(iter.old_spte, iter.level))
+			continue;
+
 		if (!is_writable_pte(iter.old_spte))
 			break;
 
@@ -1492,14 +1499,15 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn)
+				   struct kvm_memory_slot *slot, gfn_t gfn,
+				   int min_level)
 {
 	struct kvm_mmu_page *root;
 	bool spte_set = false;
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
-		spte_set |= write_protect_gfn(kvm, root, gfn);
+		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
 
 	return spte_set;
 }
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -74,7 +74,8 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				       bool flush);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn);
+				   struct kvm_memory_slot *slot, gfn_t gfn,
+				   int min_level);
 
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level);