Commit d77aa73c authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: MMU: use slot_handle_level and its helper to clean up the code

slot_handle_level and its helper functions are ready now; use them to
clean up the code.
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1bad2b2a
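
Note: the slot_handle_* walkers and wrappers used below are introduced by the
parent commit and are not part of this diff. What follows is a condensed,
reconstructed sketch of their shape (the real helpers may differ in detail,
e.g. they may be built around a range-based rmap iterator); it only restates,
in parameterized form, the open-coded loops this patch removes:

/*
 * Sketch only - the real helpers live in the parent commit and may be
 * structured differently. The behaviour mirrors the loops removed below:
 * walk the memslot's rmaps for a range of page-table levels, call fn on
 * each non-empty rmap, and periodically drop mmu_lock to reschedule,
 * flushing the TLB first when lock_flush_tlb is set.
 */
typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmapp);

static bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool lock_flush_tlb)
{
	gfn_t last_gfn = memslot->base_gfn + memslot->npages - 1;
	bool flush = false;
	int i;

	for (i = start_level; i <= end_level; ++i) {
		unsigned long *rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
		unsigned long last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
		unsigned long index;

		for (index = 0; index <= last_index; ++index, ++rmapp) {
			if (*rmapp)
				flush |= fn(kvm, rmapp);

			/* Drop mmu_lock periodically; flush first if asked to. */
			if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
				if (flush && lock_flush_tlb) {
					kvm_flush_remote_tlbs(kvm);
					flush = false;
				}
				cond_resched_lock(&kvm->mmu_lock);
			}
		}
	}

	if (flush && lock_flush_tlb) {
		kvm_flush_remote_tlbs(kvm);
		flush = false;
	}

	return flush;
}

/* Convenience wrappers: all levels, huge-page levels only, or 4K leaf only. */
static bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		      slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}

kvm_mmu_zap_collapsible_sptes() passes lock_flush_tlb = true because its old
loop flushed the TLB before dropping mmu_lock; the write-protect and dirty-
logging paths pass false and flush outside the lock instead, as the retained
comments in the hunks below explain.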
@@ -4523,34 +4523,19 @@ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
 }
 
+static bool slot_rmap_write_protect(struct kvm *kvm, unsigned long *rmapp)
+{
+	return __rmap_write_protect(kvm, rmapp, false);
+}
+
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 				      struct kvm_memory_slot *memslot)
 {
-	gfn_t last_gfn;
-	int i;
-	bool flush = false;
-
-	last_gfn = memslot->base_gfn + memslot->npages - 1;
+	bool flush;
 
 	spin_lock(&kvm->mmu_lock);
-
-	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		unsigned long *rmapp;
-		unsigned long last_index, index;
-
-		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
-		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
-
-		for (index = 0; index <= last_index; ++index, ++rmapp) {
-			if (*rmapp)
-				flush |= __rmap_write_protect(kvm, rmapp,
-						false);
-
-			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-				cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-
+	flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
+				      false);
 	spin_unlock(&kvm->mmu_lock);
 
 	/*
@@ -4611,59 +4596,18 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot)
 {
-	bool flush = false;
-	unsigned long *rmapp;
-	unsigned long last_index, index;
-
 	spin_lock(&kvm->mmu_lock);
-
-	rmapp = memslot->arch.rmap[0];
-	last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1,
-				  memslot->base_gfn, PT_PAGE_TABLE_LEVEL);
-
-	for (index = 0; index <= last_index; ++index, ++rmapp) {
-		if (*rmapp)
-			flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp);
-
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			if (flush) {
-				kvm_flush_remote_tlbs(kvm);
-				flush = false;
-			}
-			cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-
-	if (flush)
-		kvm_flush_remote_tlbs(kvm);
-
+	slot_handle_leaf(kvm, memslot, kvm_mmu_zap_collapsible_spte, true);
 	spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot)
 {
-	gfn_t last_gfn;
-	unsigned long *rmapp;
-	unsigned long last_index, index;
-	bool flush = false;
-
-	last_gfn = memslot->base_gfn + memslot->npages - 1;
+	bool flush;
 
 	spin_lock(&kvm->mmu_lock);
-
-	rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1];
-	last_index = gfn_to_index(last_gfn, memslot->base_gfn,
-				  PT_PAGE_TABLE_LEVEL);
-
-	for (index = 0; index <= last_index; ++index, ++rmapp) {
-		if (*rmapp)
-			flush |= __rmap_clear_dirty(kvm, rmapp);
-
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-			cond_resched_lock(&kvm->mmu_lock);
-	}
-
+	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
 	spin_unlock(&kvm->mmu_lock);
 
 	lockdep_assert_held(&kvm->slots_lock);
@@ -4682,31 +4626,11 @@ EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 					struct kvm_memory_slot *memslot)
 {
-	gfn_t last_gfn;
-	int i;
-	bool flush = false;
-
-	last_gfn = memslot->base_gfn + memslot->npages - 1;
+	bool flush;
 
 	spin_lock(&kvm->mmu_lock);
-
-	/* skip rmap for 4K page */
-	for (i = PT_PAGE_TABLE_LEVEL + 1; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		unsigned long *rmapp;
-		unsigned long last_index, index;
-
-		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
-		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
-
-		for (index = 0; index <= last_index; ++index, ++rmapp) {
-			if (*rmapp)
-				flush |= __rmap_write_protect(kvm, rmapp,
-						false);
-
-			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-				cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-
+	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
+					false);
 	spin_unlock(&kvm->mmu_lock);
 
 	/* see kvm_mmu_slot_remove_write_access */
@@ -4720,30 +4644,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 			    struct kvm_memory_slot *memslot)
 {
-	gfn_t last_gfn;
-	int i;
-	bool flush = false;
-
-	last_gfn = memslot->base_gfn + memslot->npages - 1;
+	bool flush;
 
 	spin_lock(&kvm->mmu_lock);
-
-	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		unsigned long *rmapp;
-		unsigned long last_index, index;
-
-		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
-		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
-
-		for (index = 0; index <= last_index; ++index, ++rmapp) {
-			if (*rmapp)
-				flush |= __rmap_set_dirty(kvm, rmapp);
-
-			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-				cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-
+	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
 	spin_unlock(&kvm->mmu_lock);
 
 	lockdep_assert_held(&kvm->slots_lock);