Commit 8e22f955 authored by Xiao Guangrong's avatar Xiao Guangrong Committed by Avi Kivity

KVM: MMU: cleanup spte_write_protect

Use __drop_large_spte() to clean up this function, and add a comment to spte_write_protect().
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent d13bc5b5
...@@ -1050,7 +1050,33 @@ static void drop_spte(struct kvm *kvm, u64 *sptep) ...@@ -1050,7 +1050,33 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
rmap_remove(kvm, sptep); rmap_remove(kvm, sptep);
} }
/*
 * Drop @sptep if it maps a large page.
 *
 * Returns true when the spte was dropped (caller is responsible for any
 * needed TLB flush), false when the spte was not a large-page mapping.
 */
static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (!is_large_pte(*sptep))
		return false;

	/* Large sptes never live in last-level page tables. */
	WARN_ON(page_header(__pa(sptep))->role.level ==
		PT_PAGE_TABLE_LEVEL);

	drop_spte(kvm, sptep);
	--kvm->stat.lpages;
	return true;
}
/* Drop a large spte and flush remote TLBs if it was actually removed. */
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm *kvm = vcpu->kvm;

	if (__drop_large_spte(kvm, sptep))
		kvm_flush_remote_tlbs(kvm);
}
/*
* Write-protect on the specified @sptep due to dirty page logging or
* protecting shadow page table. @flush indicates whether tlb need be
* flushed.
*
* Return true if the spte is dropped.
*/
static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush)
{ {
u64 spte = *sptep; u64 spte = *sptep;
...@@ -1061,13 +1087,9 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) ...@@ -1061,13 +1087,9 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush)
rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
*flush |= true; *flush |= true;
if (is_large_pte(spte)) {
WARN_ON(page_header(__pa(sptep))->role.level == if (__drop_large_spte(kvm, sptep))
PT_PAGE_TABLE_LEVEL);
drop_spte(kvm, sptep);
--kvm->stat.lpages;
return true; return true;
}
spte = spte & ~PT_WRITABLE_MASK; spte = spte & ~PT_WRITABLE_MASK;
mmu_spte_update(sptep, spte); mmu_spte_update(sptep, spte);
...@@ -1878,15 +1900,6 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) ...@@ -1878,15 +1900,6 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
mmu_spte_set(sptep, spte); mmu_spte_set(sptep, spte);
} }
/*
 * Drop @sptep if it maps a large page, flushing remote TLBs and
 * updating the large-page statistics accordingly.
 */
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	if (!is_large_pte(*sptep))
		return;

	drop_spte(vcpu->kvm, sptep);
	--vcpu->kvm->stat.lpages;
	kvm_flush_remote_tlbs(vcpu->kvm);
}
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned direct_access) unsigned direct_access)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment