Commit f783ef1c authored by Jing Zhang, committed by Marc Zyngier

KVM: arm64: Add fast path to handle permission relaxation during dirty logging

To reduce MMU lock contention during dirty logging, all permission
relaxation operations are now performed under the read lock.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220118015703.3630552-3-jingzhangos@google.com
parent fcc5bf89
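The patch relies on the fact that relaxing permissions on an existing
stage-2 mapping (making a read-only page writable so the write can be
logged as dirty) never changes the page-table structure, so such faults
can proceed under a shared lock while structural changes keep the
exclusive lock. Below is a minimal user-space sketch of that locking
pattern, not the kernel code itself: fault_ctx, relax_permissions() and
install_mapping() are hypothetical names, and the kernel's rwlock-style
mmu_lock is modeled with a POSIX pthread_rwlock_t.

/*
 * Sketch of the fast-path locking pattern: permission-relaxation
 * faults taken during dirty logging only need a shared (read) lock,
 * because they never add or remove page-table levels; all other
 * faults still take the exclusive (write) lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

struct fault_ctx {
	bool logging_active;	/* dirty logging enabled on the memslot */
	bool write_fault;	/* guest attempted a write */
	bool perm_fault;	/* existing mapping, permission bits only */
};

/* Permission relaxation: flip an existing PTE from read-only to
 * read-write. No table structure changes, so concurrent readers of
 * the page tables are safe. (Hypothetical stand-in function.) */
static void relax_permissions(void)
{
	printf("marking page writable under read lock\n");
}

/* Anything else (new mappings, block splits) mutates structure and
 * needs exclusive access. (Hypothetical stand-in function.) */
static void install_mapping(void)
{
	printf("installing mapping under write lock\n");
}

static void handle_fault(const struct fault_ctx *f)
{
	/* Mirrors the patch: the fast path applies only to write
	 * permission faults taken while dirty logging is active. */
	bool logging_perm_fault =
		f->logging_active && f->perm_fault && f->write_fault;

	if (logging_perm_fault)
		pthread_rwlock_rdlock(&mmu_lock);
	else
		pthread_rwlock_wrlock(&mmu_lock);

	if (logging_perm_fault)
		relax_permissions();
	else
		install_mapping();

	pthread_rwlock_unlock(&mmu_lock);
}

int main(void)
{
	struct fault_ctx perm  = { true, true, true };
	struct fault_ctx other = { true, true, false };

	handle_fault(&perm);	/* fast path: shared lock */
	handle_fault(&other);	/* slow path: exclusive lock */
	return 0;
}

Built with cc -pthread, the sketch shows the payoff: concurrent
permission faults serialize only against structural updates, not
against each other.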
@@ -1080,6 +1080,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	gfn_t gfn;
 	kvm_pfn_t pfn;
 	bool logging_active = memslot_is_logging(memslot);
+	bool logging_perm_fault = false;
 	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
 	unsigned long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1114,6 +1115,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (logging_active) {
 		force_pte = true;
 		vma_shift = PAGE_SHIFT;
+		logging_perm_fault = (fault_status == FSC_PERM && write_fault);
 	} else {
 		vma_shift = get_vma_page_shift(vma, hva);
 	}
@@ -1212,7 +1214,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (exec_fault && device)
 		return -ENOEXEC;
 
-	write_lock(&kvm->mmu_lock);
+	/*
+	 * To reduce MMU contentions and enhance concurrency during
+	 * dirty logging, only acquire read lock for permission
+	 * relaxation.
+	 */
+	if (logging_perm_fault)
+		read_lock(&kvm->mmu_lock);
+	else
+		write_lock(&kvm->mmu_lock);
 	pgt = vcpu->arch.hw_mmu->pgt;
 	if (mmu_notifier_retry(kvm, mmu_seq))
 		goto out_unlock;
@@ -1271,7 +1281,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 out_unlock:
-	write_unlock(&kvm->mmu_lock);
+	if (logging_perm_fault)
+		read_unlock(&kvm->mmu_lock);
+	else
+		write_unlock(&kvm->mmu_lock);
 	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 	return ret != -EAGAIN ? ret : 0;