Commit 89154dd5 authored by Michel Lespinasse, committed by Linus Torvalds

mmap locking API: convert mmap_sem call sites missed by coccinelle

Convert the last few remaining mmap_sem rwsem calls to use the new mmap
locking API.  These were missed by coccinelle for some reason (I think
coccinelle does not support some of the preprocessor constructs in these
files?)

[akpm@linux-foundation.org: convert linux-next leftovers]
[akpm@linux-foundation.org: more linux-next leftovers]
[akpm@linux-foundation.org: more linux-next leftovers]
Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-6-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d8ed45c5
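
For reference while reading the hunks: the parent commit (d8ed45c5) added the
new calls as thin wrappers around the existing mmap_sem rwsem, so each
substitution below is behavior-preserving. A minimal sketch of the read-side
wrappers, paraphrased from that commit's include/linux/mmap_lock.h (treat the
exact bodies as illustrative rather than verbatim kernel source):

/* Read-side mmap locking wrappers (sketch; see include/linux/mmap_lock.h). */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	return down_read_killable(&mm->mmap_sem);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}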
@@ -1084,7 +1084,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	spin_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
@@ -1092,7 +1092,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 		stage2_unmap_memslot(kvm, memslot);
 
 	spin_unlock(&kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
@@ -1848,11 +1848,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 	/* Let's check if we will get back a huge page backed by hugetlbfs */
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	vma = find_vma_intersection(current->mm, hva, hva + 1);
 	if (unlikely(!vma)) {
 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 		return -EFAULT;
 	}
@@ -1879,7 +1879,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (vma_pagesize == PMD_SIZE ||
 	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
 		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 	/* We need minimum second+third level pages */
 	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2456,7 +2456,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
 		return -EFAULT;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map
@@ -2515,7 +2515,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
 out:
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	return ret;
 }
...
@@ -97,7 +97,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -190,7 +190,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -198,7 +198,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -250,14 +250,14 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
...
@@ -117,10 +117,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
 	if (!numpages)
 		return 0;
 
-	down_read(&init_mm.mmap_sem);
+	mmap_read_lock(&init_mm);
 	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
 				    &masks);
-	up_read(&init_mm.mmap_sem);
+	mmap_read_unlock(&init_mm);
 
 	flush_tlb_kernel_range(start, end);
...
@@ -165,22 +165,22 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		unsigned long pfn;
 		unsigned long paddr;
 
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
 		if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			return -EFAULT;
 		}
 		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 		paddr = pfn << PAGE_SHIFT;
 		table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
 		if (!table) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			return -EFAULT;
 		}
 		ret = CMPXCHG(&table[index], orig_pte, new_pte);
 		memunmap(table);
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 	}
 
 	return (ret != orig_pte);
...
@@ -982,9 +982,9 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 	}
 
 	mutex_unlock(&bo->mutex);
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	vma = find_vma(current->mm, (unsigned long)userptr);
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	if (!vma) {
 		dev_err(atomisp_dev, "find_vma failed\n");
 		kfree(bo->page_obj);
...
@@ -1422,17 +1422,17 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
 			mutex_unlock(&vdev->vma_lock);
 
 			if (try) {
-				if (!down_read_trylock(&mm->mmap_sem)) {
+				if (!mmap_read_trylock(mm)) {
 					mmput(mm);
 					return 0;
 				}
 			} else {
-				down_read(&mm->mmap_sem);
+				mmap_read_lock(mm);
 			}
 			if (mmget_still_valid(mm)) {
 				if (try) {
 					if (!mutex_trylock(&vdev->vma_lock)) {
-						up_read(&mm->mmap_sem);
+						mmap_read_unlock(mm);
 						mmput(mm);
 						return 0;
 					}
@@ -1454,7 +1454,7 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
 				}
 				mutex_unlock(&vdev->vma_lock);
 			}
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			mmput(mm);
 		}
 	}
...
@@ -2322,7 +2322,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 	if (!mm)
 		goto out_put_task;
 
-	ret = down_read_killable(&mm->mmap_sem);
+	ret = mmap_read_lock_killable(mm);
 	if (ret) {
 		mmput(mm);
 		goto out_put_task;
@@ -2349,7 +2349,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 		p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
 		if (!p) {
 			ret = -ENOMEM;
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			mmput(mm);
 			goto out_put_task;
 		}
@@ -2358,7 +2358,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 		p->end = vma->vm_end;
 		p->mode = vma->vm_file->f_mode;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);
 
 	for (i = 0; i < nr_files; i++) {
...
@@ -245,9 +245,9 @@ static int dmirror_range_fault(struct dmirror *dmirror,
 		}
 
 		range->notifier_seq = mmu_interval_read_begin(range->notifier);
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		ret = hmm_range_fault(range);
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		if (ret) {
 			if (ret == -EBUSY)
 				continue;
@@ -686,7 +686,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
 	if (!mmget_not_zero(mm))
 		return -EINVAL;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	for (addr = start; addr < end; addr = next) {
 		vma = find_vma(mm, addr);
 		if (!vma || addr < vma->vm_start ||
@@ -713,7 +713,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
 		dmirror_migrate_finalize_and_map(&args, dmirror);
 		migrate_vma_finalize(&args);
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);
 
 	/* Return the migrated data for verification. */
@@ -733,7 +733,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
 	return ret;
 
 out:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);
 	return ret;
 }
@@ -825,9 +825,9 @@ static int dmirror_range_snapshot(struct dmirror *dmirror,
 		range->notifier_seq = mmu_interval_read_begin(range->notifier);
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		ret = hmm_range_fault(range);
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		if (ret) {
 			if (ret == -EBUSY)
 				continue;
...
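
The hunks above exercise three read-side variants; each keeps the return
convention of the rwsem primitive it wraps. A hypothetical caller (not from
this patch) showing how each variant is expected to be checked:

/* Hypothetical illustration of the return conventions; not patch code. */
static int mmap_lock_conventions(struct mm_struct *mm)
{
	int ret;

	mmap_read_lock(mm);		/* unconditional acquire; may sleep */
	mmap_read_unlock(mm);

	if (!mmap_read_trylock(mm))	/* true on success, false if contended */
		return -EAGAIN;
	mmap_read_unlock(mm);

	ret = mmap_read_lock_killable(mm); /* 0 on success, -EINTR on fatal signal */
	if (ret)
		return ret;
	mmap_read_unlock(mm);

	return 0;
}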