Commit 3e4e28c5 authored by Michel Lespinasse, committed by Linus Torvalds

mmap locking API: convert mmap_sem API comments

Convert comments that reference old mmap_sem APIs to reference
corresponding new mmap locking APIs instead.
Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-12-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent da1c55f1
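
The rewritten comments refer to the mmap locking API wrappers introduced earlier in this series. As a rough sketch of the old-to-new correspondence (illustrative only, not taken from this commit; the function and its body are hypothetical):

```c
#include <linux/mm.h>	/* struct mm_struct and the mmap_lock wrappers */

/* Hypothetical helper pairing the old mmap_sem calls with the wrappers
 * the updated comments now name. Not part of this commit. */
static void mmap_lock_api_sketch(struct mm_struct *mm)
{
	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	/* ... walk VMAs, fault pages, etc. ... */
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	if (mmap_read_trylock(mm))	/* was: down_read_trylock(&mm->mmap_sem) */
		mmap_read_unlock(mm);

	mmap_write_lock(mm);		/* was: down_write(&mm->mmap_sem) */
	/* ... modify the mm's VMA tree ... */
	mmap_write_unlock(mm);		/* was: up_write(&mm->mmap_sem) */
}
```
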
@@ -191,15 +191,15 @@ The usage pattern is::
 again:
     range.notifier_seq = mmu_interval_read_begin(&interval_sub);
-    down_read(&mm->mmap_sem);
+    mmap_read_lock(mm);
     ret = hmm_range_fault(&range);
     if (ret) {
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
         if (ret == -EBUSY)
             goto again;
         return ret;
     }
-    up_read(&mm->mmap_sem);
+    mmap_read_unlock(mm);
     take_lock(driver->update);
     if (mmu_interval_read_retry(&ni, range.notifier_seq) {
...
@@ -171,7 +171,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
     if (fault & VM_FAULT_RETRY) {
         flags |= FAULT_FLAG_TRIED;
-        /* No need to up_read(&mm->mmap_sem) as we would
+        /* No need to mmap_read_unlock(mm) as we would
          * have already released it in __lock_page_or_retry
          * in mm/filemap.c.
          */
...
@@ -173,7 +173,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
     if (fault & VM_FAULT_RETRY) {
         flags |= FAULT_FLAG_TRIED;
-        /* No need to up_read(&mm->mmap_sem) as we would
+        /* No need to mmap_read_unlock(mm) as we would
          * have already released it in __lock_page_or_retry
          * in mm/filemap.c.
          */
...
@@ -165,7 +165,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
     flags |= FAULT_FLAG_TRIED;
     /*
-     * No need to up_read(&mm->mmap_sem) as we would
+     * No need to mmap_read_unlock(mm) as we would
      * have already released it in __lock_page_or_retry
      * in mm/filemap.c.
      */
...
@@ -238,7 +238,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
     flags |= FAULT_FLAG_TRIED;
     /*
-     * No need to up_read(&mm->mmap_sem) as we would
+     * No need to mmap_read_unlock(mm) as we would
      * have already released it in __lock_page_or_retry
      * in mm/filemap.c.
      */
...
@@ -181,7 +181,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
     flags |= FAULT_FLAG_TRIED;
     /*
-     * No need to up_read(&mm->mmap_sem) as we would
+     * No need to mmap_read_unlock(mm) as we would
      * have already released it in __lock_page_or_retry
      * in mm/filemap.c.
      */
...
@@ -247,7 +247,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
     if (fault & VM_FAULT_RETRY) {
         flags |= FAULT_FLAG_TRIED;
-        /* No need to up_read(&mm->mmap_sem) as we would
+        /* No need to mmap_read_unlock(mm) as we would
          * have already released it in __lock_page_or_retry
          * in mm/filemap.c.
          */
...
@@ -160,7 +160,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
     flags |= FAULT_FLAG_TRIED;
     /*
-     * No need to up_read(&mm->mmap_sem) as we would
+     * No need to mmap_read_unlock(mm) as we would
      * have already released it in __lock_page_or_retry
      * in mm/filemap.c.
      */
...
@@ -183,7 +183,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
     if (fault & VM_FAULT_RETRY) {
         flags |= FAULT_FLAG_TRIED;
-        /* No need to up_read(&mm->mmap_sem) as we would
+        /* No need to mmap_read_unlock(mm) as we would
          * have already released it in __lock_page_or_retry
          * in mm/filemap.c.
          */
...
@@ -329,7 +329,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
     current->min_flt++;
     if (fault & VM_FAULT_RETRY) {
         /*
-         * No need to up_read(&mm->mmap_sem) as we would
+         * No need to mmap_read_unlock(mm) as we would
          * have already released it in __lock_page_or_retry
          * in mm/filemap.c.
          */
...
@@ -147,7 +147,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
     flags |= FAULT_FLAG_TRIED;
     /*
-     * No need to up_read(&mm->mmap_sem) as we would
+     * No need to mmap_read_unlock(mm) as we would
      * have already released it in __lock_page_or_retry
      * in mm/filemap.c.
      */
...
@@ -502,7 +502,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
     flags |= FAULT_FLAG_TRIED;
     /*
-     * No need to up_read(&mm->mmap_sem) as we would
+     * No need to mmap_read_unlock(mm) as we would
      * have already released it in __lock_page_or_retry
      * in mm/filemap.c.
      */
...
@@ -262,7 +262,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
     if (fault & VM_FAULT_RETRY) {
         flags |= FAULT_FLAG_TRIED;
-        /* No need to up_read(&mm->mmap_sem) as we would
+        /* No need to mmap_read_unlock(mm) as we would
          * have already released it in __lock_page_or_retry
          * in mm/filemap.c.
          */
...
@@ -450,7 +450,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
     if (fault & VM_FAULT_RETRY) {
         flags |= FAULT_FLAG_TRIED;
-        /* No need to up_read(&mm->mmap_sem) as we would
+        /* No need to mmap_read_unlock(mm) as we would
          * have already released it in __lock_page_or_retry
          * in mm/filemap.c.
          */
...
@@ -130,7 +130,7 @@ void do_page_fault(struct pt_regs *regs)
     if (fault & VM_FAULT_RETRY) {
         flags |= FAULT_FLAG_TRIED;
-        /* No need to up_read(&mm->mmap_sem) as we would
+        /* No need to mmap_read_unlock(mm) as we would
          * have already released it in __lock_page_or_retry
          * in mm/filemap.c.
          */
...
@@ -933,7 +933,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
     if (!mmget_not_zero(mm))
         goto err_mmget;
     if (!mmap_read_trylock(mm))
-        goto err_down_read_mmap_sem_failed;
+        goto err_mmap_read_lock_failed;
     vma = binder_alloc_get_vma(alloc);
     list_lru_isolate(lru, item);
@@ -960,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
     mutex_unlock(&alloc->mutex);
     return LRU_REMOVED_RETRY;
-err_down_read_mmap_sem_failed:
+err_mmap_read_lock_failed:
     mmput_async(mm);
 err_mmget:
 err_page_already_freed:
...
@@ -187,7 +187,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 }
 /*
- * Called under down_write(mmap_sem).
+ * Called under mmap_write_lock(mm).
  */
 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
...
@@ -1248,7 +1248,7 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
     /*
      * To be sure waitqueue_active() is not reordered by the CPU
      * before the pagetable update, use an explicit SMP memory
-     * barrier here. PT lock release or up_read(mmap_sem) still
+     * barrier here. PT lock release or mmap_read_unlock(mm) still
      * have release semantics that can allow the
      * waitqueue_active() to be reordered before the pte update.
      */
...
@@ -1373,7 +1373,7 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
  * Return values:
  * 1 - page is locked; mmap_sem is still held.
  * 0 - page is not locked.
- * mmap_sem has been released (up_read()), unless flags had both
+ * mmap_lock has been released (mmap_read_unlock(), unless flags had both
  * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
  * which case mmap_sem is still held.
  *
...
@@ -1993,19 +1993,19 @@ EXPORT_SYMBOL(get_user_pages);
 /**
  * get_user_pages_locked() is suitable to replace the form:
  *
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      do_something()
  *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
+ *      mmap_read_unlock(mm);
  *
  *  to:
  *
  *      int locked = 1;
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      do_something()
  *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
  *      if (locked)
- *          up_read(&mm->mmap_sem);
+ *          mmap_read_unlock(mm);
  *
  * @start: starting user address
  * @nr_pages: number of pages from start to pin
@@ -2050,9 +2050,9 @@ EXPORT_SYMBOL(get_user_pages_locked);
 /*
  * get_user_pages_unlocked() is suitable to replace the form:
  *
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
+ *      mmap_read_unlock(mm);
  *
  * with:
  *
...
@@ -1833,9 +1833,9 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         goto unlock;
     /*
-     * In case prot_numa, we are under down_read(mmap_sem). It's critical
+     * In case prot_numa, we are under mmap_read_lock(mm). It's critical
      * to not clear pmd intermittently to avoid race with MADV_DONTNEED
-     * which is also under down_read(mmap_sem):
+     * which is also under mmap_read_lock(mm):
      *
      * CPU0:                        CPU1:
      *                              change_huge_pmd(prot_numa=1)
...
@@ -1543,7 +1543,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
     /*
      * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
      * got written to. These VMAs are likely not worth investing
-     * down_write(mmap_sem) as PMD-mapping is likely to be split
+     * mmap_write_lock(mm) as PMD-mapping is likely to be split
      * later.
      *
      * Not that vma->anon_vma check is racy: it can be set up after
...
@@ -2362,7 +2362,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
     } else {
         mmap_read_unlock(mm);
         /*
-         * up_read(&mm->mmap_sem) first because after
+         * mmap_read_unlock(mm) first because after
          * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
          * already have been freed under us by __ksm_exit()
          * because the "mm_slot" is still hashed and
...
@@ -3323,10 +3323,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
      * pte_offset_map() on pmds where a huge pmd might be created
      * from a different thread.
      *
-     * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+     * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
      * parallel threads are excluded by other means.
      *
-     * Here we only have down_read(mmap_sem).
+     * Here we only have mmap_read_lock(mm).
      */
     if (pte_alloc(vma->vm_mm, vmf->pmd))
         return VM_FAULT_OOM;
...
@@ -2185,7 +2185,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  *
  * This function allocates a page from the kernel page pool and applies
  * a NUMA policy associated with the VMA or the current process.
- * When VMA is not NULL caller must hold down_read on the mmap_sem of the
+ * When VMA is not NULL caller must read-lock the mmap_lock of the
  * mm_struct of the VMA to prevent it from going away. Should be used for
  * all allocations for pages that will be mapped into user space. Returns
  * NULL when no page can be allocated.
...
@@ -2772,10 +2772,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
      * pte_offset_map() on pmds where a huge pmd might be created
      * from a different thread.
      *
-     * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+     * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
      * parallel threads are excluded by other means.
      *
-     * Here we only have down_read(mmap_sem).
+     * Here we only have mmap_read_lock(mm).
      */
     if (pte_alloc(mm, pmdp))
         goto abort;
...
@@ -1361,7 +1361,7 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
 }
 /*
- * The caller must hold down_write(&current->mm->mmap_sem).
+ * The caller must write-lock current->mm->mmap_lock.
  */
 unsigned long do_mmap(struct file *file, unsigned long addr,
             unsigned long len, unsigned long prot,
...
@@ -577,8 +577,8 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
     /*
      * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
      * work on the mm anymore. The check for MMF_OOM_SKIP must run
-     * under mmap_sem for reading because it serializes against the
-     * down_write();up_write() cycle in exit_mmap().
+     * under mmap_lock for reading because it serializes against the
+     * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
      */
     if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
         trace_skip_task_reaping(tsk->pid);
@@ -611,7 +611,7 @@ static void oom_reap_task(struct task_struct *tsk)
     int attempts = 0;
     struct mm_struct *mm = tsk->signal->oom_mm;
-    /* Retry the down_read_trylock(mmap_sem) a few times */
+    /* Retry the mmap_read_trylock(mm) a few times */
     while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
         schedule_timeout_idle(HZ/10);
@@ -629,7 +629,7 @@ static void oom_reap_task(struct task_struct *tsk)
     /*
      * Hide this mm from OOM killer because it has been either reaped or
-     * somebody can't call up_write(mmap_sem).
+     * somebody can't call mmap_write_unlock(mm).
      */
     set_bit(MMF_OOM_SKIP, &mm->flags);
...
@@ -1734,7 +1734,7 @@ int tcp_mmap(struct file *file, struct socket *sock,
         return -EPERM;
     vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
-    /* Instruct vm_insert_page() to not down_read(mmap_sem) */
+    /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
     vma->vm_flags |= VM_MIXEDMAP;
     vma->vm_ops = &tcp_vm_ops;
...