Commit d4944b0e authored by Lorenzo Stoakes, committed by Linus Torvalds

mm: remove write/force parameters from __get_user_pages_unlocked()

This removes the redundant 'write' and 'force' parameters from
__get_user_pages_unlocked() to make the use of FOLL_FORCE explicit in
callers as use of this flag can result in surprising behaviour (and
hence bugs) within the mm subsystem.
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 859110d7
...@@ -1285,8 +1285,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, ...@@ -1285,8 +1285,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages, int *locked); int write, int force, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages, unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages, struct page **pages, unsigned int gup_flags);
unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages); int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write, int get_user_pages_fast(unsigned long start, int nr_pages, int write,
......
...@@ -875,17 +875,11 @@ EXPORT_SYMBOL(get_user_pages_locked); ...@@ -875,17 +875,11 @@ EXPORT_SYMBOL(get_user_pages_locked);
*/ */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages, unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages, struct page **pages, unsigned int gup_flags)
unsigned int gup_flags)
{ {
long ret; long ret;
int locked = 1; int locked = 1;
if (write)
gup_flags |= FOLL_WRITE;
if (force)
gup_flags |= FOLL_FORCE;
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL, ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
&locked, false, gup_flags); &locked, false, gup_flags);
...@@ -915,8 +909,15 @@ EXPORT_SYMBOL(__get_user_pages_unlocked); ...@@ -915,8 +909,15 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages) int write, int force, struct page **pages)
{ {
unsigned int flags = FOLL_TOUCH;
if (write)
flags |= FOLL_WRITE;
if (force)
flags |= FOLL_FORCE;
return __get_user_pages_unlocked(current, current->mm, start, nr_pages, return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
write, force, pages, FOLL_TOUCH); pages, flags);
} }
EXPORT_SYMBOL(get_user_pages_unlocked); EXPORT_SYMBOL(get_user_pages_unlocked);
......
...@@ -185,8 +185,7 @@ EXPORT_SYMBOL(get_user_pages_locked); ...@@ -185,8 +185,7 @@ EXPORT_SYMBOL(get_user_pages_locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages, unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages, struct page **pages, unsigned int gup_flags)
unsigned int gup_flags)
{ {
long ret; long ret;
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
...@@ -200,8 +199,15 @@ EXPORT_SYMBOL(__get_user_pages_unlocked); ...@@ -200,8 +199,15 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages) int write, int force, struct page **pages)
{ {
unsigned int flags = 0;
if (write)
flags |= FOLL_WRITE;
if (force)
flags |= FOLL_FORCE;
return __get_user_pages_unlocked(current, current->mm, start, nr_pages, return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
write, force, pages, 0); pages, flags);
} }
EXPORT_SYMBOL(get_user_pages_unlocked); EXPORT_SYMBOL(get_user_pages_unlocked);
......
...@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr, ...@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
ssize_t rc = 0; ssize_t rc = 0;
unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
/ sizeof(struct pages *); / sizeof(struct pages *);
unsigned int flags = FOLL_REMOTE;
/* Work out address and page range required */ /* Work out address and page range required */
if (len == 0) if (len == 0)
return 0; return 0;
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
if (vm_write)
flags |= FOLL_WRITE;
while (!rc && nr_pages && iov_iter_count(iter)) { while (!rc && nr_pages && iov_iter_count(iter)) {
int pages = min(nr_pages, max_pages_per_loop); int pages = min(nr_pages, max_pages_per_loop);
size_t bytes; size_t bytes;
...@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr, ...@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
* current/current->mm * current/current->mm
*/ */
pages = __get_user_pages_unlocked(task, mm, pa, pages, pages = __get_user_pages_unlocked(task, mm, pa, pages,
vm_write, 0, process_pages, process_pages, flags);
FOLL_REMOTE);
if (pages <= 0) if (pages <= 0)
return -EFAULT; return -EFAULT;
......
...@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work) ...@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
* mm and might be done in another context, so we must * mm and might be done in another context, so we must
* use FOLL_REMOTE. * use FOLL_REMOTE.
*/ */
__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE); __get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
FOLL_WRITE | FOLL_REMOTE);
kvm_async_page_present_sync(vcpu, apf); kvm_async_page_present_sync(vcpu, apf);
......
...@@ -1416,10 +1416,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, ...@@ -1416,10 +1416,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
down_read(&current->mm->mmap_sem); down_read(&current->mm->mmap_sem);
npages = get_user_page_nowait(addr, write_fault, page); npages = get_user_page_nowait(addr, write_fault, page);
up_read(&current->mm->mmap_sem); up_read(&current->mm->mmap_sem);
} else } else {
unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
if (write_fault)
flags |= FOLL_WRITE;
npages = __get_user_pages_unlocked(current, current->mm, addr, 1, npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
write_fault, 0, page, page, flags);
FOLL_TOUCH|FOLL_HWPOISON); }
if (npages != 1) if (npages != 1)
return npages; return npages;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment