Commit 4c630f30 authored by Lorenzo Stoakes, committed by Andrew Morton

mm/gup: remove vmas parameter from pin_user_pages()

We are now in a position where no caller of pin_user_pages() requires the
vmas parameter at all, so eliminate this parameter from the function and
all callers.

This clears the way to removing the vmas parameter from GUP altogether.
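
For reference, the post-patch calling convention is sketched below. This is a minimal, hypothetical caller (the helper name and error handling are illustrative, not part of this patch); it mirrors the converted call sites in this commit, which take mmap_read_lock() and pass only four arguments. Callers that still need the VMA must look it up separately while holding mmap_lock.

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative only: pin a user buffer long-term with the new signature. */
static int example_pin_longterm(unsigned long uaddr, unsigned long nr_pages,
				struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	pinned = pin_user_pages(uaddr, nr_pages,
				FOLL_WRITE | FOLL_LONGTERM, pages);
	mmap_read_unlock(current->mm);

	if (pinned < 0)
		return pinned;			/* hard error from GUP */
	if (pinned != nr_pages) {
		/* partial pin: release what we got and report failure */
		unpin_user_pages(pages, pinned);
		return -ENOMEM;
	}
	return 0;
}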

Link: https://lkml.kernel.org/r/195a99ae949c9f5cb589d2222b736ced96ec199a.1684350871.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>	[qib]
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>	[drivers/media]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Sean Christopherson <seanjc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 34ed8d0d
@@ -105,7 +105,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,
 				FOLL_WRITE | FOLL_LONGTERM,
-				mem->hpages + entry, NULL);
+				mem->hpages + entry);
 		if (ret == n) {
 			pinned += n;
 			continue;
...
@@ -111,7 +111,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		ret = pin_user_pages(start_page + got * PAGE_SIZE,
 				     num_pages - got,
 				     FOLL_LONGTERM | FOLL_WRITE,
-				     p + got, NULL);
+				     p + got);
 		if (ret < 0) {
 			mmap_read_unlock(current->mm);
 			goto bail_release;
...
@@ -140,7 +140,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 		ret = pin_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 				     PAGE_SIZE / sizeof(struct page *)),
-				     gup_flags, page_list, NULL);
+				     gup_flags, page_list);
 		if (ret < 0)
 			goto out;
...
@@ -422,7 +422,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 		umem->page_chunk[i].plist = plist;
 		while (nents) {
 			rv = pin_user_pages(first_page_va, nents, foll_flags,
-					    plist, NULL);
+					    plist);
 			if (rv < 0)
 				goto out_sem_up;
...
@@ -180,7 +180,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 		data, size, dma->nr_pages);
 	err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags,
-			     dma->pages, NULL);
+			     dma->pages);
 	if (err != dma->nr_pages) {
 		dma->nr_pages = (err >= 0) ? err : 0;
...
@@ -1052,7 +1052,7 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev,
 		goto out;
 	pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE,
-				page_list, NULL);
+				page_list);
 	if (pinned != npages) {
 		ret = pinned < 0 ? pinned : -ENOMEM;
 		goto out;
...
@@ -983,7 +983,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 	while (npages) {
 		sz2pin = min_t(unsigned long, npages, list_size);
 		pinned = pin_user_pages(cur_base, sz2pin,
-					gup_flags, page_list, NULL);
+					gup_flags, page_list);
 		if (sz2pin != pinned) {
 			if (pinned < 0) {
 				ret = pinned;
...
@@ -2399,8 +2399,7 @@ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages);
 long pin_user_pages(unsigned long start, unsigned long nr_pages,
-		    unsigned int gup_flags, struct page **pages,
-		    struct vm_area_struct **vmas);
+		    unsigned int gup_flags, struct page **pages);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 		    struct page **pages, unsigned int gup_flags);
 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
...
@@ -1044,7 +1044,7 @@ struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
 	ret = 0;
 	mmap_read_lock(current->mm);
 	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
-			      pages, NULL);
+			      pages);
 	if (pret == nr_pages)
 		*npages = nr_pages;
 	else
...
...@@ -3131,8 +3131,6 @@ EXPORT_SYMBOL(pin_user_pages_remote); ...@@ -3131,8 +3131,6 @@ EXPORT_SYMBOL(pin_user_pages_remote);
* @gup_flags: flags modifying lookup behaviour * @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned. * @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. * Should be at least nr_pages long.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* *
* Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
* FOLL_PIN is set. * FOLL_PIN is set.
...@@ -3141,15 +3139,14 @@ EXPORT_SYMBOL(pin_user_pages_remote); ...@@ -3141,15 +3139,14 @@ EXPORT_SYMBOL(pin_user_pages_remote);
* see Documentation/core-api/pin_user_pages.rst for details. * see Documentation/core-api/pin_user_pages.rst for details.
*/ */
long pin_user_pages(unsigned long start, unsigned long nr_pages, long pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, unsigned int gup_flags, struct page **pages)
struct vm_area_struct **vmas)
{ {
int locked = 1; int locked = 1;
if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_PIN)) if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN))
return 0; return 0;
return __gup_longterm_locked(current->mm, start, nr_pages, return __gup_longterm_locked(current->mm, start, nr_pages,
pages, vmas, &locked, gup_flags); pages, NULL, &locked, gup_flags);
} }
EXPORT_SYMBOL(pin_user_pages); EXPORT_SYMBOL(pin_user_pages);
......
...@@ -146,18 +146,17 @@ static int __gup_test_ioctl(unsigned int cmd, ...@@ -146,18 +146,17 @@ static int __gup_test_ioctl(unsigned int cmd,
pages + i); pages + i);
break; break;
case PIN_BASIC_TEST: case PIN_BASIC_TEST:
nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i, nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i);
NULL);
break; break;
case PIN_LONGTERM_BENCHMARK: case PIN_LONGTERM_BENCHMARK:
nr = pin_user_pages(addr, nr, nr = pin_user_pages(addr, nr,
gup->gup_flags | FOLL_LONGTERM, gup->gup_flags | FOLL_LONGTERM,
pages + i, NULL); pages + i);
break; break;
case DUMP_USER_PAGES_TEST: case DUMP_USER_PAGES_TEST:
if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN)
nr = pin_user_pages(addr, nr, gup->gup_flags, nr = pin_user_pages(addr, nr, gup->gup_flags,
pages + i, NULL); pages + i);
else else
nr = get_user_pages(addr, nr, gup->gup_flags, nr = get_user_pages(addr, nr, gup->gup_flags,
pages + i); pages + i);
...@@ -270,7 +269,7 @@ static inline int pin_longterm_test_start(unsigned long arg) ...@@ -270,7 +269,7 @@ static inline int pin_longterm_test_start(unsigned long arg)
gup_flags, pages); gup_flags, pages);
else else
cur_pages = pin_user_pages(addr, remaining_pages, cur_pages = pin_user_pages(addr, remaining_pages,
gup_flags, pages, NULL); gup_flags, pages);
if (cur_pages < 0) { if (cur_pages < 0) {
pin_longterm_test_stop(); pin_longterm_test_stop();
ret = cur_pages; ret = cur_pages;
......
@@ -103,7 +103,7 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
 	mmap_read_lock(current->mm);
 	npgs = pin_user_pages(address, umem->npgs,
-			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
+			      gup_flags | FOLL_LONGTERM, &umem->pgs[0]);
 	mmap_read_unlock(current->mm);
 	if (npgs != umem->npgs) {
...