Commit b2cac248 authored by Lorenzo Stoakes, committed by Andrew Morton

mm/gup: remove vmas array from internal GUP functions

Now that we have eliminated all callers of GUP APIs which use the vmas
parameter, eliminate it altogether.

This eliminates a class of bugs in which the vmas array might be kept around
longer than the mmap_lock; we no longer need to worry about the lock being
dropped during the operation and leaving dangling VMA pointers behind.

This simplifies the GUP API and makes its purpose considerably clearer:
follow flags are applied and, if pinning, an array of pages is returned.

Link: https://lkml.kernel.org/r/6811b4b2b4b3baf3dd07f422bb18853bb2cd09fb.1684350871.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Sean Christopherson <seanjc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4c630f30
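
To illustrate the calling convention this series leaves behind, here is a minimal sketch (not part of this commit) of how a caller that previously received a vmas array from GUP can instead pin the pages and look up the VMA itself while still holding the mmap_lock. The function name and the hugetlb check are hypothetical, error handling is simplified, and it assumes the post-series pin_user_pages() signature without a vmas argument.

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/errno.h>

/*
 * Hypothetical example: pin pages via GUP, then look up the VMA
 * separately under mmap_lock if per-VMA information is needed.
 * The pinned pages stay valid after the lock is released; the VMA
 * pointer does not, so it is only dereferenced while the lock is held.
 */
static int example_pin_pages(unsigned long start, unsigned long nr_pages,
			     struct page **pages)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	long pinned;
	int ret = 0;

	mmap_read_lock(mm);

	/* pin_user_pages() no longer hands back VMA pointers. */
	pinned = pin_user_pages(start, nr_pages, FOLL_WRITE, pages);
	if (pinned != nr_pages) {
		if (pinned > 0)
			unpin_user_pages(pages, pinned);
		ret = pinned < 0 ? pinned : -EFAULT;
		goto out;
	}

	/* Any VMA check is now the caller's job, done under the lock. */
	vma = vma_lookup(mm, start);
	if (!vma || is_vm_hugetlb_page(vma)) {
		unpin_user_pages(pages, pinned);
		ret = -EINVAL;
	}

out:
	mmap_read_unlock(mm);
	return ret;
}

The callers converted earlier in this series follow broadly this pattern: any per-VMA inspection happens under mmap_lock, and only the pinned pages outlive it.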
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -133,9 +133,8 @@ int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 				unsigned long address, unsigned int flags);
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
-			 struct page **, struct vm_area_struct **,
-			 unsigned long *, unsigned long *, long, unsigned int,
-			 int *);
+			 struct page **, unsigned long *, unsigned long *,
+			 long, unsigned int, int *);
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *,
 			  zap_flags_t);
@@ -306,9 +305,8 @@ static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 static inline long follow_hugetlb_page(struct mm_struct *mm,
 			struct vm_area_struct *vma, struct page **pages,
-			struct vm_area_struct **vmas, unsigned long *position,
-			unsigned long *nr_pages, long i, unsigned int flags,
-			int *nonblocking)
+			unsigned long *position, unsigned long *nr_pages,
+			long i, unsigned int flags, int *nonblocking)
 {
 	BUG();
 	return 0;
...
This diff is collapsed.
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6425,17 +6425,14 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 }
 #endif /* CONFIG_USERFAULTFD */
 
-static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
-				 int refs, struct page **pages,
-				 struct vm_area_struct **vmas)
+static void record_subpages(struct page *page, struct vm_area_struct *vma,
+			    int refs, struct page **pages)
 {
 	int nr;
 
 	for (nr = 0; nr < refs; nr++) {
 		if (likely(pages))
 			pages[nr] = nth_page(page, nr);
-		if (vmas)
-			vmas[nr] = vma;
 	}
 }
@@ -6508,9 +6505,9 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 }
 
 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			 struct page **pages, struct vm_area_struct **vmas,
-			 unsigned long *position, unsigned long *nr_pages,
-			 long i, unsigned int flags, int *locked)
+			 struct page **pages, unsigned long *position,
+			 unsigned long *nr_pages, long i, unsigned int flags,
+			 int *locked)
 {
 	unsigned long pfn_offset;
 	unsigned long vaddr = *position;
@@ -6638,7 +6635,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * If subpage information not requested, update counters
 		 * and skip the same_page loop below.
 		 */
-		if (!pages && !vmas && !pfn_offset &&
+		if (!pages && !pfn_offset &&
 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
 		    (remainder >= pages_per_huge_page(h))) {
 			vaddr += huge_page_size(h);
@@ -6653,11 +6650,10 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
 			    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
 
-		if (pages || vmas)
-			record_subpages_vmas(nth_page(page, pfn_offset),
-					     vma, refs,
-					     likely(pages) ? pages + i : NULL,
-					     vmas ? vmas + i : NULL);
+		if (pages)
+			record_subpages(nth_page(page, pfn_offset),
+					vma, refs,
+					likely(pages) ? pages + i : NULL);
 
 		if (pages) {
 			/*
...