Commit ca122fe4 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm: add an 'end' parameter to find_get_entries

This simplifies the callers and leads to a more efficient implementation,
since the XArray can already limit a search to an end index.
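
As an illustration (not part of this commit), here is a hedged sketch of the
calling pattern the new 'end' argument enables: the lookup itself stops at the
final index, so callers no longer need a per-entry "if (index >= end) break;"
check. The function name example_scan is hypothetical; it assumes only the
find_get_entries() signature introduced here plus the existing pagevec helpers.

	/* Hypothetical caller, sketched for illustration only. */
	#include <linux/pagemap.h>
	#include <linux/pagevec.h>

	static void example_scan(struct address_space *mapping,
				 pgoff_t start, pgoff_t end)
	{
		struct pagevec pvec;
		pgoff_t indices[PAGEVEC_SIZE];
		unsigned int i;

		pagevec_init(&pvec);
		/* The lookup is already bounded by 'end' (inclusive). */
		pvec.nr = find_get_entries(mapping, start, end, PAGEVEC_SIZE,
					   pvec.pages, indices);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			/*
			 * Every entry satisfies indices[i] <= end, so no
			 * "if (indices[i] >= end) break;" is needed here.
			 */
		}
		/* Drop value entries, then put the page references. */
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
	}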

Link: https://lkml.kernel.org/r/20201112212641.27837-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5c211ba2
@@ -451,8 +451,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
 }
 
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
-		unsigned int nr_entries, struct page **entries,
-		pgoff_t *indices);
+		pgoff_t end, unsigned int nr_entries, struct page **entries,
+		pgoff_t *indices);
 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 			pgoff_t end, unsigned int nr_pages,
 			struct page **pages);
@@ -1865,6 +1865,7 @@ static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max,
  * find_get_entries - gang pagecache lookup
  * @mapping:	The address_space to search
  * @start:	The starting page cache index
+ * @end:	The final page index (inclusive).
  * @nr_entries:	The maximum number of entries
  * @entries:	Where the resulting entries are placed
  * @indices:	The cache indices corresponding to the entries in @entries
@@ -1888,9 +1889,9 @@ static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max,
  *
  * Return: the number of pages and shadow entries which were found.
  */
-unsigned find_get_entries(struct address_space *mapping,
-		pgoff_t start, unsigned int nr_entries,
-		struct page **entries, pgoff_t *indices)
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+		pgoff_t end, unsigned int nr_entries, struct page **entries,
+		pgoff_t *indices)
 {
 	XA_STATE(xas, &mapping->i_pages, start);
 	struct page *page;
@@ -1900,7 +1901,7 @@ unsigned find_get_entries(struct address_space *mapping,
 		return 0;
 
 	rcu_read_lock();
-	while ((page = find_get_entry(&xas, ULONG_MAX, XA_PRESENT))) {
+	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
 		/*
 		 * Terminate early on finding a THP, to allow the caller to
 		 * handle it all at once; but continue if this is hugetlbfs.
@@ -913,8 +913,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index >= end)
-				break;
 
 			if (xa_is_value(page)) {
 				if (unfalloc)
@@ -967,9 +965,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	while (index < end) {
 		cond_resched();
 
-		pvec.nr = find_get_entries(mapping, index,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
-				pvec.pages, indices);
+		pvec.nr = find_get_entries(mapping, index, end - 1,
+				PAGEVEC_SIZE, pvec.pages, indices);
 		if (!pvec.nr) {
 			/* If all gone or hole-punch or unfalloc, we're done */
 			if (index == start || end != -1)
@@ -982,9 +979,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index >= end)
-				break;
-
 			if (xa_is_value(page)) {
 				if (unfalloc)
 					continue;
@@ -1046,7 +1046,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
 		pgoff_t start, unsigned nr_entries,
 		pgoff_t *indices)
 {
-	pvec->nr = find_get_entries(mapping, start, nr_entries,
+	pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
 			pvec->pages, indices);
 	return pagevec_count(pvec);
 }