Commit 31d270fd authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm: add an 'end' parameter to pagevec_lookup_entries

This simplifies the callers and uses the existing functionality in
find_get_entries().  We can also drop the final argument of
truncate_exceptional_pvec_entries() and simplify the logic in that
function.
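
For illustration only (not part of this patch), a minimal sketch of a
hypothetical caller using the new signature; walk_range_entries() and its
body are assumptions, while the pagevec/pagemap calls are the existing API:

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Hypothetical example: visit every page cache entry in [start, end]. */
static void walk_range_entries(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned int i;

	pagevec_init(&pvec);
	/* The lookup itself stops at 'end', so nr_entries needs no clamping. */
	while (pagevec_lookup_entries(&pvec, mapping, index, end,
				      PAGEVEC_SIZE, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			/* indices[i] is already guaranteed to be <= end */
			index = indices[i];
			/* ... operate on pvec.pages[i] here ... */
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
		cond_resched();
	}
}

This mirrors the simplification in invalidate_inode_pages2_range() below,
where the 'index <= end' loop guard and the per-entry 'index > end' check
go away.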

Link: https://lkml.kernel.org/r/20201112212641.27837-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ca122fe4
@@ -26,9 +26,8 @@ struct pagevec {
 void __pagevec_release(struct pagevec *pvec);
 void __pagevec_lru_add(struct pagevec *pvec);
 unsigned pagevec_lookup_entries(struct pagevec *pvec,
-		struct address_space *mapping,
-		pgoff_t start, unsigned nr_entries,
-		pgoff_t *indices);
+		struct address_space *mapping, pgoff_t start, pgoff_t end,
+		unsigned nr_entries, pgoff_t *indices);
 void pagevec_remove_exceptionals(struct pagevec *pvec);
 unsigned pagevec_lookup_range(struct pagevec *pvec,
 		struct address_space *mapping,
...
@@ -1022,6 +1022,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
  * @pvec:	Where the resulting entries are placed
  * @mapping:	The address_space to search
  * @start:	The starting entry index
+ * @end:	The highest index to return (inclusive).
  * @nr_entries:	The maximum number of pages
  * @indices:	The cache indices corresponding to the entries in @pvec
  *
@@ -1042,11 +1043,10 @@ void __pagevec_lru_add(struct pagevec *pvec)
  * found.
  */
 unsigned pagevec_lookup_entries(struct pagevec *pvec,
-		struct address_space *mapping,
-		pgoff_t start, unsigned nr_entries,
-		pgoff_t *indices)
+		struct address_space *mapping, pgoff_t start, pgoff_t end,
+		unsigned nr_entries, pgoff_t *indices)
 {
-	pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
+	pvec->nr = find_get_entries(mapping, start, end, nr_entries,
 				    pvec->pages, indices);
 	return pagevec_count(pvec);
 }
...
@@ -57,11 +57,10 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
  * exceptional entries similar to what pagevec_remove_exceptionals does.
  */
 static void truncate_exceptional_pvec_entries(struct address_space *mapping,
-				struct pagevec *pvec, pgoff_t *indices,
-				pgoff_t end)
+				struct pagevec *pvec, pgoff_t *indices)
 {
 	int i, j;
-	bool dax, lock;
+	bool dax;
 
 	/* Handled by shmem itself */
 	if (shmem_mapping(mapping))
@@ -75,8 +74,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 		return;
 
 	dax = dax_mapping(mapping);
-	lock = !dax && indices[j] < end;
-	if (lock)
+	if (!dax)
 		xa_lock_irq(&mapping->i_pages);
 
 	for (i = j; i < pagevec_count(pvec); i++) {
@@ -88,9 +86,6 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 			continue;
 		}
 
-		if (index >= end)
-			continue;
-
 		if (unlikely(dax)) {
 			dax_delete_mapping_entry(mapping, index);
 			continue;
@@ -99,7 +94,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 		__clear_shadow_entry(mapping, index, page);
 	}
 
-	if (lock)
+	if (!dax)
 		xa_unlock_irq(&mapping->i_pages);
 	pvec->nr = j;
 }
@@ -329,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	while (index < end && find_lock_entries(mapping, index, end - 1,
 			&pvec, indices)) {
 		index = indices[pagevec_count(&pvec) - 1] + 1;
-		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
+		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
 		for (i = 0; i < pagevec_count(&pvec); i++)
 			truncate_cleanup_page(mapping, pvec.pages[i]);
 		delete_from_page_cache_batch(mapping, &pvec);
@@ -381,8 +376,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	index = start;
 	for ( ; ; ) {
 		cond_resched();
-		if (!pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+		if (!pagevec_lookup_entries(&pvec, mapping, index, end - 1,
+				PAGEVEC_SIZE, indices)) {
 			/* If all gone from start onwards, we're done */
 			if (index == start)
 				break;
@@ -390,23 +385,12 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			index = start;
 			continue;
 		}
-		if (index == start && indices[0] >= end) {
-			/* All gone out of hole to be punched, we're done */
-			pagevec_remove_exceptionals(&pvec);
-			pagevec_release(&pvec);
-			break;
-		}
 
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
-			if (index >= end) {
-				/* Restart punch to make sure all gone */
-				index = start - 1;
-				break;
-			}
 
 			if (xa_is_value(page))
 				continue;
@@ -417,7 +401,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			truncate_inode_page(mapping, page);
 			unlock_page(page);
 		}
-		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
+		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
 		pagevec_release(&pvec);
 		index++;
 	}
@@ -513,8 +497,6 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
-			if (index > end)
-				break;
 
 			if (xa_is_value(page)) {
 				invalidate_exceptional_entry(mapping, index,
@@ -656,16 +638,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
 	pagevec_init(&pvec);
 	index = start;
-	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
-			indices)) {
+	while (pagevec_lookup_entries(&pvec, mapping, index, end,
+			PAGEVEC_SIZE, indices)) {
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
-			if (index > end)
-				break;
 
 			if (xa_is_value(page)) {
 				if (!invalidate_exceptional_entry2(mapping,
...