fs: Turn do_invalidatepage() into folio_invalidate()

Take a folio instead of a page, fix the types of the offset & length,
and export it to filesystems.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent 2e7e80f7
...@@ -1939,9 +1939,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, ...@@ -1939,9 +1939,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages); struct page **pages);
struct page *get_dump_page(unsigned long addr); struct page *get_dump_page(unsigned long addr);
extern void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
bool folio_mark_dirty(struct folio *folio); bool folio_mark_dirty(struct folio *folio);
bool set_page_dirty(struct page *page); bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page); int set_page_dirty_lock(struct page *page);
......
...@@ -893,6 +893,7 @@ static inline void cancel_dirty_page(struct page *page) ...@@ -893,6 +893,7 @@ static inline void cancel_dirty_page(struct page *page)
} }
bool folio_clear_dirty_for_io(struct folio *folio); bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page); bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
int __must_check folio_write_one(struct folio *folio); int __must_check folio_write_one(struct folio *folio);
static inline int __must_check write_one_page(struct page *page) static inline int __must_check write_one_page(struct page *page)
{ {
......
...@@ -51,7 +51,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping, ...@@ -51,7 +51,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
if (!trylock_page(page)) if (!trylock_page(page))
BUG(); BUG();
page->mapping = mapping; page->mapping = mapping;
do_invalidatepage(page, 0, PAGE_SIZE); folio_invalidate(page_folio(page), 0, PAGE_SIZE);
page->mapping = NULL; page->mapping = NULL;
unlock_page(page); unlock_page(page);
} }
......
...@@ -138,33 +138,33 @@ static int invalidate_exceptional_entry2(struct address_space *mapping, ...@@ -138,33 +138,33 @@ static int invalidate_exceptional_entry2(struct address_space *mapping,
} }
/** /**
* do_invalidatepage - invalidate part or all of a page * folio_invalidate - Invalidate part or all of a folio.
* @page: the page which is affected * @folio: The folio which is affected.
* @offset: start of the range to invalidate * @offset: start of the range to invalidate
* @length: length of the range to invalidate * @length: length of the range to invalidate
* *
* do_invalidatepage() is called when all or part of the page has become * folio_invalidate() is called when all or part of the folio has become
* invalidated by a truncate operation. * invalidated by a truncate operation.
* *
* do_invalidatepage() does not have to release all buffers, but it must * folio_invalidate() does not have to release all buffers, but it must
* ensure that no dirty buffer is left outside @offset and that no I/O * ensure that no dirty buffer is left outside @offset and that no I/O
* is underway against any of the blocks which are outside the truncation * is underway against any of the blocks which are outside the truncation
* point. Because the caller is about to free (and possibly reuse) those * point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk. * blocks on-disk.
*/ */
void do_invalidatepage(struct page *page, unsigned int offset, void folio_invalidate(struct folio *folio, size_t offset, size_t length)
unsigned int length)
{ {
void (*invalidatepage)(struct page *, unsigned int, unsigned int); void (*invalidatepage)(struct page *, unsigned int, unsigned int);
invalidatepage = page->mapping->a_ops->invalidatepage; invalidatepage = folio->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK #ifdef CONFIG_BLOCK
if (!invalidatepage) if (!invalidatepage)
invalidatepage = block_invalidatepage; invalidatepage = block_invalidatepage;
#endif #endif
if (invalidatepage) if (invalidatepage)
(*invalidatepage)(page, offset, length); (*invalidatepage)(&folio->page, offset, length);
} }
EXPORT_SYMBOL_GPL(folio_invalidate);
/* /*
* If truncate cannot remove the fs-private metadata from the page, the page * If truncate cannot remove the fs-private metadata from the page, the page
...@@ -182,7 +182,7 @@ static void truncate_cleanup_folio(struct folio *folio) ...@@ -182,7 +182,7 @@ static void truncate_cleanup_folio(struct folio *folio)
unmap_mapping_folio(folio); unmap_mapping_folio(folio);
if (folio_has_private(folio)) if (folio_has_private(folio))
do_invalidatepage(&folio->page, 0, folio_size(folio)); folio_invalidate(folio, 0, folio_size(folio));
/* /*
* Some filesystems seem to re-dirty the page even after * Some filesystems seem to re-dirty the page even after
...@@ -264,7 +264,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) ...@@ -264,7 +264,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
folio_zero_range(folio, offset, length); folio_zero_range(folio, offset, length);
if (folio_has_private(folio)) if (folio_has_private(folio))
do_invalidatepage(&folio->page, offset, length); folio_invalidate(folio, offset, length);
if (!folio_test_large(folio)) if (!folio_test_large(folio))
return true; return true;
if (split_huge_page(&folio->page) == 0) if (split_huge_page(&folio->page) == 0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment