Commit 4a9622f2 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

buffer: convert page_zero_new_buffers() to folio_zero_new_buffers()

Most of the callers already have a folio; convert reiserfs_write_end() to
have a folio.  Removes a couple of hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20230612210141.730128-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8c6cb3e3
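To make the cost the commit message mentions concrete, here is a hedged sketch of where the hidden compound_head() calls came from; the helper names below are hypothetical and the expansions simplified, not code from this commit:

/*
 * Simplified illustration (not from this commit): page-based flag
 * tests such as PageLocked() must first resolve a possible tail page
 * to its head page; folio variants start from the head, so the
 * lookup disappears.
 */
static inline bool example_page_locked(struct page *page)
{
	/* PageLocked(page) expands to roughly this: */
	return test_bit(PG_locked, &compound_head(page)->flags);
}

static inline bool example_folio_locked(struct folio *folio)
{
	/* folio_test_locked(folio) needs no head-page lookup: */
	return test_bit(PG_locked, &folio->flags);
}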
fs/buffer.c
@@ -1927,33 +1927,34 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
 EXPORT_SYMBOL(__block_write_full_folio);
 
 /*
- * If a page has any new buffers, zero them out here, and mark them uptodate
+ * If a folio has any new buffers, zero them out here, and mark them uptodate
  * and dirty so they'll be written out (in order to prevent uninitialised
  * block data from leaking). And clear the new bit.
  */
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
 {
-	unsigned int block_start, block_end;
+	size_t block_start, block_end;
 	struct buffer_head *head, *bh;
 
-	BUG_ON(!PageLocked(page));
-	if (!page_has_buffers(page))
+	BUG_ON(!folio_test_locked(folio));
+	head = folio_buffers(folio);
+	if (!head)
 		return;
 
-	bh = head = page_buffers(page);
+	bh = head;
 	block_start = 0;
 	do {
 		block_end = block_start + bh->b_size;
 
 		if (buffer_new(bh)) {
 			if (block_end > from && block_start < to) {
-				if (!PageUptodate(page)) {
-					unsigned start, size;
+				if (!folio_test_uptodate(folio)) {
+					size_t start, xend;
 
 					start = max(from, block_start);
-					size = min(to, block_end) - start;
+					xend = min(to, block_end);
 
-					zero_user(page, start, size);
+					folio_zero_segment(folio, start, xend);
 					set_buffer_uptodate(bh);
 				}
 
@@ -1966,7 +1967,7 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
 		bh = bh->b_this_page;
 	} while (bh != head);
 }
-EXPORT_SYMBOL(page_zero_new_buffers);
+EXPORT_SYMBOL(folio_zero_new_buffers);
 
 static void
 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
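Note the subtle interface difference in the hunk above: zero_user() took a (start, length) pair, whereas folio_zero_segment() takes exclusive (start, end) offsets, which is why the local `size` is replaced by `xend`. A small worked example of the equivalence, with made-up offsets:

/* Worked example (values are made up): from = 100, to = 300,
 * block_start = 0, block_end = 512. */
size_t start = max_t(size_t, 100, 0);	/* = 100 */
size_t xend  = min_t(size_t, 300, 512);	/* = 300 */

/* Old call: length-based, zeroes 200 bytes at offset 100. */
zero_user(&folio->page, start, xend - start);

/* New call: end-based, zeroes the byte range [100, 300). */
folio_zero_segment(folio, start, xend);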
@@ -2104,7 +2105,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 			err = -EIO;
 	}
 	if (unlikely(err))
-		page_zero_new_buffers(&folio->page, from, to);
+		folio_zero_new_buffers(folio, from, to);
 	return err;
 }
@@ -2208,7 +2209,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
 		if (!folio_test_uptodate(folio))
 			copied = 0;
 
-		page_zero_new_buffers(&folio->page, start+copied, start+len);
+		folio_zero_new_buffers(folio, start+copied, start+len);
 	}
 
 	flush_dcache_folio(folio);
fs/ext4/inode.c
@@ -1093,7 +1093,7 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
 			err = -EIO;
 	}
 	if (unlikely(err)) {
-		page_zero_new_buffers(&folio->page, from, to);
+		folio_zero_new_buffers(folio, from, to);
 	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
 		for (i = 0; i < nr_wait; i++) {
 			int err2;
@@ -1339,7 +1339,7 @@ static int ext4_write_end(struct file *file,
 }
 
 /*
- * This is a private version of page_zero_new_buffers() which doesn't
+ * This is a private version of folio_zero_new_buffers() which doesn't
  * set the buffer to be dirty, since in data=journalled mode we need
  * to call ext4_dirty_journalled_data() instead.
  */
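The comment above points at why ext4 keeps a private variant: in data=journalled mode a zeroed buffer must be logged through the running journal handle rather than simply marked dirty. A rough, simplified sketch of the per-buffer step (illustration only; see ext4's actual helper for the real logic):

/* Sketch only: what the journalled variant does per new buffer,
 * instead of the set_buffer_dirty() that folio_zero_new_buffers()
 * relies on. */
folio_zero_range(folio, start, xend - start);
set_buffer_uptodate(bh);
ext4_dirty_journalled_data(handle, bh);	/* log via the journal */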
fs/reiserfs/inode.c
@@ -2872,6 +2872,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
 			    loff_t pos, unsigned len, unsigned copied,
 			    struct page *page, void *fsdata)
 {
+	struct folio *folio = page_folio(page);
 	struct inode *inode = page->mapping->host;
 	int ret = 0;
 	int update_sd = 0;
@@ -2887,12 +2888,12 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
 	start = pos & (PAGE_SIZE - 1);
 	if (unlikely(copied < len)) {
-		if (!PageUptodate(page))
+		if (!folio_test_uptodate(folio))
 			copied = 0;
 
-		page_zero_new_buffers(page, start + copied, start + len);
+		folio_zero_new_buffers(folio, start + copied, start + len);
 	}
 
-	flush_dcache_page(page);
+	flush_dcache_folio(folio);
 	reiserfs_commit_page(inode, page, start, start + copied);
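The reiserfs hunks show the interim pattern for a ->write_end() that still receives a struct page: convert once at the top with page_folio() and use folio operations from there on. A minimal sketch of that boundary conversion (the function below is hypothetical, not this commit's code):

static int example_write_end(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	/* page_folio() cannot fail; every page belongs to some folio. */
	struct folio *folio = page_folio(page);
	size_t start = offset_in_folio(folio, pos);

	if (unlikely(copied < len)) {
		if (!folio_test_uptodate(folio))
			copied = 0;
		/* Zero the tail of a short copy so uninitialised block
		 * data never reaches disk, as folio_zero_new_buffers()
		 * guarantees. */
		folio_zero_new_buffers(folio, start + copied, start + len);
	}
	flush_dcache_folio(folio);
	return copied;
}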
include/linux/buffer_head.h
@@ -278,7 +278,7 @@ int block_write_end(struct file *, struct address_space *,
 int generic_write_end(struct file *, struct address_space *,
 		loff_t, unsigned, unsigned,
 		struct page *, void *);
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
 void clean_page_buffers(struct page *page);
 int cont_write_begin(struct file *, struct address_space *, loff_t,
 		unsigned, struct page **, void **,
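One more detail visible in the new prototype: the byte offsets widen from unsigned int to size_t, the natural type for offsets measured within a whole folio once folios may span many pages. A hedged usage sketch (hypothetical caller, not from this commit):

/* Offsets are within the folio, not within one page, so size_t
 * is used rather than unsigned int. */
struct folio *folio = page_folio(page);
size_t from = offset_in_folio(folio, pos);

folio_zero_new_buffers(folio, from, from + len);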