Commit 0e497ad5 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pull-write-one-page' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs write_one_page removal from Al Viro:
 "write_one_page series"

* tag 'pull-write-one-page' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  mm,jfs: move write_one_page/folio_write_one to jfs
  ocfs2: don't use write_one_page in ocfs2_duplicate_clusters_by_page
  ufs: don't flush page immediately for DIRSYNC directories
parents ef36b9af 2d683175
...@@ -691,6 +691,35 @@ void grab_metapage(struct metapage * mp) ...@@ -691,6 +691,35 @@ void grab_metapage(struct metapage * mp)
unlock_page(mp->page); unlock_page(mp->page);
} }
/*
 * metapage_write_one() - synchronously write out a single locked metapage.
 *
 * jfs-local replacement for the removed generic write_one_page()/
 * folio_write_one(): identical logic, but calls metapage_writepage()
 * directly instead of going through mapping->a_ops->writepage.
 *
 * The page must be locked by the caller.  On return the page is unlocked:
 * either by metapage_writepage() as part of writeback, or explicitly here
 * when the folio was not dirty and no I/O was started.
 *
 * Returns 0 on success, negative error code otherwise.  Note that the
 * mapping's AS_EIO/AS_ENOSPC flags are cleared by filemap_check_errors().
 */
static int metapage_write_one(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio->mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,		/* data-integrity writeback */
		.nr_to_write = folio_nr_pages(folio),
	};
	int ret = 0;

	BUG_ON(!folio_test_locked(folio));

	/* Wait for any writeback already in flight before re-dirtying checks. */
	folio_wait_writeback(folio);

	if (folio_clear_dirty_for_io(folio)) {
		/* Hold a reference across I/O; writepage unlocks the folio. */
		folio_get(folio);
		ret = metapage_writepage(page, &wbc);
		if (ret == 0)
			folio_wait_writeback(folio);
		folio_put(folio);
	} else {
		/* Nothing to write: drop the lock ourselves. */
		folio_unlock(folio);
	}

	if (!ret)
		/* Report (and clear) any async writeback error on the mapping. */
		ret = filemap_check_errors(mapping);
	return ret;
}
void force_metapage(struct metapage *mp) void force_metapage(struct metapage *mp)
{ {
struct page *page = mp->page; struct page *page = mp->page;
...@@ -700,8 +729,8 @@ void force_metapage(struct metapage *mp) ...@@ -700,8 +729,8 @@ void force_metapage(struct metapage *mp)
get_page(page); get_page(page);
lock_page(page); lock_page(page);
set_page_dirty(page); set_page_dirty(page);
if (write_one_page(page)) if (metapage_write_one(page))
jfs_error(mp->sb, "write_one_page() failed\n"); jfs_error(mp->sb, "metapage_write_one() failed\n");
clear_bit(META_forcewrite, &mp->flag); clear_bit(META_forcewrite, &mp->flag);
put_page(page); put_page(page);
} }
...@@ -746,9 +775,9 @@ void release_metapage(struct metapage * mp) ...@@ -746,9 +775,9 @@ void release_metapage(struct metapage * mp)
set_page_dirty(page); set_page_dirty(page);
if (test_bit(META_sync, &mp->flag)) { if (test_bit(META_sync, &mp->flag)) {
clear_bit(META_sync, &mp->flag); clear_bit(META_sync, &mp->flag);
if (write_one_page(page)) if (metapage_write_one(page))
jfs_error(mp->sb, "write_one_page() failed\n"); jfs_error(mp->sb, "metapage_write_one() failed\n");
lock_page(page); /* write_one_page unlocks the page */ lock_page(page);
} }
} else if (mp->lsn) /* discard_metapage doesn't remove it */ } else if (mp->lsn) /* discard_metapage doesn't remove it */
remove_from_logsync(mp); remove_from_logsync(mp);
......
...@@ -2952,10 +2952,11 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, ...@@ -2952,10 +2952,11 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
*/ */
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) { if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
if (PageDirty(page)) { if (PageDirty(page)) {
/* unlock_page(page);
* write_on_page will unlock the page on return put_page(page);
*/
ret = write_one_page(page); ret = filemap_write_and_wait_range(mapping,
offset, map_end - 1);
goto retry; goto retry;
} }
} }
......
...@@ -42,11 +42,10 @@ static inline int ufs_match(struct super_block *sb, int len, ...@@ -42,11 +42,10 @@ static inline int ufs_match(struct super_block *sb, int len,
return !memcmp(name, de->d_name, len); return !memcmp(name, de->d_name, len);
} }
static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
{ {
struct address_space *mapping = page->mapping; struct address_space *mapping = page->mapping;
struct inode *dir = mapping->host; struct inode *dir = mapping->host;
int err = 0;
inode_inc_iversion(dir); inode_inc_iversion(dir);
block_write_end(NULL, mapping, pos, len, len, page, NULL); block_write_end(NULL, mapping, pos, len, len, page, NULL);
...@@ -54,10 +53,16 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) ...@@ -54,10 +53,16 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
i_size_write(dir, pos+len); i_size_write(dir, pos+len);
mark_inode_dirty(dir); mark_inode_dirty(dir);
} }
if (IS_DIRSYNC(dir)) unlock_page(page);
err = write_one_page(page); }
else
unlock_page(page); static int ufs_handle_dirsync(struct inode *dir)
{
int err;
err = filemap_write_and_wait(dir->i_mapping);
if (!err)
err = sync_inode_metadata(dir, 1);
return err; return err;
} }
...@@ -99,11 +104,12 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, ...@@ -99,11 +104,12 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino); de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
ufs_set_de_type(dir->i_sb, de, inode->i_mode); ufs_set_de_type(dir->i_sb, de, inode->i_mode);
err = ufs_commit_chunk(page, pos, len); ufs_commit_chunk(page, pos, len);
ufs_put_page(page); ufs_put_page(page);
if (update_times) if (update_times)
dir->i_mtime = dir->i_ctime = current_time(dir); dir->i_mtime = dir->i_ctime = current_time(dir);
mark_inode_dirty(dir); mark_inode_dirty(dir);
ufs_handle_dirsync(dir);
} }
...@@ -390,10 +396,11 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode) ...@@ -390,10 +396,11 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
de->d_ino = cpu_to_fs32(sb, inode->i_ino); de->d_ino = cpu_to_fs32(sb, inode->i_ino);
ufs_set_de_type(sb, de, inode->i_mode); ufs_set_de_type(sb, de, inode->i_mode);
err = ufs_commit_chunk(page, pos, rec_len); ufs_commit_chunk(page, pos, rec_len);
dir->i_mtime = dir->i_ctime = current_time(dir); dir->i_mtime = dir->i_ctime = current_time(dir);
mark_inode_dirty(dir); mark_inode_dirty(dir);
err = ufs_handle_dirsync(dir);
/* OFFSET_CACHE */ /* OFFSET_CACHE */
out_put: out_put:
ufs_put_page(page); ufs_put_page(page);
...@@ -531,9 +538,10 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir, ...@@ -531,9 +538,10 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
if (pde) if (pde)
pde->d_reclen = cpu_to_fs16(sb, to - from); pde->d_reclen = cpu_to_fs16(sb, to - from);
dir->d_ino = 0; dir->d_ino = 0;
err = ufs_commit_chunk(page, pos, to - from); ufs_commit_chunk(page, pos, to - from);
inode->i_ctime = inode->i_mtime = current_time(inode); inode->i_ctime = inode->i_mtime = current_time(inode);
mark_inode_dirty(inode); mark_inode_dirty(inode);
err = ufs_handle_dirsync(inode);
out: out:
ufs_put_page(page); ufs_put_page(page);
UFSD("EXIT\n"); UFSD("EXIT\n");
...@@ -579,7 +587,8 @@ int ufs_make_empty(struct inode * inode, struct inode *dir) ...@@ -579,7 +587,8 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
strcpy (de->d_name, ".."); strcpy (de->d_name, "..");
kunmap(page); kunmap(page);
err = ufs_commit_chunk(page, 0, chunk_size); ufs_commit_chunk(page, 0, chunk_size);
err = ufs_handle_dirsync(inode);
fail: fail:
put_page(page); put_page(page);
return err; return err;
......
...@@ -1066,12 +1066,6 @@ static inline void folio_cancel_dirty(struct folio *folio) ...@@ -1066,12 +1066,6 @@ static inline void folio_cancel_dirty(struct folio *folio)
bool folio_clear_dirty_for_io(struct folio *folio); bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page); bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length); void folio_invalidate(struct folio *folio, size_t offset, size_t length);
int __must_check folio_write_one(struct folio *folio);
static inline int __must_check write_one_page(struct page *page)
{
return folio_write_one(page_folio(page));
}
int __set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_nobuffers(struct page *page);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
......
...@@ -2583,46 +2583,6 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc) ...@@ -2583,46 +2583,6 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
return ret; return ret;
} }
/**
 * folio_write_one - write out a single folio and wait on I/O.
 * @folio: The folio to write.
 *
 * The folio must be locked by the caller and will be unlocked upon return.
 *
 * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
 * function returns.
 *
 * Return: %0 on success, negative error code otherwise
 */
int folio_write_one(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,		/* data-integrity writeback */
		.nr_to_write = folio_nr_pages(folio),
	};

	BUG_ON(!folio_test_locked(folio));

	/* Drain any in-flight writeback before deciding whether to write. */
	folio_wait_writeback(folio);

	if (folio_clear_dirty_for_io(folio)) {
		/* Pin the folio across I/O; ->writepage unlocks it. */
		folio_get(folio);
		ret = mapping->a_ops->writepage(&folio->page, &wbc);
		if (ret == 0)
			folio_wait_writeback(folio);
		folio_put(folio);
	} else {
		/* Folio was clean: no I/O issued, unlock it here. */
		folio_unlock(folio);
	}

	if (!ret)
		/* Pick up (and clear) any deferred writeback error. */
		ret = filemap_check_errors(mapping);
	return ret;
}
EXPORT_SYMBOL(folio_write_one);
/* /*
* For address_spaces which do not use buffers nor write back. * For address_spaces which do not use buffers nor write back.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment