Commit a16c2c48 authored by Goldwyn Rodrigues, committed by David Sterba

btrfs: convert relocate_one_page() to folios and rename

Convert page references to folios and call the respective folio
functions.  Since find_or_create_page() takes a gfp mask and its folio
counterpart filemap_grab_folio() does not, call __filemap_get_folio()
instead.
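
For illustration, the lookup pattern before and after the conversion,
condensed from the hunk below (the readahead call between lookup and
creation is omitted; mapping, index and mask stand for the function's
locals):

	/* Before: page-based lookup, NULL on failure. */
	page = find_lock_page(mapping, index);
	if (!page) {
		page = find_or_create_page(mapping, index, mask);
		if (!page)
			return -ENOMEM;
	}

	/*
	 * After: folio-based lookup, ERR_PTR() on failure.
	 * filemap_grab_folio() would also lock and create, but it takes
	 * no gfp mask, so __filemap_get_folio() is called with the flags
	 * that find_or_create_page() implies.
	 */
	folio = filemap_lock_folio(mapping, index);
	if (IS_ERR(folio)) {
		folio = __filemap_get_folio(mapping, index,
				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
		if (IS_ERR(folio))
			return PTR_ERR(folio);
	}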

The patch assumes the folio size is PAGE_SIZE; add a warning in case a
higher-order folio is encountered, as support for that will be
implemented in the future.
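
Since folio_order() is zero exactly when folio_size() equals PAGE_SIZE,
the guard is a single line (as in the hunk below):

	/* Only order-0 folios are expected until large folio support lands. */
	WARN_ON(folio_order(folio));
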
Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 8d6e5f9a
@@ -2850,7 +2850,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
 	 * btrfs_do_readpage() call of previously relocated file cluster.
 	 *
 	 * If the current cluster starts in the above range, btrfs_do_readpage()
-	 * will skip the read, and relocate_one_page() will later writeback
+	 * will skip the read, and relocate_one_folio() will later writeback
 	 * the padding zeros as new data, causing data corruption.
 	 *
 	 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
@@ -2984,68 +2984,71 @@ static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
 	return cluster->boundary[cluster_nr + 1] - 1;
 }
 
-static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
-			     const struct file_extent_cluster *cluster,
-			     int *cluster_nr, unsigned long page_index)
+static int relocate_one_folio(struct inode *inode, struct file_ra_state *ra,
+			      const struct file_extent_cluster *cluster,
+			      int *cluster_nr, unsigned long index)
 {
 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 	u64 offset = BTRFS_I(inode)->index_cnt;
 	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
-	struct page *page;
-	u64 page_start;
-	u64 page_end;
+	struct folio *folio;
+	u64 folio_start;
+	u64 folio_end;
 	u64 cur;
 	int ret;
 
-	ASSERT(page_index <= last_index);
-	page = find_lock_page(inode->i_mapping, page_index);
-	if (!page) {
+	ASSERT(index <= last_index);
+	folio = filemap_lock_folio(inode->i_mapping, index);
+	if (IS_ERR(folio)) {
 		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
-				page_index, last_index + 1 - page_index);
-		page = find_or_create_page(inode->i_mapping, page_index, mask);
-		if (!page)
-			return -ENOMEM;
+				index, last_index + 1 - index);
+		folio = __filemap_get_folio(inode->i_mapping, index,
+				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
 	}
 
-	if (PageReadahead(page))
+	WARN_ON(folio_order(folio));
+
+	if (folio_test_readahead(folio))
 		page_cache_async_readahead(inode->i_mapping, ra, NULL,
-				page_folio(page), page_index,
-				last_index + 1 - page_index);
+				folio, index,
+				last_index + 1 - index);
 
-	if (!PageUptodate(page)) {
-		btrfs_read_folio(NULL, page_folio(page));
-		lock_page(page);
-		if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
+		btrfs_read_folio(NULL, folio);
+		folio_lock(folio);
+		if (!folio_test_uptodate(folio)) {
 			ret = -EIO;
-			goto release_page;
+			goto release_folio;
 		}
 	}
 
 	/*
-	 * We could have lost page private when we dropped the lock to read the
-	 * page above, make sure we set_page_extent_mapped here so we have any
+	 * We could have lost folio private when we dropped the lock to read the
+	 * folio above, make sure we set_page_extent_mapped here so we have any
 	 * of the subpage blocksize stuff we need in place.
 	 */
-	ret = set_page_extent_mapped(page);
+	ret = set_folio_extent_mapped(folio);
 	if (ret < 0)
-		goto release_page;
+		goto release_folio;
 
-	page_start = page_offset(page);
-	page_end = page_start + PAGE_SIZE - 1;
+	folio_start = folio_pos(folio);
+	folio_end = folio_start + PAGE_SIZE - 1;
 
 	/*
 	 * Start from the cluster, as for subpage case, the cluster can start
-	 * inside the page.
+	 * inside the folio.
 	 */
-	cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
-	while (cur <= page_end) {
+	cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
+	while (cur <= folio_end) {
 		struct extent_state *cached_state = NULL;
 		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
 		u64 extent_end = get_cluster_boundary_end(cluster,
 						*cluster_nr) - offset;
-		u64 clamped_start = max(page_start, extent_start);
-		u64 clamped_end = min(page_end, extent_end);
+		u64 clamped_start = max(folio_start, extent_start);
+		u64 clamped_end = min(folio_end, extent_end);
 		u32 clamped_len = clamped_end + 1 - clamped_start;
 
 		/* Reserve metadata for this range */
@@ -3053,7 +3056,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 					clamped_len, clamped_len,
 					false);
 		if (ret)
-			goto release_page;
+			goto release_folio;
 
 		/* Mark the range delalloc and dirty for later writeback */
 		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
@@ -3069,20 +3072,18 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 						clamped_len, true);
 			btrfs_delalloc_release_extents(BTRFS_I(inode),
 						       clamped_len);
-			goto release_page;
+			goto release_folio;
 		}
-		btrfs_folio_set_dirty(fs_info, page_folio(page),
-				      clamped_start, clamped_len);
+		btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);
 
 		/*
-		 * Set the boundary if it's inside the page.
+		 * Set the boundary if it's inside the folio.
 		 * Data relocation requires the destination extents to have the
 		 * same size as the source.
 		 * EXTENT_BOUNDARY bit prevents current extent from being merged
 		 * with previous extent.
 		 */
-		if (in_range(cluster->boundary[*cluster_nr] - offset,
-			     page_start, PAGE_SIZE)) {
+		if (in_range(cluster->boundary[*cluster_nr] - offset, folio_start, PAGE_SIZE)) {
 			u64 boundary_start = cluster->boundary[*cluster_nr] -
 					     offset;
 			u64 boundary_end = boundary_start +
@@ -3105,8 +3106,8 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 			break;
 		}
 	}
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 
 	balance_dirty_pages_ratelimited(inode->i_mapping);
 	btrfs_throttle(fs_info);
@@ -3114,9 +3115,9 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 		ret = -ECANCELED;
 	return ret;
 
-release_page:
-	unlock_page(page);
-	put_page(page);
+release_folio:
+	folio_unlock(folio);
+	folio_put(folio);
 	return ret;
 }
 
@@ -3151,7 +3152,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
 	last_index = (cluster->end - offset) >> PAGE_SHIFT;
 	for (index = (cluster->start - offset) >> PAGE_SHIFT;
 	     index <= last_index && !ret; index++)
-		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
+		ret = relocate_one_folio(inode, ra, cluster, &cluster_nr, index);
 	if (ret == 0)
 		WARN_ON(cluster_nr != cluster->nr);
 out: