Commit 5eea586b authored by Matthew Wilcox (Oracle), committed by Theodore Ts'o

ext4: convert bd_buddy_page to bd_buddy_folio

There is no need to make this a multi-page folio, so leave all the
infrastructure around it in pages.  But since we're locking it, playing
with its refcount and checking whether it's uptodate, it needs to move
to the folio API.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20240416172900.244637-3-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 99b150d8
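
The conversion applies the usual page-to-folio substitutions visible in the hunks below: find_or_create_page() becomes __filemap_get_folio() with FGP_LOCK | FGP_ACCESSED | FGP_CREAT (which reports failure as an ERR_PTR rather than NULL), PageUptodate() becomes folio_test_uptodate(), and unlock_page()/put_page() become folio_unlock()/folio_put(). A minimal sketch of that idiom, for orientation only — the helper name and its parameters are hypothetical, while the pagecache calls are the real API:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch, not ext4 code: look up (or create) a locked block in a cache
 * inode's page cache the way the hunks below do. */
static int lookup_cache_block(struct address_space *mapping, pgoff_t index,
			      gfp_t gfp)
{
	struct folio *folio;

	/* Replaces find_or_create_page(); failure is an ERR_PTR, not NULL */
	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* Replaces PageUptodate() */
	if (!folio_test_uptodate(folio)) {
		/* ... read or initialise the contents here ... */
	}

	/* Replaces unlock_page() + put_page() */
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}
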
@@ -1443,7 +1443,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
  * Lock the buddy and bitmap pages. This make sure other parallel init_group
  * on the same buddy page doesn't happen whild holding the buddy page lock.
  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
- * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
+ * are on the same page e4b->bd_buddy_folio is NULL and return value is 0.
  */
 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
 		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
@@ -1451,10 +1451,9 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
 	int block, pnum, poff;
 	int blocks_per_page;
-	struct page *page;
 	struct folio *folio;
-	e4b->bd_buddy_page = NULL;
+	e4b->bd_buddy_folio = NULL;
 	e4b->bd_bitmap_folio = NULL;
 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
@@ -1480,11 +1479,12 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
 	}
 	/* blocks_per_page == 1, hence we need another page for the buddy */
-	page = find_or_create_page(inode->i_mapping, block + 1, gfp);
-	if (!page)
-		return -ENOMEM;
-	BUG_ON(page->mapping != inode->i_mapping);
-	e4b->bd_buddy_page = page;
+	folio = __filemap_get_folio(inode->i_mapping, block + 1,
+			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
+	BUG_ON(folio->mapping != inode->i_mapping);
+	e4b->bd_buddy_folio = folio;
 	return 0;
 }
@@ -1494,9 +1494,9 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
 		folio_unlock(e4b->bd_bitmap_folio);
 		folio_put(e4b->bd_bitmap_folio);
 	}
-	if (e4b->bd_buddy_page) {
-		unlock_page(e4b->bd_buddy_page);
-		put_page(e4b->bd_buddy_page);
+	if (e4b->bd_buddy_folio) {
+		folio_unlock(e4b->bd_buddy_folio);
+		folio_put(e4b->bd_buddy_folio);
 	}
 }
@@ -1511,7 +1511,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
 	struct ext4_group_info *this_grp;
 	struct ext4_buddy e4b;
-	struct page *page;
 	struct folio *folio;
 	int ret = 0;
@@ -1548,7 +1547,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
 		goto err;
 	}
-	if (e4b.bd_buddy_page == NULL) {
+	if (e4b.bd_buddy_folio == NULL) {
 		/*
 		 * If both the bitmap and buddy are in
 		 * the same page we don't need to force
@@ -1558,11 +1557,11 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
 		goto err;
 	}
 	/* init buddy cache */
-	page = e4b.bd_buddy_page;
-	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
+	folio = e4b.bd_buddy_folio;
+	ret = ext4_mb_init_cache(&folio->page, e4b.bd_bitmap, gfp);
 	if (ret)
 		goto err;
-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		ret = -EIO;
 		goto err;
 	}
@@ -1584,7 +1583,6 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 	int block;
 	int pnum;
 	int poff;
-	struct page *page;
 	struct folio *folio;
 	int ret;
 	struct ext4_group_info *grp;
@@ -1603,7 +1601,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 	e4b->bd_info = grp;
 	e4b->bd_sb = sb;
 	e4b->bd_group = group;
-	e4b->bd_buddy_page = NULL;
+	e4b->bd_buddy_folio = NULL;
 	e4b->bd_bitmap_folio = NULL;
 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
@@ -1669,7 +1667,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 		goto err;
 	}
-	/* Pages marked accessed already */
+	/* Folios marked accessed already */
 	e4b->bd_bitmap_folio = folio;
 	e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
@@ -1677,48 +1675,49 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 	pnum = block / blocks_per_page;
 	poff = block % blocks_per_page;
-	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
-	if (page == NULL || !PageUptodate(page)) {
-		if (page)
-			put_page(page);
-		page = find_or_create_page(inode->i_mapping, pnum, gfp);
-		if (page) {
-			if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
-	"ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
+	folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+		if (!IS_ERR(folio))
+			folio_put(folio);
+		folio = __filemap_get_folio(inode->i_mapping, pnum,
+				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+		if (!IS_ERR(folio)) {
+			if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
+	"ext4: buddy bitmap's mapping != inode->i_mapping\n")) {
 				/* should never happen */
-				unlock_page(page);
+				folio_unlock(folio);
 				ret = -EINVAL;
 				goto err;
 			}
-			if (!PageUptodate(page)) {
-				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
+			if (!folio_test_uptodate(folio)) {
+				ret = ext4_mb_init_cache(&folio->page, e4b->bd_bitmap,
							 gfp);
 				if (ret) {
-					unlock_page(page);
+					folio_unlock(folio);
 					goto err;
 				}
 			}
-			unlock_page(page);
+			folio_unlock(folio);
 		}
 	}
-	if (page == NULL) {
-		ret = -ENOMEM;
+	if (IS_ERR(folio)) {
+		ret = PTR_ERR(folio);
 		goto err;
 	}
-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		ret = -EIO;
 		goto err;
 	}
-	/* Pages marked accessed already */
-	e4b->bd_buddy_page = page;
-	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
+	/* Folios marked accessed already */
+	e4b->bd_buddy_folio = folio;
+	e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize);
 	return 0;
 err:
-	if (page)
-		put_page(page);
+	if (folio)
+		folio_put(folio);
 	if (e4b->bd_bitmap_folio)
 		folio_put(e4b->bd_bitmap_folio);
@@ -1737,8 +1736,8 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
 {
 	if (e4b->bd_bitmap_folio)
 		folio_put(e4b->bd_bitmap_folio);
-	if (e4b->bd_buddy_page)
-		put_page(e4b->bd_buddy_page);
+	if (e4b->bd_buddy_folio)
+		folio_put(e4b->bd_buddy_folio);
 }
@@ -2163,7 +2162,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
 	 */
 	ac->ac_bitmap_page = &e4b->bd_bitmap_folio->page;
 	get_page(ac->ac_bitmap_page);
-	ac->ac_buddy_page = e4b->bd_buddy_page;
+	ac->ac_buddy_page = &e4b->bd_buddy_folio->page;
 	get_page(ac->ac_buddy_page);
 	/* store last allocated for subsequent stream allocation */
 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
@@ -3897,7 +3896,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
 		/* No more items in the per group rb tree
 		 * balance refcounts from ext4_mb_free_metadata()
 		 */
-		put_page(e4b.bd_buddy_page);
+		folio_put(e4b.bd_buddy_folio);
 		folio_put(e4b.bd_bitmap_folio);
 	}
 	ext4_unlock_group(sb, entry->efd_group);
@@ -6321,7 +6320,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
 	BUG_ON(!ext4_handle_valid(handle));
 	BUG_ON(e4b->bd_bitmap_folio == NULL);
-	BUG_ON(e4b->bd_buddy_page == NULL);
+	BUG_ON(e4b->bd_buddy_folio == NULL);
 	new_node = &new_entry->efd_node;
 	cluster = new_entry->efd_start_cluster;
@@ -6332,7 +6331,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
 		 * otherwise we'll refresh it from
 		 * on-disk bitmap and lose not-yet-available
 		 * blocks */
-		get_page(e4b->bd_buddy_page);
+		folio_get(e4b->bd_buddy_folio);
 		folio_get(e4b->bd_bitmap_folio);
 	}
 	while (*n) {
@@ -215,7 +215,7 @@ struct ext4_allocation_context {
 #define AC_STATUS_BREAK		3
 struct ext4_buddy {
-	struct page *bd_buddy_page;
+	struct folio *bd_buddy_folio;
 	void *bd_buddy;
 	struct folio *bd_bitmap_folio;
 	void *bd_bitmap;
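
Note that ext4_mb_init_cache() and ac_buddy_page still take a struct page, so the patch bridges from the folio with &folio->page (as in ext4_mb_init_cache(&folio->page, ...) and ac->ac_buddy_page = &e4b->bd_buddy_folio->page) rather than converting those users here. A sketch of that bridging pattern, under the same caveat — legacy_grab_page() is a hypothetical stand-in for a not-yet-converted interface:

#include <linux/mm.h>

/* Hypothetical interface that still wants a struct page */
static void legacy_grab_page(struct page *page)
{
	get_page(page);
	/* ... use the page ... */
	put_page(page);
}

static void use_buddy_folio(struct folio *folio)
{
	/*
	 * For an order-0 folio, &folio->page (equivalently folio_page(folio, 0))
	 * refers to the same memory, so page-based callees keep working while
	 * the conversion is in progress.
	 */
	legacy_grab_page(&folio->page);
}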