Commit 99b150d8 authored by Matthew Wilcox (Oracle), committed by Theodore Ts'o

ext4: convert bd_bitmap_page to bd_bitmap_folio

There is no need to make this a multi-page folio, so leave all the
infrastructure around it in pages.  But since we're locking it, playing
with its refcount and checking whether it's uptodate, it needs to move
to the folio API.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20240416172900.244637-2-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent da5704ee
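Note (not part of the commit): the conversion follows the usual pattern for moving page-cache users to folios. find_or_create_page() becomes __filemap_get_folio() with FGP_LOCK | FGP_ACCESSED | FGP_CREAT, failures come back as ERR_PTR() values rather than NULL, and the lock/refcount/uptodate operations use the folio_* helpers. The sketch below is illustrative only; the example_* names are made up and simply mirror what ext4_mb_get_buddy_page_lock() and ext4_mb_put_buddy_page_lock() do after this patch.

/* Illustrative sketch only, not part of this commit. */
#include <linux/pagemap.h>
#include <linux/err.h>

/*
 * Look up (or create) a block-bitmap folio, locked and marked accessed,
 * mirroring ext4_mb_get_buddy_page_lock() after this conversion.
 */
static struct folio *example_get_bitmap_folio(struct address_space *mapping,
					      pgoff_t index, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return folio;	/* errors are reported as ERR_PTR(), not NULL */

	/* The caller now holds the folio lock and a reference. */
	return folio;
}

/* Release mirrors ext4_mb_put_buddy_page_lock(): unlock, then drop the ref. */
static void example_put_bitmap_folio(struct folio *folio)
{
	folio_unlock(folio);
	folio_put(folio);
}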
fs/ext4/mballoc.c
@@ -1452,9 +1452,10 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
 int block, pnum, poff;
 int blocks_per_page;
 struct page *page;
+struct folio *folio;
 e4b->bd_buddy_page = NULL;
-e4b->bd_bitmap_page = NULL;
+e4b->bd_bitmap_folio = NULL;
 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
 /*
@@ -1465,12 +1466,13 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
 block = group * 2;
 pnum = block / blocks_per_page;
 poff = block % blocks_per_page;
-page = find_or_create_page(inode->i_mapping, pnum, gfp);
-if (!page)
-return -ENOMEM;
-BUG_ON(page->mapping != inode->i_mapping);
-e4b->bd_bitmap_page = page;
-e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
+folio = __filemap_get_folio(inode->i_mapping, pnum,
+FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+if (IS_ERR(folio))
+return PTR_ERR(folio);
+BUG_ON(folio->mapping != inode->i_mapping);
+e4b->bd_bitmap_folio = folio;
+e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
 if (blocks_per_page >= 2) {
 /* buddy and bitmap are on the same page */
@@ -1488,9 +1490,9 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
 {
-if (e4b->bd_bitmap_page) {
-unlock_page(e4b->bd_bitmap_page);
-put_page(e4b->bd_bitmap_page);
+if (e4b->bd_bitmap_folio) {
+folio_unlock(e4b->bd_bitmap_folio);
+folio_put(e4b->bd_bitmap_folio);
 }
 if (e4b->bd_buddy_page) {
 unlock_page(e4b->bd_buddy_page);
@@ -1510,6 +1512,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
 struct ext4_group_info *this_grp;
 struct ext4_buddy e4b;
 struct page *page;
+struct folio *folio;
 int ret = 0;
 might_sleep();
@@ -1536,11 +1539,11 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
 goto err;
 }
-page = e4b.bd_bitmap_page;
-ret = ext4_mb_init_cache(page, NULL, gfp);
+folio = e4b.bd_bitmap_folio;
+ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
 if (ret)
 goto err;
-if (!PageUptodate(page)) {
+if (!folio_test_uptodate(folio)) {
 ret = -EIO;
 goto err;
 }
@@ -1582,6 +1585,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 int pnum;
 int poff;
 struct page *page;
+struct folio *folio;
 int ret;
 struct ext4_group_info *grp;
 struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -1600,7 +1604,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 e4b->bd_sb = sb;
 e4b->bd_group = group;
 e4b->bd_buddy_page = NULL;
-e4b->bd_bitmap_page = NULL;
+e4b->bd_bitmap_folio = NULL;
 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
 /*
@@ -1621,53 +1625,53 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 pnum = block / blocks_per_page;
 poff = block % blocks_per_page;
-/* we could use find_or_create_page(), but it locks page
-* what we'd like to avoid in fast path ... */
-page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
-if (page == NULL || !PageUptodate(page)) {
-if (page)
+/* Avoid locking the folio in the fast path ... */
+folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+if (!IS_ERR(folio))
 /*
-* drop the page reference and try
-* to get the page with lock. If we
+* drop the folio reference and try
+* to get the folio with lock. If we
 * are not uptodate that implies
-* somebody just created the page but
-* is yet to initialize the same. So
+* somebody just created the folio but
+* is yet to initialize it. So
 * wait for it to initialize.
 */
-put_page(page);
-page = find_or_create_page(inode->i_mapping, pnum, gfp);
-if (page) {
-if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
-"ext4: bitmap's paging->mapping != inode->i_mapping\n")) {
+folio_put(folio);
+folio = __filemap_get_folio(inode->i_mapping, pnum,
+FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+if (!IS_ERR(folio)) {
+if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
+"ext4: bitmap's mapping != inode->i_mapping\n")) {
 /* should never happen */
-unlock_page(page);
+folio_unlock(folio);
 ret = -EINVAL;
 goto err;
 }
-if (!PageUptodate(page)) {
-ret = ext4_mb_init_cache(page, NULL, gfp);
+if (!folio_test_uptodate(folio)) {
+ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
 if (ret) {
-unlock_page(page);
+folio_unlock(folio);
 goto err;
 }
-mb_cmp_bitmaps(e4b, page_address(page) +
+mb_cmp_bitmaps(e4b, folio_address(folio) +
 (poff * sb->s_blocksize));
 }
-unlock_page(page);
+folio_unlock(folio);
 }
 }
-if (page == NULL) {
-ret = -ENOMEM;
+if (IS_ERR(folio)) {
+ret = PTR_ERR(folio);
 goto err;
 }
-if (!PageUptodate(page)) {
+if (!folio_test_uptodate(folio)) {
 ret = -EIO;
 goto err;
 }
 /* Pages marked accessed already */
-e4b->bd_bitmap_page = page;
-e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
+e4b->bd_bitmap_folio = folio;
+e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
 block++;
 pnum = block / blocks_per_page;
@@ -1715,8 +1719,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 err:
 if (page)
 put_page(page);
-if (e4b->bd_bitmap_page)
-put_page(e4b->bd_bitmap_page);
+if (e4b->bd_bitmap_folio)
+folio_put(e4b->bd_bitmap_folio);
 e4b->bd_buddy = NULL;
 e4b->bd_bitmap = NULL;
@@ -1731,8 +1735,8 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
 {
-if (e4b->bd_bitmap_page)
-put_page(e4b->bd_bitmap_page);
+if (e4b->bd_bitmap_folio)
+folio_put(e4b->bd_bitmap_folio);
 if (e4b->bd_buddy_page)
 put_page(e4b->bd_buddy_page);
 }
@@ -2157,7 +2161,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
 * double allocate blocks. The reference is dropped
 * in ext4_mb_release_context
 */
-ac->ac_bitmap_page = e4b->bd_bitmap_page;
+ac->ac_bitmap_page = &e4b->bd_bitmap_folio->page;
 get_page(ac->ac_bitmap_page);
 ac->ac_buddy_page = e4b->bd_buddy_page;
 get_page(ac->ac_buddy_page);
@@ -3894,7 +3898,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
 * balance refcounts from ext4_mb_free_metadata()
 */
 put_page(e4b.bd_buddy_page);
-put_page(e4b.bd_bitmap_page);
+folio_put(e4b.bd_bitmap_folio);
 }
 ext4_unlock_group(sb, entry->efd_group);
 ext4_mb_unload_buddy(&e4b);
@@ -6316,7 +6320,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
 struct rb_node *parent = NULL, *new_node;
 BUG_ON(!ext4_handle_valid(handle));
-BUG_ON(e4b->bd_bitmap_page == NULL);
+BUG_ON(e4b->bd_bitmap_folio == NULL);
 BUG_ON(e4b->bd_buddy_page == NULL);
 new_node = &new_entry->efd_node;
@@ -6329,7 +6333,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
 * on-disk bitmap and lose not-yet-available
 * blocks */
 get_page(e4b->bd_buddy_page);
-get_page(e4b->bd_bitmap_page);
+folio_get(e4b->bd_bitmap_folio);
 }
 while (*n) {
 parent = *n;
fs/ext4/mballoc.h
@@ -217,7 +217,7 @@ struct ext4_allocation_context {
 struct ext4_buddy {
 struct page *bd_buddy_page;
 void *bd_buddy;
-struct page *bd_bitmap_page;
+struct folio *bd_bitmap_folio;
 void *bd_bitmap;
 struct ext4_group_info *bd_info;
 struct super_block *bd_sb;
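Side note (hedged, not from the commit itself): helpers that still take a struct page *, such as ext4_mb_init_cache() at this point in the series, are bridged by passing &folio->page, while the operations that motivated the change map one-to-one onto folio helpers. A minimal illustration with a hypothetical caller:

/* Illustrative only; shows the page -> folio call mapping used above. */
#include <linux/pagemap.h>

static int example_check_and_bridge(struct folio *folio)
{
	/* PageUptodate(page) -> folio_test_uptodate(folio) */
	if (!folio_test_uptodate(folio))
		return -EIO;

	/* get_page(page)/put_page(page) -> folio_get(folio)/folio_put(folio) */
	folio_get(folio);
	folio_put(folio);

	/* page_address(page) -> folio_address(folio), and an unconverted
	 * helper can still be handed the head page:
	 * ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
	 */
	return 0;
}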