Commit 54ed3fdd authored by Gao Xiang

erofs: record `pclustersize` in bytes instead of pages

Currently, compressed sizes are recorded in pages using `pclusterpages`;
however, for tailpacking pclusters, `tailpacking_size` is used instead.

This approach doesn't work when dealing with sub-page blocks. To address
this, let's switch them to the unified `pclustersize` in bytes.
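
For illustration only (not part of this patch), below is a minimal user-space
sketch of the byte-to-page conversion the change relies on. The 4KiB page size
and the standalone demo program are assumptions; PAGE_SHIFT/PAGE_ALIGN are
local stand-ins for the kernel macros of the same name:

  /* demo: how a byte-granular pclustersize maps back to compressed pages */
  #include <stdio.h>

  #define PAGE_SHIFT	12				/* assumed 4KiB pages */
  #define PAGE_SIZE	(1UL << PAGE_SHIFT)
  #define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

  /* number of compressed pages needed to back `pclustersize` bytes */
  static unsigned int demo_pclusterpages(unsigned long pclustersize)
  {
  	return PAGE_ALIGN(pclustersize) >> PAGE_SHIFT;
  }

  int main(void)
  {
  	/* a tailpacking pcluster smaller than one page still occupies one page */
  	printf("%u\n", demo_pclusterpages(937));	/* 1 */
  	/* a regular 16KiB pcluster maps back to 4 pages, as before */
  	printf("%u\n", demo_pclusterpages(16384));	/* 4 */
  	return 0;
  }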
Reviewed-by: Yue Hu <huyue2@coolpad.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20231206091057.87027-3-hsiangkao@linux.alibaba.com
parent 19235161
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -56,6 +56,9 @@ struct z_erofs_pcluster {
 	/* L: total number of bvecs */
 	unsigned int vcnt;
 
+	/* I: pcluster size (compressed size) in bytes */
+	unsigned int pclustersize;
+
 	/* I: page offset of start position of decompression */
 	unsigned short pageofs_out;
 
@@ -70,14 +73,6 @@ struct z_erofs_pcluster {
 		struct rcu_head rcu;
 	};
 
-	union {
-		/* I: physical cluster size in pages */
-		unsigned short pclusterpages;
-
-		/* I: tailpacking inline compressed size */
-		unsigned short tailpacking_size;
-	};
-
 	/* I: compression algorithm format */
 	unsigned char algorithmformat;
 
@@ -115,9 +110,7 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
 
 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
 {
-	if (z_erofs_is_inline_pcluster(pcl))
-		return 1;
-	return pcl->pclusterpages;
+	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
 }
 
 /*
@@ -298,12 +291,12 @@ static int z_erofs_create_pcluster_pool(void)
 	return 0;
 }
 
-static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
+static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
 {
-	int i;
+	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct z_erofs_pcluster_slab *pcs = pcluster_pool;
 
-	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
-		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
+	for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
 		struct z_erofs_pcluster *pcl;
 
 		if (nrpages > pcs->maxpages)
@@ -312,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
 		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
 		if (!pcl)
 			return ERR_PTR(-ENOMEM);
-		pcl->pclusterpages = nrpages;
+		pcl->pclustersize = size;
 		return pcl;
 	}
 	return ERR_PTR(-EINVAL);
@@ -559,6 +552,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 {
 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
 	struct z_erofs_pcluster *pcl = fe->pcl;
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool shouldalloc = z_erofs_should_alloc_cache(fe);
 	bool standalone = true;
 	/*
@@ -572,10 +566,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
 		return;
 
-	for (i = 0; i < pcl->pclusterpages; ++i) {
-		struct page *page;
+	for (i = 0; i < pclusterpages; ++i) {
+		struct page *page, *newpage;
 		void *t;	/* mark pages just found for debugging */
-		struct page *newpage = NULL;
 
 		/* the compressed page was loaded before */
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
@@ -585,6 +578,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 		if (page) {
 			t = (void *)((unsigned long)page | 1);
+			newpage = NULL;
 		} else {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
@@ -592,9 +586,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 				continue;
 
 			/*
-			 * try to use cached I/O if page allocation
-			 * succeeds or fallback to in-place I/O instead
-			 * to avoid any direct reclaim.
+			 * Try cached I/O if allocation succeeds or fallback to
+			 * in-place I/O instead to avoid any direct reclaim.
 			 */
 			newpage = erofs_allocpage(&fe->pagepool, gfp);
 			if (!newpage)
@@ -626,6 +619,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 {
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	int i;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
@@ -633,7 +627,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	 * refcount of workgroup is now freezed as 0,
 	 * therefore no need to worry about available decompression users.
 	 */
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		struct page *page = pcl->compressed_bvecs[i].page;
 
 		if (!page)
@@ -657,6 +651,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
 	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool ret;
 	int i;
 
@@ -669,7 +664,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 		goto out;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
 			ret = true;
@@ -778,20 +773,20 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
 
 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
 	struct erofs_map_blocks *map = &fe->map;
+	struct super_block *sb = fe->inode->i_sb;
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
 	struct z_erofs_pcluster *pcl;
 	struct erofs_workgroup *grp;
 	int err;
 
 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
-	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
+	    (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
 		DBG_BUGON(1);
 		return -EFSCORRUPTED;
 	}
 
 	/* no available pcluster, let's allocate one */
-	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
-				     map->m_plen >> PAGE_SHIFT);
+	pcl = z_erofs_alloc_pcluster(map->m_plen);
 	if (IS_ERR(pcl))
 		return PTR_ERR(pcl);
@@ -816,9 +811,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	if (ztailpacking) {
 		pcl->obj.index = 0;	/* which indicates ztailpacking */
 		pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
-		pcl->tailpacking_size = map->m_plen;
 	} else {
-		pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+		pcl->obj.index = erofs_blknr(sb, map->m_pa);
 
 		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
 		if (IS_ERR(grp)) {
@@ -1244,8 +1238,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	const struct z_erofs_decompressor *decompressor =
 				&erofs_decompressors[pcl->algorithmformat];
-	unsigned int i, inputsize;
-	int err2;
+	int i, err2;
 	struct page *page;
 	bool overlapped;
 
@@ -1282,18 +1275,13 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	if (err)
 		goto out;
 
-	if (z_erofs_is_inline_pcluster(pcl))
-		inputsize = pcl->tailpacking_size;
-	else
-		inputsize = pclusterpages * PAGE_SIZE;
-
 	err = decompressor->decompress(&(struct z_erofs_decompress_req) {
 					.sb = be->sb,
 					.in = be->compressed_pages,
 					.out = be->decompressed_pages,
 					.pageofs_in = pcl->pageofs_in,
 					.pageofs_out = pcl->pageofs_out,
-					.inputsize = inputsize,
+					.inputsize = pcl->pclustersize,
 					.outputsize = pcl->length,
 					.alg = pcl->algorithmformat,
 					.inplace_io = overlapped,
@@ -1668,7 +1656,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 		(void)erofs_map_dev(sb, &mdev);
 
 		cur = mdev.m_pa;
-		end = cur + (pcl->pclusterpages << PAGE_SHIFT);
+		end = cur + pcl->pclustersize;
 		do {
 			z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
 			if (!bvec.bv_page)