Commit e2d73c30 authored by Linus Torvalds

Merge tag 'erofs-for-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "No major kernel updates for this round since I'm fully diving into
  LZMA algorithm internals now to provide high CR XZ algorihm support.
  That needs more work and time for me to get a better compression time.

  Summary:

   - Introduce superblock checksum support

   - Set iowait when waiting I/O for sync decompression path

   - Several code cleanups"

* tag 'erofs-for-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: remove unnecessary output in erofs_show_options()
  erofs: drop all vle annotations for runtime names
  erofs: support superblock checksum
  erofs: set iowait for sync decompression
  erofs: clean up decompress queue stuffs
  erofs: get rid of __stagingpage_alloc helper
  erofs: remove dead code since managed cache is now built-in
  erofs: clean up collection handling routines
parents 21b26d26 3dcb5fa2
@@ -3,6 +3,7 @@
 config EROFS_FS
         tristate "EROFS filesystem support"
         depends on BLOCK
+        select LIBCRC32C
         help
           EROFS (Enhanced Read-Only File System) is a lightweight
           read-only file system with modern designs (eg. page-sized
...
@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
                         victim = availables[--top];
                         get_page(victim);
                 } else {
-                        victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+                        victim = erofs_allocpage(pagepool, GFP_KERNEL);
                         if (!victim)
                                 return -ENOMEM;
                         victim->mapping = Z_EROFS_MAPPING_STAGING;
...
@@ -11,6 +11,8 @@
 
 #define EROFS_SUPER_OFFSET      1024
 
+#define EROFS_FEATURE_COMPAT_SB_CHKSUM  0x00000001
+
 /*
  * Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
  * be incompatible with this kernel version.
@@ -37,7 +39,6 @@ struct erofs_super_block {
         __u8 uuid[16];          /* 128-bit uuid for volume */
         __u8 volume_name[16];   /* volume name */
         __le32 feature_incompat;
-
         __u8 reserved2[44];
 };
...
@@ -85,6 +85,7 @@ struct erofs_sb_info {
         u8 uuid[16];                    /* 128-bit uuid for volume */
         u8 volume_name[16];             /* volume name */
+        u32 feature_compat;
         u32 feature_incompat;
 
         unsigned int mount_opt;
@@ -278,9 +279,7 @@ static inline unsigned int erofs_inode_datalayout(unsigned int value)
 extern const struct super_operations erofs_sops;
 
 extern const struct address_space_operations erofs_raw_access_aops;
-#ifdef CONFIG_EROFS_FS_ZIP
-extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
-#endif
+extern const struct address_space_operations z_erofs_aops;
 
 /*
  * Logical to physical block mapping, used by erofs_map_blocks()
@@ -382,7 +381,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 extern const struct file_operations erofs_dir_fops;
 
 /* utils.c / zdata.c */
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
 
 #if (EROFS_PCPUBUF_NR_PAGES > 0)
 void *erofs_get_pcpubuf(unsigned int pagenr);
...
@@ -9,6 +9,7 @@
 #include <linux/statfs.h>
 #include <linux/parser.h>
 #include <linux/seq_file.h>
+#include <linux/crc32c.h>
 #include "xattr.h"
 
 #define CREATE_TRACE_POINTS
@@ -46,6 +47,30 @@ void _erofs_info(struct super_block *sb, const char *function,
         va_end(args);
 }
 
+static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
+{
+        struct erofs_super_block *dsb;
+        u32 expected_crc, crc;
+
+        dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
+                      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
+        if (!dsb)
+                return -ENOMEM;
+
+        expected_crc = le32_to_cpu(dsb->checksum);
+        dsb->checksum = 0;
+        /* to allow for x86 boot sectors and other oddities. */
+        crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
+        kfree(dsb);
+
+        if (crc != expected_crc) {
+                erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
+                          crc, expected_crc);
+                return -EBADMSG;
+        }
+        return 0;
+}
+
 static void erofs_inode_init_once(void *ptr)
 {
         struct erofs_inode *vi = ptr;
@@ -112,7 +137,7 @@ static int erofs_read_superblock(struct super_block *sb)
         sbi = EROFS_SB(sb);
 
-        data = kmap_atomic(page);
+        data = kmap(page);
         dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
 
         ret = -EINVAL;
@@ -121,6 +146,13 @@ static int erofs_read_superblock(struct super_block *sb)
                 goto out;
         }
 
+        sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
+        if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
+                ret = erofs_superblock_csum_verify(sb, data);
+                if (ret)
+                        goto out;
+        }
+
         blkszbits = dsb->blkszbits;
         /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
         if (blkszbits != LOG_BLOCK_SIZE) {
@@ -155,7 +187,7 @@ static int erofs_read_superblock(struct super_block *sb)
         }
         ret = 0;
 out:
-        kunmap_atomic(data);
+        kunmap(page);
         put_page(page);
         return ret;
 }
@@ -566,9 +598,6 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
                 seq_puts(seq, ",cache_strategy=readahead");
         } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
                 seq_puts(seq, ",cache_strategy=readaround");
-        } else {
-                seq_puts(seq, ",cache_strategy=(unknown)");
-                DBG_BUGON(1);
         }
 #endif
         return 0;
...
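
A note on the checksum verified above: it is a CRC32C over the EROFS_BLKSIZ - EROFS_SUPER_OFFSET bytes starting at byte offset 1024 of the image, seeded with ~0 and computed with the on-disk checksum field treated as zero, exactly as erofs_superblock_csum_verify() does. The following is a minimal userspace sketch of the same calculation (illustration only, not code from this series; the helper names are made up, the 4 KiB block size is the usual page-sized default, and the byte-4 offset of the checksum field follows struct erofs_super_block quoted earlier):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define EROFS_BLKSIZ            4096
#define EROFS_SUPER_OFFSET      1024

/* bitwise CRC32C (Castagnoli, reflected poly 0x82F63B78); the seed is
 * supplied by the caller and there is no final inversion, mirroring how
 * crc32c(~0, ...) is used in the hunk above */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
        }
        return crc;
}

/* blk: the first EROFS_BLKSIZ bytes of an erofs image */
static uint32_t erofs_sb_checksum(const uint8_t *blk)
{
        uint8_t sb[EROFS_BLKSIZ - EROFS_SUPER_OFFSET];

        memcpy(sb, blk + EROFS_SUPER_OFFSET, sizeof(sb));
        memset(sb + 4, 0, sizeof(uint32_t));    /* checksum field sits at byte 4 */
        return crc32c(~0U, sb, sizeof(sb));
}

Image-writing tooling would presumably store the resulting value in dsb->checksum and advertise it via EROFS_FEATURE_COMPAT_SB_CHKSUM; the kernel only performs the verification when that compat bit is set, as the erofs_read_superblock() hunk above shows.
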
@@ -7,7 +7,7 @@
 #include "internal.h"
 #include <linux/pagevec.h>
 
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
 {
         struct page *page;
 
@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
                 DBG_BUGON(page_ref_count(page) != 1);
                 list_del(&page->lru);
         } else {
-                page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
+                page = alloc_page(gfp);
         }
         return page;
 }
@@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
 }
 
 static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
-                                           struct erofs_workgroup *grp,
-                                           bool cleanup)
+                                           struct erofs_workgroup *grp)
 {
         /*
          * If managed cache is on, refcount of workgroups
@@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 }
 
 static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
-                                              unsigned long nr_shrink,
-                                              bool cleanup)
+                                              unsigned long nr_shrink)
 {
         pgoff_t first_index = 0;
         void *batch[PAGEVEC_SIZE];
@@ -208,7 +206,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
                 first_index = grp->index + 1;
 
                 /* try to shrink each valid workgroup */
-                if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
+                if (!erofs_try_to_release_workgroup(sbi, grp))
                         continue;
 
                 ++freed;
@@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
         struct erofs_sb_info *const sbi = EROFS_SB(sb);
 
         mutex_lock(&sbi->umount_mutex);
-        erofs_shrink_workstation(sbi, ~0UL, true);
+        /* clean up all remaining workgroups in memory */
+        erofs_shrink_workstation(sbi, ~0UL);
 
         spin_lock(&erofs_sb_list_lock);
         list_del(&sbi->list);
@@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
                 spin_unlock(&erofs_sb_list_lock);
                 sbi->shrinker_run_no = run_no;
 
-                freed += erofs_shrink_workstation(sbi, nr, false);
+                freed += erofs_shrink_workstation(sbi, nr);
 
                 spin_lock(&erofs_sb_list_lock);
                 /* Get the next list element before we move this one */
...
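
With the nofail flag gone from erofs_allocpage() above, the failure policy now lives entirely in the gfp mask each caller passes: plain GFP_KERNEL where -ENOMEM can be handled (as in the decompressor.c hunk earlier), or an explicit __GFP_NOFAIL where it cannot. A hypothetical call-site helper (illustration only, not part of this series) spells out the pattern that the zdata.c hunks below now open-code at the former __stagingpage_alloc() users:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

/* hypothetical wrapper, not from this series: a short-lived staging page
 * that must not fail simply folds __GFP_NOFAIL into the gfp mask */
static struct page *alloc_staging_page_nofail(struct list_head *pagepool)
{
        struct page *page = erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);

        /* tag it so the decompressor treats it as a temporary page */
        page->mapping = Z_EROFS_MAPPING_STAGING;
        return page;
}
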
@@ -337,7 +337,7 @@ try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
         return COLLECT_PRIMARY; /* :( better luck next time */
 }
 
-static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
+static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
                                            struct inode *inode,
                                            struct erofs_map_blocks *map)
 {
@@ -349,20 +349,20 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
         grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
         if (!grp)
-                return NULL;
+                return -ENOENT;
 
         pcl = container_of(grp, struct z_erofs_pcluster, obj);
         if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
                 DBG_BUGON(1);
                 erofs_workgroup_put(grp);
-                return ERR_PTR(-EFSCORRUPTED);
+                return -EFSCORRUPTED;
         }
 
         cl = z_erofs_primarycollection(pcl);
         if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
                 DBG_BUGON(1);
                 erofs_workgroup_put(grp);
-                return ERR_PTR(-EFSCORRUPTED);
+                return -EFSCORRUPTED;
         }
 
         length = READ_ONCE(pcl->length);
@@ -370,7 +370,7 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
                 if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
                         DBG_BUGON(1);
                         erofs_workgroup_put(grp);
-                        return ERR_PTR(-EFSCORRUPTED);
+                        return -EFSCORRUPTED;
                 }
         } else {
                 unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
@@ -394,10 +394,10 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
                 clt->tailpcl = NULL;
         clt->pcl = pcl;
         clt->cl = cl;
-        return cl;
+        return 0;
 }
 
-static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
+static int z_erofs_register_collection(struct z_erofs_collector *clt,
                                              struct inode *inode,
                                              struct erofs_map_blocks *map)
 {
@@ -408,7 +408,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
         /* no available workgroup, let's allocate one */
         pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
         if (!pcl)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
 
         z_erofs_pcluster_init_always(pcl);
         pcl->obj.index = map->m_pa >> PAGE_SHIFT;
@@ -442,7 +442,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
         if (err) {
                 mutex_unlock(&cl->lock);
                 kmem_cache_free(pcluster_cachep, pcl);
-                return ERR_PTR(-EAGAIN);
+                return -EAGAIN;
         }
         /* used to check tail merging loop due to corrupted images */
         if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
@@ -450,14 +450,14 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
         clt->owned_head = &pcl->next;
         clt->pcl = pcl;
         clt->cl = cl;
-        return cl;
+        return 0;
 }
 
 static int z_erofs_collector_begin(struct z_erofs_collector *clt,
                                    struct inode *inode,
                                    struct erofs_map_blocks *map)
 {
-        struct z_erofs_collection *cl;
+        int ret;
 
         DBG_BUGON(clt->cl);
@@ -471,19 +471,22 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
         }
 
 repeat:
-        cl = cllookup(clt, inode, map);
-        if (!cl) {
-                cl = clregister(clt, inode, map);
+        ret = z_erofs_lookup_collection(clt, inode, map);
+        if (ret == -ENOENT) {
+                ret = z_erofs_register_collection(clt, inode, map);
 
-                if (cl == ERR_PTR(-EAGAIN))
+                /* someone registered at the same time, give another try */
+                if (ret == -EAGAIN) {
+                        cond_resched();
                         goto repeat;
+                }
         }
 
-        if (IS_ERR(cl))
-                return PTR_ERR(cl);
+        if (ret)
+                return ret;
 
         z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
-                                  cl->pagevec, cl->vcnt);
+                                  clt->cl->pagevec, clt->cl->vcnt);
 
         clt->compressedpages = clt->pcl->compressed_pages;
         if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
@@ -543,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt)
         return true;
 }
 
-static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
-                                               gfp_t gfp)
-{
-        struct page *page = erofs_allocpage(pagepool, gfp, true);
-
-        page->mapping = Z_EROFS_MAPPING_STAGING;
-        return page;
-}
-
 static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
                                        unsigned int cachestrategy,
                                        erofs_off_t la)
@@ -571,7 +565,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
                                 struct list_head *pagepool)
 {
         struct inode *const inode = fe->inode;
-        struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
+        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
         struct erofs_map_blocks *const map = &fe->map;
         struct z_erofs_collector *const clt = &fe->clt;
         const loff_t offset = page_offset(page);
@@ -658,8 +652,9 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
                 /* should allocate an additional staging page for pagevec */
                 if (err == -EAGAIN) {
                         struct page *const newpage =
-                                __stagingpage_alloc(pagepool, GFP_NOFS);
+                                erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
 
+                        newpage->mapping = Z_EROFS_MAPPING_STAGING;
                         err = z_erofs_attach_page(clt, newpage,
                                                   Z_EROFS_PAGE_TYPE_EXCLUSIVE);
                         if (!err)
@@ -698,13 +693,11 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
         goto out;
 }
 
-static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+                                       bool sync, int bios)
 {
-        tagptr1_t t = tagptr_init(tagptr1_t, ptr);
-        struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
-        bool background = tagptr_unfold_tags(t);
-
-        if (!background) {
+        /* wake up the caller thread for sync decompression */
+        if (sync) {
                 unsigned long flags;
 
                 spin_lock_irqsave(&io->u.wait.lock, flags);
@@ -718,37 +711,30 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
         queue_work(z_erofs_workqueue, &io->u.work);
 }
 
-static inline void z_erofs_vle_read_endio(struct bio *bio)
+static void z_erofs_decompressqueue_endio(struct bio *bio)
 {
-        struct erofs_sb_info *sbi = NULL;
+        tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+        struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
         blk_status_t err = bio->bi_status;
         struct bio_vec *bvec;
         struct bvec_iter_all iter_all;
 
         bio_for_each_segment_all(bvec, bio, iter_all) {
                 struct page *page = bvec->bv_page;
-                bool cachemngd = false;
 
                 DBG_BUGON(PageUptodate(page));
                 DBG_BUGON(!page->mapping);
 
-                if (!sbi && !z_erofs_page_is_staging(page))
-                        sbi = EROFS_SB(page->mapping->host->i_sb);
-
-                /* sbi should already be gotten if the page is managed */
-                if (sbi)
-                        cachemngd = erofs_page_is_managed(sbi, page);
-
                 if (err)
                         SetPageError(page);
-                else if (cachemngd)
-                        SetPageUptodate(page);
 
-                if (cachemngd)
+                if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+                        if (!err)
+                                SetPageUptodate(page);
                         unlock_page(page);
+                }
         }
-
-        z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
+        z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
         bio_put(bio);
 }
@@ -953,8 +939,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
         return err;
 }
 
-static void z_erofs_vle_unzip_all(struct super_block *sb,
-                                  struct z_erofs_unzip_io *io,
+static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                                      struct list_head *pagepool)
 {
         z_erofs_next_pcluster_t owned = io->head;
@@ -971,21 +956,21 @@ static void z_erofs_vle_unzip_all(struct super_block *sb,
                 pcl = container_of(owned, struct z_erofs_pcluster, next);
                 owned = READ_ONCE(pcl->next);
 
-                z_erofs_decompress_pcluster(sb, pcl, pagepool);
+                z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
         }
 }
 
-static void z_erofs_vle_unzip_wq(struct work_struct *work)
+static void z_erofs_decompressqueue_work(struct work_struct *work)
 {
-        struct z_erofs_unzip_io_sb *iosb =
-                container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
+        struct z_erofs_decompressqueue *bgq =
+                container_of(work, struct z_erofs_decompressqueue, u.work);
         LIST_HEAD(pagepool);
 
-        DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
-        z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
+        DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+        z_erofs_decompress_queue(bgq, &pagepool);
 
         put_pages_list(&pagepool);
-        kvfree(iosb);
+        kvfree(bgq);
 }
 
 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
@@ -994,8 +979,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
                                                struct address_space *mc,
                                                gfp_t gfp)
 {
-        /* determined at compile time to avoid too many #ifdefs */
-        const bool nocache = __builtin_constant_p(mc) ? !mc : false;
         const pgoff_t index = pcl->obj.index;
         bool tocache = false;
@@ -1016,7 +999,7 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
          * the cached page has not been allocated and
          * an placeholder is out there, prepare it now.
          */
-        if (!nocache && page == PAGE_UNALLOCATED) {
+        if (page == PAGE_UNALLOCATED) {
                 tocache = true;
                 goto out_allocpage;
         }
@@ -1028,21 +1011,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 
         mapping = READ_ONCE(page->mapping);
 
-        /*
-         * if managed cache is disabled, it's no way to
-         * get such a cached-like page.
-         */
-        if (nocache) {
-                /* if managed cache is disabled, it is impossible `justfound' */
-                DBG_BUGON(justfound);
-
-                /* and it should be locked, not uptodate, and not truncated */
-                DBG_BUGON(!PageLocked(page));
-                DBG_BUGON(PageUptodate(page));
-                DBG_BUGON(!mapping);
-                goto out;
-        }
-
         /*
          * unmanaged (file) pages are all locked solidly,
          * therefore it is impossible for `mapping' to be NULL.
@@ -1093,50 +1061,52 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
         unlock_page(page);
         put_page(page);
 out_allocpage:
-        page = __stagingpage_alloc(pagepool, gfp);
-        if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
-                list_add(&page->lru, pagepool);
-                cpu_relax();
-                goto repeat;
-        }
+        page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
+        if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+                /* non-LRU / non-movable temporary page is needed */
+                page->mapping = Z_EROFS_MAPPING_STAGING;
+                tocache = false;
+        }
 
-        if (nocache || !tocache)
-                goto out;
-        if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
-                page->mapping = Z_EROFS_MAPPING_STAGING;
-                goto out;
+        if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
+                if (tocache) {
+                        /* since it added to managed cache successfully */
+                        unlock_page(page);
+                        put_page(page);
+                } else {
+                        list_add(&page->lru, pagepool);
+                }
+                cond_resched();
+                goto repeat;
         }
-
         set_page_private(page, (unsigned long)pcl);
         SetPagePrivate(page);
 out:    /* the only exit (for tracing and debugging) */
         return page;
 }
 
-static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb,
-                                              struct z_erofs_unzip_io *io,
-                                              bool foreground)
+static struct z_erofs_decompressqueue *
+jobqueue_init(struct super_block *sb,
              struct z_erofs_decompressqueue *fgq, bool *fg)
 {
-        struct z_erofs_unzip_io_sb *iosb;
-
-        if (foreground) {
-                /* waitqueue available for foreground io */
-                DBG_BUGON(!io);
-
-                init_waitqueue_head(&io->u.wait);
-                atomic_set(&io->pending_bios, 0);
-                goto out;
-        }
+        struct z_erofs_decompressqueue *q;
 
-        iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
-        DBG_BUGON(!iosb);
-
-        /* initialize fields in the allocated descriptor */
-        io = &iosb->io;
-        iosb->sb = sb;
-        INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
-out:
-        io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
-        return io;
+        if (fg && !*fg) {
+                q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
+                if (!q) {
+                        *fg = true;
+                        goto fg_out;
+                }
+                INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
+        } else {
+fg_out:
+                q = fgq;
+                init_waitqueue_head(&fgq->u.wait);
+                atomic_set(&fgq->pending_bios, 0);
+        }
+        q->sb = sb;
+        q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+        return q;
 }
 
 /* define decompression jobqueue types */
@@ -1147,22 +1117,17 @@ enum {
 };
 
 static void *jobqueueset_init(struct super_block *sb,
-                              z_erofs_next_pcluster_t qtail[],
-                              struct z_erofs_unzip_io *q[],
-                              struct z_erofs_unzip_io *fgq,
-                              bool forcefg)
+                              struct z_erofs_decompressqueue *q[],
+                              struct z_erofs_decompressqueue *fgq, bool *fg)
 {
         /*
          * if managed cache is enabled, bypass jobqueue is needed,
          * no need to read from device for all pclusters in this queue.
          */
-        q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
-        qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
-
-        q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
-        qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
+        q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
+        q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
 
-        return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
+        return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
 }
@@ -1184,9 +1149,8 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
         qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
-                                       unsigned int nr_bios,
-                                       bool force_fg)
+static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
+                                       unsigned int nr_bios, bool force_fg)
 {
         /*
          * although background is preferred, no one is pending for submission.
@@ -1195,19 +1159,19 @@ static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
         if (force_fg || nr_bios)
                 return false;
 
-        kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
+        kvfree(q[JQ_SUBMIT]);
         return true;
 }
 
-static bool z_erofs_vle_submit_all(struct super_block *sb,
+static bool z_erofs_submit_queue(struct super_block *sb,
                                    z_erofs_next_pcluster_t owned_head,
                                    struct list_head *pagepool,
-                                   struct z_erofs_unzip_io *fgq,
-                                   bool force_fg)
+                                   struct z_erofs_decompressqueue *fgq,
+                                   bool *force_fg)
 {
-        struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
+        struct erofs_sb_info *const sbi = EROFS_SB(sb);
         z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
-        struct z_erofs_unzip_io *q[NR_JOBQUEUES];
+        struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
         struct bio *bio;
         void *bi_private;
         /* since bio will be NULL, no need to initialize last_index */
@@ -1221,7 +1185,9 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
         force_submit = false;
         bio = NULL;
         nr_bios = 0;
-        bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
+        bi_private = jobqueueset_init(sb, q, fgq, force_fg);
+        qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
+        qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
 
         /* by default, all need io submission */
         q[JQ_SUBMIT]->head = owned_head;
@@ -1268,7 +1234,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
                 if (!bio) {
                         bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
-                        bio->bi_end_io = z_erofs_vle_read_endio;
+                        bio->bi_end_io = z_erofs_decompressqueue_endio;
                         bio_set_dev(bio, sb->s_bdev);
                         bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
                                 LOG_SECTORS_PER_BLOCK;
@@ -1297,40 +1263,38 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
         if (bio)
                 submit_bio(bio);
 
-        if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
+        if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
                 return true;
 
-        z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
+        z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
         return true;
 }
 
-static void z_erofs_submit_and_unzip(struct super_block *sb,
-                                     struct z_erofs_collector *clt,
-                                     struct list_head *pagepool,
-                                     bool force_fg)
+static void z_erofs_runqueue(struct super_block *sb,
+                             struct z_erofs_collector *clt,
+                             struct list_head *pagepool, bool force_fg)
 {
-        struct z_erofs_unzip_io io[NR_JOBQUEUES];
+        struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-        if (!z_erofs_vle_submit_all(sb, clt->owned_head,
-                                    pagepool, io, force_fg))
+        if (!z_erofs_submit_queue(sb, clt->owned_head,
+                                  pagepool, io, &force_fg))
                 return;
 
-        /* decompress no I/O pclusters immediately */
-        z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
+        /* handle bypass queue (no i/o pclusters) immediately */
+        z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
 
         if (!force_fg)
                 return;
 
         /* wait until all bios are completed */
-        wait_event(io[JQ_SUBMIT].u.wait,
-                   !atomic_read(&io[JQ_SUBMIT].pending_bios));
+        io_wait_event(io[JQ_SUBMIT].u.wait,
                       !atomic_read(&io[JQ_SUBMIT].pending_bios));
 
-        /* let's synchronous decompression */
-        z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
+        /* handle synchronous decompress queue in the caller context */
+        z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
 }
 
-static int z_erofs_vle_normalaccess_readpage(struct file *file,
-                                             struct page *page)
+static int z_erofs_readpage(struct file *file, struct page *page)
 {
         struct inode *const inode = page->mapping->host;
         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
@@ -1345,7 +1309,7 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
         (void)z_erofs_collector_end(&f.clt);
 
         /* if some compressed cluster ready, need submit them anyway */
-        z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);
+        z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);
 
         if (err)
                 erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1364,10 +1328,8 @@ static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
         return nr <= sbi->max_sync_decompress_pages;
 }
 
-static int z_erofs_vle_normalaccess_readpages(struct file *filp,
-                                              struct address_space *mapping,
-                                              struct list_head *pages,
-                                              unsigned int nr_pages)
+static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
+                             struct list_head *pages, unsigned int nr_pages)
 {
         struct inode *const inode = mapping->host;
         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@@ -1422,7 +1384,7 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
         (void)z_erofs_collector_end(&f.clt);
 
-        z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);
+        z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);
 
         if (f.map.mpage)
                 put_page(f.map.mpage);
@@ -1432,8 +1394,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
         return 0;
 }
 
-const struct address_space_operations z_erofs_vle_normalaccess_aops = {
-        .readpage = z_erofs_vle_normalaccess_readpage,
-        .readpages = z_erofs_vle_normalaccess_readpages,
+const struct address_space_operations z_erofs_aops = {
+        .readpage = z_erofs_readpage,
+        .readpages = z_erofs_readpages,
 };
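
One detail of the queue rework above that is easy to miss: bio->bi_private now carries both the z_erofs_decompressqueue pointer and the one-bit foreground/sync flag, folded together via tagptr_fold() in jobqueueset_init() and unfolded again in z_erofs_decompressqueue_endio(). Below is a generic sketch of that pointer-tagging idea (hypothetical helper names in plain C, not the erofs tagptr API itself); it only assumes the tagged object is at least 2-byte aligned so the low pointer bit is free:

#include <stdint.h>
#include <assert.h>

/* fold a 1-bit tag into the low bit of an aligned pointer */
static inline void *tagptr1_fold(void *ptr, unsigned int tag)
{
        assert(((uintptr_t)ptr & 1) == 0 && tag <= 1);
        return (void *)((uintptr_t)ptr | tag);
}

/* recover the original pointer */
static inline void *tagptr1_ptr(void *tagged)
{
        return (void *)((uintptr_t)tagged & ~(uintptr_t)1);
}

/* recover the 1-bit tag */
static inline unsigned int tagptr1_tag(void *tagged)
{
        return (unsigned int)((uintptr_t)tagged & 1);
}

Packing the flag this way keeps the completion path allocation-free: a single bi_private word tells the endio handler both which queue to kick and whether a submitter is waiting on it synchronously.
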
@@ -84,7 +84,8 @@ struct z_erofs_pcluster {
 #define Z_EROFS_WORKGROUP_SIZE  sizeof(struct z_erofs_pcluster)
 
-struct z_erofs_unzip_io {
+struct z_erofs_decompressqueue {
+        struct super_block *sb;
         atomic_t pending_bios;
         z_erofs_next_pcluster_t head;
@@ -94,11 +95,6 @@ struct z_erofs_unzip_io {
         } u;
 };
 
-struct z_erofs_unzip_io_sb {
-        struct z_erofs_unzip_io io;
-        struct super_block *sb;
-};
-
 #define MNGD_MAPPING(sbi)       ((sbi)->managed_cache->i_mapping)
 static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
                                          struct page *page)
...
@@ -22,11 +22,11 @@ int z_erofs_fill_inode(struct inode *inode)
                 set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
         }
 
-        inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
+        inode->i_mapping->a_ops = &z_erofs_aops;
         return 0;
 }
 
-static int fill_inode_lazy(struct inode *inode)
+static int z_erofs_fill_inode_lazy(struct inode *inode)
 {
         struct erofs_inode *const vi = EROFS_I(inode);
         struct super_block *const sb = inode->i_sb;
@@ -138,7 +138,7 @@ static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
         return 0;
 }
 
-static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
                                          unsigned long lcn)
 {
         struct inode *const inode = m->inode;
@@ -311,13 +311,13 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
         return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
 }
 
-static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
-                                      unsigned int lcn)
+static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+                                          unsigned int lcn)
 {
         const unsigned int datamode = EROFS_I(m->inode)->datalayout;
 
         if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
-                return vle_legacy_load_cluster_from_disk(m, lcn);
+                return legacy_load_cluster_from_disk(m, lcn);
 
         if (datamode == EROFS_INODE_FLAT_COMPRESSION)
                 return compacted_load_cluster_from_disk(m, lcn);
@@ -325,7 +325,7 @@ static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
         return -EINVAL;
 }
 
-static int vle_extent_lookback(struct z_erofs_maprecorder *m,
+static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
                                    unsigned int lookback_distance)
 {
         struct erofs_inode *const vi = EROFS_I(m->inode);
@@ -343,7 +343,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
         /* load extent head logical cluster if needed */
         lcn -= lookback_distance;
-        err = vle_load_cluster_from_disk(m, lcn);
+        err = z_erofs_load_cluster_from_disk(m, lcn);
         if (err)
                 return err;
@@ -356,7 +356,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
                         DBG_BUGON(1);
                         return -EFSCORRUPTED;
                 }
-                return vle_extent_lookback(m, m->delta[0]);
+                return z_erofs_extent_lookback(m, m->delta[0]);
         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
                 map->m_flags &= ~EROFS_MAP_ZIPPED;
                 /* fallthrough */
@@ -396,7 +396,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
                 goto out;
         }
 
-        err = fill_inode_lazy(inode);
+        err = z_erofs_fill_inode_lazy(inode);
         if (err)
                 goto out;
@@ -405,7 +405,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
         m.lcn = ofs >> lclusterbits;
         endoff = ofs & ((1 << lclusterbits) - 1);
 
-        err = vle_load_cluster_from_disk(&m, m.lcn);
+        err = z_erofs_load_cluster_from_disk(&m, m.lcn);
         if (err)
                 goto unmap_out;
@@ -436,7 +436,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
                 /* fallthrough */
         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                 /* get the correspoinding first chunk */
-                err = vle_extent_lookback(&m, m.delta[0]);
+                err = z_erofs_extent_lookback(&m, m.delta[0]);
                 if (err)
                         goto unmap_out;
                 break;
...