Commit e88745dc authored by Linus Torvalds

Merge tag 'erofs-for-5.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "First of all, we'd like to add Yue Hu and Jeffle Xu as two new
  reviewers. Thank them for spending time working on EROFS!

  There is no major feature outstanding in this cycle; it's mainly a
  patchset I worked on to prepare for rolling hash deduplication and
  folios for compressed data as the next big features. It also kills
  the unneeded PG_error flag dependency.

  Apart from that, there are bugfixes and cleanups as always. Details
  are listed below:

   - Add Yue Hu and Jeffle Xu as reviewers

   - Add the missing wake_up when updating lzma streams

   - Avoid consecutive detection for Highmem memory

   - Prepare for multi-reference pclusters and get rid of PG_error

   - Fix ctx->pos update for NFS export

   - minor cleanups"

* tag 'erofs-for-5.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs: (23 commits)
  erofs: update ctx->pos for every emitted dirent
  erofs: get rid of the leftover PAGE_SIZE in dir.c
  erofs: get rid of erofs_prepare_dio() helper
  erofs: introduce multi-reference pclusters (fully-referenced)
  erofs: record the longest decompressed size in this round
  erofs: introduce z_erofs_do_decompressed_bvec()
  erofs: try to leave (de)compressed_pages on stack if possible
  erofs: introduce struct z_erofs_decompress_backend
  erofs: get rid of `z_pagemap_global'
  erofs: clean up `enum z_erofs_collectmode'
  erofs: get rid of `enum z_erofs_page_type'
  erofs: rework online page handling
  erofs: switch compressed_pages[] to bufvec
  erofs: introduce `z_erofs_parse_in_bvecs'
  erofs: drop the old pagevec approach
  erofs: introduce bufvec to store decompressed buffers
  erofs: introduce `z_erofs_parse_out_bvecs()'
  erofs: clean up z_erofs_collector_begin()
  erofs: get rid of unneeded `inode', `map' and `sb'
  erofs: avoid consecutive detection for Highmem memory
  ...
parents bec14d79 ecce9212
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -7485,6 +7485,8 @@ F:	include/video/s1d13xxxfb.h
 EROFS FILE SYSTEM
 M:	Gao Xiang <xiang@kernel.org>
 M:	Chao Yu <chao@kernel.org>
+R:	Yue Hu <huyue2@coolpad.com>
+R:	Jeffle Xu <jefflexu@linux.alibaba.com>
 L:	linux-erofs@lists.ozlabs.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
@@ -17,7 +17,7 @@ struct z_erofs_decompress_req {

 	/* indicate the algorithm will be used for decompression */
 	unsigned int alg;
-	bool inplace_io, partial_decoding;
+	bool inplace_io, partial_decoding, fillgaps;
 };

 struct z_erofs_decompressor {
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
@@ -366,42 +366,33 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
 	return iomap_bmap(mapping, block, &erofs_iomap_ops);
 }

-static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
-	loff_t align = iocb->ki_pos | iov_iter_count(to) |
-		iov_iter_alignment(to);
-	struct block_device *bdev = inode->i_sb->s_bdev;
-	unsigned int blksize_mask;
-
-	if (bdev)
-		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
-	else
-		blksize_mask = (1 << inode->i_blkbits) - 1;
-
-	if (align & blksize_mask)
-		return -EINVAL;
-	return 0;
-}

-static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
 	/* no need taking (shared) inode lock since it's a ro filesystem */
 	if (!iov_iter_count(to))
 		return 0;

 #ifdef CONFIG_FS_DAX
-	if (IS_DAX(iocb->ki_filp->f_mapping->host))
+	if (IS_DAX(inode))
 		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
 #endif
 	if (iocb->ki_flags & IOCB_DIRECT) {
-		int err = erofs_prepare_dio(iocb, to);
+		struct block_device *bdev = inode->i_sb->s_bdev;
+		unsigned int blksize_mask;
+
+		if (bdev)
+			blksize_mask = bdev_logical_block_size(bdev) - 1;
+		else
+			blksize_mask = (1 << inode->i_blkbits) - 1;
+
+		if ((iocb->ki_pos | iov_iter_count(to) |
+		     iov_iter_alignment(to)) & blksize_mask)
+			return -EINVAL;

-		if (!err)
-			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
-					    NULL, 0, NULL, 0);
-		if (err < 0)
-			return err;
+		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
+				    NULL, 0, NULL, 0);
 	}
 	return filemap_read(iocb, to, 0);
 }
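The inlined direct-I/O check above folds three alignment tests into one: for a power-of-two block size, x & (blksize - 1) is zero exactly when x is block-aligned, so OR-ing the file position, byte count, and buffer alignment before masking validates all of them at once. A minimal userspace sketch of the same trick (illustrative only, not kernel code; the helper name is made up):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* blksize must be a power of two for the mask trick to work */
	static int dio_aligned(uint64_t pos, uint64_t count, uint64_t addr,
			       unsigned int blksize)
	{
		unsigned int mask = blksize - 1;

		/* zero iff pos, count and addr are all blksize-aligned */
		return ((pos | count | addr) & mask) == 0;
	}

	int main(void)
	{
		assert(dio_aligned(4096, 8192, 512 * 9, 512)); /* all aligned */
		assert(!dio_aligned(4096, 100, 0, 512));  /* count misaligned */
		printf("alignment checks passed\n");
		return 0;
	}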
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
@@ -83,7 +83,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
 			j = 0;

 		/* 'valid' bounced can only be tested after a complete round */
-		if (test_bit(j, bounced)) {
+		if (!rq->fillgaps && test_bit(j, bounced)) {
 			DBG_BUGON(i < lz4_max_distance_pages);
 			DBG_BUGON(top >= lz4_max_distance_pages);
 			availables[top++] = rq->out[i - lz4_max_distance_pages];
@@ -91,14 +91,18 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,

 		if (page) {
 			__clear_bit(j, bounced);
-			if (kaddr) {
-				if (kaddr + PAGE_SIZE == page_address(page))
-					kaddr += PAGE_SIZE;
-				else
-					kaddr = NULL;
-			} else if (!i) {
-				kaddr = page_address(page);
-			}
+			if (!PageHighMem(page)) {
+				if (!i) {
+					kaddr = page_address(page);
+					continue;
+				}
+				if (kaddr &&
+				    kaddr + PAGE_SIZE == page_address(page)) {
+					kaddr += PAGE_SIZE;
+					continue;
+				}
+			}
+			kaddr = NULL;
 			continue;
 		}
 		kaddr = NULL;
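z_erofs_lz4_prepare_dstpages() tries to use the destination pages directly, without vmap, by checking that each page's kernel address continues exactly where the previous one ended (kaddr + PAGE_SIZE == page_address(page)). Highmem pages have no permanent kernel mapping, so page_address() is not meaningful for them; the fix skips consecutive detection for such pages via PageHighMem(). A minimal userspace analogue of the consecutive-run detection (illustrative only; names invented):

	#include <stddef.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096

	/* length of the leading run of virtually-contiguous buffers */
	static size_t contiguous_run(char *bufs[], size_t n)
	{
		size_t i;
		char *kaddr = NULL;

		for (i = 0; i < n; i++) {
			if (kaddr && bufs[i] != kaddr + PAGE_SIZE)
				break;	/* gap: stop coalescing here */
			kaddr = bufs[i];
		}
		return i;
	}

	int main(void)
	{
		static char pool[3 * PAGE_SIZE];
		char *bufs[3] = { pool, pool + PAGE_SIZE, pool + 2 * PAGE_SIZE };

		printf("contiguous pages: %zu\n", contiguous_run(bufs, 3));
		return 0;	/* prints 3: the whole run is contiguous */
	}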
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
@@ -143,6 +143,7 @@ int z_erofs_load_lzma_config(struct super_block *sb,
 	DBG_BUGON(z_erofs_lzma_head);
 	z_erofs_lzma_head = head;
 	spin_unlock(&z_erofs_lzma_lock);
+	wake_up_all(&z_erofs_lzma_wq);

 	z_erofs_lzma_max_dictsize = dict_size;
 	mutex_unlock(&lzma_resize_mutex);
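This one-liner adds the wake-up that was missing after the lzma streams are updated: tasks sleeping on z_erofs_lzma_wq would otherwise never learn that the new streams were published. A minimal userspace analogue of the publish-then-wake pattern using a condition variable (illustrative only, not the kernel waitqueue API; names invented):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t updated = PTHREAD_COND_INITIALIZER;
	static int max_dictsize;  /* stands in for z_erofs_lzma_max_dictsize */

	static void *waiter(void *arg)
	{
		pthread_mutex_lock(&lock);
		while (!max_dictsize)	/* sleep until streams are grown */
			pthread_cond_wait(&updated, &lock);
		pthread_mutex_unlock(&lock);
		printf("saw dictsize %d\n", max_dictsize);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waiter, NULL);
		pthread_mutex_lock(&lock);
		max_dictsize = 1 << 20;	/* publish the new configuration */
		pthread_mutex_unlock(&lock);
		/* without this broadcast (the "wake_up_all"), t sleeps forever */
		pthread_cond_broadcast(&updated);
		pthread_join(t, NULL);
		return 0;
	}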
diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c
@@ -22,10 +22,9 @@ static void debug_one_dentry(unsigned char d_type, const char *de_name,
 }

 static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
-			       void *dentry_blk, unsigned int *ofs,
+			       void *dentry_blk, struct erofs_dirent *de,
 			       unsigned int nameoff, unsigned int maxsize)
 {
-	struct erofs_dirent *de = dentry_blk + *ofs;
 	const struct erofs_dirent *end = dentry_blk + nameoff;

 	while (de < end) {
@@ -59,9 +58,8 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
 			/* stopped by some reason */
 			return 1;
 		++de;
-		*ofs += sizeof(struct erofs_dirent);
+		ctx->pos += sizeof(struct erofs_dirent);
 	}
-	*ofs = maxsize;
 	return 0;
 }
@@ -90,33 +88,33 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
 		nameoff = le16_to_cpu(de->nameoff);
 		if (nameoff < sizeof(struct erofs_dirent) ||
-		    nameoff >= PAGE_SIZE) {
+		    nameoff >= EROFS_BLKSIZ) {
 			erofs_err(dir->i_sb,
 				  "invalid de[0].nameoff %u @ nid %llu",
 				  nameoff, EROFS_I(dir)->nid);
 			err = -EFSCORRUPTED;
-			goto skip_this;
+			break;
 		}

 		maxsize = min_t(unsigned int,
-				dirsize - ctx->pos + ofs, PAGE_SIZE);
+				dirsize - ctx->pos + ofs, EROFS_BLKSIZ);

 		/* search dirents at the arbitrary position */
 		if (initial) {
 			initial = false;

 			ofs = roundup(ofs, sizeof(struct erofs_dirent));
+			ctx->pos = blknr_to_addr(i) + ofs;
 			if (ofs >= nameoff)
 				goto skip_this;
 		}

-		err = erofs_fill_dentries(dir, ctx, de, &ofs,
+		err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs,
 					  nameoff, maxsize);
-skip_this:
-		ctx->pos = blknr_to_addr(i) + ofs;
-
 		if (err)
 			break;
+skip_this:
+		ctx->pos = blknr_to_addr(i) + maxsize;
 		++i;
 		ofs = 0;
 	}
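The readdir fix moves the ctx->pos update from once per block to once per emitted dirent. This matters for NFS export: a client may resume a listing from any previously returned position, and a block-granular position let entries be duplicated or skipped on resume. A toy sketch of the per-entry bookkeeping (illustrative only; fixed-size entries assumed, unlike EROFS's on-disk format):

	#include <stdio.h>

	struct dirent_fixed { char name[16]; };

	static const struct dirent_fixed ents[4] = {
		{ "." }, { ".." }, { "a" }, { "b" }
	};

	/* emit entries starting at *pos; stop after 'budget' entries */
	static void readdir_from(size_t *pos, int budget)
	{
		while (*pos < sizeof(ents) && budget--) {
			printf("%s\n", ents[*pos / sizeof(*ents)].name);
			*pos += sizeof(*ents);	/* advance per emitted entry */
		}
	}

	int main(void)
	{
		size_t pos = 0;

		readdir_from(&pos, 2);	/* first getdents-like call */
		readdir_from(&pos, 2);	/* resumes after "..", no dups */
		return 0;
	}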
[One large diff is collapsed in the original view and not shown here.]
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
@@ -7,13 +7,10 @@
 #define __EROFS_FS_ZDATA_H

 #include "internal.h"
-#include "zpvec.h"
+#include "tagptr.h"

 #define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
-#define Z_EROFS_NR_INLINE_PAGEVECS	3
-
-#define Z_EROFS_PCLUSTER_FULL_LENGTH	0x00000001
-#define Z_EROFS_PCLUSTER_LENGTH_BIT	1
+#define Z_EROFS_INLINE_BVECS		2

 /*
  * let's leave a type here in case of introducing
@@ -21,6 +18,21 @@
  */
 typedef void *z_erofs_next_pcluster_t;

+struct z_erofs_bvec {
+	struct page *page;
+	int offset;
+	unsigned int end;
+};
+
+#define __Z_EROFS_BVSET(name, total) \
+struct name { \
+	/* point to the next page which contains the following bvecs */ \
+	struct page *nextpage; \
+	struct z_erofs_bvec bvec[total]; \
+}
+__Z_EROFS_BVSET(z_erofs_bvset,);
+__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
+
 /*
  * Structure fields follow one of the following exclusion rules.
  *
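The new z_erofs_bvec/bvset layout replaces the old tagged-pointer pagevec: each bvec records a page plus the byte range used within it, a fixed number (Z_EROFS_INLINE_BVECS) are inlined into the pcluster, and overflow spills into chained pages via nextpage. A userspace sketch of the chaining idea (illustrative only; heap nodes stand in for the chained pages):

	#include <stdio.h>
	#include <stdlib.h>

	#define INLINE_BVECS 2

	struct bvec { void *page; int offset; unsigned int end; };

	struct bvset {
		struct bvset *next;	/* overflow chain, like nextpage */
		struct bvec bvec[INLINE_BVECS];
	};

	static void bvset_store(struct bvset *set, unsigned int i,
				struct bvec bv)
	{
		while (i >= INLINE_BVECS) {	/* walk/extend the chain */
			if (!set->next)
				set->next = calloc(1, sizeof(*set));
			set = set->next;
			i -= INLINE_BVECS;
		}
		set->bvec[i] = bv;
	}

	int main(void)
	{
		struct bvset head = { 0 };
		struct bvec bv = { NULL, 0, 4096 };

		bvset_store(&head, 3, bv); /* lands in the chained node */
		printf("stored end=%u\n", head.next->bvec[1].end);
		return 0;
	}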
@@ -38,24 +50,21 @@ struct z_erofs_pcluster {
 	/* A: point to next chained pcluster or TAILs */
 	z_erofs_next_pcluster_t next;

-	/* A: lower limit of decompressed length and if full length or not */
+	/* L: the maximum decompression size of this round */
 	unsigned int length;

+	/* L: total number of bvecs */
+	unsigned int vcnt;
+
 	/* I: page offset of start position of decompression */
 	unsigned short pageofs_out;

 	/* I: page offset of inline compressed data */
 	unsigned short pageofs_in;

-	/* L: maximum relative page index in pagevec[] */
-	unsigned short nr_pages;
-
-	/* L: total number of pages in pagevec[] */
-	unsigned int vcnt;
-
 	union {
-		/* L: inline a certain number of pagevecs for bootstrap */
-		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
+		/* L: inline a certain number of bvec for bootstrap */
+		struct z_erofs_bvset_inline bvset;

 		/* I: can be used to free the pcluster by RCU. */
 		struct rcu_head rcu;
@@ -72,8 +81,14 @@ struct z_erofs_pcluster {
 	/* I: compression algorithm format */
 	unsigned char algorithmformat;

-	/* A: compressed pages (can be cached or inplaced pages) */
-	struct page *compressed_pages[];
+	/* L: whether partial decompression or not */
+	bool partial;
+
+	/* L: indicate several pageofs_outs or not */
+	bool multibases;
+
+	/* A: compressed bvecs (can be cached or inplaced pages) */
+	struct z_erofs_bvec compressed_bvecs[];
 };

 /* let's avoid the valid 32-bit kernel addresses */
@@ -94,6 +109,8 @@ struct z_erofs_decompressqueue {
 		struct completion done;
 		struct work_struct work;
 	} u;
+
+	bool eio;
 };

 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
@@ -108,38 +125,17 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
 	return pcl->pclusterpages;
 }

-#define Z_EROFS_ONLINEPAGE_COUNT_BITS	2
-#define Z_EROFS_ONLINEPAGE_COUNT_MASK	((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
-#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT	(Z_EROFS_ONLINEPAGE_COUNT_BITS)
-
 /*
- * waiters (aka. ongoing_packs): # to unlock the page
- * sub-index: 0 - for partial page, >= 1 full page sub-index
+ * bit 31: I/O error occurred on this page
+ * bit 0 - 30: remaining parts to complete this page
  */
-typedef atomic_t z_erofs_onlinepage_t;
-
-/* type punning */
-union z_erofs_onlinepage_converter {
-	z_erofs_onlinepage_t *o;
-	unsigned long *v;
-};
-
-static inline unsigned int z_erofs_onlinepage_index(struct page *page)
-{
-	union z_erofs_onlinepage_converter u;
-
-	DBG_BUGON(!PagePrivate(page));
-	u.v = &page_private(page);
-
-	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-}
+#define Z_EROFS_PAGE_EIO			(1 << 31)

 static inline void z_erofs_onlinepage_init(struct page *page)
 {
 	union {
-		z_erofs_onlinepage_t o;
+		atomic_t o;
 		unsigned long v;
-	/* keep from being unlocked in advance */
 	} u = { .o = ATOMIC_INIT(1) };

 	set_page_private(page, u.v);
@@ -147,49 +143,36 @@ static inline void z_erofs_onlinepage_init(struct page *page)
 	SetPagePrivate(page);
 }

-static inline void z_erofs_onlinepage_fixup(struct page *page,
-	uintptr_t index, bool down)
+static inline void z_erofs_onlinepage_split(struct page *page)
 {
-	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
-	int orig, orig_index, val;
+	atomic_inc((atomic_t *)&page->private);
+}

-repeat:
-	orig = atomic_read(u.o);
-	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-	if (orig_index) {
-		if (!index)
-			return;
+static inline void z_erofs_page_mark_eio(struct page *page)
+{
+	int orig;

-		DBG_BUGON(orig_index != index);
-	}
-
-	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
-		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
-	if (atomic_cmpxchg(u.o, orig, val) != orig)
-		goto repeat;
+	do {
+		orig = atomic_read((atomic_t *)&page->private);
+	} while (atomic_cmpxchg((atomic_t *)&page->private, orig,
+				orig | Z_EROFS_PAGE_EIO) != orig);
 }

 static inline void z_erofs_onlinepage_endio(struct page *page)
 {
-	union z_erofs_onlinepage_converter u;
 	unsigned int v;

 	DBG_BUGON(!PagePrivate(page));
-	u.v = &page_private(page);
-
-	v = atomic_dec_return(u.o);
-	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
+	v = atomic_dec_return((atomic_t *)&page->private);
+	if (!(v & ~Z_EROFS_PAGE_EIO)) {
 		set_page_private(page, 0);
 		ClearPagePrivate(page);
-		if (!PageError(page))
+		if (!(v & Z_EROFS_PAGE_EIO))
 			SetPageUptodate(page);
 		unlock_page(page);
 	}
-	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
 }

-#define Z_EROFS_VMAP_ONSTACK_PAGES	\
-	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
-#define Z_EROFS_VMAP_GLOBAL_PAGES	2048
+#define Z_EROFS_ONSTACK_PAGES		32

 #endif
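The reworked online-page state packs everything the old PG_error path needed into the page's private word: bit 31 flags an I/O error and bits 0-30 count the decompression parts still outstanding, so the page is unlocked (and marked uptodate only if no error was recorded) once the count drains. A compilable userspace model of that state machine (illustrative only; C11 atomics instead of the kernel's atomic_t, and the cmpxchg loop replaced by atomic_fetch_or):

	#include <stdatomic.h>
	#include <stdio.h>

	#define PAGE_EIO (1u << 31)

	static _Atomic unsigned int state = 1;	/* one initial reference */

	static void split(void)    { atomic_fetch_add(&state, 1); }
	static void mark_eio(void) { atomic_fetch_or(&state, PAGE_EIO); }

	static void endio(void)
	{
		unsigned int v = atomic_fetch_sub(&state, 1) - 1;

		if (!(v & ~PAGE_EIO))	/* all parts finished */
			printf(v & PAGE_EIO ? "error: not uptodate\n"
					    : "page uptodate, unlock\n");
	}

	int main(void)
	{
		split();	/* page covers two decompressed parts */
		endio();	/* first part done, one reference left */
		mark_eio();	/* second part hit an I/O error */
		endio();	/* last reference drops: error path */
		return 0;
	}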
diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h
deleted file mode 100644
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZPVEC_H
#define __EROFS_FS_ZPVEC_H

#include "tagptr.h"
/* page type in pagevec for decompress subsystem */
enum z_erofs_page_type {
/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
Z_EROFS_PAGE_TYPE_EXCLUSIVE,
Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,
Z_EROFS_VLE_PAGE_TYPE_HEAD,
Z_EROFS_VLE_PAGE_TYPE_MAX
};
extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
__bad_page_type_exclusive(void);
/* pagevec tagged pointer */
typedef tagptr2_t erofs_vtptr_t;
/* pagevec collector */
struct z_erofs_pagevec_ctor {
struct page *curr, *next;
erofs_vtptr_t *pages;
unsigned int nr, index;
};
static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
bool atomic)
{
if (!ctor->curr)
return;
if (atomic)
kunmap_atomic(ctor->pages);
else
kunmap(ctor->curr);
}
static inline struct page *
z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
unsigned int nr)
{
unsigned int index;
/* keep away from occupied pages */
if (ctor->next)
return ctor->next;
for (index = 0; index < nr; ++index) {
const erofs_vtptr_t t = ctor->pages[index];
const unsigned int tags = tagptr_unfold_tags(t);
if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
return tagptr_unfold_ptr(t);
}
DBG_BUGON(nr >= ctor->nr);
return NULL;
}
static inline void
z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
bool atomic)
{
struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
z_erofs_pagevec_ctor_exit(ctor, atomic);
ctor->curr = next;
ctor->next = NULL;
ctor->pages = atomic ?
kmap_atomic(ctor->curr) : kmap(ctor->curr);
ctor->nr = PAGE_SIZE / sizeof(struct page *);
ctor->index = 0;
}
static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
unsigned int nr,
erofs_vtptr_t *pages,
unsigned int i)
{
ctor->nr = nr;
ctor->curr = ctor->next = NULL;
ctor->pages = pages;
if (i >= nr) {
i -= nr;
z_erofs_pagevec_ctor_pagedown(ctor, false);
while (i > ctor->nr) {
i -= ctor->nr;
z_erofs_pagevec_ctor_pagedown(ctor, false);
}
}
ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
ctor->index = i;
}
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
struct page *page,
enum z_erofs_page_type type,
bool pvec_safereuse)
{
if (!ctor->next) {
/* some pages cannot be reused as pvec safely without I/O */
if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;
if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
ctor->index + 1 == ctor->nr)
return false;
}
if (ctor->index >= ctor->nr)
z_erofs_pagevec_ctor_pagedown(ctor, false);
/* exclusive page type must be 0 */
if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
__bad_page_type_exclusive();
/* should remind that collector->next never equal to 1, 2 */
if (type == (uintptr_t)ctor->next) {
ctor->next = page;
}
ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
return true;
}
static inline struct page *
z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
enum z_erofs_page_type *type)
{
erofs_vtptr_t t;
if (ctor->index >= ctor->nr) {
DBG_BUGON(!ctor->next);
z_erofs_pagevec_ctor_pagedown(ctor, true);
}
t = ctor->pages[ctor->index];
*type = tagptr_unfold_tags(t);
/* should remind that collector->next never equal to 1, 2 */
if (*type == (uintptr_t)ctor->next)
ctor->next = tagptr_unfold_ptr(t);
ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
return tagptr_unfold_ptr(t);
}
#endif
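For reference, the deleted pagevec code above stored a 2-bit page type in the low bits of each page pointer via tagptr (erofs_vtptr_t): pointers to objects aligned to 4 or more bytes always have those bits clear, so they can carry a small tag for free. A standalone sketch of the fold/unfold helpers (illustrative only; simplified from the kernel's fs/erofs/tagptr.h):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef struct { uintptr_t v; } tagptr2_t;	/* 2 tag bits */

	static tagptr2_t tagptr_fold(void *ptr, unsigned int tags)
	{
		/* the pointer must be 4-byte aligned, the tag at most 3 */
		assert(((uintptr_t)ptr & 3) == 0 && tags <= 3);
		return (tagptr2_t){ (uintptr_t)ptr | tags };
	}

	static void *tagptr_unfold_ptr(tagptr2_t t)
	{
		return (void *)(t.v & ~(uintptr_t)3);
	}

	static unsigned int tagptr_unfold_tags(tagptr2_t t)
	{
		return t.v & 3;
	}

	int main(void)
	{
		static int page;	/* stands in for a struct page */
		tagptr2_t t = tagptr_fold(&page, 2 /* e.g. TAIL_SHARED */);

		printf("ptr ok: %d, tag: %u\n",
		       tagptr_unfold_ptr(t) == (void *)&page,
		       tagptr_unfold_tags(t));
		return 0;
	}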