Commit dfef313e authored by Linus Torvalds

Merge tag 'erofs-for-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "This cycle addresses a reported permission issue with overlay due to a
  duplicated permission check for "trusted." xattrs. Also, a REQ_RAHEAD
  flag is added now to all readahead requests in order to trace
  readahead I/Os. The others are random cleanups.

  All commits have been tested and have been in linux-next as well.

  Summary:

   - fix an issue which can cause overlay permission problem due to
     duplicated permission check for "trusted." xattrs;

   - add REQ_RAHEAD flag to readahead requests for blktrace;

   - several random cleanups"

* tag 'erofs-for-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: remove unnecessary enum entries
  erofs: add REQ_RAHEAD flag to readahead requests
  erofs: fold in should_decompress_synchronously()
  erofs: avoid unnecessary variable `err'
  erofs: remove unneeded parameter
  erofs: avoid duplicated permission check for "trusted." xattrs
parents 11e3235b 915f4c93
...@@ -224,7 +224,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio, ...@@ -224,7 +224,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
bio_set_dev(bio, sb->s_bdev); bio_set_dev(bio, sb->s_bdev);
bio->bi_iter.bi_sector = (sector_t)blknr << bio->bi_iter.bi_sector = (sector_t)blknr <<
LOG_SECTORS_PER_BLOCK; LOG_SECTORS_PER_BLOCK;
bio->bi_opf = REQ_OP_READ; bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
} }
err = bio_add_page(bio, page, PAGE_SIZE, 0); err = bio_add_page(bio, page, PAGE_SIZE, 0);
......
...@@ -211,9 +211,7 @@ static void erofs_default_options(struct erofs_fs_context *ctx) ...@@ -211,9 +211,7 @@ static void erofs_default_options(struct erofs_fs_context *ctx)
enum { enum {
Opt_user_xattr, Opt_user_xattr,
Opt_nouser_xattr,
Opt_acl, Opt_acl,
Opt_noacl,
Opt_cache_strategy, Opt_cache_strategy,
Opt_err Opt_err
}; };
......
...@@ -473,8 +473,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler, ...@@ -473,8 +473,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
return -EOPNOTSUPP; return -EOPNOTSUPP;
break; break;
case EROFS_XATTR_INDEX_TRUSTED: case EROFS_XATTR_INDEX_TRUSTED:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
break; break;
case EROFS_XATTR_INDEX_SECURITY: case EROFS_XATTR_INDEX_SECURITY:
break; break;
......
...@@ -135,6 +135,7 @@ struct z_erofs_decompress_frontend { ...@@ -135,6 +135,7 @@ struct z_erofs_decompress_frontend {
struct z_erofs_collector clt; struct z_erofs_collector clt;
struct erofs_map_blocks map; struct erofs_map_blocks map;
bool readahead;
/* used for applying cache strategy on the fly */ /* used for applying cache strategy on the fly */
bool backmost; bool backmost;
erofs_off_t headoffset; erofs_off_t headoffset;
...@@ -153,8 +154,7 @@ static DEFINE_MUTEX(z_pagemap_global_lock); ...@@ -153,8 +154,7 @@ static DEFINE_MUTEX(z_pagemap_global_lock);
static void preload_compressed_pages(struct z_erofs_collector *clt, static void preload_compressed_pages(struct z_erofs_collector *clt,
struct address_space *mc, struct address_space *mc,
enum z_erofs_cache_alloctype type, enum z_erofs_cache_alloctype type)
struct list_head *pagepool)
{ {
const struct z_erofs_pcluster *pcl = clt->pcl; const struct z_erofs_pcluster *pcl = clt->pcl;
const unsigned int clusterpages = BIT(pcl->clusterbits); const unsigned int clusterpages = BIT(pcl->clusterbits);
...@@ -562,8 +562,7 @@ static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, ...@@ -562,8 +562,7 @@ static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
} }
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct page *page, struct page *page)
struct list_head *pagepool)
{ {
struct inode *const inode = fe->inode; struct inode *const inode = fe->inode;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
...@@ -620,8 +619,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, ...@@ -620,8 +619,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
else else
cache_strategy = DONTALLOC; cache_strategy = DONTALLOC;
preload_compressed_pages(clt, MNGD_MAPPING(sbi), preload_compressed_pages(clt, MNGD_MAPPING(sbi), cache_strategy);
cache_strategy, pagepool);
hitted: hitted:
/* /*
...@@ -653,7 +651,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, ...@@ -653,7 +651,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
/* should allocate an additional staging page for pagevec */ /* should allocate an additional staging page for pagevec */
if (err == -EAGAIN) { if (err == -EAGAIN) {
struct page *const newpage = struct page *const newpage =
erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL); alloc_page(GFP_NOFS | __GFP_NOFAIL);
newpage->mapping = Z_EROFS_MAPPING_STAGING; newpage->mapping = Z_EROFS_MAPPING_STAGING;
err = z_erofs_attach_page(clt, newpage, err = z_erofs_attach_page(clt, newpage,
...@@ -1151,7 +1149,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, ...@@ -1151,7 +1149,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
} }
static void z_erofs_submit_queue(struct super_block *sb, static void z_erofs_submit_queue(struct super_block *sb,
z_erofs_next_pcluster_t owned_head, struct z_erofs_decompress_frontend *f,
struct list_head *pagepool, struct list_head *pagepool,
struct z_erofs_decompressqueue *fgq, struct z_erofs_decompressqueue *fgq,
bool *force_fg) bool *force_fg)
...@@ -1160,6 +1158,7 @@ static void z_erofs_submit_queue(struct super_block *sb, ...@@ -1160,6 +1158,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
void *bi_private; void *bi_private;
z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
/* since bio will be NULL, no need to initialize last_index */ /* since bio will be NULL, no need to initialize last_index */
pgoff_t last_index; pgoff_t last_index;
unsigned int nr_bios = 0; unsigned int nr_bios = 0;
...@@ -1193,7 +1192,6 @@ static void z_erofs_submit_queue(struct super_block *sb, ...@@ -1193,7 +1192,6 @@ static void z_erofs_submit_queue(struct super_block *sb,
do { do {
struct page *page; struct page *page;
int err;
page = pickup_page_for_submission(pcl, i++, pagepool, page = pickup_page_for_submission(pcl, i++, pagepool,
MNGD_MAPPING(sbi), MNGD_MAPPING(sbi),
...@@ -1216,11 +1214,12 @@ static void z_erofs_submit_queue(struct super_block *sb, ...@@ -1216,11 +1214,12 @@ static void z_erofs_submit_queue(struct super_block *sb,
LOG_SECTORS_PER_BLOCK; LOG_SECTORS_PER_BLOCK;
bio->bi_private = bi_private; bio->bi_private = bi_private;
bio->bi_opf = REQ_OP_READ; bio->bi_opf = REQ_OP_READ;
if (f->readahead)
bio->bi_opf |= REQ_RAHEAD;
++nr_bios; ++nr_bios;
} }
err = bio_add_page(bio, page, PAGE_SIZE, 0); if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
if (err < PAGE_SIZE)
goto submit_bio_retry; goto submit_bio_retry;
last_index = cur; last_index = cur;
...@@ -1248,14 +1247,14 @@ static void z_erofs_submit_queue(struct super_block *sb, ...@@ -1248,14 +1247,14 @@ static void z_erofs_submit_queue(struct super_block *sb,
} }
static void z_erofs_runqueue(struct super_block *sb, static void z_erofs_runqueue(struct super_block *sb,
struct z_erofs_collector *clt, struct z_erofs_decompress_frontend *f,
struct list_head *pagepool, bool force_fg) struct list_head *pagepool, bool force_fg)
{ {
struct z_erofs_decompressqueue io[NR_JOBQUEUES]; struct z_erofs_decompressqueue io[NR_JOBQUEUES];
if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
return; return;
z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg); z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
/* handle bypass queue (no i/o pclusters) immediately */ /* handle bypass queue (no i/o pclusters) immediately */
z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool); z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
...@@ -1282,11 +1281,11 @@ static int z_erofs_readpage(struct file *file, struct page *page) ...@@ -1282,11 +1281,11 @@ static int z_erofs_readpage(struct file *file, struct page *page)
f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT; f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
err = z_erofs_do_read_page(&f, page, &pagepool); err = z_erofs_do_read_page(&f, page);
(void)z_erofs_collector_end(&f.clt); (void)z_erofs_collector_end(&f.clt);
/* if some compressed cluster ready, need submit them anyway */ /* if some compressed cluster ready, need submit them anyway */
z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true); z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);
if (err) if (err)
erofs_err(inode->i_sb, "failed to read, err [%d]", err); erofs_err(inode->i_sb, "failed to read, err [%d]", err);
...@@ -1299,25 +1298,20 @@ static int z_erofs_readpage(struct file *file, struct page *page) ...@@ -1299,25 +1298,20 @@ static int z_erofs_readpage(struct file *file, struct page *page)
return err; return err;
} }
static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
unsigned int nr)
{
return nr <= sbi->ctx.max_sync_decompress_pages;
}
static void z_erofs_readahead(struct readahead_control *rac) static void z_erofs_readahead(struct readahead_control *rac)
{ {
struct inode *const inode = rac->mapping->host; struct inode *const inode = rac->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
bool sync = should_decompress_synchronously(sbi, readahead_count(rac)); unsigned int nr_pages = readahead_count(rac);
bool sync = (nr_pages <= sbi->ctx.max_sync_decompress_pages);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
struct page *page, *head = NULL; struct page *page, *head = NULL;
LIST_HEAD(pagepool); LIST_HEAD(pagepool);
trace_erofs_readpages(inode, readahead_index(rac), trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
readahead_count(rac), false);
f.readahead = true;
f.headoffset = readahead_pos(rac); f.headoffset = readahead_pos(rac);
while ((page = readahead_page(rac))) { while ((page = readahead_page(rac))) {
...@@ -1341,7 +1335,7 @@ static void z_erofs_readahead(struct readahead_control *rac) ...@@ -1341,7 +1335,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
/* traversal in reverse order */ /* traversal in reverse order */
head = (void *)page_private(page); head = (void *)page_private(page);
err = z_erofs_do_read_page(&f, page, &pagepool); err = z_erofs_do_read_page(&f, page);
if (err) if (err)
erofs_err(inode->i_sb, erofs_err(inode->i_sb,
"readahead error at page %lu @ nid %llu", "readahead error at page %lu @ nid %llu",
...@@ -1351,7 +1345,7 @@ static void z_erofs_readahead(struct readahead_control *rac) ...@@ -1351,7 +1345,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
(void)z_erofs_collector_end(&f.clt); (void)z_erofs_collector_end(&f.clt);
z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync); z_erofs_runqueue(inode->i_sb, &f, &pagepool, sync);
if (f.map.mpage) if (f.map.mpage)
put_page(f.map.mpage); put_page(f.map.mpage);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment