Commit 4b810bf0 authored by Linus Torvalds

Merge tag 'erofs-for-6.5-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs fixes from Gao Xiang:
 "Three patches address regressions related to post-EOF unexpected
  behaviors and fsdax unavailability of chunk-based regular files.

  The other two patches mainly get rid of kmap_atomic() and simplify
  z_erofs_transform_plain().

   - Fix two unexpected loop cases when reading beyond EOF

   - Fix fsdax unavailability for chunk-based regular files

   - Get rid of the remaining kmap_atomic()

   - Minor cleanups"

* tag 'erofs-for-6.5-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: fix fsdax unavailability for chunk-based regular files
  erofs: avoid infinite loop in z_erofs_do_read_page() when reading beyond EOF
  erofs: avoid useless loops in z_erofs_pcluster_readmore() when reading beyond EOF
  erofs: simplify z_erofs_transform_plain()
  erofs: get rid of the remaining kmap_atomic()
parents b1983d42 18bddc5b
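
For background on the kmap_atomic() removal: kmap_local_page() creates a
CPU-local temporary mapping that, unlike kmap_atomic(), does not disable
preemption or page faults, so the conversion below is mostly a mechanical
rename. A minimal sketch of the before/after pattern (the helper names are
hypothetical, not taken from the erofs code):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Before: kmap_atomic() enters an atomic context, so the code
	 * must not sleep until kunmap_atomic() is called. */
	static void copy_from_page_old(void *dst, struct page *page, size_t len)
	{
		void *src = kmap_atomic(page);

		memcpy(dst, src, len);
		kunmap_atomic(src);
	}

	/* After: kmap_local_page() stays preemptible; only the map/unmap
	 * pair changes, which is why most hunks below are one-line
	 * substitutions. */
	static void copy_from_page_new(void *dst, struct page *page, size_t len)
	{
		void *src = kmap_local_page(page);

		memcpy(dst, src, len);
		kunmap_local(src);
	}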
@@ -148,7 +148,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 		*maptype = 0;
 		return inpage;
 	}
-	kunmap_atomic(inpage);
+	kunmap_local(inpage);
 	might_sleep();
 	src = erofs_vm_map_ram(rq->in, ctx->inpages);
 	if (!src)
@@ -162,7 +162,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 	src = erofs_get_pcpubuf(ctx->inpages);
 	if (!src) {
 		DBG_BUGON(1);
-		kunmap_atomic(inpage);
+		kunmap_local(inpage);
 		return ERR_PTR(-EFAULT);
 	}
 
@@ -173,9 +173,9 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
 
 		if (!inpage)
-			inpage = kmap_atomic(*in);
+			inpage = kmap_local_page(*in);
 		memcpy(tmp, inpage + *inputmargin, page_copycnt);
-		kunmap_atomic(inpage);
+		kunmap_local(inpage);
 		inpage = NULL;
 		tmp += page_copycnt;
 		total -= page_copycnt;
@@ -214,7 +214,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 	int ret, maptype;
 
 	DBG_BUGON(*rq->in == NULL);
-	headpage = kmap_atomic(*rq->in);
+	headpage = kmap_local_page(*rq->in);
 
 	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
 	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
@@ -223,7 +223,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 				min_t(unsigned int, rq->inputsize,
 				      rq->sb->s_blocksize - rq->pageofs_in));
 		if (ret) {
-			kunmap_atomic(headpage);
+			kunmap_local(headpage);
 			return ret;
 		}
 		may_inplace = !((rq->pageofs_in + rq->inputsize) &
@@ -261,7 +261,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 	}
 
 	if (maptype == 0) {
-		kunmap_atomic(headpage);
+		kunmap_local(headpage);
 	} else if (maptype == 1) {
 		vm_unmap_ram(src, ctx->inpages);
 	} else if (maptype == 2) {
@@ -289,7 +289,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
 	/* one optimized fast path only for non bigpcluster cases yet */
 	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
 		DBG_BUGON(!*rq->out);
-		dst = kmap_atomic(*rq->out);
+		dst = kmap_local_page(*rq->out);
 		dst_maptype = 0;
 		goto dstmap_out;
 	}
@@ -311,7 +311,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
 dstmap_out:
 	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
 	if (!dst_maptype)
-		kunmap_atomic(dst);
+		kunmap_local(dst);
 	else if (dst_maptype == 2)
 		vm_unmap_ram(dst, ctx.outpages);
 	return ret;
@@ -328,7 +328,7 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
 	const unsigned int lefthalf = rq->outputsize - righthalf;
 	const unsigned int interlaced_offset =
 		rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out;
-	unsigned char *src, *dst;
+	u8 *src;
 
 	if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
 		DBG_BUGON(1);
@@ -341,22 +341,19 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
 	}
 
 	src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in;
-	if (rq->out[0]) {
-		dst = kmap_local_page(rq->out[0]);
-		memcpy(dst + rq->pageofs_out, src + interlaced_offset,
-		       righthalf);
-		kunmap_local(dst);
-	}
+	if (rq->out[0])
+		memcpy_to_page(rq->out[0], rq->pageofs_out,
+			       src + interlaced_offset, righthalf);
 
 	if (outpages > inpages) {
 		DBG_BUGON(!rq->out[outpages - 1]);
 		if (rq->out[outpages - 1] != rq->in[inpages - 1]) {
-			dst = kmap_local_page(rq->out[outpages - 1]);
-			memcpy(dst, interlaced_offset ? src :
-					(src + righthalf), lefthalf);
-			kunmap_local(dst);
+			memcpy_to_page(rq->out[outpages - 1], 0, src +
+					(interlaced_offset ? 0 : righthalf),
+				       lefthalf);
 		} else if (!interlaced_offset) {
 			memmove(src, src + righthalf, lefthalf);
+			flush_dcache_page(rq->in[inpages - 1]);
 		}
 	}
 	kunmap_local(src);
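
The open-coded kmap_local_page()/memcpy()/kunmap_local() sequences above
collapse into memcpy_to_page(), which also flushes the destination's data
cache; the in-place memmove branch gains an explicit flush_dcache_page(),
presumably to keep all three paths flushing consistently. For reference,
memcpy_to_page() in include/linux/highmem.h is approximately:

	static inline void memcpy_to_page(struct page *page, size_t offset,
					  const char *from, size_t len)
	{
		char *to = kmap_local_page(page);

		VM_BUG_ON(offset + len > PAGE_SIZE);
		memcpy(to + offset, from, len);
		flush_dcache_page(page);
		kunmap_local(to);
	}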
...
@@ -183,7 +183,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
 
 	inode->i_flags &= ~S_DAX;
 	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
-	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
+	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
+	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
 		inode->i_flags |= S_DAX;
 
 	if (!nblks)
...
@@ -1035,7 +1035,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	 */
 	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
 
-	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+	cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
 	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
 		zero_user_segment(page, cur, end);
 		goto next_part;
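
The loop fix hinges on min_t()'s cast: offset + end - map->m_la is computed
in erofs_off_t (64-bit), and when reading beyond EOF map->m_la can exceed
offset + end, so the subtraction wraps to a huge 64-bit value. Evaluating
the min as unsigned int truncates that value, which can compare as smaller
than end and leave cur nonzero where it should be clamped, so
z_erofs_do_read_page() never makes progress. A standalone illustration with
made-up numbers (userspace C, simplified min_t()):

	#include <stdio.h>
	#include <stdint.h>

	/* simplified: the kernel's min_t() also casts both sides first */
	#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

	int main(void)
	{
		uint64_t delta = 0x100000040ULL;	/* wrapped 64-bit difference */
		unsigned int end = 4096;

		/* truncated to 32 bits: 0x100000040 becomes 0x40, so the
		 * "min" is 64 instead of the intended 4096 */
		printf("unsigned int: %u\n", min_t(unsigned int, delta, end));
		/* full-width comparison gives the intended clamp */
		printf("64-bit      : %llu\n",
		       (unsigned long long)min_t(uint64_t, delta, end));
		return 0;
	}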
@@ -1841,7 +1841,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
 	}
 
 	cur = map->m_la + map->m_llen - 1;
-	while (cur >= end) {
+	while ((cur >= end) && (cur < i_size_read(inode))) {
 		pgoff_t index = cur >> PAGE_SHIFT;
 		struct page *page;
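
The readmore fix bounds the backward walk at EOF: map->m_la + map->m_llen - 1
can point well past i_size, and every iteration over a post-EOF offset is a
wasted page-cache lookup for a page that can never be read. A toy model of
the two loop conditions (made-up sizes, userspace C):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t psz = 4096;
		uint64_t i_size = 5000;			/* hypothetical file size */
		uint64_t m_la = 0, m_llen = 1024 * 1024; /* extent claims 1 MiB */
		uint64_t end = psz;			/* stop offset of the walk */
		uint64_t cur = m_la + m_llen - 1;
		unsigned int old = 0, fixed = 0;

		for (uint64_t c = cur; c >= end; c -= psz)	/* old condition */
			old++;
		for (uint64_t c = cur; c >= end && c < i_size; c -= psz)
			fixed++;			/* clamped at EOF */

		printf("iterations: old=%u fixed=%u\n", old, fixed);
		return 0;
	}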
...