Commit 0507d252 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'erofs-for-6.8-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "In this cycle, we'd like to enable basic sub-page compressed data
  support for the Android ecosystem (for vendors to try out 16k page size
  with 4k-block images in their compatibility mode) as well as container
  images (so that 4k-block images can be parsed on arm64 cloud servers
  using 64k page size).

  In addition, there are several bugfixes and cleanups as usual. All
  commits have been in -next for a while and no potential merge conflict
  is observed.

  Summary:

   - Add basic sub-page compressed data support

   - Fix a memory leak on MicroLZMA and DEFLATE compression

   - Fix a rare LZ4 inplace decompression issue on recent x86 CPUs

   - Fix a KASAN issue reported by syzbot around crafted images

   - Some cleanups"

* tag 'erofs-for-6.8-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: make erofs_{err,info}() support NULL sb parameter
  erofs: avoid debugging output for (de)compressed data
  erofs: allow partially filled compressed bvecs
  erofs: enable sub-page compressed block support
  erofs: refine z_erofs_transform_plain() for sub-page block support
  erofs: fix ztailpacking for subpage compressed blocks
  erofs: fix up compacted indexes for block size < 4096
  erofs: record `pclustersize` in bytes instead of pages
  erofs: support I/O submission for sub-page compressed blocks
  erofs: fix lz4 inplace decompression
  erofs: fix memory leak on short-lived bounced pages
parents 17b9e388 aa12a790
...@@ -121,11 +121,11 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, ...@@ -121,11 +121,11 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
} }
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
void *inpage, unsigned int *inputmargin, int *maptype, void *inpage, void *out, unsigned int *inputmargin,
bool may_inplace) int *maptype, bool may_inplace)
{ {
struct z_erofs_decompress_req *rq = ctx->rq; struct z_erofs_decompress_req *rq = ctx->rq;
unsigned int omargin, total, i, j; unsigned int omargin, total, i;
struct page **in; struct page **in;
void *src, *tmp; void *src, *tmp;
...@@ -135,12 +135,13 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, ...@@ -135,12 +135,13 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
goto docopy; goto docopy;
for (i = 0; i < ctx->inpages; ++i) { for (i = 0; i < ctx->inpages; ++i)
DBG_BUGON(rq->in[i] == NULL); if (rq->out[ctx->outpages - ctx->inpages + i] !=
for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j) rq->in[i])
if (rq->out[j] == rq->in[i]) goto docopy;
goto docopy; kunmap_local(inpage);
} *maptype = 3;
return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
} }
if (ctx->inpages <= 1) { if (ctx->inpages <= 1) {
...@@ -148,7 +149,6 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, ...@@ -148,7 +149,6 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
return inpage; return inpage;
} }
kunmap_local(inpage); kunmap_local(inpage);
might_sleep();
src = erofs_vm_map_ram(rq->in, ctx->inpages); src = erofs_vm_map_ram(rq->in, ctx->inpages);
if (!src) if (!src)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -204,12 +204,12 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, ...@@ -204,12 +204,12 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
} }
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
u8 *out) u8 *dst)
{ {
struct z_erofs_decompress_req *rq = ctx->rq; struct z_erofs_decompress_req *rq = ctx->rq;
bool support_0padding = false, may_inplace = false; bool support_0padding = false, may_inplace = false;
unsigned int inputmargin; unsigned int inputmargin;
u8 *headpage, *src; u8 *out, *headpage, *src;
int ret, maptype; int ret, maptype;
DBG_BUGON(*rq->in == NULL); DBG_BUGON(*rq->in == NULL);
...@@ -230,11 +230,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, ...@@ -230,11 +230,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
} }
inputmargin = rq->pageofs_in; inputmargin = rq->pageofs_in;
src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin, src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
&maptype, may_inplace); &maptype, may_inplace);
if (IS_ERR(src)) if (IS_ERR(src))
return PTR_ERR(src); return PTR_ERR(src);
out = dst + rq->pageofs_out;
/* legacy format could compress extra data in a pcluster. */ /* legacy format could compress extra data in a pcluster. */
if (rq->partial_decoding || !support_0padding) if (rq->partial_decoding || !support_0padding)
ret = LZ4_decompress_safe_partial(src + inputmargin, out, ret = LZ4_decompress_safe_partial(src + inputmargin, out,
...@@ -246,15 +247,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, ...@@ -246,15 +247,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
if (ret != rq->outputsize) { if (ret != rq->outputsize) {
erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]", erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
ret, rq->inputsize, inputmargin, rq->outputsize); ret, rq->inputsize, inputmargin, rq->outputsize);
print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
16, 1, src + inputmargin, rq->inputsize, true);
print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
16, 1, out, rq->outputsize, true);
if (ret >= 0) if (ret >= 0)
memset(out + ret, 0, rq->outputsize - ret); memset(out + ret, 0, rq->outputsize - ret);
ret = -EIO; ret = -EFSCORRUPTED;
} else { } else {
ret = 0; ret = 0;
} }
...@@ -265,7 +260,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, ...@@ -265,7 +260,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
vm_unmap_ram(src, ctx->inpages); vm_unmap_ram(src, ctx->inpages);
} else if (maptype == 2) { } else if (maptype == 2) {
erofs_put_pcpubuf(src); erofs_put_pcpubuf(src);
} else { } else if (maptype != 3) {
DBG_BUGON(1); DBG_BUGON(1);
return -EFAULT; return -EFAULT;
} }
...@@ -308,7 +303,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, ...@@ -308,7 +303,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
} }
dstmap_out: dstmap_out:
ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out); ret = z_erofs_lz4_decompress_mem(&ctx, dst);
if (!dst_maptype) if (!dst_maptype)
kunmap_local(dst); kunmap_local(dst);
else if (dst_maptype == 2) else if (dst_maptype == 2)
...@@ -319,43 +314,58 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, ...@@ -319,43 +314,58 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
struct page **pagepool) struct page **pagepool)
{ {
const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; const unsigned int nrpages_in =
const unsigned int outpages = PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
const unsigned int righthalf = min_t(unsigned int, rq->outputsize, const unsigned int bs = rq->sb->s_blocksize;
PAGE_SIZE - rq->pageofs_out); unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
const unsigned int lefthalf = rq->outputsize - righthalf; u8 *kin;
const unsigned int interlaced_offset =
rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out; DBG_BUGON(rq->outputsize > rq->inputsize);
u8 *src; if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
cur = bs - (rq->pageofs_out & (bs - 1));
if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) { pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
DBG_BUGON(1); cur = min(cur, rq->outputsize);
return -EFSCORRUPTED; if (cur && rq->out[0]) {
} kin = kmap_local_page(rq->in[nrpages_in - 1]);
if (rq->out[0] == rq->in[nrpages_in - 1]) {
if (rq->out[0] == *rq->in) { memmove(kin + rq->pageofs_out, kin + pi, cur);
DBG_BUGON(rq->pageofs_out); flush_dcache_page(rq->out[0]);
return 0; } else {
memcpy_to_page(rq->out[0], rq->pageofs_out,
kin + pi, cur);
}
kunmap_local(kin);
}
rq->outputsize -= cur;
} }
src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in; for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
if (rq->out[0]) insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
memcpy_to_page(rq->out[0], rq->pageofs_out, rq->outputsize -= insz;
src + interlaced_offset, righthalf); if (!rq->in[ni])
continue;
if (outpages > inpages) { kin = kmap_local_page(rq->in[ni]);
DBG_BUGON(!rq->out[outpages - 1]); pi = 0;
if (rq->out[outpages - 1] != rq->in[inpages - 1]) { do {
memcpy_to_page(rq->out[outpages - 1], 0, src + no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
(interlaced_offset ? 0 : righthalf), po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
lefthalf); DBG_BUGON(no >= nrpages_out);
} else if (!interlaced_offset) { cnt = min(insz - pi, PAGE_SIZE - po);
memmove(src, src + righthalf, lefthalf); if (rq->out[no] == rq->in[ni]) {
flush_dcache_page(rq->in[inpages - 1]); memmove(kin + po,
} kin + rq->pageofs_in + pi, cnt);
flush_dcache_page(rq->out[no]);
} else if (rq->out[no]) {
memcpy_to_page(rq->out[no], po,
kin + rq->pageofs_in + pi, cnt);
}
pi += cnt;
} while (pi < insz);
kunmap_local(kin);
} }
kunmap_local(src); DBG_BUGON(ni > nrpages_in);
return 0; return 0;
} }
......
...@@ -70,7 +70,7 @@ int __init z_erofs_deflate_init(void) ...@@ -70,7 +70,7 @@ int __init z_erofs_deflate_init(void)
return 0; return 0;
out_failed: out_failed:
pr_err("failed to allocate zlib workspace\n"); erofs_err(NULL, "failed to allocate zlib workspace");
z_erofs_deflate_exit(); z_erofs_deflate_exit();
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -259,8 +259,10 @@ static int erofs_fill_inode(struct inode *inode) ...@@ -259,8 +259,10 @@ static int erofs_fill_inode(struct inode *inode)
if (erofs_inode_is_data_compressed(vi->datalayout)) { if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP #ifdef CONFIG_EROFS_FS_ZIP
if (!erofs_is_fscache_mode(inode->i_sb) && if (!erofs_is_fscache_mode(inode->i_sb)) {
inode->i_sb->s_blocksize_bits == PAGE_SHIFT) { DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE,
erofs_info, inode->i_sb,
"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
inode->i_mapping->a_ops = &z_erofs_aops; inode->i_mapping->a_ops = &z_erofs_aops;
err = 0; err = 0;
goto out_unlock; goto out_unlock;
......
...@@ -27,7 +27,10 @@ void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...) ...@@ -27,7 +27,10 @@ void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
vaf.fmt = fmt; vaf.fmt = fmt;
vaf.va = &args; vaf.va = &args;
pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); if (sb)
pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
else
pr_err("%s: %pV", func, &vaf);
va_end(args); va_end(args);
} }
...@@ -41,7 +44,10 @@ void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...) ...@@ -41,7 +44,10 @@ void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
vaf.fmt = fmt; vaf.fmt = fmt;
vaf.va = &args; vaf.va = &args;
pr_info("(device %s): %pV", sb->s_id, &vaf); if (sb)
pr_info("(device %s): %pV", sb->s_id, &vaf);
else
pr_info("%pV", &vaf);
va_end(args); va_end(args);
} }
......
This diff is collapsed.
...@@ -82,29 +82,26 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, ...@@ -82,29 +82,26 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
} }
static unsigned int decode_compactedbits(unsigned int lobits, static unsigned int decode_compactedbits(unsigned int lobits,
unsigned int lomask,
u8 *in, unsigned int pos, u8 *type) u8 *in, unsigned int pos, u8 *type)
{ {
const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
const unsigned int lo = v & lomask; const unsigned int lo = v & ((1 << lobits) - 1);
*type = (v >> lobits) & 3; *type = (v >> lobits) & 3;
return lo; return lo;
} }
static int get_compacted_la_distance(unsigned int lclusterbits, static int get_compacted_la_distance(unsigned int lobits,
unsigned int encodebits, unsigned int encodebits,
unsigned int vcnt, u8 *in, int i) unsigned int vcnt, u8 *in, int i)
{ {
const unsigned int lomask = (1 << lclusterbits) - 1;
unsigned int lo, d1 = 0; unsigned int lo, d1 = 0;
u8 type; u8 type;
DBG_BUGON(i >= vcnt); DBG_BUGON(i >= vcnt);
do { do {
lo = decode_compactedbits(lclusterbits, lomask, lo = decode_compactedbits(lobits, in, encodebits * i, &type);
in, encodebits * i, &type);
if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
return d1; return d1;
...@@ -123,15 +120,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, ...@@ -123,15 +120,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
{ {
struct erofs_inode *const vi = EROFS_I(m->inode); struct erofs_inode *const vi = EROFS_I(m->inode);
const unsigned int lclusterbits = vi->z_logical_clusterbits; const unsigned int lclusterbits = vi->z_logical_clusterbits;
const unsigned int lomask = (1 << lclusterbits) - 1; unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs;
unsigned int vcnt, base, lo, encodebits, nblk, eofs;
int i; int i;
u8 *in, type; u8 *in, type;
bool big_pcluster; bool big_pcluster;
if (1 << amortizedshift == 4 && lclusterbits <= 14) if (1 << amortizedshift == 4 && lclusterbits <= 14)
vcnt = 2; vcnt = 2;
else if (1 << amortizedshift == 2 && lclusterbits == 12) else if (1 << amortizedshift == 2 && lclusterbits <= 12)
vcnt = 16; vcnt = 16;
else else
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -140,6 +136,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, ...@@ -140,6 +136,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
m->nextpackoff = round_down(pos, vcnt << amortizedshift) + m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
(vcnt << amortizedshift); (vcnt << amortizedshift);
big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
eofs = erofs_blkoff(m->inode->i_sb, pos); eofs = erofs_blkoff(m->inode->i_sb, pos);
base = round_down(eofs, vcnt << amortizedshift); base = round_down(eofs, vcnt << amortizedshift);
...@@ -147,15 +144,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, ...@@ -147,15 +144,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
i = (eofs - base) >> amortizedshift; i = (eofs - base) >> amortizedshift;
lo = decode_compactedbits(lclusterbits, lomask, lo = decode_compactedbits(lobits, in, encodebits * i, &type);
in, encodebits * i, &type);
m->type = type; m->type = type;
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
m->clusterofs = 1 << lclusterbits; m->clusterofs = 1 << lclusterbits;
/* figure out lookahead_distance: delta[1] if needed */ /* figure out lookahead_distance: delta[1] if needed */
if (lookahead) if (lookahead)
m->delta[1] = get_compacted_la_distance(lclusterbits, m->delta[1] = get_compacted_la_distance(lobits,
encodebits, vcnt, in, i); encodebits, vcnt, in, i);
if (lo & Z_EROFS_LI_D0_CBLKCNT) { if (lo & Z_EROFS_LI_D0_CBLKCNT) {
if (!big_pcluster) { if (!big_pcluster) {
...@@ -174,8 +170,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, ...@@ -174,8 +170,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
* of which lo saves delta[1] rather than delta[0]. * of which lo saves delta[1] rather than delta[0].
* Hence, get delta[0] by the previous lcluster indirectly. * Hence, get delta[0] by the previous lcluster indirectly.
*/ */
lo = decode_compactedbits(lclusterbits, lomask, lo = decode_compactedbits(lobits, in,
in, encodebits * (i - 1), &type); encodebits * (i - 1), &type);
if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
lo = 0; lo = 0;
else if (lo & Z_EROFS_LI_D0_CBLKCNT) else if (lo & Z_EROFS_LI_D0_CBLKCNT)
...@@ -190,8 +186,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, ...@@ -190,8 +186,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
nblk = 1; nblk = 1;
while (i > 0) { while (i > 0) {
--i; --i;
lo = decode_compactedbits(lclusterbits, lomask, lo = decode_compactedbits(lobits, in,
in, encodebits * i, &type); encodebits * i, &type);
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
i -= lo; i -= lo;
...@@ -202,8 +198,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, ...@@ -202,8 +198,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
nblk = 0; nblk = 0;
while (i > 0) { while (i > 0) {
--i; --i;
lo = decode_compactedbits(lclusterbits, lomask, lo = decode_compactedbits(lobits, in,
in, encodebits * i, &type); encodebits * i, &type);
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
if (lo & Z_EROFS_LI_D0_CBLKCNT) { if (lo & Z_EROFS_LI_D0_CBLKCNT) {
--i; --i;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment