// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
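/*
 * Note: LZ4_DECOMPRESS_INPLACE_MARGIN above follows the safety margin
 * suggested for LZ4 in-place decompression (cf. upstream lz4.h): enough
 * slack so the write pointer can never overrun not-yet-consumed input.
 */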
struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};
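/*
 * Parse the on-disk LZ4 parameters: the match window size (max_distance)
 * and the largest physical cluster size in blocks, then make sure the
 * per-CPU buffers can hold such a pcluster.
 */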
static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}
/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool, rq->gfp);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}
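/*
 * Map the compressed input for decompression; *maptype tells the caller
 * how to release the source buffer afterwards:
 *   0 - the (already kmapped) head page is used directly;
 *   1 - all input pages are mapped contiguously with vm_map_ram();
 *   2 - input is copied into a per-CPU buffer to resolve overlaps;
 *   3 - true in-place decompression within the output pages.
 */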
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < ctx->inpages; ++i)
			if (rq->out[ctx->outpages - ctx->inpages + i] !=
			    rq->in[i])
				goto docopy;
		kunmap_local(inpage);
		*maptype = 3;
		return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}
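/*
 * On-disk layout with the zero_padding feature: compressed data is
 * aligned towards the end of the physical cluster and any unused bytes
 * in front of it are zeroed, e.g.:
 *
 *   |<------------- fs block ------------->|
 *   | 0 ... 0 |       compressed data      |
 *             ^ first non-zero byte
 */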
/*
 * Get the exact inputsize with zero_padding feature.
 * - For LZ4, it should work if zero_padding feature is on (5.3+);
 * - For MicroLZMA, it'd be enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}
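/*
 * Decompress a single LZ4 pcluster into the virtually contiguous buffer
 * @dst.  If the decompressor returns an unexpected length, the remaining
 * output is zeroed out and -EFSCORRUPTED is reported.
 */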
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *dst)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	out = dst + rq->pageofs_out;
	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EFSCORRUPTED;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}
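/*
 * Output mapping strategy (dst_maptype): 0 - kmap of the single output
 * page (fast path); 1 - direct page_address() since all output pages
 * turned out to be physically consecutive; 2 - vm_map_ram() otherwise.
 */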
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for non bigpcluster cases yet */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}
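/*
 * Handle uncompressed (plain) pclusters, which only need relocation.
 * For Z_EROFS_COMPRESSION_INTERLACED, the record is additionally rotated:
 * the head of the decoded data is taken from the tail input page first.
 */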
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int nrpages_in =
		PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int bs = rq->sb->s_blocksize;
	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
	u8 *kin;

	if (rq->outputsize > rq->inputsize)
		return -EOPNOTSUPP;
	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
		cur = bs - (rq->pageofs_out & (bs - 1));
		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
		cur = min(cur, rq->outputsize);
		if (cur && rq->out[0]) {
			kin = kmap_local_page(rq->in[nrpages_in - 1]);
			if (rq->out[0] == rq->in[nrpages_in - 1]) {
				memmove(kin + rq->pageofs_out, kin + pi, cur);
				flush_dcache_page(rq->out[0]);
			} else {
				memcpy_to_page(rq->out[0], rq->pageofs_out,
					       kin + pi, cur);
			}
			kunmap_local(kin);
		}
		rq->outputsize -= cur;
	}

	for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
		insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
		rq->outputsize -= insz;
		if (!rq->in[ni])
			continue;
		kin = kmap_local_page(rq->in[ni]);
		pi = 0;
		do {
			no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
			DBG_BUGON(no >= nrpages_out);
			cnt = min(insz - pi, PAGE_SIZE - po);
			if (rq->out[no] == rq->in[ni]) {
				memmove(kin + po,
					kin + rq->pageofs_in + pi, cnt);
				flush_dcache_page(rq->out[no]);
			} else if (rq->out[no]) {
				memcpy_to_page(rq->out[no], po,
					       kin + rq->pageofs_in + pi, cnt);
			}
			pi += cnt;
		} while (pi < insz);
		kunmap_local(kin);
	}
	DBG_BUGON(ni > nrpages_in);
	return 0;
}
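/* the decompressor table is indexed by the on-disk algorithm id */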
const struct z_erofs_decompressor erofs_decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.config = z_erofs_load_lzma_config,
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = {
		.config = z_erofs_load_deflate_config,
		.decompress = z_erofs_deflate_decompress,
		.name = "deflate"
	},
#endif
};
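/*
 * Walk the per-algorithm configuration payloads stored right after the
 * super block (compr_cfgs feature) and feed each one to the matching
 * ->config() handler.
 */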
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi)) {
		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	}

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (alg >= ARRAY_SIZE(erofs_decompressors) ||
		    !erofs_decompressors[alg].config) {
			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		} else {
			ret = erofs_decompressors[alg].config(sb,
					dsb, data, size);
		}

		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}