/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_COMPRESS_H
#define __EROFS_FS_COMPRESS_H

#include "internal.h"

struct z_erofs_decompress_req {
	struct super_block *sb;
	struct page **in, **out;

	unsigned short pageofs_in, pageofs_out;
	unsigned int inputsize, outputsize;

	/* indicate the algorithm to be used for decompression */
	unsigned int alg;
	bool inplace_io, partial_decoding, fillgaps;
};

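/*
 * Hedged sketch of a caller filling in a request; the local variable names
 * (compressed_pages, pclustersize, ...) are illustrative assumptions, not a
 * verbatim excerpt from the z_erofs frontend:
 *
 *	struct z_erofs_decompress_req rq = {
 *		.sb = sb,
 *		.in = compressed_pages,
 *		.out = decompressed_pages,
 *		.pageofs_out = offset_in_page(pos),
 *		.inputsize = pclustersize,
 *		.outputsize = length,
 *		.alg = algorithm_id,
 *		.inplace_io = overlapped,
 *		.partial_decoding = !whole_extent,
 *	};
 */
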
struct z_erofs_decompressor {
	int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
		      void *data, int size);
	int (*decompress)(struct z_erofs_decompress_req *rq,
			  struct page **pagepool);
	char *name;
};

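/*
 * Minimal sketch of how a backend slots into erofs_decompressors[]
 * (declared below); the LZ4 callback names are illustrative assumptions,
 * not necessarily the exact symbols in decompressor.c:
 *
 *	[Z_EROFS_COMPRESSION_LZ4] = {
 *		.config = z_erofs_load_lz4_config,	// parse on-disk opts
 *		.decompress = z_erofs_lz4_decompress,	// per-pcluster hook
 *		.name = "lz4",
 *	},
 */
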
/* some special page->private (unsigned long, see below) */
#define Z_EROFS_SHORTLIVED_PAGE		(-1UL << 2)
#define Z_EROFS_PREALLOCATED_PAGE	(-2UL << 2)

/*
 * For all pages in a pcluster, page->private should be one of
 * Type                         Last 2bits      page->private
 * short-lived page             00              Z_EROFS_SHORTLIVED_PAGE
 * preallocated page (tryalloc) 00              Z_EROFS_PREALLOCATED_PAGE
 * cached/managed page          00              pointer to z_erofs_pcluster
 * online page (file-backed,    01/10/11        sub-index << 2 | count
 *              some pages can be used for inplace I/O)
 *
 * page->mapping should be one of
 * Type                 page->mapping
 * short-lived page     NULL
 * preallocated page    NULL
 * cached/managed page  non-NULL or NULL (invalidated/truncated page)
 * online page          non-NULL
 *
 * For all managed pages, PG_private should be set with 1 extra refcount,
 * which is used for page reclaim / migration.
 */

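/*
 * Illustrative decoding of the rule above (a sketch, not kernel code):
 * a nonzero value in the low 2 bits marks an online page, and the three
 * 00-tagged cases are then told apart by the full value:
 *
 *	unsigned long priv = page->private;
 *
 *	if (priv & 3)				// online: sub-index << 2 | count
 *		...
 *	else if (priv == Z_EROFS_SHORTLIVED_PAGE)
 *		...
 *	else if (priv == Z_EROFS_PREALLOCATED_PAGE)
 *		...
 *	else					// pointer to z_erofs_pcluster
 *		...
 */
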
/*
 * Short-lived pages are pages directly from the buddy system with specific
 * page->private (no need to set PagePrivate since these are non-LRU /
 * non-movable pages and bypass reclaim / migration code).
 */
static inline bool z_erofs_is_shortlived_page(struct page *page)
{
	if (page->private != Z_EROFS_SHORTLIVED_PAGE)
		return false;

	DBG_BUGON(page->mapping);
	return true;
}

static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
					      struct page *page)
{
	if (!z_erofs_is_shortlived_page(page))
		return false;

	/* short-lived pages should not be used by others at the same time */
	if (page_ref_count(page) > 1) {
		put_page(page);
	} else {
		/* follow the pcluster rule above. */
		erofs_pagepool_add(pagepool, page);
	}
	return true;
}

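/*
 * Hedged usage sketch: a short-lived bounce page is taken via the erofs
 * pagepool helper, tagged, and eventually released through the helper
 * above; the surrounding flow (and the gfp choice) is illustrative:
 *
 *	page = erofs_allocpage(pagepool, GFP_NOFS);
 *	set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
 *	...
 *	if (!z_erofs_put_shortlivedpage(pagepool, page))
 *		...		// not short-lived: treat as cached/online
 */
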
#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
					 struct page *page)
{
	return page->mapping == MNGD_MAPPING(sbi);
}

int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize);
extern const struct z_erofs_decompressor erofs_decompressors[];

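/*
 * Dispatch sketch (illustrative, not a verbatim excerpt): the frontend
 * selects the backend by algorithm id and invokes its ->decompress() hook:
 *
 *	err = erofs_decompressors[rq->alg].decompress(rq, pagepool);
 */
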
/* prototypes for specific algorithms */
int z_erofs_load_lzma_config(struct super_block *sb,
			     struct erofs_super_block *dsb, void *data, int size);
int z_erofs_load_deflate_config(struct super_block *sb,
				struct erofs_super_block *dsb, void *data, int size);
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
			    struct page **pagepool);
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
			       struct page **pagepool);

#endif