// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle_lz4.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"
#include <linux/lz4.h>

static int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen)
{
	int ret = LZ4_decompress_safe_partial(in, out, inlen, outlen, outlen);

	/* on success, the number of bytes actually decompressed */
	if (ret >= 0)
		return ret;

	/*
	 * LZ4_decompress_safe_partial will return an error code
	 * (< 0) if decompression failed
	 */
	errln("%s, failed to decompress, in[%p, %zu] out[%p, %zu]",
	      __func__, in, inlen, out, outlen);
	print_hex_dump(KERN_DEBUG, "raw data [in]: ", DUMP_PREFIX_OFFSET,
		       16, 1, in, inlen, true);
	print_hex_dump(KERN_DEBUG, "raw data [out]: ", DUMP_PREFIX_OFFSET,
		       16, 1, out, outlen, true);
	return -EIO;
}
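
/*
 * One static bounce buffer per CPU, sized to hold whichever is larger:
 * a whole cluster's pages or the inline pagevec pages.
 */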
#if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
#define EROFS_PERCPU_NR_PAGES	Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PERCPU_NR_PAGES	Z_EROFS_VLE_INLINE_PAGEVECS
#endif

static struct {
	char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
} erofs_pcpubuf[NR_CPUS];
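
/*
 * Copy a plain (uncompressed) cluster into the target pages, shifting the
 * data by @pageofs. Compressed pages that are reused as target pages are
 * mirrored into the per-CPU buffer first so that both copies stay intact.
 */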
int z_erofs_vle_plain_copy(struct page **compressed_pages,
			   unsigned int clusterpages,
			   struct page **pages,
			   unsigned int nr_pages,
			   unsigned short pageofs)
{
	unsigned int i, j;
	void *src = NULL;
	const unsigned int righthalf = PAGE_SIZE - pageofs;
	char *percpu_data;
	bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };

	preempt_disable();
	percpu_data = erofs_pcpubuf[smp_processor_id()].data;

	j = 0;
	for (i = 0; i < nr_pages; j = i++) {
		struct page *page = pages[i];
		void *dst;

		if (!page) {
			/* a hole: release the still-mapped source page */
			if (src) {
				if (!mirrored[j])
					kunmap_atomic(src);
				src = NULL;
			}
			continue;
		}

		dst = kmap_atomic(page);

		/* this page is reused as a compressed page, mirror it */
		for (; j < clusterpages; ++j) {
			if (compressed_pages[j] != page)
				continue;

			DBG_BUGON(mirrored[j]);
			memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
			mirrored[j] = true;
			break;
		}

		if (i) {
			/* copy the tail of the previous compressed page */
			if (!src)
				src = mirrored[i - 1] ?
					percpu_data + (i - 1) * PAGE_SIZE :
					kmap_atomic(compressed_pages[i - 1]);

			memcpy(dst, src + righthalf, pageofs);

			if (!mirrored[i - 1])
				kunmap_atomic(src);
			src = NULL;
		}

		if (unlikely(i >= clusterpages)) {
			kunmap_atomic(dst);
			break;
		}

		/* copy the head of the current compressed page */
		src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
			kmap_atomic(compressed_pages[i]);

		memcpy(dst + pageofs, src, righthalf);
		kunmap_atomic(dst);
	}

	if (src && !mirrored[j])
		kunmap_atomic(src);

	preempt_enable();
	return 0;
}
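
/*
 * Decompress a whole cluster into the per-CPU buffer in one shot, then copy
 * the result page by page into the target pages; -ENOTSUPP is returned when
 * the decompressed data does not fit into the per-CPU buffer.
 */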
int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
				  unsigned int clusterpages,
				  struct page **pages,
				  unsigned int outlen,
				  unsigned short pageofs)
{
	void *vin, *vout;
	unsigned int nr_pages, i, j;
	int ret;

	if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
		return -ENOTSUPP;

	nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);

	if (clusterpages == 1) {
		vin = kmap_atomic(compressed_pages[0]);
	} else {
		vin = erofs_vmap(compressed_pages, clusterpages);
		if (!vin)
			return -ENOMEM;
	}

	preempt_disable();
	vout = erofs_pcpubuf[smp_processor_id()].data;

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, outlen);
	if (ret < 0)
		goto out;
	/* trim to the number of bytes actually decompressed */
	outlen = ret;
	ret = 0;

	for (i = 0; i < nr_pages; ++i) {
		/* bytes to copy into this page: a full page or the tail */
		j = min((unsigned int)PAGE_SIZE - pageofs, outlen);

		if (pages[i]) {
			if (clusterpages == 1 &&
			    pages[i] == compressed_pages[0]) {
				/* the target page is the mapped source page */
				memcpy(vin + pageofs, vout + pageofs, j);
			} else {
				void *dst = kmap_atomic(pages[i]);

				memcpy(dst + pageofs, vout + pageofs, j);
				kunmap_atomic(dst);
			}
		}
		vout += PAGE_SIZE;
		outlen -= j;
		pageofs = 0;
	}

out:
	preempt_enable();

	if (clusterpages == 1)
		kunmap_atomic(vin);
	else
		erofs_vunmap(vin, clusterpages);
	return ret;
}
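
/*
 * Decompress into a caller-provided virtual mapping. If the compressed
 * pages overlap the output pages, stage them in the per-CPU buffer first
 * so that the output cannot clobber the input.
 */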
int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
			   unsigned int clusterpages,
			   void *vout,
			   unsigned int llen,
			   unsigned short pageofs,
			   bool overlapped)
{
	void *vin;
	unsigned int i;
	int ret;

	if (overlapped) {
		preempt_disable();
		vin = erofs_pcpubuf[smp_processor_id()].data;

		for (i = 0; i < clusterpages; ++i) {
			void *t = kmap_atomic(compressed_pages[i]);

			memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
			kunmap_atomic(t);
		}
	} else if (clusterpages == 1) {
		vin = kmap_atomic(compressed_pages[0]);
	} else {
		vin = erofs_vmap(compressed_pages, clusterpages);
	}

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, llen);
	if (ret > 0)
		ret = 0;

	if (!overlapped) {
		if (clusterpages == 1)
			kunmap_atomic(vin);
		else
			erofs_vunmap(vin, clusterpages);
	} else {
		preempt_enable();
	}
	return ret;
}