Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
[linux-2.6-microblaze.git] / drivers / staging / erofs / unzip_vle_lz4.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/drivers/staging/erofs/unzip_vle_lz4.c
4  *
5  * Copyright (C) 2018 HUAWEI, Inc.
6  *             http://www.huawei.com/
7  * Created by Gao Xiang <gaoxiang25@huawei.com>
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file COPYING in the main directory of the Linux
11  * distribution for more details.
12  */
13 #include "unzip_vle.h"
14 #include <linux/lz4.h>
15
16 static int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen)
17 {
18         int ret = LZ4_decompress_safe_partial(in, out, inlen, outlen, outlen);
19
20         if (ret >= 0)
21                 return ret;
22
23         /*
24          * LZ4_decompress_safe_partial will return an error code
25          * (< 0) if decompression failed
26          */
27         errln("%s, failed to decompress, in[%p, %zu] outlen[%p, %zu]",
28               __func__, in, inlen, out, outlen);
29         WARN_ON(1);
30         print_hex_dump(KERN_DEBUG, "raw data [in]: ", DUMP_PREFIX_OFFSET,
31                        16, 1, in, inlen, true);
32         print_hex_dump(KERN_DEBUG, "raw data [out]: ", DUMP_PREFIX_OFFSET,
33                        16, 1, out, outlen, true);
34         return -EIO;
35 }
36
/*
 * Size of the per-CPU scratch area in pages: large enough for either a
 * whole compressed cluster or the inline pagevec count, whichever is
 * bigger.
 */
#if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
#define EROFS_PERCPU_NR_PAGES   Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PERCPU_NR_PAGES   Z_EROFS_VLE_INLINE_PAGEVECS
#endif

/*
 * Per-CPU decompression scratch buffers, indexed by smp_processor_id().
 * Users below always access their slot with preemption disabled.
 */
static struct {
	char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
} erofs_pcpubuf[NR_CPUS];
46
/*
 * Copy an uncompressed ("plain") cluster into the output pages while
 * shifting the data forward by @pageofs bytes, so each destination page
 * is assembled from the tail of one source page and the head of the next.
 *
 * An output page may alias one of @compressed_pages (in-place I/O), so a
 * source page is mirrored into the per-CPU buffer before it can be
 * overwritten; mirrored[] records which source indices were saved.
 *
 * @compressed_pages: source pages of the cluster
 * @clusterpages:     number of entries in @compressed_pages
 * @pages:            destination pages (entries may be NULL, or may be the
 *                    same struct page as an entry of @compressed_pages)
 * @nr_pages:         number of entries in @pages
 * @pageofs:          byte offset of the logical data inside the first page
 *
 * Always returns 0.
 */
int z_erofs_vle_plain_copy(struct page **compressed_pages,
			   unsigned int clusterpages,
			   struct page **pages,
			   unsigned int nr_pages,
			   unsigned short pageofs)
{
	unsigned int i, j;
	void *src = NULL;
	/* bytes of a source page that stay within the same output page */
	const unsigned int righthalf = PAGE_SIZE - pageofs;
	char *percpu_data;
	bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };

	/* per-CPU buffer requires a stable CPU, hence no preemption */
	preempt_disable();
	percpu_data = erofs_pcpubuf[smp_processor_id()].data;

	/*
	 * NOTE(review): j is reused for two purposes — it trails i across
	 * iterations (via the "j = i++" update) and also serves as the scan
	 * cursor in the aliasing search below, which leaves it at the
	 * matching compressed page index (or at clusterpages if none).
	 */
	j = 0;
	for (i = 0; i < nr_pages; j = i++) {
		struct page *page = pages[i];
		void *dst;

		if (!page) {
			/* hole in the output: drop any still-mapped source */
			if (src) {
				if (!mirrored[j])
					kunmap_atomic(src);
				src = NULL;
			}
			continue;
		}

		dst = kmap_atomic(page);

		/*
		 * If this output page is also a source page, preserve its
		 * current contents in the per-CPU buffer before writing.
		 */
		for (; j < clusterpages; ++j) {
			if (compressed_pages[j] != page)
				continue;

			DBG_BUGON(mirrored[j]);
			memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
			mirrored[j] = true;
			break;
		}

		if (i) {
			/* head of this dst page comes from source page i-1 */
			if (!src)
				src = mirrored[i - 1] ?
					percpu_data + (i - 1) * PAGE_SIZE :
					kmap_atomic(compressed_pages[i - 1]);

			memcpy(dst, src + righthalf, pageofs);

			if (!mirrored[i - 1])
				kunmap_atomic(src);

			/* no source page i exists beyond the cluster end */
			if (unlikely(i >= clusterpages)) {
				kunmap_atomic(dst);
				break;
			}
		}

		if (!righthalf) {
			src = NULL;
		} else {
			/* tail of this dst page comes from source page i */
			src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
				kmap_atomic(compressed_pages[i]);

			memcpy(dst + pageofs, src, righthalf);
		}

		kunmap_atomic(dst);
	}

	/* unmap the source left over from the final iteration, if any */
	if (src && !mirrored[j])
		kunmap_atomic(src);

	preempt_enable();
	return 0;
}
123
/*
 * Decompress a cluster into the per-CPU buffer, then scatter the result
 * into the (possibly sparse) output pages.
 *
 * @compressed_pages: source pages of the compressed cluster
 * @clusterpages:     number of entries in @compressed_pages
 * @pages:            destination pages (entries may be NULL, or may alias
 *                    compressed_pages[0] when clusterpages == 1)
 * @outlen:           total number of decompressed bytes
 * @pageofs:          byte offset of the data inside the first output page
 *
 * Returns 0 on success, -ENOTSUPP if the output would not fit in the
 * per-CPU buffer, -ENOMEM if vmapping the source fails, or -EIO on
 * corrupted data.
 */
int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
				  unsigned int clusterpages,
				  struct page **pages,
				  unsigned int outlen,
				  unsigned short pageofs)
{
	void *vin, *vout;
	unsigned int nr_pages, i, j;
	int ret;

	/* the whole decompressed extent must fit in the per-CPU buffer */
	if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
		return -ENOTSUPP;

	nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);

	/* map the source contiguously: kmap for one page, vmap otherwise */
	if (clusterpages == 1) {
		vin = kmap_atomic(compressed_pages[0]);
	} else {
		vin = erofs_vmap(compressed_pages, clusterpages);
		if (!vin)
			return -ENOMEM;
	}

	preempt_disable();
	vout = erofs_pcpubuf[smp_processor_id()].data;

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, outlen);

	if (ret < 0)
		goto out;
	ret = 0;

	for (i = 0; i < nr_pages; ++i) {
		/* bytes of this output page actually covered by the extent */
		j = min((unsigned int)PAGE_SIZE - pageofs, outlen);

		if (pages[i]) {
			if (clusterpages == 1 &&
			    pages[i] == compressed_pages[0]) {
				/*
				 * in-place page: vin already maps it, so
				 * copy through the existing mapping
				 */
				memcpy(vin + pageofs, vout + pageofs, j);
			} else {
				void *dst = kmap_atomic(pages[i]);

				memcpy(dst + pageofs, vout + pageofs, j);
				kunmap_atomic(dst);
			}
		}
		vout += PAGE_SIZE;
		outlen -= j;
		pageofs = 0;	/* only the first page is offset */
	}

out:
	preempt_enable();

	/*
	 * NOTE(review): vin was mapped before preempt_disable() and is
	 * unmapped after preempt_enable() — the pairs are unnested but
	 * still balanced.
	 */
	if (clusterpages == 1)
		kunmap_atomic(vin);
	else
		erofs_vunmap(vin, clusterpages);

	return ret;
}
186
187 int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
188                            unsigned int clusterpages,
189                            void *vout,
190                            unsigned int llen,
191                            unsigned short pageofs,
192                            bool overlapped)
193 {
194         void *vin;
195         unsigned int i;
196         int ret;
197
198         if (overlapped) {
199                 preempt_disable();
200                 vin = erofs_pcpubuf[smp_processor_id()].data;
201
202                 for (i = 0; i < clusterpages; ++i) {
203                         void *t = kmap_atomic(compressed_pages[i]);
204
205                         memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
206                         kunmap_atomic(t);
207                 }
208         } else if (clusterpages == 1) {
209                 vin = kmap_atomic(compressed_pages[0]);
210         } else {
211                 vin = erofs_vmap(compressed_pages, clusterpages);
212         }
213
214         ret = z_erofs_unzip_lz4(vin, vout + pageofs,
215                                 clusterpages * PAGE_SIZE, llen);
216         if (ret > 0)
217                 ret = 0;
218
219         if (!overlapped) {
220                 if (clusterpages == 1)
221                         kunmap_atomic(vin);
222                 else
223                         erofs_vunmap(vin, clusterpages);
224         } else {
225                 preempt_enable();
226         }
227         return ret;
228 }
229