1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs compress support
4  *
5  * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6  */
7
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/writeback.h>
11 #include <linux/backing-dev.h>
12 #include <linux/lzo.h>
13 #include <linux/lz4.h>
14 #include <linux/zstd.h>
15 #include <linux/pagevec.h>
16
17 #include "f2fs.h"
18 #include "node.h"
19 #include "segment.h"
20 #include <trace/events/f2fs.h>
21
22 static struct kmem_cache *cic_entry_slab;
23 static struct kmem_cache *dic_entry_slab;
24
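/*
 * Arrays of page pointers that fit within sbi->page_array_slab_size come
 * from a preallocated slab; larger requests fall back to f2fs_kzalloc().
 */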
25 static void *page_array_alloc(struct inode *inode, int nr)
26 {
27         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
28         unsigned int size = sizeof(struct page *) * nr;
29
30         if (likely(size <= sbi->page_array_slab_size))
31                 return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
32         return f2fs_kzalloc(sbi, size, GFP_NOFS);
33 }
34
35 static void page_array_free(struct inode *inode, void *pages, int nr)
36 {
37         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
38         unsigned int size = sizeof(struct page *) * nr;
39
40         if (!pages)
41                 return;
42
43         if (likely(size <= sbi->page_array_slab_size))
44                 kmem_cache_free(sbi->page_array_slab, pages);
45         else
46                 kfree(pages);
47 }
48
49 struct f2fs_compress_ops {
50         int (*init_compress_ctx)(struct compress_ctx *cc);
51         void (*destroy_compress_ctx)(struct compress_ctx *cc);
52         int (*compress_pages)(struct compress_ctx *cc);
53         int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
54         void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
55         int (*decompress_pages)(struct decompress_io_ctx *dic);
56 };
57
58 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
59 {
60         return index & (cc->cluster_size - 1);
61 }
62
63 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
64 {
65         return index >> cc->log_cluster_size;
66 }
67
68 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
69 {
70         return cc->cluster_idx << cc->log_cluster_size;
71 }
72
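/*
 * Example (assuming log_cluster_size == 2, i.e. cluster_size == 4):
 * page index 10 belongs to cluster_idx 2 (10 >> 2), at offset_in_cluster
 * 2 (10 & 3), and start_idx_of_cluster() for cluster 2 is 8 (2 << 2).
 */
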
73 bool f2fs_is_compressed_page(struct page *page)
74 {
75         if (!PagePrivate(page))
76                 return false;
77         if (!page_private(page))
78                 return false;
79         if (page_private_nonpointer(page))
80                 return false;
81
82         f2fs_bug_on(F2FS_M_SB(page->mapping),
83                 *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
84         return true;
85 }
86
87 static void f2fs_set_compressed_page(struct page *page,
88                 struct inode *inode, pgoff_t index, void *data)
89 {
90         attach_page_private(page, (void *)data);
91
92         /* i_crypto_info and iv index */
93         page->index = index;
94         page->mapping = inode->i_mapping;
95 }
96
97 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
98 {
99         int i;
100
101         for (i = 0; i < len; i++) {
102                 if (!cc->rpages[i])
103                         continue;
104                 if (unlock)
105                         unlock_page(cc->rpages[i]);
106                 else
107                         put_page(cc->rpages[i]);
108         }
109 }
110
111 static void f2fs_put_rpages(struct compress_ctx *cc)
112 {
113         f2fs_drop_rpages(cc, cc->cluster_size, false);
114 }
115
116 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
117 {
118         f2fs_drop_rpages(cc, len, true);
119 }
120
121 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
122                 struct writeback_control *wbc, bool redirty, int unlock)
123 {
124         unsigned int i;
125
126         for (i = 0; i < cc->cluster_size; i++) {
127                 if (!cc->rpages[i])
128                         continue;
129                 if (redirty)
130                         redirty_page_for_writepage(wbc, cc->rpages[i]);
131                 f2fs_put_page(cc->rpages[i], unlock);
132         }
133 }
134
135 struct page *f2fs_compress_control_page(struct page *page)
136 {
137         return ((struct compress_io_ctx *)page_private(page))->rpages[0];
138 }
139
140 int f2fs_init_compress_ctx(struct compress_ctx *cc)
141 {
142         if (cc->rpages)
143                 return 0;
144
145         cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
146         return cc->rpages ? 0 : -ENOMEM;
147 }
148
149 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
150 {
151         page_array_free(cc->inode, cc->rpages, cc->cluster_size);
152         cc->rpages = NULL;
153         cc->nr_rpages = 0;
154         cc->nr_cpages = 0;
155         if (!reuse)
156                 cc->cluster_idx = NULL_CLUSTER;
157 }
158
159 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
160 {
161         unsigned int cluster_ofs;
162
163         if (!f2fs_cluster_can_merge_page(cc, page->index))
164                 f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
165
166         cluster_ofs = offset_in_cluster(cc, page->index);
167         cc->rpages[cluster_ofs] = page;
168         cc->nr_rpages++;
169         cc->cluster_idx = cluster_idx(cc, page->index);
170 }
171
172 #ifdef CONFIG_F2FS_FS_LZO
173 static int lzo_init_compress_ctx(struct compress_ctx *cc)
174 {
175         cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
176                                 LZO1X_MEM_COMPRESS, GFP_NOFS);
177         if (!cc->private)
178                 return -ENOMEM;
179
180         cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
181         return 0;
182 }
183
184 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
185 {
186         kvfree(cc->private);
187         cc->private = NULL;
188 }
189
190 static int lzo_compress_pages(struct compress_ctx *cc)
191 {
192         int ret;
193
194         ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
195                                         &cc->clen, cc->private);
196         if (ret != LZO_E_OK) {
197                 printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
198                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
199                 return -EIO;
200         }
201         return 0;
202 }
203
204 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
205 {
206         int ret;
207
208         ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
209                                                 dic->rbuf, &dic->rlen);
210         if (ret != LZO_E_OK) {
211                 printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
212                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
213                 return -EIO;
214         }
215
216         if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
217                 printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
218                                         "expected:%lu\n", KERN_ERR,
219                                         F2FS_I_SB(dic->inode)->sb->s_id,
220                                         dic->rlen,
221                                         PAGE_SIZE << dic->log_cluster_size);
222                 return -EIO;
223         }
224         return 0;
225 }
226
227 static const struct f2fs_compress_ops f2fs_lzo_ops = {
228         .init_compress_ctx      = lzo_init_compress_ctx,
229         .destroy_compress_ctx   = lzo_destroy_compress_ctx,
230         .compress_pages         = lzo_compress_pages,
231         .decompress_pages       = lzo_decompress_pages,
232 };
233 #endif
234
235 #ifdef CONFIG_F2FS_FS_LZ4
236 static int lz4_init_compress_ctx(struct compress_ctx *cc)
237 {
238         unsigned int size = LZ4_MEM_COMPRESS;
239
240 #ifdef CONFIG_F2FS_FS_LZ4HC
241         if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
242                 size = LZ4HC_MEM_COMPRESS;
243 #endif
244
245         cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
246         if (!cc->private)
247                 return -ENOMEM;
248
249         /*
250          * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
251          * the worst compress case, because the lz4 compressor handles the
252          * output budget properly.
253          */
254         cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
255         return 0;
256 }
257
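/*
 * Example: for a 4-page cluster on 4KiB pages, rlen is 16KiB, so clen is
 * capped at 16KiB - 4KiB - COMPRESS_HEADER_SIZE; a cluster is written
 * compressed only when doing so saves at least one full page.
 */
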
258 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
259 {
260         kvfree(cc->private);
261         cc->private = NULL;
262 }
263
264 #ifdef CONFIG_F2FS_FS_LZ4HC
265 static int lz4hc_compress_pages(struct compress_ctx *cc)
266 {
267         unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
268                                                 COMPRESS_LEVEL_OFFSET;
269         int len;
270
271         if (level)
272                 len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
273                                         cc->clen, level, cc->private);
274         else
275                 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
276                                                 cc->clen, cc->private);
277         if (!len)
278                 return -EAGAIN;
279
280         cc->clen = len;
281         return 0;
282 }
283 #endif
284
285 static int lz4_compress_pages(struct compress_ctx *cc)
286 {
287         int len;
288
289 #ifdef CONFIG_F2FS_FS_LZ4HC
290         return lz4hc_compress_pages(cc);
291 #endif
292         len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
293                                                 cc->clen, cc->private);
294         if (!len)
295                 return -EAGAIN;
296
297         cc->clen = len;
298         return 0;
299 }
300
301 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
302 {
303         int ret;
304
305         ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
306                                                 dic->clen, dic->rlen);
307         if (ret < 0) {
308                 printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
309                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
310                 return -EIO;
311         }
312
313         if (ret != PAGE_SIZE << dic->log_cluster_size) {
314                 printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
315                                         "expected:%lu\n", KERN_ERR,
316                                         F2FS_I_SB(dic->inode)->sb->s_id,
317                                         ret,
318                                         PAGE_SIZE << dic->log_cluster_size);
319                 return -EIO;
320         }
321         return 0;
322 }
323
324 static const struct f2fs_compress_ops f2fs_lz4_ops = {
325         .init_compress_ctx      = lz4_init_compress_ctx,
326         .destroy_compress_ctx   = lz4_destroy_compress_ctx,
327         .compress_pages         = lz4_compress_pages,
328         .decompress_pages       = lz4_decompress_pages,
329 };
330 #endif
331
332 #ifdef CONFIG_F2FS_FS_ZSTD
333 #define F2FS_ZSTD_DEFAULT_CLEVEL        1
334
335 static int zstd_init_compress_ctx(struct compress_ctx *cc)
336 {
337         ZSTD_parameters params;
338         ZSTD_CStream *stream;
339         void *workspace;
340         unsigned int workspace_size;
341         unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
342                                                 COMPRESS_LEVEL_OFFSET;
343
344         if (!level)
345                 level = F2FS_ZSTD_DEFAULT_CLEVEL;
346
347         params = ZSTD_getParams(level, cc->rlen, 0);
348         workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
349
350         workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
351                                         workspace_size, GFP_NOFS);
352         if (!workspace)
353                 return -ENOMEM;
354
355         stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
356         if (!stream) {
357                 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
358                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
359                                 __func__);
360                 kvfree(workspace);
361                 return -EIO;
362         }
363
364         cc->private = workspace;
365         cc->private2 = stream;
366
367         cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
368         return 0;
369 }
370
371 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
372 {
373         kvfree(cc->private);
374         cc->private = NULL;
375         cc->private2 = NULL;
376 }
377
378 static int zstd_compress_pages(struct compress_ctx *cc)
379 {
380         ZSTD_CStream *stream = cc->private2;
381         ZSTD_inBuffer inbuf;
382         ZSTD_outBuffer outbuf;
383         int src_size = cc->rlen;
384         int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
385         int ret;
386
387         inbuf.pos = 0;
388         inbuf.src = cc->rbuf;
389         inbuf.size = src_size;
390
391         outbuf.pos = 0;
392         outbuf.dst = cc->cbuf->cdata;
393         outbuf.size = dst_size;
394
395         ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
396         if (ZSTD_isError(ret)) {
397                 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
398                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
399                                 __func__, ZSTD_getErrorCode(ret));
400                 return -EIO;
401         }
402
403         ret = ZSTD_endStream(stream, &outbuf);
404         if (ZSTD_isError(ret)) {
405                 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
406                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
407                                 __func__, ZSTD_getErrorCode(ret));
408                 return -EIO;
409         }
410
411         /*
412          * Compressed data remains in the intermediate buffer because
413          * there is no more space in cbuf.cdata.
414          */
415         if (ret)
416                 return -EAGAIN;
417
418         cc->clen = outbuf.pos;
419         return 0;
420 }
421
422 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
423 {
424         ZSTD_DStream *stream;
425         void *workspace;
426         unsigned int workspace_size;
427         unsigned int max_window_size =
428                         MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
429
430         workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
431
432         workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
433                                         workspace_size, GFP_NOFS);
434         if (!workspace)
435                 return -ENOMEM;
436
437         stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
438         if (!stream) {
439                 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
440                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
441                                 __func__);
442                 kvfree(workspace);
443                 return -EIO;
444         }
445
446         dic->private = workspace;
447         dic->private2 = stream;
448
449         return 0;
450 }
451
452 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
453 {
454         kvfree(dic->private);
455         dic->private = NULL;
456         dic->private2 = NULL;
457 }
458
459 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
460 {
461         ZSTD_DStream *stream = dic->private2;
462         ZSTD_inBuffer inbuf;
463         ZSTD_outBuffer outbuf;
464         int ret;
465
466         inbuf.pos = 0;
467         inbuf.src = dic->cbuf->cdata;
468         inbuf.size = dic->clen;
469
470         outbuf.pos = 0;
471         outbuf.dst = dic->rbuf;
472         outbuf.size = dic->rlen;
473
474         ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
475         if (ZSTD_isError(ret)) {
476                 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
477                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
478                                 __func__, ZSTD_getErrorCode(ret));
479                 return -EIO;
480         }
481
482         if (dic->rlen != outbuf.pos) {
483                 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
484                                 "expected:%lu\n", KERN_ERR,
485                                 F2FS_I_SB(dic->inode)->sb->s_id,
486                                 __func__, dic->rlen,
487                                 PAGE_SIZE << dic->log_cluster_size);
488                 return -EIO;
489         }
490
491         return 0;
492 }
493
494 static const struct f2fs_compress_ops f2fs_zstd_ops = {
495         .init_compress_ctx      = zstd_init_compress_ctx,
496         .destroy_compress_ctx   = zstd_destroy_compress_ctx,
497         .compress_pages         = zstd_compress_pages,
498         .init_decompress_ctx    = zstd_init_decompress_ctx,
499         .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
500         .decompress_pages       = zstd_decompress_pages,
501 };
502 #endif
503
504 #ifdef CONFIG_F2FS_FS_LZO
505 #ifdef CONFIG_F2FS_FS_LZORLE
506 static int lzorle_compress_pages(struct compress_ctx *cc)
507 {
508         int ret;
509
510         ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
511                                         &cc->clen, cc->private);
512         if (ret != LZO_E_OK) {
513                 printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
514                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
515                 return -EIO;
516         }
517         return 0;
518 }
519
520 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
521         .init_compress_ctx      = lzo_init_compress_ctx,
522         .destroy_compress_ctx   = lzo_destroy_compress_ctx,
523         .compress_pages         = lzorle_compress_pages,
524         .decompress_pages       = lzo_decompress_pages,
525 };
526 #endif
527 #endif
528
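/*
 * Indexed by the COMPRESS_* value kept in fi->i_compress_algorithm; a NULL
 * slot means that algorithm was not built into this kernel (see
 * f2fs_is_compress_backend_ready() below).
 */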
529 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
530 #ifdef CONFIG_F2FS_FS_LZO
531         &f2fs_lzo_ops,
532 #else
533         NULL,
534 #endif
535 #ifdef CONFIG_F2FS_FS_LZ4
536         &f2fs_lz4_ops,
537 #else
538         NULL,
539 #endif
540 #ifdef CONFIG_F2FS_FS_ZSTD
541         &f2fs_zstd_ops,
542 #else
543         NULL,
544 #endif
545 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
546         &f2fs_lzorle_ops,
547 #else
548         NULL,
549 #endif
550 };
551
552 bool f2fs_is_compress_backend_ready(struct inode *inode)
553 {
554         if (!f2fs_compressed_file(inode))
555                 return true;
556         return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
557 }
558
559 static mempool_t *compress_page_pool;
560 static unsigned int num_compress_pages = 512;
561 module_param(num_compress_pages, uint, 0444);
562 MODULE_PARM_DESC(num_compress_pages,
563                 "Number of intermediate compress pages to preallocate");
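/*
 * When f2fs is modular this can be set at load time (e.g. "modprobe f2fs
 * num_compress_pages=1024"); when built in, it becomes the boot parameter
 * "f2fs.num_compress_pages=".
 */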
564
565 int f2fs_init_compress_mempool(void)
566 {
567         compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
568         if (!compress_page_pool)
569                 return -ENOMEM;
570
571         return 0;
572 }
573
574 void f2fs_destroy_compress_mempool(void)
575 {
576         mempool_destroy(compress_page_pool);
577 }
578
579 static struct page *f2fs_compress_alloc_page(void)
580 {
581         struct page *page;
582
583         page = mempool_alloc(compress_page_pool, GFP_NOFS);
584         lock_page(page);
585
586         return page;
587 }
588
589 static void f2fs_compress_free_page(struct page *page)
590 {
591         if (!page)
592                 return;
593         detach_page_private(page);
594         page->mapping = NULL;
595         unlock_page(page);
596         mempool_free(page, compress_page_pool);
597 }
598
599 #define MAX_VMAP_RETRIES        3
600
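/*
 * vm_map_ram() may fail transiently when the lazily-freed vmap area is
 * exhausted; vm_unmap_aliases() flushes those lazy unmaps, so a bounded
 * number of retries is normally enough to obtain a mapping.
 */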
601 static void *f2fs_vmap(struct page **pages, unsigned int count)
602 {
603         int i;
604         void *buf = NULL;
605
606         for (i = 0; i < MAX_VMAP_RETRIES; i++) {
607                 buf = vm_map_ram(pages, count, -1);
608                 if (buf)
609                         break;
610                 vm_unmap_aliases();
611         }
612         return buf;
613 }
614
615 static int f2fs_compress_pages(struct compress_ctx *cc)
616 {
617         struct f2fs_inode_info *fi = F2FS_I(cc->inode);
618         const struct f2fs_compress_ops *cops =
619                                 f2fs_cops[fi->i_compress_algorithm];
620         unsigned int max_len, new_nr_cpages;
621         struct page **new_cpages;
622         u32 chksum = 0;
623         int i, ret;
624
625         trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
626                                 cc->cluster_size, fi->i_compress_algorithm);
627
628         if (cops->init_compress_ctx) {
629                 ret = cops->init_compress_ctx(cc);
630                 if (ret)
631                         goto out;
632         }
633
634         max_len = COMPRESS_HEADER_SIZE + cc->clen;
635         cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
636
637         cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
638         if (!cc->cpages) {
639                 ret = -ENOMEM;
640                 goto destroy_compress_ctx;
641         }
642
643         for (i = 0; i < cc->nr_cpages; i++) {
644                 cc->cpages[i] = f2fs_compress_alloc_page();
645                 if (!cc->cpages[i]) {
646                         ret = -ENOMEM;
647                         goto out_free_cpages;
648                 }
649         }
650
651         cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
652         if (!cc->rbuf) {
653                 ret = -ENOMEM;
654                 goto out_free_cpages;
655         }
656
657         cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
658         if (!cc->cbuf) {
659                 ret = -ENOMEM;
660                 goto out_vunmap_rbuf;
661         }
662
663         ret = cops->compress_pages(cc);
664         if (ret)
665                 goto out_vunmap_cbuf;
666
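        /*
         * To store the cluster compressed, header plus payload must fit in
         * cluster_size - 1 pages, i.e. compression must save at least one
         * full page.
         */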
667         max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
668
669         if (cc->clen > max_len) {
670                 ret = -EAGAIN;
671                 goto out_vunmap_cbuf;
672         }
673
674         cc->cbuf->clen = cpu_to_le32(cc->clen);
675
676         if (fi->i_compress_flag & (1 << COMPRESS_CHKSUM))
677                 chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
678                                         cc->cbuf->cdata, cc->clen);
679         cc->cbuf->chksum = cpu_to_le32(chksum);
680
681         for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
682                 cc->cbuf->reserved[i] = cpu_to_le32(0);
683
684         new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
685
686         /* Now we're going to cut unnecessary tail pages */
687         new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
688         if (!new_cpages) {
689                 ret = -ENOMEM;
690                 goto out_vunmap_cbuf;
691         }
692
693         /* zero out any unused part of the last page */
694         memset(&cc->cbuf->cdata[cc->clen], 0,
695                         (new_nr_cpages * PAGE_SIZE) -
696                         (cc->clen + COMPRESS_HEADER_SIZE));
697
698         vm_unmap_ram(cc->cbuf, cc->nr_cpages);
699         vm_unmap_ram(cc->rbuf, cc->cluster_size);
700
701         for (i = 0; i < cc->nr_cpages; i++) {
702                 if (i < new_nr_cpages) {
703                         new_cpages[i] = cc->cpages[i];
704                         continue;
705                 }
706                 f2fs_compress_free_page(cc->cpages[i]);
707                 cc->cpages[i] = NULL;
708         }
709
710         if (cops->destroy_compress_ctx)
711                 cops->destroy_compress_ctx(cc);
712
713         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
714         cc->cpages = new_cpages;
715         cc->nr_cpages = new_nr_cpages;
716
717         trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
718                                                         cc->clen, ret);
719         return 0;
720
721 out_vunmap_cbuf:
722         vm_unmap_ram(cc->cbuf, cc->nr_cpages);
723 out_vunmap_rbuf:
724         vm_unmap_ram(cc->rbuf, cc->cluster_size);
725 out_free_cpages:
726         for (i = 0; i < cc->nr_cpages; i++) {
727                 if (cc->cpages[i])
728                         f2fs_compress_free_page(cc->cpages[i]);
729         }
730         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
731         cc->cpages = NULL;
732 destroy_compress_ctx:
733         if (cops->destroy_compress_ctx)
734                 cops->destroy_compress_ctx(cc);
735 out:
736         trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
737                                                         cc->clen, ret);
738         return ret;
739 }
740
741 void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
742 {
743         struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
744         struct f2fs_inode_info *fi = F2FS_I(dic->inode);
745         const struct f2fs_compress_ops *cops =
746                         f2fs_cops[fi->i_compress_algorithm];
747         int ret;
748         int i;
749
750         trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
751                                 dic->cluster_size, fi->i_compress_algorithm);
752
753         if (dic->failed) {
754                 ret = -EIO;
755                 goto out_end_io;
756         }
757
758         dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
759         if (!dic->tpages) {
760                 ret = -ENOMEM;
761                 goto out_end_io;
762         }
763
764         for (i = 0; i < dic->cluster_size; i++) {
765                 if (dic->rpages[i]) {
766                         dic->tpages[i] = dic->rpages[i];
767                         continue;
768                 }
769
770                 dic->tpages[i] = f2fs_compress_alloc_page();
771                 if (!dic->tpages[i]) {
772                         ret = -ENOMEM;
773                         goto out_end_io;
774                 }
775         }
776
777         if (cops->init_decompress_ctx) {
778                 ret = cops->init_decompress_ctx(dic);
779                 if (ret)
780                         goto out_end_io;
781         }
782
783         dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
784         if (!dic->rbuf) {
785                 ret = -ENOMEM;
786                 goto out_destroy_decompress_ctx;
787         }
788
789         dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
790         if (!dic->cbuf) {
791                 ret = -ENOMEM;
792                 goto out_vunmap_rbuf;
793         }
794
795         dic->clen = le32_to_cpu(dic->cbuf->clen);
796         dic->rlen = PAGE_SIZE << dic->log_cluster_size;
797
798         if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
799                 ret = -EFSCORRUPTED;
800                 goto out_vunmap_cbuf;
801         }
802
803         ret = cops->decompress_pages(dic);
804
805         if (!ret && (fi->i_compress_flag & (1 << COMPRESS_CHKSUM))) {
806                 u32 provided = le32_to_cpu(dic->cbuf->chksum);
807                 u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
808
809                 if (provided != calculated) {
810                         if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
811                                 set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
812                                 printk_ratelimited(
813                                         "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
814                                         KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
815                                         provided, calculated);
816                         }
817                         set_sbi_flag(sbi, SBI_NEED_FSCK);
818                 }
819         }
820
821 out_vunmap_cbuf:
822         vm_unmap_ram(dic->cbuf, dic->nr_cpages);
823 out_vunmap_rbuf:
824         vm_unmap_ram(dic->rbuf, dic->cluster_size);
825 out_destroy_decompress_ctx:
826         if (cops->destroy_decompress_ctx)
827                 cops->destroy_decompress_ctx(dic);
828 out_end_io:
829         trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
830                                                         dic->clen, ret);
831         f2fs_decompress_end_io(dic, ret);
832 }
833
834 /*
835  * This is called when a page of a compressed cluster has been read from disk
836  * (or failed to be read from disk).  It checks whether this page was the last
837  * page being waited on in the cluster, and if so, it decompresses the cluster
838  * (or in the case of a failure, cleans up without actually decompressing).
839  */
840 void f2fs_end_read_compressed_page(struct page *page, bool failed,
841                                                 block_t blkaddr)
842 {
843         struct decompress_io_ctx *dic =
844                         (struct decompress_io_ctx *)page_private(page);
845         struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
846
847         dec_page_count(sbi, F2FS_RD_DATA);
848
849         if (failed)
850                 WRITE_ONCE(dic->failed, true);
851         else if (blkaddr)
852                 f2fs_cache_compressed_page(sbi, page,
853                                         dic->inode->i_ino, blkaddr);
854
855         if (atomic_dec_and_test(&dic->remaining_pages))
856                 f2fs_decompress_cluster(dic);
857 }
858
859 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
860 {
861         if (cc->cluster_idx == NULL_CLUSTER)
862                 return true;
863         return cc->cluster_idx == cluster_idx(cc, index);
864 }
865
866 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
867 {
868         return cc->nr_rpages == 0;
869 }
870
871 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
872 {
873         return cc->cluster_size == cc->nr_rpages;
874 }
875
876 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
877 {
878         if (f2fs_cluster_is_empty(cc))
879                 return true;
880         return is_page_in_cluster(cc, index);
881 }
882
883 static bool cluster_has_invalid_data(struct compress_ctx *cc)
884 {
885         loff_t i_size = i_size_read(cc->inode);
886         unsigned int nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
887         int i;
888
889         for (i = 0; i < cc->cluster_size; i++) {
890                 struct page *page = cc->rpages[i];
891
892                 f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
893
894                 /* beyond EOF */
895                 if (page->index >= nr_pages)
896                         return true;
897         }
898         return false;
899 }
900
901 static int __f2fs_cluster_blocks(struct inode *inode,
902                                 unsigned int cluster_idx, bool compr)
903 {
904         struct dnode_of_data dn;
905         unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
906         unsigned int start_idx = cluster_idx <<
907                                 F2FS_I(inode)->i_log_cluster_size;
908         int ret;
909
910         set_new_dnode(&dn, inode, NULL, NULL, 0);
911         ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
912         if (ret) {
913                 if (ret == -ENOENT)
914                         ret = 0;
915                 goto fail;
916         }
917
918         if (dn.data_blkaddr == COMPRESS_ADDR) {
919                 int i;
920
921                 ret = 1;
922                 for (i = 1; i < cluster_size; i++) {
923                         block_t blkaddr;
924
925                         blkaddr = data_blkaddr(dn.inode,
926                                         dn.node_page, dn.ofs_in_node + i);
927                         if (compr) {
928                                 if (__is_valid_data_blkaddr(blkaddr))
929                                         ret++;
930                         } else {
931                                 if (blkaddr != NULL_ADDR)
932                                         ret++;
933                         }
934                 }
935
936                 f2fs_bug_on(F2FS_I_SB(inode),
937                         !compr && ret != cluster_size &&
938                         !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
939         }
940 fail:
941         f2fs_put_dnode(&dn);
942         return ret;
943 }
944
945 /* return # of compressed blocks in compressed cluster */
946 static int f2fs_compressed_blocks(struct compress_ctx *cc)
947 {
948         return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
949 }
950
951 /* return # of valid blocks in compressed cluster */
952 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
953 {
954         return __f2fs_cluster_blocks(inode,
955                 index >> F2FS_I(inode)->i_log_cluster_size,
956                 false);
957 }
958
959 static bool cluster_may_compress(struct compress_ctx *cc)
960 {
961         if (!f2fs_need_compress_data(cc->inode))
962                 return false;
963         if (f2fs_is_atomic_file(cc->inode))
964                 return false;
965         if (!f2fs_cluster_is_full(cc))
966                 return false;
967         if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
968                 return false;
969         return !cluster_has_invalid_data(cc);
970 }
971
972 static void set_cluster_writeback(struct compress_ctx *cc)
973 {
974         int i;
975
976         for (i = 0; i < cc->cluster_size; i++) {
977                 if (cc->rpages[i])
978                         set_page_writeback(cc->rpages[i]);
979         }
980 }
981
982 static void set_cluster_dirty(struct compress_ctx *cc)
983 {
984         int i;
985
986         for (i = 0; i < cc->cluster_size; i++)
987                 if (cc->rpages[i])
988                         set_page_dirty(cc->rpages[i]);
989 }
990
991 static int prepare_compress_overwrite(struct compress_ctx *cc,
992                 struct page **pagep, pgoff_t index, void **fsdata)
993 {
994         struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
995         struct address_space *mapping = cc->inode->i_mapping;
996         struct page *page;
997         sector_t last_block_in_bio;
998         unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
999         pgoff_t start_idx = start_idx_of_cluster(cc);
1000         int i, ret;
1001
1002 retry:
1003         ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1004         if (ret <= 0)
1005                 return ret;
1006
1007         ret = f2fs_init_compress_ctx(cc);
1008         if (ret)
1009                 return ret;
1010
1011         /* keep page reference to avoid page reclaim */
1012         for (i = 0; i < cc->cluster_size; i++) {
1013                 page = f2fs_pagecache_get_page(mapping, start_idx + i,
1014                                                         fgp_flag, GFP_NOFS);
1015                 if (!page) {
1016                         ret = -ENOMEM;
1017                         goto unlock_pages;
1018                 }
1019
1020                 if (PageUptodate(page))
1021                         f2fs_put_page(page, 1);
1022                 else
1023                         f2fs_compress_ctx_add_page(cc, page);
1024         }
1025
1026         if (!f2fs_cluster_is_empty(cc)) {
1027                 struct bio *bio = NULL;
1028
1029                 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1030                                         &last_block_in_bio, false, true);
1031                 f2fs_put_rpages(cc);
1032                 f2fs_destroy_compress_ctx(cc, true);
1033                 if (ret)
1034                         goto out;
1035                 if (bio)
1036                         f2fs_submit_bio(sbi, bio, DATA);
1037
1038                 ret = f2fs_init_compress_ctx(cc);
1039                 if (ret)
1040                         goto out;
1041         }
1042
1043         for (i = 0; i < cc->cluster_size; i++) {
1044                 f2fs_bug_on(sbi, cc->rpages[i]);
1045
1046                 page = find_lock_page(mapping, start_idx + i);
1047                 if (!page) {
1048                         /* the page may have been truncated */
1049                         goto release_and_retry;
1050                 }
1051
1052                 f2fs_wait_on_page_writeback(page, DATA, true, true);
1053                 f2fs_compress_ctx_add_page(cc, page);
1054
1055                 if (!PageUptodate(page)) {
1056 release_and_retry:
1057                         f2fs_put_rpages(cc);
1058                         f2fs_unlock_rpages(cc, i + 1);
1059                         f2fs_destroy_compress_ctx(cc, true);
1060                         goto retry;
1061                 }
1062         }
1063
1064         if (likely(!ret)) {
1065                 *fsdata = cc->rpages;
1066                 *pagep = cc->rpages[offset_in_cluster(cc, index)];
1067                 return cc->cluster_size;
1068         }
1069
1070 unlock_pages:
1071         f2fs_put_rpages(cc);
1072         f2fs_unlock_rpages(cc, i);
1073         f2fs_destroy_compress_ctx(cc, true);
1074 out:
1075         return ret;
1076 }
1077
1078 int f2fs_prepare_compress_overwrite(struct inode *inode,
1079                 struct page **pagep, pgoff_t index, void **fsdata)
1080 {
1081         struct compress_ctx cc = {
1082                 .inode = inode,
1083                 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1084                 .cluster_size = F2FS_I(inode)->i_cluster_size,
1085                 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1086                 .rpages = NULL,
1087                 .nr_rpages = 0,
1088         };
1089
1090         return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1091 }
1092
1093 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1094                                         pgoff_t index, unsigned copied)
1095
1096 {
1097         struct compress_ctx cc = {
1098                 .inode = inode,
1099                 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1100                 .cluster_size = F2FS_I(inode)->i_cluster_size,
1101                 .rpages = fsdata,
1102         };
1103         bool first_index = (index == cc.rpages[0]->index);
1104
1105         if (copied)
1106                 set_cluster_dirty(&cc);
1107
1108         f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1109         f2fs_destroy_compress_ctx(&cc, false);
1110
1111         return first_index;
1112 }
1113
1114 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1115 {
1116         void *fsdata = NULL;
1117         struct page *pagep;
1118         int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1119         pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1120                                                         log_cluster_size;
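        /*
         * Example: with 4KiB pages and log_cluster_size == 2, from == 40KiB
         * falls in page 10, so start_idx rounds down to 8, the first page
         * of that cluster.
         */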
1121         int err;
1122
1123         err = f2fs_is_compressed_cluster(inode, start_idx);
1124         if (err < 0)
1125                 return err;
1126
1127         /* truncate normal cluster */
1128         if (!err)
1129                 return f2fs_do_truncate_blocks(inode, from, lock);
1130
1131         /* truncate compressed cluster */
1132         err = f2fs_prepare_compress_overwrite(inode, &pagep,
1133                                                 start_idx, &fsdata);
1134
1135         /* should not be a normal cluster */
1136         f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1137
1138         if (err <= 0)
1139                 return err;
1140
1141         if (err > 0) {
1142                 struct page **rpages = fsdata;
1143                 int cluster_size = F2FS_I(inode)->i_cluster_size;
1144                 int i;
1145
1146                 for (i = cluster_size - 1; i >= 0; i--) {
1147                         loff_t start = rpages[i]->index << PAGE_SHIFT;
1148
1149                         if (from <= start) {
1150                                 zero_user_segment(rpages[i], 0, PAGE_SIZE);
1151                         } else {
1152                                 zero_user_segment(rpages[i], from - start,
1153                                                                 PAGE_SIZE);
1154                                 break;
1155                         }
1156                 }
1157
1158                 f2fs_compress_write_end(inode, fsdata, start_idx, true);
1159         }
1160         return 0;
1161 }
1162
1163 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1164                                         int *submitted,
1165                                         struct writeback_control *wbc,
1166                                         enum iostat_type io_type)
1167 {
1168         struct inode *inode = cc->inode;
1169         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1170         struct f2fs_inode_info *fi = F2FS_I(inode);
1171         struct f2fs_io_info fio = {
1172                 .sbi = sbi,
1173                 .ino = cc->inode->i_ino,
1174                 .type = DATA,
1175                 .op = REQ_OP_WRITE,
1176                 .op_flags = wbc_to_write_flags(wbc),
1177                 .old_blkaddr = NEW_ADDR,
1178                 .page = NULL,
1179                 .encrypted_page = NULL,
1180                 .compressed_page = NULL,
1181                 .submitted = false,
1182                 .io_type = io_type,
1183                 .io_wbc = wbc,
1184                 .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
1185         };
1186         struct dnode_of_data dn;
1187         struct node_info ni;
1188         struct compress_io_ctx *cic;
1189         pgoff_t start_idx = start_idx_of_cluster(cc);
1190         unsigned int last_index = cc->cluster_size - 1;
1191         loff_t psize;
1192         int i, err;
1193
1194         /* we should bypass data pages to let the kworker jobs proceed */
1195         if (unlikely(f2fs_cp_error(sbi))) {
1196                 mapping_set_error(cc->rpages[0]->mapping, -EIO);
1197                 goto out_free;
1198         }
1199
1200         if (IS_NOQUOTA(inode)) {
1201                 /*
1202                  * We need to wait for node_write to avoid block allocation during
1203                  * checkpoint. This can only happen for quota writes, which can
1204                  * cause the discard race condition below.
1205                  */
1206                 down_read(&sbi->node_write);
1207         } else if (!f2fs_trylock_op(sbi)) {
1208                 goto out_free;
1209         }
1210
1211         set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1212
1213         err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1214         if (err)
1215                 goto out_unlock_op;
1216
1217         for (i = 0; i < cc->cluster_size; i++) {
1218                 if (data_blkaddr(dn.inode, dn.node_page,
1219                                         dn.ofs_in_node + i) == NULL_ADDR)
1220                         goto out_put_dnode;
1221         }
1222
1223         psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1224
1225         err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1226         if (err)
1227                 goto out_put_dnode;
1228
1229         fio.version = ni.version;
1230
1231         cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
1232         if (!cic)
1233                 goto out_put_dnode;
1234
1235         cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1236         cic->inode = inode;
1237         atomic_set(&cic->pending_pages, cc->nr_cpages);
1238         cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1239         if (!cic->rpages)
1240                 goto out_put_cic;
1241
1242         cic->nr_rpages = cc->cluster_size;
1243
1244         for (i = 0; i < cc->nr_cpages; i++) {
1245                 f2fs_set_compressed_page(cc->cpages[i], inode,
1246                                         cc->rpages[i + 1]->index, cic);
1247                 fio.compressed_page = cc->cpages[i];
1248
1249                 fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1250                                                 dn.ofs_in_node + i + 1);
1251
1252                 /* wait for GCed page writeback via META_MAPPING */
1253                 f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1254
1255                 if (fio.encrypted) {
1256                         fio.page = cc->rpages[i + 1];
1257                         err = f2fs_encrypt_one_page(&fio);
1258                         if (err)
1259                                 goto out_destroy_crypt;
1260                         cc->cpages[i] = fio.encrypted_page;
1261                 }
1262         }
1263
1264         set_cluster_writeback(cc);
1265
1266         for (i = 0; i < cc->cluster_size; i++)
1267                 cic->rpages[i] = cc->rpages[i];
1268
1269         for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1270                 block_t blkaddr;
1271
1272                 blkaddr = f2fs_data_blkaddr(&dn);
1273                 fio.page = cc->rpages[i];
1274                 fio.old_blkaddr = blkaddr;
1275
1276                 /* cluster header */
1277                 if (i == 0) {
1278                         if (blkaddr == COMPRESS_ADDR)
1279                                 fio.compr_blocks++;
1280                         if (__is_valid_data_blkaddr(blkaddr))
1281                                 f2fs_invalidate_blocks(sbi, blkaddr);
1282                         f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1283                         goto unlock_continue;
1284                 }
1285
1286                 if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1287                         fio.compr_blocks++;
1288
1289                 if (i > cc->nr_cpages) {
1290                         if (__is_valid_data_blkaddr(blkaddr)) {
1291                                 f2fs_invalidate_blocks(sbi, blkaddr);
1292                                 f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1293                         }
1294                         goto unlock_continue;
1295                 }
1296
1297                 f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1298
1299                 if (fio.encrypted)
1300                         fio.encrypted_page = cc->cpages[i - 1];
1301                 else
1302                         fio.compressed_page = cc->cpages[i - 1];
1303
1304                 cc->cpages[i - 1] = NULL;
1305                 f2fs_outplace_write_data(&dn, &fio);
1306                 (*submitted)++;
1307 unlock_continue:
1308                 inode_dec_dirty_pages(cc->inode);
1309                 unlock_page(fio.page);
1310         }
1311
1312         if (fio.compr_blocks)
1313                 f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1314         f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
1315         add_compr_block_stat(inode, cc->nr_cpages);
1316
1317         set_inode_flag(cc->inode, FI_APPEND_WRITE);
1318         if (cc->cluster_idx == 0)
1319                 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1320
1321         f2fs_put_dnode(&dn);
1322         if (IS_NOQUOTA(inode))
1323                 up_read(&sbi->node_write);
1324         else
1325                 f2fs_unlock_op(sbi);
1326
1327         spin_lock(&fi->i_size_lock);
1328         if (fi->last_disk_size < psize)
1329                 fi->last_disk_size = psize;
1330         spin_unlock(&fi->i_size_lock);
1331
1332         f2fs_put_rpages(cc);
1333         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1334         cc->cpages = NULL;
1335         f2fs_destroy_compress_ctx(cc, false);
1336         return 0;
1337
1338 out_destroy_crypt:
1339         page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1340
1341         for (--i; i >= 0; i--)
1342                 fscrypt_finalize_bounce_page(&cc->cpages[i]);
1343         for (i = 0; i < cc->nr_cpages; i++) {
1344                 if (!cc->cpages[i])
1345                         continue;
1346                 f2fs_compress_free_page(cc->cpages[i]);
1347                 cc->cpages[i] = NULL;
1348         }
1349 out_put_cic:
1350         kmem_cache_free(cic_entry_slab, cic);
1351 out_put_dnode:
1352         f2fs_put_dnode(&dn);
1353 out_unlock_op:
1354         if (IS_NOQUOTA(inode))
1355                 up_read(&sbi->node_write);
1356         else
1357                 f2fs_unlock_op(sbi);
1358 out_free:
1359         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1360         cc->cpages = NULL;
1361         return -EAGAIN;
1362 }
1363
1364 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1365 {
1366         struct f2fs_sb_info *sbi = bio->bi_private;
1367         struct compress_io_ctx *cic =
1368                         (struct compress_io_ctx *)page_private(page);
1369         int i;
1370
1371         if (unlikely(bio->bi_status))
1372                 mapping_set_error(cic->inode->i_mapping, -EIO);
1373
1374         f2fs_compress_free_page(page);
1375
1376         dec_page_count(sbi, F2FS_WB_DATA);
1377
1378         if (atomic_dec_return(&cic->pending_pages))
1379                 return;
1380
1381         for (i = 0; i < cic->nr_rpages; i++) {
1382                 WARN_ON(!cic->rpages[i]);
1383                 clear_page_private_gcing(cic->rpages[i]);
1384                 end_page_writeback(cic->rpages[i]);
1385         }
1386
1387         page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1388         kmem_cache_free(cic_entry_slab, cic);
1389 }
1390
1391 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1392                                         int *submitted,
1393                                         struct writeback_control *wbc,
1394                                         enum iostat_type io_type)
1395 {
1396         struct address_space *mapping = cc->inode->i_mapping;
1397         int _submitted, compr_blocks, ret;
1398         int i = -1, err = 0;
1399
1400         compr_blocks = f2fs_compressed_blocks(cc);
1401         if (compr_blocks < 0) {
1402                 err = compr_blocks;
1403                 goto out_err;
1404         }
1405
1406         for (i = 0; i < cc->cluster_size; i++) {
1407                 if (!cc->rpages[i])
1408                         continue;
1409 retry_write:
1410                 if (cc->rpages[i]->mapping != mapping) {
1411                         unlock_page(cc->rpages[i]);
1412                         continue;
1413                 }
1414
1415                 BUG_ON(!PageLocked(cc->rpages[i]));
1416
1417                 ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1418                                                 NULL, NULL, wbc, io_type,
1419                                                 compr_blocks, false);
1420                 if (ret) {
1421                         if (ret == AOP_WRITEPAGE_ACTIVATE) {
1422                                 unlock_page(cc->rpages[i]);
1423                                 ret = 0;
1424                         } else if (ret == -EAGAIN) {
1425                                 /*
1426                                  * For quota files, just redirty the remaining
1427                                  * pages to avoid a deadlock caused by a cluster
1428                                  * update race with foreground operations.
1429                                  */
1430                                 if (IS_NOQUOTA(cc->inode)) {
1431                                         err = 0;
1432                                         goto out_err;
1433                                 }
1434                                 ret = 0;
1435                                 cond_resched();
1436                                 congestion_wait(BLK_RW_ASYNC,
1437                                                 DEFAULT_IO_TIMEOUT);
1438                                 lock_page(cc->rpages[i]);
1439
1440                                 if (!PageDirty(cc->rpages[i])) {
1441                                         unlock_page(cc->rpages[i]);
1442                                         continue;
1443                                 }
1444
1445                                 clear_page_dirty_for_io(cc->rpages[i]);
1446                                 goto retry_write;
1447                         }
1448                         err = ret;
1449                         goto out_err;
1450                 }
1451
1452                 *submitted += _submitted;
1453         }
1454
1455         f2fs_balance_fs(F2FS_M_SB(mapping), true);
1456
1457         return 0;
1458 out_err:
1459         for (++i; i < cc->cluster_size; i++) {
1460                 if (!cc->rpages[i])
1461                         continue;
1462                 redirty_page_for_writepage(wbc, cc->rpages[i]);
1463                 unlock_page(cc->rpages[i]);
1464         }
1465         return err;
1466 }
1467
1468 int f2fs_write_multi_pages(struct compress_ctx *cc,
1469                                         int *submitted,
1470                                         struct writeback_control *wbc,
1471                                         enum iostat_type io_type)
1472 {
1473         int err;
1474
1475         *submitted = 0;
1476         if (cluster_may_compress(cc)) {
1477                 err = f2fs_compress_pages(cc);
1478                 if (err == -EAGAIN) {
1479                         goto write;
1480                 } else if (err) {
1481                         f2fs_put_rpages_wbc(cc, wbc, true, 1);
1482                         goto destroy_out;
1483                 }
1484
1485                 err = f2fs_write_compressed_pages(cc, submitted,
1486                                                         wbc, io_type);
1487                 if (!err)
1488                         return 0;
1489                 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1490         }
1491 write:
1492         f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1493
1494         err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1495         f2fs_put_rpages_wbc(cc, wbc, false, 0);
1496 destroy_out:
1497         f2fs_destroy_compress_ctx(cc, false);
1498         return err;
1499 }
1500
1501 static void f2fs_free_dic(struct decompress_io_ctx *dic);
1502
1503 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1504 {
1505         struct decompress_io_ctx *dic;
1506         pgoff_t start_idx = start_idx_of_cluster(cc);
1507         int i;
1508
1509         dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
1510         if (!dic)
1511                 return ERR_PTR(-ENOMEM);
1512
1513         dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1514         if (!dic->rpages) {
1515                 kmem_cache_free(dic_entry_slab, dic);
1516                 return ERR_PTR(-ENOMEM);
1517         }
1518
1519         dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1520         dic->inode = cc->inode;
1521         atomic_set(&dic->remaining_pages, cc->nr_cpages);
1522         dic->cluster_idx = cc->cluster_idx;
1523         dic->cluster_size = cc->cluster_size;
1524         dic->log_cluster_size = cc->log_cluster_size;
1525         dic->nr_cpages = cc->nr_cpages;
1526         refcount_set(&dic->refcnt, 1);
1527         dic->failed = false;
1528         dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1529
1530         for (i = 0; i < dic->cluster_size; i++)
1531                 dic->rpages[i] = cc->rpages[i];
1532         dic->nr_rpages = cc->cluster_size;
1533
1534         dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1535         if (!dic->cpages)
1536                 goto out_free;
1537
1538         for (i = 0; i < dic->nr_cpages; i++) {
1539                 struct page *page;
1540
1541                 page = f2fs_compress_alloc_page();
1542                 if (!page)
1543                         goto out_free;
1544
1545                 f2fs_set_compressed_page(page, cc->inode,
1546                                         start_idx + i + 1, dic);
1547                 dic->cpages[i] = page;
1548         }
1549
1550         return dic;
1551
1552 out_free:
1553         f2fs_free_dic(dic);
1554         return ERR_PTR(-ENOMEM);
1555 }
1556
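     /*
      * Free a decompress_io_ctx: release any temporary pages (skipping the
      * slots that are backed by pagecache pages), the compressed-data pages,
      * the page-pointer arrays, and finally the context itself.
      */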
1557 static void f2fs_free_dic(struct decompress_io_ctx *dic)
1558 {
1559         int i;
1560
1561         if (dic->tpages) {
1562                 for (i = 0; i < dic->cluster_size; i++) {
1563                         if (dic->rpages[i])
1564                                 continue;
1565                         if (!dic->tpages[i])
1566                                 continue;
1567                         f2fs_compress_free_page(dic->tpages[i]);
1568                 }
1569                 page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1570         }
1571
1572         if (dic->cpages) {
1573                 for (i = 0; i < dic->nr_cpages; i++) {
1574                         if (!dic->cpages[i])
1575                                 continue;
1576                         f2fs_compress_free_page(dic->cpages[i]);
1577                 }
1578                 page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1579         }
1580
1581         page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1582         kmem_cache_free(dic_entry_slab, dic);
1583 }
1584
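     /* Drop a reference to the decompress_io_ctx; the last put frees it. */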
1585 static void f2fs_put_dic(struct decompress_io_ctx *dic)
1586 {
1587         if (refcount_dec_and_test(&dic->refcnt))
1588                 f2fs_free_dic(dic);
1589 }
1590
1591 /*
1592  * Update and unlock the cluster's pagecache pages, and release the reference to
1593  * the decompress_io_ctx that was being held for I/O completion.
1594  */
1595 static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1596 {
1597         int i;
1598
1599         for (i = 0; i < dic->cluster_size; i++) {
1600                 struct page *rpage = dic->rpages[i];
1601
1602                 if (!rpage)
1603                         continue;
1604
1605                 /* PG_error was set if verity failed. */
1606                 if (failed || PageError(rpage)) {
1607                         ClearPageUptodate(rpage);
1608                         /* will re-read again later */
1609                         /* will be re-read later */
1610                 } else {
1611                         SetPageUptodate(rpage);
1612                 }
1613                 unlock_page(rpage);
1614         }
1615
1616         f2fs_put_dic(dic);
1617 }
1618
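     /*
      * Work function, run from the fs-verity workqueue, that verifies a
      * cluster's decompressed pages and then completes the read.  Pages
      * failing verification are flagged with PG_error.
      */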
1619 static void f2fs_verify_cluster(struct work_struct *work)
1620 {
1621         struct decompress_io_ctx *dic =
1622                 container_of(work, struct decompress_io_ctx, verity_work);
1623         int i;
1624
1625         /* Verify the cluster's decompressed pages with fs-verity. */
1626         for (i = 0; i < dic->cluster_size; i++) {
1627                 struct page *rpage = dic->rpages[i];
1628
1629                 if (rpage && !fsverity_verify_page(rpage))
1630                         SetPageError(rpage);
1631         }
1632
1633         __f2fs_decompress_end_io(dic, false);
1634 }
1635
1636 /*
1637  * This is called when a compressed cluster has been decompressed
1638  * (or failed to be read and/or decompressed).
1639  */
1640 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1641 {
1642         if (!failed && dic->need_verity) {
1643                 /*
1644                  * Note that to avoid deadlocks, the verity work can't be done
1645                  * on the decompression workqueue.  This is because verifying
1646                  * the data pages can involve reading metadata pages from the
1647                  * file, and these metadata pages may be compressed.
1648                  */
1649                 INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1650                 fsverity_enqueue_verify_work(&dic->verity_work);
1651         } else {
1652                 __f2fs_decompress_end_io(dic, failed);
1653         }
1654 }
1655
1656 /*
1657  * Put a reference to a compressed page's decompress_io_ctx.
1658  *
1659  * This is called when the page is no longer needed and can be freed.
1660  */
1661 void f2fs_put_page_dic(struct page *page)
1662 {
1663         struct decompress_io_ctx *dic =
1664                         (struct decompress_io_ctx *)page_private(page);
1665
1666         f2fs_put_dic(dic);
1667 }
1668
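     /*
      * Address space operations for the hidden inode backing the
      * compressed-block cache (the "compress_cache" mount option).
      */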
1669 const struct address_space_operations f2fs_compress_aops = {
1670         .releasepage = f2fs_release_page,
1671         .invalidatepage = f2fs_invalidate_page,
1672 };
1673
1674 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1675 {
1676         return sbi->compress_inode->i_mapping;
1677 }
1678
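     /* Drop the cached copy, if any, of the compressed block at @blkaddr. */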
1679 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
1680 {
1681         if (!sbi->compress_inode)
1682                 return;
1683         invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
1684 }
1685
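     /*
      * Cache a copy of the compressed block at @blkaddr in the compress
      * inode's page cache, tagged with the owning @ino, so that a later read
      * of the same block can be served from memory.  Gives up silently if
      * the cache is disabled, memory is tight, or the block is already
      * cached.
      */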
1686 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1687                                                 nid_t ino, block_t blkaddr)
1688 {
1689         struct page *cpage;
1690         int ret;
1691
1692         if (!test_opt(sbi, COMPRESS_CACHE))
1693                 return;
1694
1695         if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1696                 return;
1697
1698         if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1699                 return;
1700
1701         cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1702         if (cpage) {
1703                 f2fs_put_page(cpage, 0);
1704                 return;
1705         }
1706
1707         cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1708         if (!cpage)
1709                 return;
1710
1711         ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1712                                                 blkaddr, GFP_NOFS);
1713         if (ret) {
1714                 f2fs_put_page(cpage, 0);
1715                 return;
1716         }
1717
1718         set_page_private_data(cpage, ino);
1719
1720         if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1721                 goto out;
1722
1723         memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1724         SetPageUptodate(cpage);
1725 out:
1726         f2fs_put_page(cpage, 1);
1727 }
1728
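     /*
      * Try to serve the compressed block at @blkaddr from the compress
      * cache.  On a hit, the cached data is copied into @page and true is
      * returned.
      */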
1729 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1730                                                                 block_t blkaddr)
1731 {
1732         struct page *cpage;
1733         bool hit = false;
1734
1735         if (!test_opt(sbi, COMPRESS_CACHE))
1736                 return false;
1737
1738         cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1739                                 blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1740         if (cpage) {
1741                 if (PageUptodate(cpage)) {
1742                         atomic_inc(&sbi->compress_page_hit);
1743                         memcpy(page_address(page),
1744                                 page_address(cpage), PAGE_SIZE);
1745                         hit = true;
1746                 }
1747                 f2fs_put_page(cpage, 1);
1748         }
1749
1750         return hit;
1751 }
1752
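     /*
      * Remove every cached compressed page that belongs to inode @ino,
      * typically when the inode is evicted.
      */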
1753 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1754 {
1755         struct address_space *mapping = sbi->compress_inode->i_mapping;
1756         struct pagevec pvec;
1757         pgoff_t index = 0;
1758         pgoff_t end = MAX_BLKADDR(sbi);
1759
1760         if (!mapping->nrpages)
1761                 return;
1762
1763         pagevec_init(&pvec);
1764
1765         do {
1766                 unsigned int nr_pages;
1767                 int i;
1768
1769                 nr_pages = pagevec_lookup_range(&pvec, mapping,
1770                                                 &index, end - 1);
1771                 if (!nr_pages)
1772                         break;
1773
1774                 for (i = 0; i < nr_pages; i++) {
1775                         struct page *page = pvec.pages[i];
1776
1777                         if (page->index > end)
1778                                 break;
1779
1780                         lock_page(page);
1781                         if (page->mapping != mapping) {
1782                                 unlock_page(page);
1783                                 continue;
1784                         }
1785
1786                         if (ino != get_page_private_data(page)) {
1787                                 unlock_page(page);
1788                                 continue;
1789                         }
1790
1791                         generic_error_remove_page(mapping, page);
1792                         unlock_page(page);
1793                 }
1794                 pagevec_release(&pvec);
1795                 cond_resched();
1796         } while (index < end);
1797 }
1798
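     /*
      * Load the hidden compress-cache inode and initialize the cache's
      * memory watermarks; a no-op unless the "compress_cache" mount option
      * is set.
      */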
1799 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
1800 {
1801         struct inode *inode;
1802
1803         if (!test_opt(sbi, COMPRESS_CACHE))
1804                 return 0;
1805
1806         inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
1807         if (IS_ERR(inode))
1808                 return PTR_ERR(inode);
1809         sbi->compress_inode = inode;
1810
1811         sbi->compress_percent = COMPRESS_PERCENT;
1812         sbi->compress_watermark = COMPRESS_WATERMARK;
1813
1814         atomic_set(&sbi->compress_page_hit, 0);
1815
1816         return 0;
1817 }
1818
1819 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
1820 {
1821         if (!sbi->compress_inode)
1822                 return;
1823         iput(sbi->compress_inode);
1824         sbi->compress_inode = NULL;
1825 }
1826
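     /*
      * Create the per-superblock slab used for rpages/cpages pointer
      * arrays, sized for the configured cluster size and named after the
      * block device.
      */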
1827 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1828 {
1829         dev_t dev = sbi->sb->s_bdev->bd_dev;
1830         char slab_name[35];
1831
1832         snprintf(slab_name, sizeof(slab_name), "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1833
1834         sbi->page_array_slab_size = sizeof(struct page *) <<
1835                                         F2FS_OPTION(sbi).compress_log_size;
1836
1837         sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1838                                         sbi->page_array_slab_size);
1839         if (!sbi->page_array_slab)
1840                 return -ENOMEM;
1841         return 0;
1842 }
1843
1844 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1845 {
1846         kmem_cache_destroy(sbi->page_array_slab);
1847 }
1848
1849 static int __init f2fs_init_cic_cache(void)
1850 {
1851         cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
1852                                         sizeof(struct compress_io_ctx));
1853         if (!cic_entry_slab)
1854                 return -ENOMEM;
1855         return 0;
1856 }
1857
1858 static void f2fs_destroy_cic_cache(void)
1859 {
1860         kmem_cache_destroy(cic_entry_slab);
1861 }
1862
1863 static int __init f2fs_init_dic_cache(void)
1864 {
1865         dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
1866                                         sizeof(struct decompress_io_ctx));
1867         if (!dic_entry_slab)
1868                 return -ENOMEM;
1869         return 0;
1870 }
1871
1872 static void f2fs_destroy_dic_cache(void)
1873 {
1874         kmem_cache_destroy(dic_entry_slab);
1875 }
1876
1877 int __init f2fs_init_compress_cache(void)
1878 {
1879         int err;
1880
1881         err = f2fs_init_cic_cache();
1882         if (err)
1883                 goto out;
1884         err = f2fs_init_dic_cache();
1885         if (err)
1886                 goto free_cic;
1887         return 0;
1888 free_cic:
1889         f2fs_destroy_cic_cache();
1890 out:
1891         return err;
1892 }
1893
1894 void f2fs_destroy_compress_cache(void)
1895 {
1896         f2fs_destroy_dic_cache();
1897         f2fs_destroy_cic_cache();
1898 }