1 // SPDX-License-Identifier: GPL-2.0
3 * f2fs compress support
5 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
9 #include <linux/f2fs_fs.h>
10 #include <linux/writeback.h>
11 #include <linux/backing-dev.h>
12 #include <linux/lzo.h>
13 #include <linux/lz4.h>
14 #include <linux/zstd.h>
18 #include <trace/events/f2fs.h>
20 struct f2fs_compress_ops {
21 int (*init_compress_ctx)(struct compress_ctx *cc);
22 void (*destroy_compress_ctx)(struct compress_ctx *cc);
23 int (*compress_pages)(struct compress_ctx *cc);
24 int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
25 void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
26 int (*decompress_pages)(struct decompress_io_ctx *dic);
29 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
31 return index & (cc->cluster_size - 1);
34 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
36 return index >> cc->log_cluster_size;
39 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
41 return cc->cluster_idx << cc->log_cluster_size;
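/*
 * A worked example (a sketch, assuming the default 4-page cluster,
 * i.e. log_cluster_size == 2 and cluster_size == 4): for page index 11,
 * offset_in_cluster() == 11 & 3 == 3, cluster_idx() == 11 >> 2 == 2,
 * and start_idx_of_cluster() for that cluster == 2 << 2 == 8. The mask
 * in offset_in_cluster() relies on cluster_size being a power of two.
 */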
44 bool f2fs_is_compressed_page(struct page *page)
46 if (!PagePrivate(page))
48 if (!page_private(page))
50 if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
53 * page->private may hold a pid stored by IO tracing;
54 * comparing against pid_max is enough to tell the page is traced.
56 if (IS_IO_TRACED_PAGE(page))
59 f2fs_bug_on(F2FS_M_SB(page->mapping),
60 *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
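/*
 * Both compress_io_ctx and decompress_io_ctx begin with
 * F2FS_COMPRESSED_PAGE_MAGIC, so the check above can identify a
 * compressed page by following page->private to its owning context.
 */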
64 static void f2fs_set_compressed_page(struct page *page,
65 struct inode *inode, pgoff_t index, void *data)
68 set_page_private(page, (unsigned long)data);
70 /* i_crypto_info and iv index */
72 page->mapping = inode->i_mapping;
75 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
79 for (i = 0; i < len; i++) {
83 unlock_page(cc->rpages[i]);
85 put_page(cc->rpages[i]);
89 static void f2fs_put_rpages(struct compress_ctx *cc)
91 f2fs_drop_rpages(cc, cc->cluster_size, false);
94 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
96 f2fs_drop_rpages(cc, len, true);
99 static void f2fs_put_rpages_mapping(struct address_space *mapping,
100 pgoff_t start, int len)
104 for (i = 0; i < len; i++) {
105 struct page *page = find_get_page(mapping, start + i);
112 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
113 struct writeback_control *wbc, bool redirty, int unlock)
117 for (i = 0; i < cc->cluster_size; i++) {
121 redirty_page_for_writepage(wbc, cc->rpages[i]);
122 f2fs_put_page(cc->rpages[i], unlock);
126 struct page *f2fs_compress_control_page(struct page *page)
128 return ((struct compress_io_ctx *)page_private(page))->rpages[0];
131 int f2fs_init_compress_ctx(struct compress_ctx *cc)
133 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
138 cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
139 cc->log_cluster_size, GFP_NOFS);
140 return cc->rpages ? 0 : -ENOMEM;
143 void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
149 cc->cluster_idx = NULL_CLUSTER;
152 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
154 unsigned int cluster_ofs;
156 if (!f2fs_cluster_can_merge_page(cc, page->index))
157 f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
159 cluster_ofs = offset_in_cluster(cc, page->index);
160 cc->rpages[cluster_ofs] = page;
162 cc->cluster_idx = cluster_idx(cc, page->index);
165 #ifdef CONFIG_F2FS_FS_LZO
166 static int lzo_init_compress_ctx(struct compress_ctx *cc)
168 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
169 LZO1X_MEM_COMPRESS, GFP_NOFS);
173 cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
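/*
 * lzo1x_worst_compress() is the worst-case LZO output size for an
 * incompressible input (roughly x + x/16 + 64 + 3 bytes per
 * include/linux/lzo.h), so the compressed buffer can never overflow.
 */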
177 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
183 static int lzo_compress_pages(struct compress_ctx *cc)
187 ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
188 &cc->clen, cc->private);
189 if (ret != LZO_E_OK) {
190 printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
191 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
197 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
201 ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
202 dic->rbuf, &dic->rlen);
203 if (ret != LZO_E_OK) {
204 printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
205 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
209 if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
210 printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
211 "expected:%lu\n", KERN_ERR,
212 F2FS_I_SB(dic->inode)->sb->s_id,
214 PAGE_SIZE << dic->log_cluster_size);
220 static const struct f2fs_compress_ops f2fs_lzo_ops = {
221 .init_compress_ctx = lzo_init_compress_ctx,
222 .destroy_compress_ctx = lzo_destroy_compress_ctx,
223 .compress_pages = lzo_compress_pages,
224 .decompress_pages = lzo_decompress_pages,
228 #ifdef CONFIG_F2FS_FS_LZ4
229 static int lz4_init_compress_ctx(struct compress_ctx *cc)
231 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
232 LZ4_MEM_COMPRESS, GFP_NOFS);
237 * We do not change cc->clen to LZ4_compressBound(inputsize) to
238 * cover the worst-case compressed size, because the lz4 compressor
239 * handles a limited output budget properly on its own.
241 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
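/*
 * Capping clen one page (plus the header) below rlen means compression
 * only succeeds when it saves at least one full page:
 * LZ4_compress_default() returns 0 once this budget is exceeded, and
 * the cluster is then written out uncompressed instead.
 */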
245 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
251 static int lz4_compress_pages(struct compress_ctx *cc)
255 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
256 cc->clen, cc->private);
264 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
268 ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
269 dic->clen, dic->rlen);
271 printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
272 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
276 if (ret != PAGE_SIZE << dic->log_cluster_size) {
277 printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
278 "expected:%lu\n", KERN_ERR,
279 F2FS_I_SB(dic->inode)->sb->s_id,
281 PAGE_SIZE << dic->log_cluster_size);
287 static const struct f2fs_compress_ops f2fs_lz4_ops = {
288 .init_compress_ctx = lz4_init_compress_ctx,
289 .destroy_compress_ctx = lz4_destroy_compress_ctx,
290 .compress_pages = lz4_compress_pages,
291 .decompress_pages = lz4_decompress_pages,
295 #ifdef CONFIG_F2FS_FS_ZSTD
296 #define F2FS_ZSTD_DEFAULT_CLEVEL 1
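/*
 * Compression level 1 favors speed. zstd supports higher levels that
 * trade CPU time and workspace size for better ratios, but the level
 * is fixed here.
 */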
298 static int zstd_init_compress_ctx(struct compress_ctx *cc)
300 ZSTD_parameters params;
301 ZSTD_CStream *stream;
303 unsigned int workspace_size;
305 params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
306 workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
308 workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
309 workspace_size, GFP_NOFS);
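/*
 * The kernel's zstd wrapper does no internal allocation: callers pass
 * a workspace sized by ZSTD_CStreamWorkspaceBound() for the chosen
 * parameters, hence the preallocation above.
 */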
313 stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
315 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
316 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
322 cc->private = workspace;
323 cc->private2 = stream;
325 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
329 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
336 static int zstd_compress_pages(struct compress_ctx *cc)
338 ZSTD_CStream *stream = cc->private2;
340 ZSTD_outBuffer outbuf;
341 int src_size = cc->rlen;
342 int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
346 inbuf.src = cc->rbuf;
347 inbuf.size = src_size;
350 outbuf.dst = cc->cbuf->cdata;
351 outbuf.size = dst_size;
353 ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
354 if (ZSTD_isError(ret)) {
355 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
356 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
357 __func__, ZSTD_getErrorCode(ret));
361 ret = ZSTD_endStream(stream, &outbuf);
362 if (ZSTD_isError(ret)) {
363 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
364 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
365 __func__, ZSTD_getErrorCode(ret));
370 * compressed data remains in the intermediate buffer because there
371 * is no more space left in cbuf.cdata
376 cc->clen = outbuf.pos;
380 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
382 ZSTD_DStream *stream;
384 unsigned int workspace_size;
386 workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);
388 workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
389 workspace_size, GFP_NOFS);
393 stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
394 workspace, workspace_size);
396 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
397 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
403 dic->private = workspace;
404 dic->private2 = stream;
409 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
411 kvfree(dic->private);
413 dic->private2 = NULL;
416 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
418 ZSTD_DStream *stream = dic->private2;
420 ZSTD_outBuffer outbuf;
424 inbuf.src = dic->cbuf->cdata;
425 inbuf.size = dic->clen;
428 outbuf.dst = dic->rbuf;
429 outbuf.size = dic->rlen;
431 ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
432 if (ZSTD_isError(ret)) {
433 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
434 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
435 __func__, ZSTD_getErrorCode(ret));
439 if (dic->rlen != outbuf.pos) {
440 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
441 "expected:%lu\n", KERN_ERR,
442 F2FS_I_SB(dic->inode)->sb->s_id,
444 PAGE_SIZE << dic->log_cluster_size);
451 static const struct f2fs_compress_ops f2fs_zstd_ops = {
452 .init_compress_ctx = zstd_init_compress_ctx,
453 .destroy_compress_ctx = zstd_destroy_compress_ctx,
454 .compress_pages = zstd_compress_pages,
455 .init_decompress_ctx = zstd_init_decompress_ctx,
456 .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
457 .decompress_pages = zstd_decompress_pages,
461 #ifdef CONFIG_F2FS_FS_LZO
462 #ifdef CONFIG_F2FS_FS_LZORLE
463 static int lzorle_compress_pages(struct compress_ctx *cc)
467 ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
468 &cc->clen, cc->private);
469 if (ret != LZO_E_OK) {
470 printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
471 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
477 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
478 .init_compress_ctx = lzo_init_compress_ctx,
479 .destroy_compress_ctx = lzo_destroy_compress_ctx,
480 .compress_pages = lzorle_compress_pages,
481 .decompress_pages = lzo_decompress_pages,
486 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
487 #ifdef CONFIG_F2FS_FS_LZO
492 #ifdef CONFIG_F2FS_FS_LZ4
497 #ifdef CONFIG_F2FS_FS_ZSTD
502 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
509 bool f2fs_is_compress_backend_ready(struct inode *inode)
511 if (!f2fs_compressed_file(inode))
513 return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
516 static mempool_t *compress_page_pool;
517 static unsigned int num_compress_pages = 512;
518 module_param(num_compress_pages, uint, 0444);
519 MODULE_PARM_DESC(num_compress_pages,
520 "Number of intermediate compress pages to preallocate");
522 int f2fs_init_compress_mempool(void)
524 compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
525 if (!compress_page_pool)
531 void f2fs_destroy_compress_mempool(void)
533 mempool_destroy(compress_page_pool);
536 static struct page *f2fs_compress_alloc_page(void)
540 page = mempool_alloc(compress_page_pool, GFP_NOFS);
546 static void f2fs_compress_free_page(struct page *page)
550 set_page_private(page, (unsigned long)NULL);
551 ClearPagePrivate(page);
552 page->mapping = NULL;
554 mempool_free(page, compress_page_pool);
557 static int f2fs_compress_pages(struct compress_ctx *cc)
559 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
560 struct f2fs_inode_info *fi = F2FS_I(cc->inode);
561 const struct f2fs_compress_ops *cops =
562 f2fs_cops[fi->i_compress_algorithm];
563 unsigned int max_len, nr_cpages;
566 trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
567 cc->cluster_size, fi->i_compress_algorithm);
569 if (cops->init_compress_ctx) {
570 ret = cops->init_compress_ctx(cc);
575 max_len = COMPRESS_HEADER_SIZE + cc->clen;
576 cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
578 cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
579 cc->nr_cpages, GFP_NOFS);
582 goto destroy_compress_ctx;
585 for (i = 0; i < cc->nr_cpages; i++) {
586 cc->cpages[i] = f2fs_compress_alloc_page();
587 if (!cc->cpages[i]) {
589 goto out_free_cpages;
593 cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
596 goto out_free_cpages;
599 cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
602 goto out_vunmap_rbuf;
605 ret = cops->compress_pages(cc);
607 goto out_vunmap_cbuf;
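/*
 * Compression must save at least one full page after accounting for
 * the on-disk cluster header; anything larger than max_len below is
 * rejected and the cluster falls back to a raw write.
 */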
609 max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
611 if (cc->clen > max_len) {
613 goto out_vunmap_cbuf;
616 cc->cbuf->clen = cpu_to_le32(cc->clen);
618 for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
619 cc->cbuf->reserved[i] = cpu_to_le32(0);
621 nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
623 /* zero out any unused part of the last page */
624 memset(&cc->cbuf->cdata[cc->clen], 0,
625 (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
630 for (i = nr_cpages; i < cc->nr_cpages; i++) {
631 f2fs_compress_free_page(cc->cpages[i]);
632 cc->cpages[i] = NULL;
635 if (cops->destroy_compress_ctx)
636 cops->destroy_compress_ctx(cc);
638 cc->nr_cpages = nr_cpages;
640 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
649 for (i = 0; i < cc->nr_cpages; i++) {
651 f2fs_compress_free_page(cc->cpages[i]);
655 destroy_compress_ctx:
656 if (cops->destroy_compress_ctx)
657 cops->destroy_compress_ctx(cc);
659 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
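/*
 * dic->ref counts the compressed pages still in flight for this
 * cluster; every bio completion drops one reference, and only the
 * final completion (when refcount_dec_not_one() below returns false)
 * performs the actual decompression.
 */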
664 void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
666 struct decompress_io_ctx *dic =
667 (struct decompress_io_ctx *)page_private(page);
668 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
669 struct f2fs_inode_info *fi = F2FS_I(dic->inode);
670 const struct f2fs_compress_ops *cops =
671 f2fs_cops[fi->i_compress_algorithm];
675 dec_page_count(sbi, F2FS_RD_DATA);
677 if (bio->bi_status || PageError(page))
680 if (refcount_dec_not_one(&dic->ref))
683 trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
684 dic->cluster_size, fi->i_compress_algorithm);
686 /* bail out if the compressed pages were only partially submitted */
692 dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
693 dic->cluster_size, GFP_NOFS);
699 for (i = 0; i < dic->cluster_size; i++) {
700 if (dic->rpages[i]) {
701 dic->tpages[i] = dic->rpages[i];
705 dic->tpages[i] = f2fs_compress_alloc_page();
706 if (!dic->tpages[i]) {
712 if (cops->init_decompress_ctx) {
713 ret = cops->init_decompress_ctx(dic);
718 dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
721 goto destroy_decompress_ctx;
724 dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
727 goto out_vunmap_rbuf;
730 dic->clen = le32_to_cpu(dic->cbuf->clen);
731 dic->rlen = PAGE_SIZE << dic->log_cluster_size;
733 if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
735 goto out_vunmap_cbuf;
738 ret = cops->decompress_pages(dic);
744 destroy_decompress_ctx:
745 if (cops->destroy_decompress_ctx)
746 cops->destroy_decompress_ctx(dic);
749 refcount_set(&dic->ref, dic->nr_cpages);
751 f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
754 trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
760 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
762 if (cc->cluster_idx == NULL_CLUSTER)
764 return cc->cluster_idx == cluster_idx(cc, index);
767 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
769 return cc->nr_rpages == 0;
772 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
774 return cc->cluster_size == cc->nr_rpages;
777 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
779 if (f2fs_cluster_is_empty(cc))
781 return is_page_in_cluster(cc, index);
784 static bool __cluster_may_compress(struct compress_ctx *cc)
786 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
787 loff_t i_size = i_size_read(cc->inode);
788 unsigned int nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
791 for (i = 0; i < cc->cluster_size; i++) {
792 struct page *page = cc->rpages[i];
794 f2fs_bug_on(sbi, !page);
796 if (unlikely(f2fs_cp_error(sbi)))
798 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
802 if (page->index >= nr_pages)
808 static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
810 struct dnode_of_data dn;
813 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
814 ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
822 if (dn.data_blkaddr == COMPRESS_ADDR) {
826 for (i = 1; i < cc->cluster_size; i++) {
829 blkaddr = data_blkaddr(dn.inode,
830 dn.node_page, dn.ofs_in_node + i);
832 if (__is_valid_data_blkaddr(blkaddr))
835 if (blkaddr != NULL_ADDR)
845 /* return # of compressed blocks in compressed cluster */
846 static int f2fs_compressed_blocks(struct compress_ctx *cc)
848 return __f2fs_cluster_blocks(cc, true);
851 /* return # of valid blocks in compressed cluster */
852 static int f2fs_cluster_blocks(struct compress_ctx *cc)
854 return __f2fs_cluster_blocks(cc, false);
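/*
 * On-disk layout of a compressed cluster: the first block slot holds
 * the COMPRESS_ADDR marker and the remaining slots hold the addresses
 * of the compressed blocks; slots past the compressed length keep
 * NEW_ADDR (see f2fs_write_compressed_pages()).
 */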
857 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
859 struct compress_ctx cc = {
861 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
862 .cluster_size = F2FS_I(inode)->i_cluster_size,
863 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
866 return f2fs_cluster_blocks(&cc);
869 static bool cluster_may_compress(struct compress_ctx *cc)
871 if (!f2fs_compressed_file(cc->inode))
873 if (f2fs_is_atomic_file(cc->inode))
875 if (f2fs_is_mmap_file(cc->inode))
877 if (!f2fs_cluster_is_full(cc))
879 if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
881 return __cluster_may_compress(cc);
884 static void set_cluster_writeback(struct compress_ctx *cc)
888 for (i = 0; i < cc->cluster_size; i++) {
890 set_page_writeback(cc->rpages[i]);
894 static void set_cluster_dirty(struct compress_ctx *cc)
898 for (i = 0; i < cc->cluster_size; i++)
900 set_page_dirty(cc->rpages[i]);
903 static int prepare_compress_overwrite(struct compress_ctx *cc,
904 struct page **pagep, pgoff_t index, void **fsdata)
906 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
907 struct address_space *mapping = cc->inode->i_mapping;
909 struct dnode_of_data dn;
910 sector_t last_block_in_bio;
911 unsigned int fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
912 pgoff_t start_idx = start_idx_of_cluster(cc);
917 ret = f2fs_cluster_blocks(cc);
921 /* compressed case */
922 prealloc = (ret < cc->cluster_size);
924 ret = f2fs_init_compress_ctx(cc);
928 /* keep page reference to avoid page reclaim */
929 for (i = 0; i < cc->cluster_size; i++) {
930 page = f2fs_pagecache_get_page(mapping, start_idx + i,
937 if (PageUptodate(page))
940 f2fs_compress_ctx_add_page(cc, page);
943 if (!f2fs_cluster_is_empty(cc)) {
944 struct bio *bio = NULL;
946 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
947 &last_block_in_bio, false, true);
948 f2fs_destroy_compress_ctx(cc);
952 f2fs_submit_bio(sbi, bio, DATA);
954 ret = f2fs_init_compress_ctx(cc);
959 for (i = 0; i < cc->cluster_size; i++) {
960 f2fs_bug_on(sbi, cc->rpages[i]);
962 page = find_lock_page(mapping, start_idx + i);
963 f2fs_bug_on(sbi, !page);
965 f2fs_wait_on_page_writeback(page, DATA, true, true);
967 f2fs_compress_ctx_add_page(cc, page);
968 f2fs_put_page(page, 0);
970 if (!PageUptodate(page)) {
971 f2fs_unlock_rpages(cc, i + 1);
972 f2fs_put_rpages_mapping(mapping, start_idx,
974 f2fs_destroy_compress_ctx(cc);
980 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
982 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
984 for (i = cc->cluster_size - 1; i > 0; i--) {
985 ret = f2fs_get_block(&dn, start_idx + i);
987 i = cc->cluster_size;
991 if (dn.data_blkaddr != NEW_ADDR)
995 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
999 *fsdata = cc->rpages;
1000 *pagep = cc->rpages[offset_in_cluster(cc, index)];
1001 return cc->cluster_size;
1005 f2fs_unlock_rpages(cc, i);
1007 f2fs_put_rpages_mapping(mapping, start_idx, i);
1008 f2fs_destroy_compress_ctx(cc);
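/*
 * On success, prepare_compress_overwrite() returns cluster_size with
 * every rpage locked and referenced, *pagep pointing at the page for
 * @index, and *fsdata carrying the rpages array so that
 * f2fs_compress_write_end() can release them later.
 */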
1012 int f2fs_prepare_compress_overwrite(struct inode *inode,
1013 struct page **pagep, pgoff_t index, void **fsdata)
1015 struct compress_ctx cc = {
1017 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1018 .cluster_size = F2FS_I(inode)->i_cluster_size,
1019 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1024 return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1027 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1028 pgoff_t index, unsigned copied)
1031 struct compress_ctx cc = {
1032 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1033 .cluster_size = F2FS_I(inode)->i_cluster_size,
1036 bool first_index = (index == cc.rpages[0]->index);
1039 set_cluster_dirty(&cc);
1041 f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1042 f2fs_destroy_compress_ctx(&cc);
1047 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1049 void *fsdata = NULL;
1051 int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1052 pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1056 err = f2fs_is_compressed_cluster(inode, start_idx);
1060 /* truncate normal cluster */
1062 return f2fs_do_truncate_blocks(inode, from, lock);
1064 /* truncate compressed cluster */
1065 err = f2fs_prepare_compress_overwrite(inode, &pagep,
1066 start_idx, &fsdata);
1068 /* should not be a normal cluster */
1069 f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1075 struct page **rpages = fsdata;
1076 int cluster_size = F2FS_I(inode)->i_cluster_size;
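/*
 * Zero everything at or beyond @from within the cluster: pages wholly
 * past the truncation point are cleared in full, and the boundary page
 * is cleared from its in-page offset onward.
 */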
1079 for (i = cluster_size - 1; i >= 0; i--) {
1080 loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT;
1082 if (from <= start) {
1083 zero_user_segment(rpages[i], 0, PAGE_SIZE);
1085 zero_user_segment(rpages[i], from - start,
1091 f2fs_compress_write_end(inode, fsdata, start_idx, true);
1096 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1098 struct writeback_control *wbc,
1099 enum iostat_type io_type)
1101 struct inode *inode = cc->inode;
1102 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1103 struct f2fs_inode_info *fi = F2FS_I(inode);
1104 struct f2fs_io_info fio = {
1106 .ino = cc->inode->i_ino,
1109 .op_flags = wbc_to_write_flags(wbc),
1110 .old_blkaddr = NEW_ADDR,
1112 .encrypted_page = NULL,
1113 .compressed_page = NULL,
1117 .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
1119 struct dnode_of_data dn;
1120 struct node_info ni;
1121 struct compress_io_ctx *cic;
1122 pgoff_t start_idx = start_idx_of_cluster(cc);
1123 unsigned int last_index = cc->cluster_size - 1;
1127 if (IS_NOQUOTA(inode)) {
1129 * We need to wait for node_write to avoid block allocation during
1130 * checkpoint. This can only happen to quota writes, which can
1131 * cause the discard race condition below.
1133 down_read(&sbi->node_write);
1134 } else if (!f2fs_trylock_op(sbi)) {
1138 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1140 err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1144 for (i = 0; i < cc->cluster_size; i++) {
1145 if (data_blkaddr(dn.inode, dn.node_page,
1146 dn.ofs_in_node + i) == NULL_ADDR)
1150 psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1152 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1156 fio.version = ni.version;
1158 cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
1162 cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1164 refcount_set(&cic->ref, cc->nr_cpages);
1165 cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
1166 cc->log_cluster_size, GFP_NOFS);
1170 cic->nr_rpages = cc->cluster_size;
1172 for (i = 0; i < cc->nr_cpages; i++) {
1173 f2fs_set_compressed_page(cc->cpages[i], inode,
1174 cc->rpages[i + 1]->index, cic);
1175 fio.compressed_page = cc->cpages[i];
1177 fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1178 dn.ofs_in_node + i + 1);
1180 /* wait for GCed page writeback via META_MAPPING */
1181 f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1183 if (fio.encrypted) {
1184 fio.page = cc->rpages[i + 1];
1185 err = f2fs_encrypt_one_page(&fio);
1187 goto out_destroy_crypt;
1188 cc->cpages[i] = fio.encrypted_page;
1192 set_cluster_writeback(cc);
1194 for (i = 0; i < cc->cluster_size; i++)
1195 cic->rpages[i] = cc->rpages[i];
1197 for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1200 blkaddr = f2fs_data_blkaddr(&dn);
1201 fio.page = cc->rpages[i];
1202 fio.old_blkaddr = blkaddr;
1204 /* cluster header */
1206 if (blkaddr == COMPRESS_ADDR)
1208 if (__is_valid_data_blkaddr(blkaddr))
1209 f2fs_invalidate_blocks(sbi, blkaddr);
1210 f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1211 goto unlock_continue;
1214 if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1217 if (i > cc->nr_cpages) {
1218 if (__is_valid_data_blkaddr(blkaddr)) {
1219 f2fs_invalidate_blocks(sbi, blkaddr);
1220 f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1222 goto unlock_continue;
1225 f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1228 fio.encrypted_page = cc->cpages[i - 1];
1230 fio.compressed_page = cc->cpages[i - 1];
1232 cc->cpages[i - 1] = NULL;
1233 f2fs_outplace_write_data(&dn, &fio);
1236 inode_dec_dirty_pages(cc->inode);
1237 unlock_page(fio.page);
1240 if (fio.compr_blocks)
1241 f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1242 f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
1244 set_inode_flag(cc->inode, FI_APPEND_WRITE);
1245 if (cc->cluster_idx == 0)
1246 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1248 f2fs_put_dnode(&dn);
1249 if (IS_NOQUOTA(inode))
1250 up_read(&sbi->node_write);
1252 f2fs_unlock_op(sbi);
1254 spin_lock(&fi->i_size_lock);
1255 if (fi->last_disk_size < psize)
1256 fi->last_disk_size = psize;
1257 spin_unlock(&fi->i_size_lock);
1259 f2fs_put_rpages(cc);
1260 f2fs_destroy_compress_ctx(cc);
1266 for (--i; i >= 0; i--)
1267 fscrypt_finalize_bounce_page(&cc->cpages[i]);
1268 for (i = 0; i < cc->nr_cpages; i++) {
1271 f2fs_put_page(cc->cpages[i], 1);
1276 f2fs_put_dnode(&dn);
1278 if (IS_NOQUOTA(inode))
1279 up_read(&sbi->node_write);
1281 f2fs_unlock_op(sbi);
1285 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1287 struct f2fs_sb_info *sbi = bio->bi_private;
1288 struct compress_io_ctx *cic =
1289 (struct compress_io_ctx *)page_private(page);
1292 if (unlikely(bio->bi_status))
1293 mapping_set_error(cic->inode->i_mapping, -EIO);
1295 f2fs_compress_free_page(page);
1297 dec_page_count(sbi, F2FS_WB_DATA);
1299 if (refcount_dec_not_one(&cic->ref))
1302 for (i = 0; i < cic->nr_rpages; i++) {
1303 WARN_ON(!cic->rpages[i]);
1304 clear_cold_data(cic->rpages[i]);
1305 end_page_writeback(cic->rpages[i]);
1312 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1314 struct writeback_control *wbc,
1315 enum iostat_type io_type)
1317 struct address_space *mapping = cc->inode->i_mapping;
1318 int _submitted, compr_blocks, ret;
1319 int i = -1, err = 0;
1321 compr_blocks = f2fs_compressed_blocks(cc);
1322 if (compr_blocks < 0) {
1327 for (i = 0; i < cc->cluster_size; i++) {
1331 if (cc->rpages[i]->mapping != mapping) {
1332 unlock_page(cc->rpages[i]);
1336 BUG_ON(!PageLocked(cc->rpages[i]));
1338 ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1339 NULL, NULL, wbc, io_type,
1342 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1343 unlock_page(cc->rpages[i]);
1345 } else if (ret == -EAGAIN) {
1347 * for quota files, just redirty the remaining pages to
1348 * avoid a deadlock caused by a cluster update race with
1349 * foreground operations.
1351 if (IS_NOQUOTA(cc->inode)) {
1357 congestion_wait(BLK_RW_ASYNC,
1358 DEFAULT_IO_TIMEOUT);
1359 lock_page(cc->rpages[i]);
1361 if (!PageDirty(cc->rpages[i])) {
1362 unlock_page(cc->rpages[i]);
1366 clear_page_dirty_for_io(cc->rpages[i]);
1373 *submitted += _submitted;
1377 for (++i; i < cc->cluster_size; i++) {
1380 redirty_page_for_writepage(wbc, cc->rpages[i]);
1381 unlock_page(cc->rpages[i]);
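/*
 * Entry point for cluster writeback: try the compressed path first;
 * if the cluster cannot or should not be compressed (-EAGAIN), fall
 * back to writing each page raw via f2fs_write_raw_pages().
 */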
1386 int f2fs_write_multi_pages(struct compress_ctx *cc,
1388 struct writeback_control *wbc,
1389 enum iostat_type io_type)
1391 struct f2fs_inode_info *fi = F2FS_I(cc->inode);
1392 const struct f2fs_compress_ops *cops =
1393 f2fs_cops[fi->i_compress_algorithm];
1397 if (cluster_may_compress(cc)) {
1398 err = f2fs_compress_pages(cc);
1399 if (err == -EAGAIN) {
1402 f2fs_put_rpages_wbc(cc, wbc, true, 1);
1406 err = f2fs_write_compressed_pages(cc, submitted,
1408 cops->destroy_compress_ctx(cc);
1413 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1416 f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1418 err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1419 f2fs_put_rpages_wbc(cc, wbc, false, 0);
1421 f2fs_destroy_compress_ctx(cc);
1425 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1427 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1428 struct decompress_io_ctx *dic;
1429 pgoff_t start_idx = start_idx_of_cluster(cc);
1432 dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
1434 return ERR_PTR(-ENOMEM);
1436 dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
1437 cc->log_cluster_size, GFP_NOFS);
1440 return ERR_PTR(-ENOMEM);
1443 dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1444 dic->inode = cc->inode;
1445 refcount_set(&dic->ref, cc->nr_cpages);
1446 dic->cluster_idx = cc->cluster_idx;
1447 dic->cluster_size = cc->cluster_size;
1448 dic->log_cluster_size = cc->log_cluster_size;
1449 dic->nr_cpages = cc->nr_cpages;
1450 dic->failed = false;
1452 for (i = 0; i < dic->cluster_size; i++)
1453 dic->rpages[i] = cc->rpages[i];
1454 dic->nr_rpages = cc->cluster_size;
1456 dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
1457 dic->nr_cpages, GFP_NOFS);
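/*
 * cpages map to the on-disk blocks at start_idx + 1 onward: slot 0 of
 * the cluster is the COMPRESS_ADDR header, hence the "+ 1" in the page
 * indices below.
 */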
1461 for (i = 0; i < dic->nr_cpages; i++) {
1464 page = f2fs_compress_alloc_page();
1468 f2fs_set_compressed_page(page, cc->inode,
1469 start_idx + i + 1, dic);
1470 dic->cpages[i] = page;
1477 return ERR_PTR(-ENOMEM);
1480 void f2fs_free_dic(struct decompress_io_ctx *dic)
1485 for (i = 0; i < dic->cluster_size; i++) {
1488 if (!dic->tpages[i])
1490 f2fs_compress_free_page(dic->tpages[i]);
1496 for (i = 0; i < dic->nr_cpages; i++) {
1497 if (!dic->cpages[i])
1499 f2fs_compress_free_page(dic->cpages[i]);
1508 void f2fs_decompress_end_io(struct page **rpages,
1509 unsigned int cluster_size, bool err, bool verity)
1513 for (i = 0; i < cluster_size; i++) {
1514 struct page *rpage = rpages[i];
1519 if (err || PageError(rpage))
1520 goto clear_uptodate;
1522 if (!verity || fsverity_verify_page(rpage)) {
1523 SetPageUptodate(rpage);
1527 ClearPageUptodate(rpage);
1528 ClearPageError(rpage);