Merge tag 'f2fs-for-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk...
[linux-2.6-microblaze.git] / fs / f2fs / data.c
index ed503d5..aa3ccdd 100644
@@ -119,7 +119,7 @@ struct bio_post_read_ctx {
        block_t fs_blkaddr;
 };
 
-static void f2fs_finish_read_bio(struct bio *bio)
+static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
 {
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;
@@ -133,8 +133,9 @@ static void f2fs_finish_read_bio(struct bio *bio)
 
                if (f2fs_is_compressed_page(page)) {
                        if (bio->bi_status)
-                               f2fs_end_read_compressed_page(page, true, 0);
-                       f2fs_put_page_dic(page);
+                               f2fs_end_read_compressed_page(page, true, 0,
+                                                       in_task);
+                       f2fs_put_page_dic(page, in_task);
                        continue;
                }
 
@@ -191,7 +192,7 @@ static void f2fs_verify_bio(struct work_struct *work)
                fsverity_verify_bio(bio);
        }
 
-       f2fs_finish_read_bio(bio);
+       f2fs_finish_read_bio(bio, true);
 }
 
 /*
@@ -203,7 +204,7 @@ static void f2fs_verify_bio(struct work_struct *work)
  * can involve reading verity metadata pages from the file, and these verity
  * metadata pages may be encrypted and/or compressed.
  */
-static void f2fs_verify_and_finish_bio(struct bio *bio)
+static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
 {
        struct bio_post_read_ctx *ctx = bio->bi_private;
 
@@ -211,7 +212,7 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
                INIT_WORK(&ctx->work, f2fs_verify_bio);
                fsverity_enqueue_verify_work(&ctx->work);
        } else {
-               f2fs_finish_read_bio(bio);
+               f2fs_finish_read_bio(bio, in_task);
        }
 }
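
As the comment above notes, fs-verity verification may itself need to read (and decrypt or decompress) verity metadata pages, so it cannot run from the bio's bi_end_io path; the ctx's work item is queued to fs-verity's workqueue instead. A minimal sketch of that deferral pattern, reusing the bio_post_read_ctx work and bio fields shown above (verify_work and enqueue_verify are illustrative names, not the actual f2fs helpers):

#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/fsverity.h>

static void verify_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	/* Runs in process context, so reading verity metadata is safe. */
	fsverity_verify_bio(ctx->bio);
	/* ...then finish the bio, e.g. f2fs_finish_read_bio(ctx->bio, true). */
}

static void enqueue_verify(struct bio_post_read_ctx *ctx)
{
	INIT_WORK(&ctx->work, verify_work);
	fsverity_enqueue_verify_work(&ctx->work);
}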
 
@@ -224,7 +225,8 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
  * that the bio includes at least one compressed page.  The actual decompression
  * is done on a per-cluster basis, not a per-bio basis.
  */
-static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
+static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
+               bool in_task)
 {
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;
@@ -237,7 +239,7 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
                /* PG_error was set if decryption failed. */
                if (f2fs_is_compressed_page(page))
                        f2fs_end_read_compressed_page(page, PageError(page),
-                                               blkaddr);
+                                               blkaddr, in_task);
                else
                        all_compressed = false;
 
@@ -262,15 +264,16 @@ static void f2fs_post_read_work(struct work_struct *work)
                fscrypt_decrypt_bio(ctx->bio);
 
        if (ctx->enabled_steps & STEP_DECOMPRESS)
-               f2fs_handle_step_decompress(ctx);
+               f2fs_handle_step_decompress(ctx, true);
 
-       f2fs_verify_and_finish_bio(ctx->bio);
+       f2fs_verify_and_finish_bio(ctx->bio, true);
 }
 
 static void f2fs_read_end_io(struct bio *bio)
 {
        struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
        struct bio_post_read_ctx *ctx;
+       bool intask = in_task();
 
        iostat_update_and_unbind_ctx(bio, 0);
        ctx = bio->bi_private;
@@ -281,16 +284,29 @@ static void f2fs_read_end_io(struct bio *bio)
        }
 
        if (bio->bi_status) {
-               f2fs_finish_read_bio(bio);
+               f2fs_finish_read_bio(bio, intask);
                return;
        }
 
-       if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
-               INIT_WORK(&ctx->work, f2fs_post_read_work);
-               queue_work(ctx->sbi->post_read_wq, &ctx->work);
-       } else {
-               f2fs_verify_and_finish_bio(bio);
+       if (ctx) {
+               unsigned int enabled_steps = ctx->enabled_steps &
+                                       (STEP_DECRYPT | STEP_DECOMPRESS);
+
+               /*
+                * If decompression is the only post-read step required
+                * (no decryption) and low-memory mode is off, handle it
+                * here directly instead of deferring to the workqueue.
+                */
+               if (enabled_steps == STEP_DECOMPRESS &&
+                               !f2fs_low_mem_mode(sbi)) {
+                       f2fs_handle_step_decompress(ctx, intask);
+               } else if (enabled_steps) {
+                       INIT_WORK(&ctx->work, f2fs_post_read_work);
+                       queue_work(ctx->sbi->post_read_wq, &ctx->work);
+                       return;
+               }
        }
+
+       f2fs_verify_and_finish_bio(bio, intask);
 }
 
 static void f2fs_write_end_io(struct bio *bio)
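
The in_task flag threaded through the read-completion path above records whether the end_io handler ran in process context or in interrupt/softirq context, so the decompression and decryption helpers know whether they may sleep. A minimal illustration of why that distinction matters, using a hypothetical helper name (post_read_alloc is not an f2fs function):

#include <linux/slab.h>

/* Sleeping allocations (GFP_KERNEL) are only safe in process context;
 * interrupt/softirq completion must use GFP_ATOMIC or defer the work
 * to a workqueue, as the hunk above does for the decrypt path. */
static void *post_read_alloc(size_t size, bool in_task)
{
	return kmalloc(size, in_task ? GFP_KERNEL : GFP_ATOMIC);
}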
@@ -1682,8 +1698,6 @@ sync_out:
                 */
                f2fs_wait_on_block_writeback_range(inode,
                                                map->m_pblk, map->m_len);
-               invalidate_mapping_pages(META_MAPPING(sbi),
-                                               map->m_pblk, map->m_pblk);
 
                if (map->m_multidev_dio) {
                        block_t blk_addr = map->m_pblk;
@@ -2223,7 +2237,7 @@ skip_reading_dnode:
 
                if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
                        if (atomic_dec_and_test(&dic->remaining_pages))
-                               f2fs_decompress_cluster(dic);
+                               f2fs_decompress_cluster(dic, true);
                        continue;
                }
 
@@ -2241,7 +2255,7 @@ submit_and_realloc:
                                        page->index, for_write);
                        if (IS_ERR(bio)) {
                                ret = PTR_ERR(bio);
-                               f2fs_decompress_end_io(dic, ret);
+                               f2fs_decompress_end_io(dic, ret, true);
                                f2fs_put_dnode(&dn);
                                *bio_ret = NULL;
                                return ret;
@@ -2731,6 +2745,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
                .submitted = false,
                .compr_blocks = compr_blocks,
                .need_lock = LOCK_RETRY,
+               .post_read = f2fs_post_read_required(inode),
                .io_type = io_type,
                .io_wbc = wbc,
                .bio = bio,
@@ -2902,7 +2917,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 {
        int ret = 0;
        int done = 0, retry = 0;
-       struct pagevec pvec;
+       struct page *pages[F2FS_ONSTACK_PAGES];
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        struct bio *bio = NULL;
        sector_t last_block;
@@ -2933,8 +2948,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
        int submitted = 0;
        int i;
 
-       pagevec_init(&pvec);
-
        if (get_dirty_pages(mapping->host) <=
                                SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
                set_inode_flag(mapping->host, FI_HOT_DATA);
@@ -2960,13 +2973,13 @@ retry:
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && !retry && (index <= end)) {
-               nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
-                               tag);
+               nr_pages = find_get_pages_range_tag(mapping, &index, end,
+                               tag, F2FS_ONSTACK_PAGES, pages);
                if (nr_pages == 0)
                        break;
 
                for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
+                       struct page *page = pages[i];
                        bool need_readd;
 readd:
                        need_readd = false;
@@ -2997,6 +3010,10 @@ readd:
                                if (!f2fs_cluster_is_empty(&cc))
                                        goto lock_page;
 
+                               if (f2fs_all_cluster_page_ready(&cc,
+                                       pages, i, nr_pages, true))
+                                       goto lock_page;
+
                                ret2 = f2fs_prepare_compress_overwrite(
                                                        inode, &pagep,
                                                        page->index, &fsdata);
@@ -3007,8 +3024,8 @@ readd:
                                } else if (ret2 &&
                                        (!f2fs_compress_write_end(inode,
                                                fsdata, page->index, 1) ||
-                                        !f2fs_all_cluster_page_loaded(&cc,
-                                               &pvec, i, nr_pages))) {
+                                        !f2fs_all_cluster_page_ready(&cc,
+                                               pages, i, nr_pages, false))) {
                                        retry = 1;
                                        break;
                                }
@@ -3098,7 +3115,7 @@ next:
                        if (need_readd)
                                goto readd;
                }
-               pagevec_release(&pvec);
+               release_pages(pages, nr_pages);
                cond_resched();
        }
 #ifdef CONFIG_F2FS_FS_COMPRESSION
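
The f2fs_write_cache_pages() hunks above replace the pagevec with a plain on-stack page array filled by find_get_pages_range_tag() and dropped with release_pages(). A minimal sketch of that lookup loop under assumed names (MY_ONSTACK_PAGES and the loop body are placeholders, not the f2fs code):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

#define MY_ONSTACK_PAGES 16	/* placeholder; f2fs uses F2FS_ONSTACK_PAGES */

static void walk_dirty_pages(struct address_space *mapping,
			     pgoff_t index, pgoff_t end)
{
	struct page *pages[MY_ONSTACK_PAGES];
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &index, end,
					      PAGECACHE_TAG_DIRTY,
					      MY_ONSTACK_PAGES, pages))) {
		for (i = 0; i < nr; i++) {
			/* ... write back or skip pages[i] ... */
		}
		/* Drop the references taken by the lookup. */
		release_pages(pages, nr);
		cond_resched();
	}
}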
@@ -3408,12 +3425,11 @@ static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
        struct inode *cow_inode = F2FS_I(inode)->cow_inode;
        pgoff_t index = page->index;
        int err = 0;
-       block_t ori_blk_addr;
+       block_t ori_blk_addr = NULL_ADDR;
 
        /* If pos is beyond the end of file, reserve a new block in COW inode */
        if ((pos & PAGE_MASK) >= i_size_read(inode))
-               return __reserve_data_block(cow_inode, index, blk_addr,
-                                       node_changed);
+               goto reserve_block;
 
        /* Look for the block in COW inode first */
        err = __find_data_block(cow_inode, index, blk_addr);
@@ -3427,10 +3443,12 @@ static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
        if (err)
                return err;
 
+reserve_block:
        /* Finally, we should reserve a new block in COW inode for the update */
        err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
        if (err)
                return err;
+       inc_atomic_write_cnt(inode);
 
        if (ori_blk_addr != NULL_ADDR)
                *blk_addr = ori_blk_addr;