erofs: try to leave (de)compressed_pages on stack if possible
author Gao Xiang <hsiangkao@linux.alibaba.com>
Fri, 15 Jul 2022 15:42:00 +0000 (23:42 +0800)
committer Gao Xiang <hsiangkao@linux.alibaba.com>
Thu, 21 Jul 2022 14:55:30 +0000 (22:55 +0800)
In most cases, small pclusters can be decompressed with page
arrays kept on the stack.

Try to leave both (de)compressed_pages on the stack if possible, as before.

Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220715154203.48093-14-hsiangkao@linux.alibaba.com
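
A rough standalone sketch of the packing scheme this patch introduces:
decompressed_pages takes the first nr_pages onstack slots, compressed_pages
reuses whatever onstack slots remain, and either array that does not fit
falls back to a heap allocation. Z_EROFS_ONSTACK_PAGES is given an
illustrative value and kvcalloc() is replaced by calloc() so the sketch
builds outside the kernel; it is not the kernel code itself.

	/* Illustrative sketch of the onstack/heap page-array selection. */
	#include <stdlib.h>
	#include <string.h>

	#define Z_EROFS_ONSTACK_PAGES 32   /* illustrative value */

	struct page;

	struct backend_sketch {
		struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
		struct page **decompressed_pages;
		struct page **compressed_pages;
		unsigned int onstack_used;
	};

	static void prepare_page_arrays(struct backend_sketch *be,
					unsigned int nr_pages,
					unsigned int pclusterpages)
	{
		be->decompressed_pages = NULL;
		be->compressed_pages = NULL;
		be->onstack_used = 0;

		/* decompressed array goes on the stack if it fits */
		if (nr_pages <= Z_EROFS_ONSTACK_PAGES) {
			be->decompressed_pages = be->onstack_pages;
			be->onstack_used = nr_pages;
			memset(be->decompressed_pages, 0,
			       sizeof(struct page *) * nr_pages);
		}

		/* compressed array can share the remaining onstack slots */
		if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
			be->compressed_pages = be->onstack_pages + be->onstack_used;

		/* otherwise fall back to heap allocation for either array */
		if (!be->decompressed_pages)
			be->decompressed_pages =
				calloc(nr_pages, sizeof(struct page *));
		if (!be->compressed_pages)
			be->compressed_pages =
				calloc(pclusterpages, sizeof(struct page *));
	}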
fs/erofs/zdata.c

index 1cf377e..d93ba0a 100644
@@ -858,6 +858,7 @@ struct z_erofs_decompress_backend {
        struct page **compressed_pages;
 
        struct page **pagepool;
+       unsigned int onstack_used;
 };
 
 static int z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
@@ -904,14 +905,9 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
 {
        struct z_erofs_pcluster *pcl = be->pcl;
        unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
-       struct page **compressed_pages;
        int i, err = 0;
 
-       /* XXX: will have a better approach in the following commits */
-       compressed_pages = kmalloc_array(pclusterpages, sizeof(struct page *),
-                                        GFP_KERNEL | __GFP_NOFAIL);
        *overlapped = false;
-
        for (i = 0; i < pclusterpages; ++i) {
                struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
                struct page *page = bvec->page;
@@ -922,7 +918,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
                        DBG_BUGON(1);
                        continue;
                }
-               compressed_pages[i] = page;
+               be->compressed_pages[i] = page;
 
                if (z_erofs_is_inline_pcluster(pcl)) {
                        if (!PageUptodate(page))
@@ -953,11 +949,8 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
                }
        }
 
-       if (err) {
-               kfree(compressed_pages);
+       if (err)
                return err;
-       }
-       be->compressed_pages = compressed_pages;
        return 0;
 }
 
@@ -976,15 +969,28 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
        mutex_lock(&pcl->lock);
        nr_pages = pcl->nr_pages;
 
+       /* allocate (de)compressed page arrays if cannot be kept on stack */
+       be->decompressed_pages = NULL;
+       be->compressed_pages = NULL;
+       be->onstack_used = 0;
        if (nr_pages <= Z_EROFS_ONSTACK_PAGES) {
                be->decompressed_pages = be->onstack_pages;
+               be->onstack_used = nr_pages;
                memset(be->decompressed_pages, 0,
                       sizeof(struct page *) * nr_pages);
-       } else {
+       }
+
+       if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
+               be->compressed_pages = be->onstack_pages + be->onstack_used;
+
+       if (!be->decompressed_pages)
                be->decompressed_pages =
                        kvcalloc(nr_pages, sizeof(struct page *),
                                 GFP_KERNEL | __GFP_NOFAIL);
-       }
+       if (!be->compressed_pages)
+               be->compressed_pages =
+                       kvcalloc(pclusterpages, sizeof(struct page *),
+                                GFP_KERNEL | __GFP_NOFAIL);
 
        err2 = z_erofs_parse_out_bvecs(be);
        if (err2)
@@ -1041,7 +1047,9 @@ out:
                        WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
                }
        }
-       kfree(be->compressed_pages);
+       if (be->compressed_pages < be->onstack_pages ||
+           be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
+               kvfree(be->compressed_pages);
 
        for (i = 0; i < nr_pages; ++i) {
                page = be->decompressed_pages[i];
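
The teardown hunk above decides whether compressed_pages was borrowed from
the onstack array with a simple pointer-range check. A minimal sketch of
that check, reusing the hypothetical backend_sketch type from the earlier
example and plain free() in place of kvfree():

	/* Free compressed_pages only if it does not point into onstack_pages. */
	static void put_compressed_pages(struct backend_sketch *be)
	{
		if (be->compressed_pages < be->onstack_pages ||
		    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
			free(be->compressed_pages);
		be->compressed_pages = NULL;
	}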