erofs: avoid refcounting short-lived pages
author		Gao Xiang <hsiangkao@linux.alibaba.com>
		Thu, 11 Jul 2024 05:36:59 +0000 (13:36 +0800)
committer	Gao Xiang <hsiangkao@linux.alibaba.com>
		Thu, 11 Jul 2024 07:14:26 +0000 (15:36 +0800)
LZ4 always reuses the decompressed buffer as its LZ77 sliding window
(dynamic dictionary) for optimal performance.  However, in specific
cases, the output buffer may not be fully backed by valid page cache
pages, so short-lived pages are used as temporary bounce buffers.
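
For illustration only (a minimal sketch, not the in-tree code: nr_out,
cached[] and alloc_bounce_page() are hypothetical stand-ins, while
vm_map_ram() is the real kernel API), the output is assembled as one
contiguous virtual window, and every slot without a valid page cache
page is backed by a temporary bounce page:

    /* hypothetical sketch: assemble the contiguous LZ4 output window */
    for (i = 0; i < nr_out; ++i) {
            if (cached[i])          /* slot has a valid page cache page */
                    pages[i] = cached[i];
            else                    /* back it with a short-lived bounce page */
                    pages[i] = alloc_bounce_page();
    }
    out = vm_map_ram(pages, nr_out, -1);    /* LZ4 decompresses into "out" */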

Due to the limited sliding window size, LZ4 short-lived bounce pages
can also be reused in a sliding manner, so each bounce page may be
vmapped multiple times at different relative positions by design.  To
avoid double frees, reuse counts are currently recorded via the page
refcount, but that will no longer work as-is in the future world of
memdescs.

Just maintain a lookup table to check whether a short-lived page is
reused.
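
As a stand-alone sketch of that scheme (the real loop is in the
zdata.c hunk below; is_shortlived(), table[] and free_to_pool() are
hypothetical stand-ins), each bounce page is recorded once no matter
how many window slots it backs, then freed exactly once:

    /* hypothetical sketch: free each reused bounce page exactly once */
    jtop = 0;
    for (i = 0; i < nr_pages; ++i) {
            page = pages[i];
            if (!page || !is_shortlived(page))
                    continue;
            for (j = 0; j < jtop && table[j] != page; ++j)
                    ;                       /* seen this bounce page before? */
            if (j >= jtop)                  /* newly detected bounce page */
                    table[jtop++] = page;
    }
    while (jtop)
            free_to_pool(table[--jtop]);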

Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240711053659.1364989-1-hsiangkao@linux.alibaba.com
fs/erofs/compress.h
fs/erofs/decompressor.c
fs/erofs/zdata.c

diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 526edc0..7bfe251 100644
@@ -54,17 +54,14 @@ struct z_erofs_decompressor {
  */
 
 /*
- * short-lived pages are pages directly from buddy system with specific
- * page->private (no need to set PagePrivate since these are non-LRU /
- * non-movable pages and bypass reclaim / migration code).
+ * Currently, short-lived pages are pages directly from the buddy
+ * system with specific page->private (Z_EROFS_SHORTLIVED_PAGE).
+ * In the future world of memdescs, these should be type 0 (Misc)
+ * memory, whose type can be checked with a new helper.
  */
 static inline bool z_erofs_is_shortlived_page(struct page *page)
 {
-       if (page->private != Z_EROFS_SHORTLIVED_PAGE)
-               return false;
-
-       DBG_BUGON(page->mapping);
-       return true;
+       return page->private == Z_EROFS_SHORTLIVED_PAGE;
 }
 
 static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
@@ -72,14 +69,7 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
 {
        if (!z_erofs_is_shortlived_page(page))
                return false;
-
-       /* short-lived pages should not be used by others at the same time */
-       if (page_ref_count(page) > 1) {
-               put_page(page);
-       } else {
-               /* follow the pcluster rule above. */
-               erofs_pagepool_add(pagepool, page);
-       }
+       erofs_pagepool_add(pagepool, page);
        return true;
 }
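
For context, a short-lived page is tagged through page->private at
allocation time, roughly as below (a sketch based on the helpers
above; the exact call sites live in zdata.c and are not part of this
hunk):

    page = erofs_allocpage(&pagepool, GFP_KERNEL);
    set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);  /* mark as short-lived */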
 
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index eac9e41..c2253b6 100644
@@ -110,7 +110,6 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
 
                if (top) {
                        victim = availables[--top];
-                       get_page(victim);
                } else {
                        victim = __erofs_allocpage(pagepool, rq->gfp, true);
                        if (!victim)
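
The get_page() removed above was the refcount half of the old scheme:
each additional vmap position of a reused bounce page took an extra
reference so that z_erofs_put_shortlivedpage() could drop all but the
last one.  With reuse now detected at free time via the lookup table,
the extra reference is no longer needed.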
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index aff3cdf..544fa0f 100644
@@ -1221,7 +1221,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
        unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
        const struct z_erofs_decompressor *decomp =
                                z_erofs_decomp[pcl->algorithmformat];
-       int i, err2;
+       int i, j, jtop, err2;
        struct page *page;
        bool overlapped;
 
@@ -1279,10 +1279,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
                WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
                put_page(page);
        } else {
+               /* managed folios are still left in compressed_bvecs[] */
                for (i = 0; i < pclusterpages; ++i) {
-                       /* consider shortlived pages added when decompressing */
                        page = be->compressed_pages[i];
-
                        if (!page ||
                            erofs_folio_is_managed(sbi, page_folio(page)))
                                continue;
@@ -1293,21 +1292,31 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
        if (be->compressed_pages < be->onstack_pages ||
            be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
                kvfree(be->compressed_pages);
-       z_erofs_fill_other_copies(be, err);
 
+       jtop = 0;
+       z_erofs_fill_other_copies(be, err);
        for (i = 0; i < be->nr_pages; ++i) {
                page = be->decompressed_pages[i];
                if (!page)
                        continue;
 
                DBG_BUGON(z_erofs_page_is_invalidated(page));
-
-               /* recycle all individual short-lived pages */
-               if (z_erofs_put_shortlivedpage(be->pagepool, page))
+               if (!z_erofs_is_shortlived_page(page)) {
+                       z_erofs_onlinefolio_end(page_folio(page), err);
                        continue;
-               z_erofs_onlinefolio_end(page_folio(page), err);
+               }
+               if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
+                       erofs_pagepool_add(be->pagepool, page);
+                       continue;
+               }
+               for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
+                       ;
+               if (j >= jtop)  /* this bounce page is newly detected */
+                       be->decompressed_pages[jtop++] = page;
        }
-
+       while (jtop)
+               erofs_pagepool_add(be->pagepool,
+                                  be->decompressed_pages[--jtop]);
        if (be->decompressed_pages != be->onstack_pages)
                kvfree(be->decompressed_pages);
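
Note that the lookup table needs no extra allocation: the
already-scanned prefix of be->decompressed_pages[] is compacted in
place to hold the jtop unique bounce pages, and the linear search
stays cheap since the limited LZ4 sliding window keeps the number of
distinct bounce pages small.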