// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>
/*
 * a compressed_pages[] placeholder in order to avoid
 * being filled with file pages for in-place decompression.
 */
#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
/* how to allocate cached pages for a workgroup */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
};
/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)
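/*
 * note: the tag is folded into the lowest bit of the page pointer kept
 * in compressed_pages[]; a tag of 1 marks a page that was just grabbed
 * from the managed cache with an extra page reference, which has to be
 * dropped if installing the pointer with cmpxchg fails.
 */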
static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	kmem_cache_destroy(z_erofs_workgroup_cachep);
}
static inline int init_unzip_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * we don't need too many threads, limiting threads
	 * could improve scheduling performance.
	 */
	z_erofs_workqueue =
		alloc_workqueue("erofs_unzipd",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				onlinecpus + onlinecpus / 4);

	return z_erofs_workqueue ? 0 : -ENOMEM;
}
static void init_once(void *ptr)
{
	struct z_erofs_vle_workgroup *grp = ptr;
	struct z_erofs_vle_work *const work =
		z_erofs_vle_grab_primary_work(grp);
	unsigned int i;

	mutex_init(&work->lock);
	work->nr_pages = 0;
	work->vcnt = 0;
	for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
		grp->compressed_pages[i] = NULL;
}
static void init_always(struct z_erofs_vle_workgroup *grp)
{
	struct z_erofs_vle_work *const work =
		z_erofs_vle_grab_primary_work(grp);

	atomic_set(&grp->obj.refcount, 1);
	grp->flags = 0;

	DBG_BUGON(work->nr_pages);
	DBG_BUGON(work->vcnt);
}
int __init z_erofs_init_zip_subsystem(void)
{
	z_erofs_workgroup_cachep =
		kmem_cache_create("erofs_compress",
				  Z_EROFS_WORKGROUP_SIZE, 0,
				  SLAB_RECLAIM_ACCOUNT, init_once);

	if (z_erofs_workgroup_cachep) {
		if (!init_unzip_workqueue())
			return 0;

		kmem_cache_destroy(z_erofs_workgroup_cachep);
	}
	return -ENOMEM;
}
enum z_erofs_vle_work_role {
	Z_EROFS_VLE_WORK_SECONDARY,
	Z_EROFS_VLE_WORK_PRIMARY,
	/*
	 * The current work was the tail of an existing chain, and the
	 * previously processed chained works have all been decided to hook
	 * up to it. A new chain should be created for the remaining
	 * unprocessed works, therefore, different from
	 * Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED, the next work cannot reuse
	 * the whole page in the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |  (belongs to the next work)  |  (belongs to the current work)  |
	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
	/*
	 * The current work has been linked with the processed chained works,
	 * and could also be linked with the potential remaining works, which
	 * means if the processing page is the tail partial page of the work,
	 * the current work can safely use the whole page (since the next work
	 * is under control) for in-place decompression, as illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page  |          head (partial) page           |
	 * | (of the current work) |         (of the previous work)         |
	 * |  PRIMARY_FOLLOWED or  |                                        |
	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used for the current work itself.  ]
	 */
	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,

	Z_EROFS_VLE_WORK_MAX
};
struct z_erofs_vle_work_builder {
	enum z_erofs_vle_work_role role;
	/*
	 * 'hosted = false' means that the current workgroup doesn't belong to
	 * the owned chained workgroups. In other words, it is none of our
	 * business to submit this workgroup.
	 */
	bool hosted;

	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;
	struct z_erofs_pagevec_ctor vector;

	/* pages used for reading the compressed data */
	struct page **compressed_pages;
	unsigned int compressed_deficit;
};

#define VLE_WORK_BUILDER_INIT()	\
	{ .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
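/*
 * lifecycle of a work builder: z_erofs_vle_work_iter_begin() looks up
 * or registers the workgroup and takes work->lock,
 * z_erofs_vle_work_add_page() feeds file pages (possibly reusing them
 * as compressed page slots for in-place decompression), and
 * z_erofs_vle_work_iter_end() unlocks the work and drops the reference
 * if the workgroup isn't hosted by ourselves.
 */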
#ifdef EROFS_FS_HAS_MANAGED_CACHE
static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
				     struct address_space *mc,
				     pgoff_t index,
				     unsigned int clusterpages,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool,
				     gfp_t gfp)
{
	struct page **const pages = bl->compressed_pages;
	const unsigned int remaining = bl->compressed_deficit;
	bool standalone = true;
	unsigned int i, j = 0;

	if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
		return;

	gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;

	index += clusterpages - remaining;

	for (i = 0; i < remaining; ++i) {
		struct page *page;
		compressed_page_t t;

		/* the compressed page was loaded before */
		if (READ_ONCE(pages[i]))
			continue;

		page = find_get_page(mc, index + i);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else if (type == DELAYEDALLOC) {
			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
		} else {	/* DONTALLOC */
			if (standalone)
				j = i;
			standalone = false;
			continue;
		}

		if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
			continue;

		if (page)
			put_page(page);
	}
	bl->compressed_pages += j;
	bl->compressed_deficit = remaining - j;

	if (standalone)
		bl->role = Z_EROFS_VLE_WORK_PRIMARY;
}
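/*
 * note: for DELAYEDALLOC, the PAGE_UNALLOCATED placeholder only
 * reserves the slot so it cannot be grabbed for in-place decompression
 * here; the actual cache page is allocated later in
 * pickup_page_for_submission() at submission time.
 */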
/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp)
{
	struct z_erofs_vle_workgroup *const grp =
		container_of(egrp, struct z_erofs_vle_workgroup, obj);
	struct address_space *const mapping = MNGD_MAPPING(sbi);
	const int clusterpages = erofs_clusterpages(sbi);
	int i;

	/*
	 * refcount of workgroup is now frozen as 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < clusterpages; ++i) {
		struct page *page = grp->compressed_pages[i];

		if (!page || page->mapping != mapping)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(grp->compressed_pages[i], NULL);

		set_page_private(page, 0);
		ClearPagePrivate(page);

		unlock_page(page);
		put_page(page);
	}
	return 0;
}
int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);
	struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
	int ret = 0;	/* 0 - busy */

	if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
		unsigned int i;

		for (i = 0; i < clusterpages; ++i) {
			if (grp->compressed_pages[i] == page) {
				WRITE_ONCE(grp->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&grp->obj, 1);

		if (ret) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
	return ret;
}
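/*
 * once the refcount is successfully frozen at 1, neither decompression
 * nor submission can be running on this workgroup, so the matched slot
 * in compressed_pages[] can be detached safely.
 */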
#else
static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
				     struct address_space *mc,
				     pgoff_t index,
				     unsigned int clusterpages,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool,
				     gfp_t gfp)
{
	/* nowhere to load compressed pages from */
}
#endif
/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
static inline bool try_to_reuse_as_compressed_page(
	struct z_erofs_vle_work_builder *b,
	struct page *page)
{
	while (b->compressed_deficit) {
		--b->compressed_deficit;
		if (!cmpxchg(b->compressed_pages++, NULL, page))
			return true;
	}

	return false;
}
/* callers must hold work->lock */
static int z_erofs_vle_work_add_page(
	struct z_erofs_vle_work_builder *builder,
	struct page *page,
	enum z_erofs_page_type type)
{
	int ret;
	bool occupied;

	/* give priority for the compressed data storage */
	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
	    try_to_reuse_as_compressed_page(builder, page))
		return 0;

	ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
					   page, type, &occupied);
	builder->work->vcnt += (unsigned int)ret;

	return ret ? 0 : -EAGAIN;
}
static enum z_erofs_vle_work_role
try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
		       z_erofs_vle_owned_workgrp_t *owned_head,
		       bool *hosted)
{
	DBG_BUGON(*hosted == true);

	/* let's claim these following types of workgroup */
retry:
	if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
		/* type 1, nil workgroup */
		if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
			    *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
			goto retry;

		*owned_head = &grp->next;
		*hosted = true;
		/* lucky, I am the followee :) */
		return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;

	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
		/*
		 * type 2, link to the end of an existing open chain,
		 * be careful that its submission itself is governed
		 * by the original owned chain.
		 */
		if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
			    *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
			goto retry;
		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
		return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
	}

	return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
}
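/*
 * to sum up, a workgroup is claimed in one of three ways: as the head
 * of a fresh chain (type 1, grp->next was NIL), hooked to the tail of
 * an open chain (type 2, grp->next was TAIL), or not at all, in which
 * case it keeps its original owner and the caller only gets a plain
 * PRIMARY role on it.
 */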
struct z_erofs_vle_work_finder {
	struct super_block *sb;
	pgoff_t idx;
	unsigned int pageofs;

	struct z_erofs_vle_workgroup **grp_ret;
	enum z_erofs_vle_work_role *role;
	z_erofs_vle_owned_workgrp_t *owned_head;
	bool *hosted;
};
static struct z_erofs_vle_work *
z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
{
	bool tag, primary;
	struct erofs_workgroup *egrp;
	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;

	egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
	if (!egrp) {
		*f->grp_ret = NULL;
		return NULL;
	}

	grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
	*f->grp_ret = grp;

	work = z_erofs_vle_grab_work(grp, f->pageofs);
	/* if multiref is disabled, `primary' is always true */
	primary = true;

	DBG_BUGON(work->pageofs != f->pageofs);

	/*
	 * lock must be taken first to avoid grp->next == NIL between
	 * claiming workgroup and adding pages:
	 *
	 * [incorrect locking case]:
	 *	mutex_lock(&work->lock)
	 *	add all pages to pagevec
	 *	...
	 *
	 * [correct locking case 1]:
	 *	mutex_lock(grp->work[a])
	 *	...
	 *	mutex_lock(grp->work[b])	mutex_lock(grp->work[c])
	 *	...				*role = SECONDARY
	 *					add all pages to pagevec
	 *					...
	 *	mutex_unlock(grp->work[c])
	 *	mutex_lock(grp->work[c])
	 *	...
	 *
	 * [correct locking case 2]:
	 *	mutex_lock(grp->work[b])
	 *	...
	 *	mutex_lock(grp->work[a])
	 *	...
	 *	mutex_lock(grp->work[c])
	 *	...
	 *	mutex_lock(grp->work[a])
	 *	*role = PRIMARY_OWNER
	 *	add all pages to pagevec
	 *	...
	 */
	mutex_lock(&work->lock);

	if (!primary)
		*f->role = Z_EROFS_VLE_WORK_SECONDARY;
	else	/* claim the workgroup if possible */
		*f->role = try_to_claim_workgroup(grp, f->owned_head,
						  f->hosted);
	return work;
}
static struct z_erofs_vle_work *
z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
			  struct erofs_map_blocks *map)
{
	bool newgrp = false;
	struct z_erofs_vle_workgroup *grp = *f->grp_ret;
	struct z_erofs_vle_work *work;

	/* if multiref is disabled, grp should never be nullptr */
	if (unlikely(grp)) {
		DBG_BUGON(1);
		return ERR_PTR(-EINVAL);
	}

	/* no available workgroup, let's allocate one */
	grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
	if (unlikely(!grp))
		return ERR_PTR(-ENOMEM);

	init_always(grp);
	grp->obj.index = f->idx;
	grp->llen = map->m_llen;

	z_erofs_vle_set_workgrp_fmt(grp,
		(map->m_flags & EROFS_MAP_ZIPPED) ?
			Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
			Z_EROFS_VLE_WORKGRP_FMT_PLAIN);

	/* new workgrps have been claimed as type 1 */
	WRITE_ONCE(grp->next, *f->owned_head);
	/* primary and followed work for all new workgrps */
	*f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
	/* it should be submitted by ourselves */
	*f->hosted = true;

	newgrp = true;
	work = z_erofs_vle_grab_primary_work(grp);
	work->pageofs = f->pageofs;

	/*
	 * lock all primary followed works before visible to others
	 * and mutex_trylock *never* fails for a new workgroup.
	 */
	mutex_trylock(&work->lock);

	if (newgrp) {
		int err = erofs_register_workgroup(f->sb, &grp->obj, 0);

		if (err) {
			mutex_unlock(&work->lock);
			kmem_cache_free(z_erofs_workgroup_cachep, grp);
			return ERR_PTR(-EAGAIN);
		}
	}

	*f->owned_head = &grp->next;
	*f->grp_ret = grp;
	return work;
}
#define builder_is_hooked(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)

#define builder_is_followed(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
				       struct super_block *sb,
				       struct erofs_map_blocks *map,
				       z_erofs_vle_owned_workgrp_t *owned_head)
{
	const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
	struct z_erofs_vle_workgroup *grp;
	const struct z_erofs_vle_work_finder finder = {
		.sb = sb,
		.idx = erofs_blknr(map->m_pa),
		.pageofs = map->m_la & ~PAGE_MASK,
		.grp_ret = &grp,
		.role = &builder->role,
		.owned_head = owned_head,
		.hosted = &builder->hosted
	};
	struct z_erofs_vle_work *work;

	DBG_BUGON(builder->work);

	/* must be Z_EROFS_WORK_TAIL or the next chained work */
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	DBG_BUGON(erofs_blkoff(map->m_pa));

repeat:
	work = z_erofs_vle_work_lookup(&finder);
	if (work) {
		unsigned int orig_llen;

		/* increase workgroup `llen' if needed */
		while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
		       orig_llen != cmpxchg_relaxed(&grp->llen,
						    orig_llen, map->m_llen))
			cpu_relax();
		goto got_it;
	}

	work = z_erofs_vle_work_register(&finder, map);
	if (unlikely(work == ERR_PTR(-EAGAIN)))
		goto repeat;

	if (IS_ERR(work))
		return PTR_ERR(work);
got_it:
	z_erofs_pagevec_ctor_init(&builder->vector,
		Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);

	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
		/* enable possibly in-place decompression */
		builder->compressed_pages = grp->compressed_pages;
		builder->compressed_deficit = clusterpages;
	} else {
		builder->compressed_pages = NULL;
		builder->compressed_deficit = 0;
	}

	builder->grp = grp;
	builder->work = work;
	return 0;
}
/*
 * keep in mind that referenced workgroups will only be freed
 * after an RCU grace period, so rcu_read_lock() can
 * prevent a workgroup from being freed.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	struct z_erofs_vle_work *work =	container_of(head,
		struct z_erofs_vle_work, rcu);
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	kmem_cache_free(z_erofs_workgroup_cachep, grp);
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
		struct z_erofs_vle_workgroup, obj);
	struct z_erofs_vle_work *const work = &vgrp->work;

	call_rcu(&work->rcu, z_erofs_rcu_callback);
}
static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
	struct z_erofs_vle_work *work __maybe_unused)
{
	erofs_workgroup_put(&grp->obj);
}

static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
{
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	__z_erofs_vle_work_release(grp, work);
}
static inline bool
z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
{
	struct z_erofs_vle_work *work = builder->work;

	if (!work)
		return false;

	z_erofs_pagevec_ctor_exit(&builder->vector, false);
	mutex_unlock(&work->lock);

	/*
	 * if all pending pages are added, don't hold work reference
	 * any longer if the current work isn't hosted by ourselves.
	 */
	if (!builder->hosted)
		__z_erofs_vle_work_release(builder->grp, work);

	builder->work = NULL;

	return true;
}
static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
					       gfp_t gfp)
{
	struct page *page = erofs_allocpage(pagepool, gfp);

	if (unlikely(!page))
		return NULL;

	page->mapping = Z_EROFS_MAPPING_STAGING;
	return page;
}
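/*
 * staging pages carry the special Z_EROFS_MAPPING_STAGING mapping;
 * they serve as temporary pagevec and decompression buffers and are
 * recycled back into the page pool by z_erofs_gather_if_stagingpage().
 */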
struct z_erofs_vle_frontend {
	struct inode *const inode;

	struct z_erofs_vle_work_builder builder;
	struct erofs_map_blocks map;

	z_erofs_vle_owned_workgrp_t owned_head;

	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;
};

#define VLE_FRONTEND_INIT(__i) { \
	.inode = __i, \
	.map = { \
		.m_llen = 0, \
		.m_plen = 0, \
		.mpage = NULL \
	}, \
	.builder = VLE_WORK_BUILDER_INIT(), \
	.owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
	.backmost = true, }
#ifdef EROFS_FS_HAS_MANAGED_CACHE
static inline bool
should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
{
	if (fe->backmost)
		return true;

	if (EROFS_FS_ZIP_CACHE_LVL >= 2)
		return la < fe->headoffset;

	return false;
}
#else
static inline bool
should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
{
	return false;
}
#endif
static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
				struct page *page,
				struct list_head *page_pool)
{
	struct super_block *const sb = fe->inode->i_sb;
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
	struct erofs_map_blocks *const map = &fe->map;
	struct z_erofs_vle_work_builder *const builder = &fe->builder;
	const loff_t offset = page_offset(page);

	bool tight = builder_is_hooked(builder);
	struct z_erofs_vle_work *work = builder->work;

	enum z_erofs_cache_alloctype cache_strategy;
	enum z_erofs_page_type page_type;
	unsigned int cur, end, spiltted, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen) {
		/* didn't get a valid unzip work previously (very rare) */
		if (!builder->work)
			goto restart_now;
		goto hitted;
	}

	/* go ahead with the next map_blocks */
	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);

	if (z_erofs_vle_work_iter_end(builder))
		fe->backmost = false;

	map->m_la = offset + cur;
	map->m_llen = 0;
	err = z_erofs_map_blocks_iter(fe->inode, map, 0);
	if (unlikely(err))
		goto err_out;

restart_now:
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
		goto hitted;

	DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
	DBG_BUGON(erofs_blkoff(map->m_pa));

	err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
	if (unlikely(err))
		goto err_out;

	/* preload all compressed pages (maybe downgrade role if necessary) */
	if (should_alloc_managed_pages(fe, map->m_la))
		cache_strategy = DELAYEDALLOC;
	else
		cache_strategy = DONTALLOC;

	preload_compressed_pages(builder, MNGD_MAPPING(sbi),
				 map->m_pa / PAGE_SIZE,
				 map->m_plen / PAGE_SIZE,
				 cache_strategy, page_pool, GFP_KERNEL);

	tight &= builder_is_hooked(builder);
	work = builder->work;
hitted:
	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	/* let's derive page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

	if (cur)
		tight &= builder_is_followed(builder);

retry:
	err = z_erofs_vle_work_add_page(builder, page, page_type);
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			__stagingpage_alloc(page_pool, GFP_NOFS);

		err = z_erofs_vle_work_add_page(builder,
			newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
		if (likely(!err))
			goto retry;
	}

	if (unlikely(err))
		goto err_out;

	index = page->index - map->m_la / PAGE_SIZE;

	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_fixup(page, index, true);

	/* bump up the number of spiltted parts of a page */
	++spiltted;
	/* also update nr_pages */
	work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_endio(page);

	debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		__func__, page, spiltted, map->m_llen);
	return err;

	/* if some error occurred while processing this page */
err_out:
	SetPageError(page);
	goto out;
}
static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
{
	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
	struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
	bool background = tagptr_unfold_tags(t);

	if (!background) {
		unsigned long flags;

		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	if (!atomic_add_return(bios, &io->pending_bios))
		queue_work(z_erofs_workqueue, &io->u.work);
}
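/*
 * `bios' may be negative: each completed bio calls in with -1 while
 * the submission path adds the total bio count at the end, so whoever
 * brings pending_bios down to zero kicks the decompression, either by
 * waking the foreground waiter or by queueing the workqueue item.
 */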
static inline void z_erofs_vle_read_endio(struct bio *bio)
{
	const blk_status_t err = bio->bi_status;
	unsigned int i;
	struct bio_vec *bvec;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *mc = NULL;
#endif
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, i, iter_all) {
		struct page *page = bvec->bv_page;
		bool cachemngd = false;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!page->mapping);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
			struct inode *const inode = page->mapping->host;
			struct super_block *const sb = inode->i_sb;

			mc = MNGD_MAPPING(EROFS_SB(sb));
		}

		/*
		 * if mc is not grabbed yet, it equals NULL; however,
		 * page->mapping is never NULL if working properly.
		 */
		cachemngd = (page->mapping == mc);
#endif

		if (unlikely(err))
			SetPageError(page);
		else if (cachemngd)
			SetPageUptodate(page);

		if (cachemngd)
			unlock_page(page);
	}

	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
	bio_put(bio);
}
static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);

static int z_erofs_vle_unzip(struct super_block *sb,
			     struct z_erofs_vle_workgroup *grp,
			     struct list_head *page_pool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);

	struct z_erofs_pagevec_ctor ctor;
	unsigned int nr_pages;
	unsigned int sparsemem_pages = 0;
	struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;
	unsigned int i, llen;

	enum z_erofs_page_type page_type;
	bool overlapped;
	struct z_erofs_vle_work *work;
	void *vout;
	int err;

	might_sleep();
	work = z_erofs_vle_grab_primary_work(grp);
	DBG_BUGON(!READ_ONCE(work->nr_pages));

	mutex_lock(&work->lock);
	nr_pages = work->nr_pages;

	if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
		pages = pages_onstack;
	else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
		 mutex_trylock(&z_pagemap_global_lock))
		pages = z_pagemap_global;
	else {
repeat:
		pages = kvmalloc_array(nr_pages,
				       sizeof(struct page *), GFP_KERNEL);

		/* fallback to global pagemap for the lowmem scenario */
		if (unlikely(!pages)) {
			if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
				goto repeat;
			else {
				mutex_lock(&z_pagemap_global_lock);
				pages = z_pagemap_global;
			}
		}
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	z_erofs_pagevec_ctor_init(&ctor,
		Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);

	for (i = 0; i < work->vcnt; ++i) {
		unsigned int pagenr;

		page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (z_erofs_gather_if_stagingpage(page_pool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);
		DBG_BUGON(pages[pagenr]);

		pages[pagenr] = page;
	}
	sparsemem_pages = i;

	z_erofs_pagevec_ctor_exit(&ctor, true);

	overlapped = false;
	compressed_pages = grp->compressed_pages;

	for (i = 0; i < clusterpages; ++i) {
		unsigned int pagenr;

		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (z_erofs_is_stagingpage(page))
			continue;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (page->mapping == MNGD_MAPPING(sbi)) {
			DBG_BUGON(!PageUptodate(page));
			continue;
		}
#endif

		/* only non-head page could be reused as a compressed page */
		pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);
		DBG_BUGON(pages[pagenr]);
		++sparsemem_pages;
		pages[pagenr] = page;

		overlapped = true;
	}

	llen = (nr_pages << PAGE_SHIFT) - work->pageofs;

	if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
		err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
					     pages, nr_pages, work->pageofs);
		goto out;
	}

	if (llen > grp->llen)
		llen = grp->llen;

	err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
					    pages, llen, work->pageofs);
	if (err != -ENOTSUPP)
		goto out;

	if (sparsemem_pages >= nr_pages)
		goto skip_allocpage;

	for (i = 0; i < nr_pages; ++i) {
		if (pages[i])
			continue;

		pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
	}

skip_allocpage:
	vout = erofs_vmap(pages, nr_pages);

	err = z_erofs_vle_unzip_vmap(compressed_pages,
		clusterpages, vout, llen, work->pageofs, overlapped);

	erofs_vunmap(vout, nr_pages);

out:
	/* must handle all compressed pages before ending pages */
	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (page->mapping == MNGD_MAPPING(sbi))
			continue;
#endif
		/* recycle all individual staging pages */
		(void)z_erofs_gather_if_stagingpage(page_pool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}

	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		if (!page)
			continue;

		DBG_BUGON(!page->mapping);

		/* recycle all individual staging pages */
		if (z_erofs_gather_if_stagingpage(page_pool, page))
			continue;

		if (unlikely(err < 0))
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (unlikely(pages != pages_onstack))
		kvfree(pages);

	work->nr_pages = 0;
	work->vcnt = 0;

	/* all work locks MUST be taken before the following line */

	WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);

	/* all work locks SHOULD be released right now */
	mutex_unlock(&work->lock);

	z_erofs_vle_work_release(work);
	return err;
}
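/*
 * the decompression above is attempted in order: plain (identity) copy
 * for uncompressed workgroups, then the fast per-CPU path, and finally
 * the erofs_vmap() based fallback once the fast path returns -ENOTSUPP.
 */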
static void z_erofs_vle_unzip_all(struct super_block *sb,
				  struct z_erofs_vle_unzip_io *io,
				  struct list_head *page_pool)
{
	z_erofs_vle_owned_workgrp_t owned = io->head;

	while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
		struct z_erofs_vle_workgroup *grp;

		/* impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);

		/* impossible that 'owned' equals NULL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);

		grp = container_of(owned, struct z_erofs_vle_workgroup, next);
		owned = READ_ONCE(grp->next);

		z_erofs_vle_unzip(sb, grp, page_pool);
	}
}
static void z_erofs_vle_unzip_wq(struct work_struct *work)
{
	struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
		struct z_erofs_vle_unzip_io_sb, io.u.work);
	LIST_HEAD(page_pool);

	DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);

	put_pages_list(&page_pool);
	kvfree(iosb);
}
static struct page *
pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
			   unsigned int nr,
			   struct list_head *pagepool,
			   struct address_space *mc,
			   gfp_t gfp)
{
	/* determined at compile time to avoid too many #ifdefs */
	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
	const pgoff_t index = grp->obj.index;
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(grp->compressed_pages[nr]);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/*
	 * the cached page has not been allocated and
	 * a placeholder is out there, prepare it now.
	 */
	if (!nocache && page == PAGE_UNALLOCATED) {
		tocache = true;
		goto out_allocpage;
	}

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	mapping = READ_ONCE(page->mapping);

	/*
	 * if managed cache is disabled, there's no way to
	 * get such a cached-like page.
	 */
	if (nocache) {
		/* if managed cache is disabled, `justfound' is impossible */
		DBG_BUGON(justfound);

		/* and it should be locked, not uptodate, and not truncated */
		DBG_BUGON(!PageLocked(page));
		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!mapping);
		goto out;
	}

	/*
	 * unmanaged (file) pages are all locked solidly,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(grp->compressed_pages[nr], page);

		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) for
			 * the current restriction as well if
			 * the page is already in compressed_pages[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)grp);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = __stagingpage_alloc(pagepool, gfp);
	if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
		list_add(&page->lru, pagepool);
		cpu_relax();
		goto repeat;
	}
	if (nocache || !tocache)
		goto out;
	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		page->mapping = Z_EROFS_MAPPING_STAGING;
		goto out;
	}

	set_page_private(page, (unsigned long)grp);
	SetPagePrivate(page);
out:	/* the only exit (for tracing and debugging) */
	return page;
}
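/*
 * a NULL return above means the compressed page is already up-to-date
 * in the managed cache, so the caller can account it to the bypass
 * queue instead of submitting io for it.
 */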
static struct z_erofs_vle_unzip_io *
jobqueue_init(struct super_block *sb,
	      struct z_erofs_vle_unzip_io *io,
	      bool foreground)
{
	struct z_erofs_vle_unzip_io_sb *iosb;

	if (foreground) {
		/* waitqueue available for foreground io */
		DBG_BUGON(!io);

		init_waitqueue_head(&io->u.wait);
		atomic_set(&io->pending_bios, 0);
		goto out;
	}

	iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
			GFP_KERNEL | __GFP_NOFAIL);
	DBG_BUGON(!iosb);

	/* initialize fields in the allocated descriptor */
	io = &iosb->io;
	iosb->sb = sb;
	INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
out:
	io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
	return io;
}
/* define workgroup jobqueue types */
enum {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	JQ_BYPASS,
#endif
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void *jobqueueset_init(struct super_block *sb,
			      z_erofs_vle_owned_workgrp_t qtail[],
			      struct z_erofs_vle_unzip_io *q[],
			      struct z_erofs_vle_unzip_io *fgq,
			      bool forcefg)
{
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	/*
	 * if managed cache is enabled, a bypass jobqueue is needed:
	 * no need to read from device for all workgroups in this queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
#endif

	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
}
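/*
 * the tagged pointer returned above ends up as bio->bi_private: its
 * 1-bit tag tells z_erofs_vle_unzip_kickoff() whether to wake the
 * foreground waiter or to kick the background workqueue.
 */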
#ifdef EROFS_FS_HAS_MANAGED_CACHE
static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
				    z_erofs_vle_owned_workgrp_t qtail[],
				    z_erofs_vle_owned_workgrp_t owned_head)
{
	z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
	if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
		owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;

	WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &grp->next);

	qtail[JQ_BYPASS] = &grp->next;
}
static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
				       unsigned int nr_bios,
				       bool force_fg)
{
	/*
	 * although background is preferred, no one is pending for submission.
	 * don't issue workqueue for decompression but drop it directly instead.
	 */
	if (force_fg || nr_bios)
		return false;

	kvfree(container_of(q[JQ_SUBMIT],
			    struct z_erofs_vle_unzip_io_sb,
			    io));
	return true;
}
#else
static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
				    z_erofs_vle_owned_workgrp_t qtail[],
				    z_erofs_vle_owned_workgrp_t owned_head)
{
	/* impossible to bypass submission for managed cache disabled */
	DBG_BUGON(1);
}

static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
				       unsigned int nr_bios,
				       bool force_fg)
{
	/* bios should be >0 if managed cache is disabled */
	DBG_BUGON(!nr_bios);
	return false;
}
#endif
static bool z_erofs_vle_submit_all(struct super_block *sb,
				   z_erofs_vle_owned_workgrp_t owned_head,
				   struct list_head *pagepool,
				   struct z_erofs_vle_unzip_io *fgq,
				   bool force_fg)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);
	const gfp_t gfp = GFP_NOFS;

	z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
	struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
	struct bio *bio;
	void *bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t uninitialized_var(last_index);
	bool force_submit = false;
	unsigned int nr_bios;

	if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
		return false;

	force_submit = false;
	bio = NULL;
	nr_bios = 0;
	bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct z_erofs_vle_workgroup *grp;
		pgoff_t first_index;
		struct page *page;
		unsigned int i = 0, bypass = 0;
		int err;

		/* impossible that 'owned_head' equals the following */
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);

		grp = container_of(owned_head,
				   struct z_erofs_vle_workgroup, next);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
				     Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

		first_index = grp->obj.index;
		force_submit |= (first_index != last_index + 1);

repeat:
		page = pickup_page_for_submission(grp, i, pagepool,
						  MNGD_MAPPING(sbi), gfp);
		if (!page) {
			force_submit = true;
			++bypass;
			goto skippage;
		}

		if (bio && force_submit) {
submit_bio_retry:
			__submit_bio(bio, REQ_OP_READ, 0);
			bio = NULL;
		}

		if (!bio) {
			bio = erofs_grab_bio(sb, first_index + i,
					     BIO_MAX_PAGES,
					     z_erofs_vle_read_endio, true);
			bio->bi_private = bi_private;

			++nr_bios;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (err < PAGE_SIZE)
			goto submit_bio_retry;

		force_submit = false;
		last_index = first_index + i;
skippage:
		if (++i < clusterpages)
			goto repeat;

		if (bypass < clusterpages)
			qtail[JQ_SUBMIT] = &grp->next;
		else
			move_to_bypass_jobqueue(grp, qtail, owned_head);
	} while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);

	if (bio)
		__submit_bio(bio, REQ_OP_READ, 0);

	if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
		return true;

	z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
	return true;
}
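/*
 * on return, every workgroup of the owned chain sits in exactly one
 * jobqueue: JQ_SUBMIT for those needing device io, or JQ_BYPASS for
 * those fully backed by up-to-date cached pages.
 */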
static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
				     struct list_head *pagepool,
				     bool force_fg)
{
	struct super_block *sb = f->inode->i_sb;
	struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];

	if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
		return;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
#endif
	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_event(io[JQ_SUBMIT].u.wait,
		   !atomic_read(&io[JQ_SUBMIT].pending_bios));

	/* let's do synchronous decompression */
	z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
}
static int z_erofs_vle_normalaccess_readpage(struct file *file,
					     struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

	trace_erofs_readpage(page, false);

	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	err = z_erofs_do_read_page(&f, page, &pagepool);
	(void)z_erofs_vle_work_iter_end(&f.builder);

	if (err) {
		errln("%s, failed to read, err [%d]", __func__, err);
		goto out;
	}

	z_erofs_submit_and_unzip(&f, &pagepool, true);
out:
	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}
static int z_erofs_vle_normalaccess_readpages(struct file *filp,
					      struct address_space *mapping,
					      struct list_head *pages,
					      unsigned int nr_pages)
{
	struct inode *const inode = mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	bool sync = __should_decompress_synchronously(sbi, nr_pages);
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	struct page *head = NULL;
	LIST_HEAD(pagepool);

	trace_erofs_readpages(mapping->host, lru_to_page(pages),
			      nr_pages, false);

	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;

	for (; nr_pages; --nr_pages) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);

		/*
		 * A pure asynchronous readahead is indicated if
		 * a PG_readahead marked page is hit at first.
		 * Let's also do asynchronous decompression for this case.
		 */
		sync &= !(PageReadahead(page) && !head);

		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			list_add(&page->lru, &pagepool);
			continue;
		}

		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err) {
			struct erofs_vnode *vi = EROFS_V(inode);

			errln("%s, readahead error at page %lu of nid %llu",
			      __func__, page->index, vi->nid);
		}

		put_page(page);
	}

	(void)z_erofs_vle_work_iter_end(&f.builder);

	z_erofs_submit_and_unzip(&f, &pagepool, sync);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return 0;
}
const struct address_space_operations z_erofs_vle_normalaccess_aops = {
	.readpage = z_erofs_vle_normalaccess_readpage,
	.readpages = z_erofs_vle_normalaccess_readpages,
};
/*
 * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
 * ---
 * VLE compression mode attempts to compress a variable amount of logical
 * data into a physical cluster of fixed size.
 * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
 */
#define __vle_cluster_advise(x, bit, bits) \
	((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))

#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
	Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)

#define vle_cluster_type(di)	\
	__vle_cluster_type((di)->di_advise)
static int
vle_decompressed_index_clusterofs(unsigned int *clusterofs,
				  unsigned int clustersize,
				  struct z_erofs_vle_decompressed_index *di)
{
	switch (vle_cluster_type(di)) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		*clusterofs = clustersize;
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		*clusterofs = le16_to_cpu(di->di_clusterofs);
		break;
	default:
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}
static inline erofs_blk_t
vle_extent_blkaddr(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);

	return erofs_blknr(iloc(sbi, vi->nid) + ofs);
}

static inline unsigned int
vle_extent_blkoff(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);

	return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
}
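/*
 * the decompressed indexes follow the inode base and xattrs on disk:
 * iloc(sbi, nid) + ALIGN(inode_isize + xattr_isize) + extent header,
 * then one z_erofs_vle_decompressed_index per logical cluster; the two
 * helpers above compute the meta block number and the in-block offset.
 */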
struct vle_map_blocks_iter_ctx {
	struct inode *inode;
	struct super_block *sb;
	unsigned int clusterbits;

	struct page **mpage_ret;
	void **kaddr_ret;
};
static int
vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
			    unsigned int lcn,	/* logical cluster number */
			    unsigned long long *ofs,
			    erofs_blk_t *pblk,
			    unsigned int *flags)
{
	const unsigned int clustersize = 1 << ctx->clusterbits;
	const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
	struct page *mpage = *ctx->mpage_ret;	/* extent metapage */

	struct z_erofs_vle_decompressed_index *di;
	unsigned int cluster_type, delta0;

	if (mpage->index != mblk) {
		kunmap_atomic(*ctx->kaddr_ret);
		unlock_page(mpage);
		put_page(mpage);

		mpage = erofs_get_meta_page(ctx->sb, mblk, false);
		if (IS_ERR(mpage)) {
			*ctx->mpage_ret = NULL;
			return PTR_ERR(mpage);
		}
		*ctx->mpage_ret = mpage;
		*ctx->kaddr_ret = kmap_atomic(mpage);
	}

	di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);

	cluster_type = vle_cluster_type(di);
	switch (cluster_type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		delta0 = le16_to_cpu(di->di_u.delta[0]);
		if (unlikely(!delta0 || delta0 > lcn)) {
			errln("invalid NONHEAD dl0 %u at lcn %u of nid %llu",
			      delta0, lcn, EROFS_V(ctx->inode)->nid);
			DBG_BUGON(1);
			return -EIO;
		}
		return vle_get_logical_extent_head(ctx,
			lcn - delta0, ofs, pblk, flags);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		*flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		/* clustersize should be a power of two */
		*ofs = ((u64)lcn << ctx->clusterbits) +
			(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
		*pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		errln("unknown cluster type %u at lcn %u of nid %llu",
		      cluster_type, lcn, EROFS_V(ctx->inode)->nid);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags)
{
	void *kaddr;
	const struct vle_map_blocks_iter_ctx ctx = {
		.inode = inode,
		.sb = inode->i_sb,
		.clusterbits = EROFS_I_SB(inode)->clusterbits,
		.mpage_ret = &map->mpage,
		.kaddr_ret = &kaddr
	};
	const unsigned int clustersize = 1 << ctx.clusterbits;
	/* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
	const bool initial = !map->m_llen;

	/* logical extent (start, end) offset */
	unsigned long long ofs, end;
	unsigned int lcn;
	u32 ofs_rem;

	/* initialize `pblk' to keep gcc from printing foolish warnings */
	erofs_blk_t mblk, pblk = 0;
	struct page *mpage = map->mpage;
	struct z_erofs_vle_decompressed_index *di;
	unsigned int cluster_type, logical_cluster_ofs;
	int err = 0;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (unlikely(map->m_la >= inode->i_size)) {
		DBG_BUGON(!initial);
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	debugln("%s, m_la %llu m_llen %llu --- start", __func__,
		map->m_la, map->m_llen);

	ofs = map->m_la + map->m_llen;

	/* clustersize should be power of two */
	lcn = ofs >> ctx.clusterbits;
	ofs_rem = ofs & (clustersize - 1);

	mblk = vle_extent_blkaddr(inode, lcn);

	if (!mpage || mpage->index != mblk) {
		if (mpage)
			put_page(mpage);

		mpage = erofs_get_meta_page(ctx.sb, mblk, false);
		if (IS_ERR(mpage)) {
			err = PTR_ERR(mpage);
			goto out;
		}
		map->mpage = mpage;
	} else {
		lock_page(mpage);
		DBG_BUGON(!PageUptodate(mpage));
	}

	kaddr = kmap_atomic(mpage);
	di = kaddr + vle_extent_blkoff(inode, lcn);

	debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
		mblk, vle_extent_blkoff(inode, lcn));

	err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
						clustersize, di);
	if (unlikely(err))
		goto unmap_out;

	if (!initial) {
		/* [walking mode] 'map' has already been initialized */
		map->m_llen += logical_cluster_ofs;
		goto unmap_out;
	}

	/* by default, compressed */
	map->m_flags |= EROFS_MAP_ZIPPED;

	end = ((u64)lcn + 1) * clustersize;

	cluster_type = vle_cluster_type(di);

	switch (cluster_type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		if (ofs_rem >= logical_cluster_ofs)
			map->m_flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (ofs_rem == logical_cluster_ofs) {
			pblk = le32_to_cpu(di->di_u.blkaddr);
			goto exact_hitted;
		}

		if (ofs_rem > logical_cluster_ofs) {
			ofs = (u64)lcn * clustersize | logical_cluster_ofs;
			pblk = le32_to_cpu(di->di_u.blkaddr);
			break;
		}

		/* logical cluster number should be >= 1 */
		if (unlikely(!lcn)) {
			errln("invalid logical cluster 0 at nid %llu",
			      EROFS_V(inode)->nid);
			err = -EIO;
			goto unmap_out;
		}
		end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
						  &pblk, &map->m_flags);
		mpage = map->mpage;

		if (unlikely(err)) {
			if (mpage)
				goto unmap_out;
			goto out;
		}
		break;
	default:
		errln("unknown cluster type %u at offset %llu of nid %llu",
		      cluster_type, ofs, EROFS_V(inode)->nid);
		err = -EIO;
		goto unmap_out;
	}

	map->m_la = ofs;
exact_hitted:
	map->m_llen = end - ofs;
	map->m_plen = clustersize;
	map->m_pa = blknr_to_addr(pblk);
	map->m_flags |= EROFS_MAP_MAPPED;
unmap_out:
	kunmap_atomic(kaddr);
	unlock_page(mpage);
out:
	debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		__func__, map->m_la, map->m_pa,
		map->m_llen, map->m_plen, map->m_flags);

	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}