Merge tag '9p-for-6.2-rc1' of https://github.com/martinetd/linux
[linux-2.6-microblaze.git] / fs / erofs / zdata.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2018 HUAWEI, Inc.
4  *             https://www.huawei.com/
5  * Copyright (C) 2022 Alibaba Cloud
6  */
7 #include "zdata.h"
8 #include "compress.h"
9 #include <linux/prefetch.h>
10 #include <linux/psi.h>
11
12 #include <trace/events/erofs.h>
13
14 /*
15  * since pclustersize is variable for big pcluster feature, introduce slab
16  * pools implementation for different pcluster sizes.
17  */
18 struct z_erofs_pcluster_slab {
19         struct kmem_cache *slab;
20         unsigned int maxpages;
21         char name[48];
22 };
23
24 #define _PCLP(n) { .maxpages = n }
25
26 static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
27         _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
28         _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
29 };
30
31 struct z_erofs_bvec_iter {
32         struct page *bvpage;
33         struct z_erofs_bvset *bvset;
34         unsigned int nr, cur;
35 };
36
37 static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
38 {
39         if (iter->bvpage)
40                 kunmap_local(iter->bvset);
41         return iter->bvpage;
42 }
43
44 static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
45 {
46         unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
47         /* have to access nextpage in advance, otherwise it will be unmapped */
48         struct page *nextpage = iter->bvset->nextpage;
49         struct page *oldpage;
50
51         DBG_BUGON(!nextpage);
52         oldpage = z_erofs_bvec_iter_end(iter);
53         iter->bvpage = nextpage;
54         iter->bvset = kmap_local_page(nextpage);
55         iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
56         iter->cur = 0;
57         return oldpage;
58 }
59
60 static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
61                                     struct z_erofs_bvset_inline *bvset,
62                                     unsigned int bootstrap_nr,
63                                     unsigned int cur)
64 {
65         *iter = (struct z_erofs_bvec_iter) {
66                 .nr = bootstrap_nr,
67                 .bvset = (struct z_erofs_bvset *)bvset,
68         };
69
70         while (cur > iter->nr) {
71                 cur -= iter->nr;
72                 z_erofs_bvset_flip(iter);
73         }
74         iter->cur = cur;
75 }
76
77 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
78                                 struct z_erofs_bvec *bvec,
79                                 struct page **candidate_bvpage)
80 {
81         if (iter->cur == iter->nr) {
82                 if (!*candidate_bvpage)
83                         return -EAGAIN;
84
85                 DBG_BUGON(iter->bvset->nextpage);
86                 iter->bvset->nextpage = *candidate_bvpage;
87                 z_erofs_bvset_flip(iter);
88
89                 iter->bvset->nextpage = NULL;
90                 *candidate_bvpage = NULL;
91         }
92         iter->bvset->bvec[iter->cur++] = *bvec;
93         return 0;
94 }
95
96 static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
97                                  struct z_erofs_bvec *bvec,
98                                  struct page **old_bvpage)
99 {
100         if (iter->cur == iter->nr)
101                 *old_bvpage = z_erofs_bvset_flip(iter);
102         else
103                 *old_bvpage = NULL;
104         *bvec = iter->bvset->bvec[iter->cur++];
105 }
106
107 static void z_erofs_destroy_pcluster_pool(void)
108 {
109         int i;
110
111         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
112                 if (!pcluster_pool[i].slab)
113                         continue;
114                 kmem_cache_destroy(pcluster_pool[i].slab);
115                 pcluster_pool[i].slab = NULL;
116         }
117 }
118
119 static int z_erofs_create_pcluster_pool(void)
120 {
121         struct z_erofs_pcluster_slab *pcs;
122         struct z_erofs_pcluster *a;
123         unsigned int size;
124
125         for (pcs = pcluster_pool;
126              pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
127                 size = struct_size(a, compressed_bvecs, pcs->maxpages);
128
129                 sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
130                 pcs->slab = kmem_cache_create(pcs->name, size, 0,
131                                               SLAB_RECLAIM_ACCOUNT, NULL);
132                 if (pcs->slab)
133                         continue;
134
135                 z_erofs_destroy_pcluster_pool();
136                 return -ENOMEM;
137         }
138         return 0;
139 }
140
141 static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
142 {
143         int i;
144
145         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
146                 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
147                 struct z_erofs_pcluster *pcl;
148
149                 if (nrpages > pcs->maxpages)
150                         continue;
151
152                 pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
153                 if (!pcl)
154                         return ERR_PTR(-ENOMEM);
155                 pcl->pclusterpages = nrpages;
156                 return pcl;
157         }
158         return ERR_PTR(-EINVAL);
159 }
160
161 static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
162 {
163         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
164         int i;
165
166         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
167                 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
168
169                 if (pclusterpages > pcs->maxpages)
170                         continue;
171
172                 kmem_cache_free(pcs->slab, pcl);
173                 return;
174         }
175         DBG_BUGON(1);
176 }
177
178 /*
179  * tagged pointer with 1-bit tag for all compressed pages
180  * tag 0 - the page is just found with an extra page reference
181  */
182 typedef tagptr1_t compressed_page_t;
183
184 #define tag_compressed_page_justfound(page) \
185         tagptr_fold(compressed_page_t, page, 1)
186
187 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
188
189 void z_erofs_exit_zip_subsystem(void)
190 {
191         destroy_workqueue(z_erofs_workqueue);
192         z_erofs_destroy_pcluster_pool();
193 }
194
195 static inline int z_erofs_init_workqueue(void)
196 {
197         const unsigned int onlinecpus = num_possible_cpus();
198
199         /*
200          * no need to spawn too many threads, limiting threads could minimum
201          * scheduling overhead, perhaps per-CPU threads should be better?
202          */
203         z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
204                                             WQ_UNBOUND | WQ_HIGHPRI,
205                                             onlinecpus + onlinecpus / 4);
206         return z_erofs_workqueue ? 0 : -ENOMEM;
207 }
208
209 int __init z_erofs_init_zip_subsystem(void)
210 {
211         int err = z_erofs_create_pcluster_pool();
212
213         if (err)
214                 return err;
215         err = z_erofs_init_workqueue();
216         if (err)
217                 z_erofs_destroy_pcluster_pool();
218         return err;
219 }
220
221 enum z_erofs_pclustermode {
222         Z_EROFS_PCLUSTER_INFLIGHT,
223         /*
224          * The current pclusters was the tail of an exist chain, in addition
225          * that the previous processed chained pclusters are all decided to
226          * be hooked up to it.
227          * A new chain will be created for the remaining pclusters which are
228          * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED,
229          * the next pcluster cannot reuse the whole page safely for inplace I/O
230          * in the following scenario:
231          *  ________________________________________________________________
232          * |      tail (partial) page     |       head (partial) page       |
233          * |   (belongs to the next pcl)  |   (belongs to the current pcl)  |
234          * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
235          */
236         Z_EROFS_PCLUSTER_HOOKED,
237         /*
238          * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
239          * could be dispatched into bypass queue later due to uptodated managed
240          * pages. All related online pages cannot be reused for inplace I/O (or
241          * bvpage) since it can be directly decoded without I/O submission.
242          */
243         Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
244         /*
245          * The current collection has been linked with the owned chain, and
246          * could also be linked with the remaining collections, which means
247          * if the processing page is the tail page of the collection, thus
248          * the current collection can safely use the whole page (since
249          * the previous collection is under control) for in-place I/O, as
250          * illustrated below:
251          *  ________________________________________________________________
252          * |  tail (partial) page |          head (partial) page           |
253          * |  (of the current cl) |      (of the previous collection)      |
254          * | PCLUSTER_FOLLOWED or |                                        |
255          * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
256          *
257          * [  (*) the above page can be used as inplace I/O.               ]
258          */
259         Z_EROFS_PCLUSTER_FOLLOWED,
260 };
261
262 struct z_erofs_decompress_frontend {
263         struct inode *const inode;
264         struct erofs_map_blocks map;
265         struct z_erofs_bvec_iter biter;
266
267         struct page *candidate_bvpage;
268         struct z_erofs_pcluster *pcl, *tailpcl;
269         z_erofs_next_pcluster_t owned_head;
270         enum z_erofs_pclustermode mode;
271
272         bool readahead;
273         /* used for applying cache strategy on the fly */
274         bool backmost;
275         erofs_off_t headoffset;
276
277         /* a pointer used to pick up inplace I/O pages */
278         unsigned int icur;
279 };
280
281 #define DECOMPRESS_FRONTEND_INIT(__i) { \
282         .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
283         .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
284
285 static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
286 {
287         unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
288
289         if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
290                 return false;
291
292         if (fe->backmost)
293                 return true;
294
295         if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
296             fe->map.m_la < fe->headoffset)
297                 return true;
298
299         return false;
300 }
301
302 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
303                                struct page **pagepool)
304 {
305         struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
306         struct z_erofs_pcluster *pcl = fe->pcl;
307         bool shouldalloc = z_erofs_should_alloc_cache(fe);
308         bool standalone = true;
309         /*
310          * optimistic allocation without direct reclaim since inplace I/O
311          * can be used if low memory otherwise.
312          */
313         gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
314                         __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
315         unsigned int i;
316
317         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
318                 return;
319
320         for (i = 0; i < pcl->pclusterpages; ++i) {
321                 struct page *page;
322                 compressed_page_t t;
323                 struct page *newpage = NULL;
324
325                 /* the compressed page was loaded before */
326                 if (READ_ONCE(pcl->compressed_bvecs[i].page))
327                         continue;
328
329                 page = find_get_page(mc, pcl->obj.index + i);
330
331                 if (page) {
332                         t = tag_compressed_page_justfound(page);
333                 } else {
334                         /* I/O is needed, no possible to decompress directly */
335                         standalone = false;
336                         if (!shouldalloc)
337                                 continue;
338
339                         /*
340                          * try to use cached I/O if page allocation
341                          * succeeds or fallback to in-place I/O instead
342                          * to avoid any direct reclaim.
343                          */
344                         newpage = erofs_allocpage(pagepool, gfp);
345                         if (!newpage)
346                                 continue;
347                         set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
348                         t = tag_compressed_page_justfound(newpage);
349                 }
350
351                 if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
352                                      tagptr_cast_ptr(t)))
353                         continue;
354
355                 if (page)
356                         put_page(page);
357                 else if (newpage)
358                         erofs_pagepool_add(pagepool, newpage);
359         }
360
361         /*
362          * don't do inplace I/O if all compressed pages are available in
363          * managed cache since it can be moved to the bypass queue instead.
364          */
365         if (standalone)
366                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
367 }
368
369 /* called by erofs_shrinker to get rid of all compressed_pages */
370 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
371                                        struct erofs_workgroup *grp)
372 {
373         struct z_erofs_pcluster *const pcl =
374                 container_of(grp, struct z_erofs_pcluster, obj);
375         int i;
376
377         DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
378         /*
379          * refcount of workgroup is now freezed as 1,
380          * therefore no need to worry about available decompression users.
381          */
382         for (i = 0; i < pcl->pclusterpages; ++i) {
383                 struct page *page = pcl->compressed_bvecs[i].page;
384
385                 if (!page)
386                         continue;
387
388                 /* block other users from reclaiming or migrating the page */
389                 if (!trylock_page(page))
390                         return -EBUSY;
391
392                 if (!erofs_page_is_managed(sbi, page))
393                         continue;
394
395                 /* barrier is implied in the following 'unlock_page' */
396                 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
397                 detach_page_private(page);
398                 unlock_page(page);
399         }
400         return 0;
401 }
402
403 int erofs_try_to_free_cached_page(struct page *page)
404 {
405         struct z_erofs_pcluster *const pcl = (void *)page_private(page);
406         int ret, i;
407
408         if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
409                 return 0;
410
411         ret = 0;
412         DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
413         for (i = 0; i < pcl->pclusterpages; ++i) {
414                 if (pcl->compressed_bvecs[i].page == page) {
415                         WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
416                         ret = 1;
417                         break;
418                 }
419         }
420         erofs_workgroup_unfreeze(&pcl->obj, 1);
421         if (ret)
422                 detach_page_private(page);
423         return ret;
424 }
425
426 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
427                                    struct z_erofs_bvec *bvec)
428 {
429         struct z_erofs_pcluster *const pcl = fe->pcl;
430
431         while (fe->icur > 0) {
432                 if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
433                              NULL, bvec->page)) {
434                         pcl->compressed_bvecs[fe->icur] = *bvec;
435                         return true;
436                 }
437         }
438         return false;
439 }
440
441 /* callers must be with pcluster lock held */
442 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
443                                struct z_erofs_bvec *bvec, bool exclusive)
444 {
445         int ret;
446
447         if (exclusive) {
448                 /* give priority for inplaceio to use file pages first */
449                 if (z_erofs_try_inplace_io(fe, bvec))
450                         return 0;
451                 /* otherwise, check if it can be used as a bvpage */
452                 if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
453                     !fe->candidate_bvpage)
454                         fe->candidate_bvpage = bvec->page;
455         }
456         ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
457         fe->pcl->vcnt += (ret >= 0);
458         return ret;
459 }
460
461 static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
462 {
463         struct z_erofs_pcluster *pcl = f->pcl;
464         z_erofs_next_pcluster_t *owned_head = &f->owned_head;
465
466         /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
467         if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
468                     *owned_head) == Z_EROFS_PCLUSTER_NIL) {
469                 *owned_head = &pcl->next;
470                 /* so we can attach this pcluster to our submission chain. */
471                 f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
472                 return;
473         }
474
475         /*
476          * type 2, link to the end of an existing open chain, be careful
477          * that its submission is controlled by the original attached chain.
478          */
479         if (*owned_head != &pcl->next && pcl != f->tailpcl &&
480             cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
481                     *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
482                 *owned_head = Z_EROFS_PCLUSTER_TAIL;
483                 f->mode = Z_EROFS_PCLUSTER_HOOKED;
484                 f->tailpcl = NULL;
485                 return;
486         }
487         /* type 3, it belongs to a chain, but it isn't the end of the chain */
488         f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
489 }
490
491 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
492 {
493         struct erofs_map_blocks *map = &fe->map;
494         bool ztailpacking = map->m_flags & EROFS_MAP_META;
495         struct z_erofs_pcluster *pcl;
496         struct erofs_workgroup *grp;
497         int err;
498
499         if (!(map->m_flags & EROFS_MAP_ENCODED) ||
500             (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
501                 DBG_BUGON(1);
502                 return -EFSCORRUPTED;
503         }
504
505         /* no available pcluster, let's allocate one */
506         pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
507                                      map->m_plen >> PAGE_SHIFT);
508         if (IS_ERR(pcl))
509                 return PTR_ERR(pcl);
510
511         atomic_set(&pcl->obj.refcount, 1);
512         pcl->algorithmformat = map->m_algorithmformat;
513         pcl->length = 0;
514         pcl->partial = true;
515
516         /* new pclusters should be claimed as type 1, primary and followed */
517         pcl->next = fe->owned_head;
518         pcl->pageofs_out = map->m_la & ~PAGE_MASK;
519         fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
520
521         /*
522          * lock all primary followed works before visible to others
523          * and mutex_trylock *never* fails for a new pcluster.
524          */
525         mutex_init(&pcl->lock);
526         DBG_BUGON(!mutex_trylock(&pcl->lock));
527
528         if (ztailpacking) {
529                 pcl->obj.index = 0;     /* which indicates ztailpacking */
530                 pcl->pageofs_in = erofs_blkoff(map->m_pa);
531                 pcl->tailpacking_size = map->m_plen;
532         } else {
533                 pcl->obj.index = map->m_pa >> PAGE_SHIFT;
534
535                 grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
536                 if (IS_ERR(grp)) {
537                         err = PTR_ERR(grp);
538                         goto err_out;
539                 }
540
541                 if (grp != &pcl->obj) {
542                         fe->pcl = container_of(grp,
543                                         struct z_erofs_pcluster, obj);
544                         err = -EEXIST;
545                         goto err_out;
546                 }
547         }
548         /* used to check tail merging loop due to corrupted images */
549         if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
550                 fe->tailpcl = pcl;
551         fe->owned_head = &pcl->next;
552         fe->pcl = pcl;
553         return 0;
554
555 err_out:
556         mutex_unlock(&pcl->lock);
557         z_erofs_free_pcluster(pcl);
558         return err;
559 }
560
561 static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
562 {
563         struct erofs_map_blocks *map = &fe->map;
564         struct erofs_workgroup *grp = NULL;
565         int ret;
566
567         DBG_BUGON(fe->pcl);
568
569         /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
570         DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
571         DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
572
573         if (!(map->m_flags & EROFS_MAP_META)) {
574                 grp = erofs_find_workgroup(fe->inode->i_sb,
575                                            map->m_pa >> PAGE_SHIFT);
576         } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
577                 DBG_BUGON(1);
578                 return -EFSCORRUPTED;
579         }
580
581         if (grp) {
582                 fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
583                 ret = -EEXIST;
584         } else {
585                 ret = z_erofs_register_pcluster(fe);
586         }
587
588         if (ret == -EEXIST) {
589                 mutex_lock(&fe->pcl->lock);
590                 /* used to check tail merging loop due to corrupted images */
591                 if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
592                         fe->tailpcl = fe->pcl;
593
594                 z_erofs_try_to_claim_pcluster(fe);
595         } else if (ret) {
596                 return ret;
597         }
598         z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
599                                 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
600         /* since file-backed online pages are traversed in reverse order */
601         fe->icur = z_erofs_pclusterpages(fe->pcl);
602         return 0;
603 }
604
605 /*
606  * keep in mind that no referenced pclusters will be freed
607  * only after a RCU grace period.
608  */
609 static void z_erofs_rcu_callback(struct rcu_head *head)
610 {
611         z_erofs_free_pcluster(container_of(head,
612                         struct z_erofs_pcluster, rcu));
613 }
614
615 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
616 {
617         struct z_erofs_pcluster *const pcl =
618                 container_of(grp, struct z_erofs_pcluster, obj);
619
620         call_rcu(&pcl->rcu, z_erofs_rcu_callback);
621 }
622
623 static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
624 {
625         struct z_erofs_pcluster *pcl = fe->pcl;
626
627         if (!pcl)
628                 return false;
629
630         z_erofs_bvec_iter_end(&fe->biter);
631         mutex_unlock(&pcl->lock);
632
633         if (fe->candidate_bvpage) {
634                 DBG_BUGON(z_erofs_is_shortlived_page(fe->candidate_bvpage));
635                 fe->candidate_bvpage = NULL;
636         }
637
638         /*
639          * if all pending pages are added, don't hold its reference
640          * any longer if the pcluster isn't hosted by ourselves.
641          */
642         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
643                 erofs_workgroup_put(&pcl->obj);
644
645         fe->pcl = NULL;
646         return true;
647 }
648
649 static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
650                                  struct page *page, unsigned int pageofs,
651                                  unsigned int len)
652 {
653         struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
654         struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
655         u8 *src, *dst;
656         unsigned int i, cnt;
657
658         if (!packed_inode)
659                 return -EFSCORRUPTED;
660
661         pos += EROFS_I(inode)->z_fragmentoff;
662         for (i = 0; i < len; i += cnt) {
663                 cnt = min_t(unsigned int, len - i,
664                             EROFS_BLKSIZ - erofs_blkoff(pos));
665                 src = erofs_bread(&buf, packed_inode,
666                                   erofs_blknr(pos), EROFS_KMAP);
667                 if (IS_ERR(src)) {
668                         erofs_put_metabuf(&buf);
669                         return PTR_ERR(src);
670                 }
671
672                 dst = kmap_local_page(page);
673                 memcpy(dst + pageofs + i, src + erofs_blkoff(pos), cnt);
674                 kunmap_local(dst);
675                 pos += cnt;
676         }
677         erofs_put_metabuf(&buf);
678         return 0;
679 }
680
681 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
682                                 struct page *page, struct page **pagepool)
683 {
684         struct inode *const inode = fe->inode;
685         struct erofs_map_blocks *const map = &fe->map;
686         const loff_t offset = page_offset(page);
687         bool tight = true, exclusive;
688         unsigned int cur, end, spiltted;
689         int err = 0;
690
691         /* register locked file pages as online pages in pack */
692         z_erofs_onlinepage_init(page);
693
694         spiltted = 0;
695         end = PAGE_SIZE;
696 repeat:
697         cur = end - 1;
698
699         if (offset + cur < map->m_la ||
700             offset + cur >= map->m_la + map->m_llen) {
701                 erofs_dbg("out-of-range map @ pos %llu", offset + cur);
702
703                 if (z_erofs_collector_end(fe))
704                         fe->backmost = false;
705                 map->m_la = offset + cur;
706                 map->m_llen = 0;
707                 err = z_erofs_map_blocks_iter(inode, map, 0);
708                 if (err)
709                         goto out;
710         } else {
711                 if (fe->pcl)
712                         goto hitted;
713                 /* didn't get a valid pcluster previously (very rare) */
714         }
715
716         if (!(map->m_flags & EROFS_MAP_MAPPED) ||
717             map->m_flags & EROFS_MAP_FRAGMENT)
718                 goto hitted;
719
720         err = z_erofs_collector_begin(fe);
721         if (err)
722                 goto out;
723
724         if (z_erofs_is_inline_pcluster(fe->pcl)) {
725                 void *mp;
726
727                 mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
728                                         erofs_blknr(map->m_pa), EROFS_NO_KMAP);
729                 if (IS_ERR(mp)) {
730                         err = PTR_ERR(mp);
731                         erofs_err(inode->i_sb,
732                                   "failed to get inline page, err %d", err);
733                         goto out;
734                 }
735                 get_page(fe->map.buf.page);
736                 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
737                            fe->map.buf.page);
738                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
739         } else {
740                 /* bind cache first when cached decompression is preferred */
741                 z_erofs_bind_cache(fe, pagepool);
742         }
743 hitted:
744         /*
745          * Ensure the current partial page belongs to this submit chain rather
746          * than other concurrent submit chains or the noio(bypass) chain since
747          * those chains are handled asynchronously thus the page cannot be used
748          * for inplace I/O or bvpage (should be processed in a strict order.)
749          */
750         tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
751                   fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
752
753         cur = end - min_t(unsigned int, offset + end - map->m_la, end);
754         if (!(map->m_flags & EROFS_MAP_MAPPED)) {
755                 zero_user_segment(page, cur, end);
756                 goto next_part;
757         }
758         if (map->m_flags & EROFS_MAP_FRAGMENT) {
759                 unsigned int pageofs, skip, len;
760
761                 if (offset > map->m_la) {
762                         pageofs = 0;
763                         skip = offset - map->m_la;
764                 } else {
765                         pageofs = map->m_la & ~PAGE_MASK;
766                         skip = 0;
767                 }
768                 len = min_t(unsigned int, map->m_llen - skip, end - cur);
769                 err = z_erofs_read_fragment(inode, skip, page, pageofs, len);
770                 if (err)
771                         goto out;
772                 ++spiltted;
773                 tight = false;
774                 goto next_part;
775         }
776
777         exclusive = (!cur && (!spiltted || tight));
778         if (cur)
779                 tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
780
781 retry:
782         err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
783                                         .page = page,
784                                         .offset = offset - map->m_la,
785                                         .end = end,
786                                   }), exclusive);
787         /* should allocate an additional short-lived page for bvset */
788         if (err == -EAGAIN && !fe->candidate_bvpage) {
789                 fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL);
790                 set_page_private(fe->candidate_bvpage,
791                                  Z_EROFS_SHORTLIVED_PAGE);
792                 goto retry;
793         }
794
795         if (err) {
796                 DBG_BUGON(err == -EAGAIN && fe->candidate_bvpage);
797                 goto out;
798         }
799
800         z_erofs_onlinepage_split(page);
801         /* bump up the number of spiltted parts of a page */
802         ++spiltted;
803         if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
804                 fe->pcl->multibases = true;
805         if (fe->pcl->length < offset + end - map->m_la) {
806                 fe->pcl->length = offset + end - map->m_la;
807                 fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
808         }
809         if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
810             !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
811             fe->pcl->length == map->m_llen)
812                 fe->pcl->partial = false;
813 next_part:
814         /* shorten the remaining extent to update progress */
815         map->m_llen = offset + cur - map->m_la;
816         map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
817
818         end = cur;
819         if (end > 0)
820                 goto repeat;
821
822 out:
823         if (err)
824                 z_erofs_page_mark_eio(page);
825         z_erofs_onlinepage_endio(page);
826
827         erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
828                   __func__, page, spiltted, map->m_llen);
829         return err;
830 }
831
832 static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
833                                        unsigned int readahead_pages)
834 {
835         /* auto: enable for read_folio, disable for readahead */
836         if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
837             !readahead_pages)
838                 return true;
839
840         if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
841             (readahead_pages <= sbi->opt.max_sync_decompress_pages))
842                 return true;
843
844         return false;
845 }
846
847 static bool z_erofs_page_is_invalidated(struct page *page)
848 {
849         return !page->mapping && !z_erofs_is_shortlived_page(page);
850 }
851
852 struct z_erofs_decompress_backend {
853         struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
854         struct super_block *sb;
855         struct z_erofs_pcluster *pcl;
856
857         /* pages with the longest decompressed length for deduplication */
858         struct page **decompressed_pages;
859         /* pages to keep the compressed data */
860         struct page **compressed_pages;
861
862         struct list_head decompressed_secondary_bvecs;
863         struct page **pagepool;
864         unsigned int onstack_used, nr_pages;
865 };
866
867 struct z_erofs_bvec_item {
868         struct z_erofs_bvec bvec;
869         struct list_head list;
870 };
871
872 static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
873                                          struct z_erofs_bvec *bvec)
874 {
875         struct z_erofs_bvec_item *item;
876
877         if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
878                 unsigned int pgnr;
879
880                 pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
881                 DBG_BUGON(pgnr >= be->nr_pages);
882                 if (!be->decompressed_pages[pgnr]) {
883                         be->decompressed_pages[pgnr] = bvec->page;
884                         return;
885                 }
886         }
887
888         /* (cold path) one pcluster is requested multiple times */
889         item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
890         item->bvec = *bvec;
891         list_add(&item->list, &be->decompressed_secondary_bvecs);
892 }
893
894 static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
895                                       int err)
896 {
897         unsigned int off0 = be->pcl->pageofs_out;
898         struct list_head *p, *n;
899
900         list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
901                 struct z_erofs_bvec_item *bvi;
902                 unsigned int end, cur;
903                 void *dst, *src;
904
905                 bvi = container_of(p, struct z_erofs_bvec_item, list);
906                 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
907                 end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
908                             bvi->bvec.end);
909                 dst = kmap_local_page(bvi->bvec.page);
910                 while (cur < end) {
911                         unsigned int pgnr, scur, len;
912
913                         pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
914                         DBG_BUGON(pgnr >= be->nr_pages);
915
916                         scur = bvi->bvec.offset + cur -
917                                         ((pgnr << PAGE_SHIFT) - off0);
918                         len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
919                         if (!be->decompressed_pages[pgnr]) {
920                                 err = -EFSCORRUPTED;
921                                 cur += len;
922                                 continue;
923                         }
924                         src = kmap_local_page(be->decompressed_pages[pgnr]);
925                         memcpy(dst + cur, src + scur, len);
926                         kunmap_local(src);
927                         cur += len;
928                 }
929                 kunmap_local(dst);
930                 if (err)
931                         z_erofs_page_mark_eio(bvi->bvec.page);
932                 z_erofs_onlinepage_endio(bvi->bvec.page);
933                 list_del(p);
934                 kfree(bvi);
935         }
936 }
937
938 static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
939 {
940         struct z_erofs_pcluster *pcl = be->pcl;
941         struct z_erofs_bvec_iter biter;
942         struct page *old_bvpage;
943         int i;
944
945         z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
946         for (i = 0; i < pcl->vcnt; ++i) {
947                 struct z_erofs_bvec bvec;
948
949                 z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
950
951                 if (old_bvpage)
952                         z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
953
954                 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
955                 z_erofs_do_decompressed_bvec(be, &bvec);
956         }
957
958         old_bvpage = z_erofs_bvec_iter_end(&biter);
959         if (old_bvpage)
960                 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
961 }
962
963 static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
964                                   bool *overlapped)
965 {
966         struct z_erofs_pcluster *pcl = be->pcl;
967         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
968         int i, err = 0;
969
970         *overlapped = false;
971         for (i = 0; i < pclusterpages; ++i) {
972                 struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
973                 struct page *page = bvec->page;
974
975                 /* compressed pages ought to be present before decompressing */
976                 if (!page) {
977                         DBG_BUGON(1);
978                         continue;
979                 }
980                 be->compressed_pages[i] = page;
981
982                 if (z_erofs_is_inline_pcluster(pcl)) {
983                         if (!PageUptodate(page))
984                                 err = -EIO;
985                         continue;
986                 }
987
988                 DBG_BUGON(z_erofs_page_is_invalidated(page));
989                 if (!z_erofs_is_shortlived_page(page)) {
990                         if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
991                                 if (!PageUptodate(page))
992                                         err = -EIO;
993                                 continue;
994                         }
995                         z_erofs_do_decompressed_bvec(be, bvec);
996                         *overlapped = true;
997                 }
998         }
999
1000         if (err)
1001                 return err;
1002         return 0;
1003 }
1004
1005 static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1006                                        int err)
1007 {
1008         struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
1009         struct z_erofs_pcluster *pcl = be->pcl;
1010         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1011         unsigned int i, inputsize;
1012         int err2;
1013         struct page *page;
1014         bool overlapped;
1015
1016         mutex_lock(&pcl->lock);
1017         be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
1018
1019         /* allocate (de)compressed page arrays if cannot be kept on stack */
1020         be->decompressed_pages = NULL;
1021         be->compressed_pages = NULL;
1022         be->onstack_used = 0;
1023         if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
1024                 be->decompressed_pages = be->onstack_pages;
1025                 be->onstack_used = be->nr_pages;
1026                 memset(be->decompressed_pages, 0,
1027                        sizeof(struct page *) * be->nr_pages);
1028         }
1029
1030         if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
1031                 be->compressed_pages = be->onstack_pages + be->onstack_used;
1032
1033         if (!be->decompressed_pages)
1034                 be->decompressed_pages =
1035                         kvcalloc(be->nr_pages, sizeof(struct page *),
1036                                  GFP_KERNEL | __GFP_NOFAIL);
1037         if (!be->compressed_pages)
1038                 be->compressed_pages =
1039                         kvcalloc(pclusterpages, sizeof(struct page *),
1040                                  GFP_KERNEL | __GFP_NOFAIL);
1041
1042         z_erofs_parse_out_bvecs(be);
1043         err2 = z_erofs_parse_in_bvecs(be, &overlapped);
1044         if (err2)
1045                 err = err2;
1046         if (err)
1047                 goto out;
1048
1049         if (z_erofs_is_inline_pcluster(pcl))
1050                 inputsize = pcl->tailpacking_size;
1051         else
1052                 inputsize = pclusterpages * PAGE_SIZE;
1053
1054         err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
1055                                         .sb = be->sb,
1056                                         .in = be->compressed_pages,
1057                                         .out = be->decompressed_pages,
1058                                         .pageofs_in = pcl->pageofs_in,
1059                                         .pageofs_out = pcl->pageofs_out,
1060                                         .inputsize = inputsize,
1061                                         .outputsize = pcl->length,
1062                                         .alg = pcl->algorithmformat,
1063                                         .inplace_io = overlapped,
1064                                         .partial_decoding = pcl->partial,
1065                                         .fillgaps = pcl->multibases,
1066                                  }, be->pagepool);
1067
1068 out:
1069         /* must handle all compressed pages before actual file pages */
1070         if (z_erofs_is_inline_pcluster(pcl)) {
1071                 page = pcl->compressed_bvecs[0].page;
1072                 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1073                 put_page(page);
1074         } else {
1075                 for (i = 0; i < pclusterpages; ++i) {
1076                         page = pcl->compressed_bvecs[i].page;
1077
1078                         if (erofs_page_is_managed(sbi, page))
1079                                 continue;
1080
1081                         /* recycle all individual short-lived pages */
1082                         (void)z_erofs_put_shortlivedpage(be->pagepool, page);
1083                         WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1084                 }
1085         }
1086         if (be->compressed_pages < be->onstack_pages ||
1087             be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
1088                 kvfree(be->compressed_pages);
1089         z_erofs_fill_other_copies(be, err);
1090
1091         for (i = 0; i < be->nr_pages; ++i) {
1092                 page = be->decompressed_pages[i];
1093                 if (!page)
1094                         continue;
1095
1096                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1097
1098                 /* recycle all individual short-lived pages */
1099                 if (z_erofs_put_shortlivedpage(be->pagepool, page))
1100                         continue;
1101                 if (err)
1102                         z_erofs_page_mark_eio(page);
1103                 z_erofs_onlinepage_endio(page);
1104         }
1105
1106         if (be->decompressed_pages != be->onstack_pages)
1107                 kvfree(be->decompressed_pages);
1108
1109         pcl->length = 0;
1110         pcl->partial = true;
1111         pcl->multibases = false;
1112         pcl->bvset.nextpage = NULL;
1113         pcl->vcnt = 0;
1114
1115         /* pcluster lock MUST be taken before the following line */
1116         WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1117         mutex_unlock(&pcl->lock);
1118         return err;
1119 }
1120
1121 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1122                                      struct page **pagepool)
1123 {
1124         struct z_erofs_decompress_backend be = {
1125                 .sb = io->sb,
1126                 .pagepool = pagepool,
1127                 .decompressed_secondary_bvecs =
1128                         LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
1129         };
1130         z_erofs_next_pcluster_t owned = io->head;
1131
1132         while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
1133                 /* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
1134                 DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
1135                 /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
1136                 DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1137
1138                 be.pcl = container_of(owned, struct z_erofs_pcluster, next);
1139                 owned = READ_ONCE(be.pcl->next);
1140
1141                 z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
1142                 erofs_workgroup_put(&be.pcl->obj);
1143         }
1144 }
1145
1146 static void z_erofs_decompressqueue_work(struct work_struct *work)
1147 {
1148         struct z_erofs_decompressqueue *bgq =
1149                 container_of(work, struct z_erofs_decompressqueue, u.work);
1150         struct page *pagepool = NULL;
1151
1152         DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1153         z_erofs_decompress_queue(bgq, &pagepool);
1154
1155         erofs_release_pages(&pagepool);
1156         kvfree(bgq);
1157 }
1158
1159 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
1160                                        bool sync, int bios)
1161 {
1162         struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
1163
1164         /* wake up the caller thread for sync decompression */
1165         if (sync) {
1166                 if (!atomic_add_return(bios, &io->pending_bios))
1167                         complete(&io->u.done);
1168                 return;
1169         }
1170
1171         if (atomic_add_return(bios, &io->pending_bios))
1172                 return;
1173         /* Use workqueue and sync decompression for atomic contexts only */
1174         if (in_atomic() || irqs_disabled()) {
1175                 queue_work(z_erofs_workqueue, &io->u.work);
1176                 /* enable sync decompression for readahead */
1177                 if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
1178                         sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
1179                 return;
1180         }
1181         z_erofs_decompressqueue_work(&io->u.work);
1182 }
1183
1184 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
1185                                                unsigned int nr,
1186                                                struct page **pagepool,
1187                                                struct address_space *mc)
1188 {
1189         const pgoff_t index = pcl->obj.index;
1190         gfp_t gfp = mapping_gfp_mask(mc);
1191         bool tocache = false;
1192
1193         struct address_space *mapping;
1194         struct page *oldpage, *page;
1195
1196         compressed_page_t t;
1197         int justfound;
1198
1199 repeat:
1200         page = READ_ONCE(pcl->compressed_bvecs[nr].page);
1201         oldpage = page;
1202
1203         if (!page)
1204                 goto out_allocpage;
1205
1206         /* process the target tagged pointer */
1207         t = tagptr_init(compressed_page_t, page);
1208         justfound = tagptr_unfold_tags(t);
1209         page = tagptr_unfold_ptr(t);
1210
1211         /*
1212          * preallocated cached pages, which is used to avoid direct reclaim
1213          * otherwise, it will go inplace I/O path instead.
1214          */
1215         if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
1216                 WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
1217                 set_page_private(page, 0);
1218                 tocache = true;
1219                 goto out_tocache;
1220         }
1221         mapping = READ_ONCE(page->mapping);
1222
1223         /*
1224          * file-backed online pages in plcuster are all locked steady,
1225          * therefore it is impossible for `mapping' to be NULL.
1226          */
1227         if (mapping && mapping != mc)
1228                 /* ought to be unmanaged pages */
1229                 goto out;
1230
1231         /* directly return for shortlived page as well */
1232         if (z_erofs_is_shortlived_page(page))
1233                 goto out;
1234
1235         lock_page(page);
1236
1237         /* only true if page reclaim goes wrong, should never happen */
1238         DBG_BUGON(justfound && PagePrivate(page));
1239
1240         /* the page is still in manage cache */
1241         if (page->mapping == mc) {
1242                 WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
1243
1244                 if (!PagePrivate(page)) {
1245                         /*
1246                          * impossible to be !PagePrivate(page) for
1247                          * the current restriction as well if
1248                          * the page is already in compressed_bvecs[].
1249                          */
1250                         DBG_BUGON(!justfound);
1251
1252                         justfound = 0;
1253                         set_page_private(page, (unsigned long)pcl);
1254                         SetPagePrivate(page);
1255                 }
1256
1257                 /* no need to submit io if it is already up-to-date */
1258                 if (PageUptodate(page)) {
1259                         unlock_page(page);
1260                         page = NULL;
1261                 }
1262                 goto out;
1263         }
1264
1265         /*
1266          * the managed page has been truncated, it's unsafe to
1267          * reuse this one, let's allocate a new cache-managed page.
1268          */
1269         DBG_BUGON(page->mapping);
1270         DBG_BUGON(!justfound);
1271
1272         tocache = true;
1273         unlock_page(page);
1274         put_page(page);
1275 out_allocpage:
1276         page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
1277         if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
1278                                oldpage, page)) {
1279                 erofs_pagepool_add(pagepool, page);
1280                 cond_resched();
1281                 goto repeat;
1282         }
1283 out_tocache:
1284         if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1285                 /* turn into temporary page if fails (1 ref) */
1286                 set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
1287                 goto out;
1288         }
1289         attach_page_private(page, pcl);
1290         /* drop a refcount added by allocpage (then we have 2 refs here) */
1291         put_page(page);
1292
1293 out:    /* the only exit (for tracing and debugging) */
1294         return page;
1295 }
1296
1297 static struct z_erofs_decompressqueue *
1298 jobqueue_init(struct super_block *sb,
1299               struct z_erofs_decompressqueue *fgq, bool *fg)
1300 {
1301         struct z_erofs_decompressqueue *q;
1302
1303         if (fg && !*fg) {
1304                 q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1305                 if (!q) {
1306                         *fg = true;
1307                         goto fg_out;
1308                 }
1309                 INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1310         } else {
1311 fg_out:
1312                 q = fgq;
1313                 init_completion(&fgq->u.done);
1314                 atomic_set(&fgq->pending_bios, 0);
1315                 q->eio = false;
1316         }
1317         q->sb = sb;
1318         q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
1319         return q;
1320 }
1321
1322 /* define decompression jobqueue types */
1323 enum {
1324         JQ_BYPASS,
1325         JQ_SUBMIT,
1326         NR_JOBQUEUES,
1327 };
1328
1329 static void *jobqueueset_init(struct super_block *sb,
1330                               struct z_erofs_decompressqueue *q[],
1331                               struct z_erofs_decompressqueue *fgq, bool *fg)
1332 {
1333         /*
1334          * if managed cache is enabled, bypass jobqueue is needed,
1335          * no need to read from device for all pclusters in this queue.
1336          */
1337         q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1338         q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
1339
1340         return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
1341 }
1342
1343 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1344                                     z_erofs_next_pcluster_t qtail[],
1345                                     z_erofs_next_pcluster_t owned_head)
1346 {
1347         z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1348         z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1349
1350         DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1351         if (owned_head == Z_EROFS_PCLUSTER_TAIL)
1352                 owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
1353
1354         WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
1355
1356         WRITE_ONCE(*submit_qtail, owned_head);
1357         WRITE_ONCE(*bypass_qtail, &pcl->next);
1358
1359         qtail[JQ_BYPASS] = &pcl->next;
1360 }
1361
1362 static void z_erofs_decompressqueue_endio(struct bio *bio)
1363 {
1364         tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
1365         struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
1366         blk_status_t err = bio->bi_status;
1367         struct bio_vec *bvec;
1368         struct bvec_iter_all iter_all;
1369
1370         bio_for_each_segment_all(bvec, bio, iter_all) {
1371                 struct page *page = bvec->bv_page;
1372
1373                 DBG_BUGON(PageUptodate(page));
1374                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1375
1376                 if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
1377                         if (!err)
1378                                 SetPageUptodate(page);
1379                         unlock_page(page);
1380                 }
1381         }
1382         if (err)
1383                 q->eio = true;
1384         z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
1385         bio_put(bio);
1386 }
1387
1388 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
1389                                  struct page **pagepool,
1390                                  struct z_erofs_decompressqueue *fgq,
1391                                  bool *force_fg)
1392 {
1393         struct super_block *sb = f->inode->i_sb;
1394         struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
1395         z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1396         struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1397         void *bi_private;
1398         z_erofs_next_pcluster_t owned_head = f->owned_head;
1399         /* bio is NULL initially, so no need to initialize last_{index,bdev} */
1400         pgoff_t last_index;
1401         struct block_device *last_bdev;
1402         unsigned int nr_bios = 0;
1403         struct bio *bio = NULL;
1404         unsigned long pflags;
1405         int memstall = 0;
1406
1407         bi_private = jobqueueset_init(sb, q, fgq, force_fg);
1408         qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1409         qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1410
1411         /* by default, all need io submission */
1412         q[JQ_SUBMIT]->head = owned_head;
1413
1414         do {
1415                 struct erofs_map_dev mdev;
1416                 struct z_erofs_pcluster *pcl;
1417                 pgoff_t cur, end;
1418                 unsigned int i = 0;
1419                 bool bypass = true;
1420
1421                 /* no possible 'owned_head' equals the following */
1422                 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1423                 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1424
1425                 pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1426
1427                 /* close the main owned chain at first */
1428                 owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
1429                                      Z_EROFS_PCLUSTER_TAIL_CLOSED);
1430                 if (z_erofs_is_inline_pcluster(pcl)) {
1431                         move_to_bypass_jobqueue(pcl, qtail, owned_head);
1432                         continue;
1433                 }
1434
1435                 /* no device id here, thus it will always succeed */
1436                 mdev = (struct erofs_map_dev) {
1437                         .m_pa = blknr_to_addr(pcl->obj.index),
1438                 };
1439                 (void)erofs_map_dev(sb, &mdev);
1440
1441                 cur = erofs_blknr(mdev.m_pa);
1442                 end = cur + pcl->pclusterpages;
1443
1444                 do {
1445                         struct page *page;
1446
1447                         page = pickup_page_for_submission(pcl, i++, pagepool,
1448                                                           mc);
1449                         if (!page)
1450                                 continue;
1451
1452                         if (bio && (cur != last_index + 1 ||
1453                                     last_bdev != mdev.m_bdev)) {
1454 submit_bio_retry:
1455                                 submit_bio(bio);
1456                                 if (memstall) {
1457                                         psi_memstall_leave(&pflags);
1458                                         memstall = 0;
1459                                 }
1460                                 bio = NULL;
1461                         }
1462
1463                         if (unlikely(PageWorkingset(page)) && !memstall) {
1464                                 psi_memstall_enter(&pflags);
1465                                 memstall = 1;
1466                         }
1467
1468                         if (!bio) {
1469                                 bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
1470                                                 REQ_OP_READ, GFP_NOIO);
1471                                 bio->bi_end_io = z_erofs_decompressqueue_endio;
1472
1473                                 last_bdev = mdev.m_bdev;
1474                                 bio->bi_iter.bi_sector = (sector_t)cur <<
1475                                         LOG_SECTORS_PER_BLOCK;
1476                                 bio->bi_private = bi_private;
1477                                 if (f->readahead)
1478                                         bio->bi_opf |= REQ_RAHEAD;
1479                                 ++nr_bios;
1480                         }
1481
1482                         if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
1483                                 goto submit_bio_retry;
1484
1485                         last_index = cur;
1486                         bypass = false;
1487                 } while (++cur < end);
1488
1489                 if (!bypass)
1490                         qtail[JQ_SUBMIT] = &pcl->next;
1491                 else
1492                         move_to_bypass_jobqueue(pcl, qtail, owned_head);
1493         } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1494
1495         if (bio) {
1496                 submit_bio(bio);
1497                 if (memstall)
1498                         psi_memstall_leave(&pflags);
1499         }
1500
1501         /*
1502          * although background is preferred, no one is pending for submission.
1503          * don't issue workqueue for decompression but drop it directly instead.
1504          */
1505         if (!*force_fg && !nr_bios) {
1506                 kvfree(q[JQ_SUBMIT]);
1507                 return;
1508         }
1509         z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
1510 }
1511
1512 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
1513                              struct page **pagepool, bool force_fg)
1514 {
1515         struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1516
1517         if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
1518                 return;
1519         z_erofs_submit_queue(f, pagepool, io, &force_fg);
1520
1521         /* handle bypass queue (no i/o pclusters) immediately */
1522         z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
1523
1524         if (!force_fg)
1525                 return;
1526
1527         /* wait until all bios are completed */
1528         wait_for_completion_io(&io[JQ_SUBMIT].u.done);
1529
1530         /* handle synchronous decompress queue in the caller context */
1531         z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
1532 }
1533
1534 /*
1535  * Since partial uptodate is still unimplemented for now, we have to use
1536  * approximate readmore strategies as a start.
1537  */
1538 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
1539                                       struct readahead_control *rac,
1540                                       erofs_off_t end,
1541                                       struct page **pagepool,
1542                                       bool backmost)
1543 {
1544         struct inode *inode = f->inode;
1545         struct erofs_map_blocks *map = &f->map;
1546         erofs_off_t cur;
1547         int err;
1548
1549         if (backmost) {
1550                 map->m_la = end;
1551                 err = z_erofs_map_blocks_iter(inode, map,
1552                                               EROFS_GET_BLOCKS_READMORE);
1553                 if (err)
1554                         return;
1555
1556                 /* expend ra for the trailing edge if readahead */
1557                 if (rac) {
1558                         loff_t newstart = readahead_pos(rac);
1559
1560                         cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
1561                         readahead_expand(rac, newstart, cur - newstart);
1562                         return;
1563                 }
1564                 end = round_up(end, PAGE_SIZE);
1565         } else {
1566                 end = round_up(map->m_la, PAGE_SIZE);
1567
1568                 if (!map->m_llen)
1569                         return;
1570         }
1571
1572         cur = map->m_la + map->m_llen - 1;
1573         while (cur >= end) {
1574                 pgoff_t index = cur >> PAGE_SHIFT;
1575                 struct page *page;
1576
1577                 page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
1578                 if (page) {
1579                         if (PageUptodate(page)) {
1580                                 unlock_page(page);
1581                         } else {
1582                                 err = z_erofs_do_read_page(f, page, pagepool);
1583                                 if (err)
1584                                         erofs_err(inode->i_sb,
1585                                                   "readmore error at page %lu @ nid %llu",
1586                                                   index, EROFS_I(inode)->nid);
1587                         }
1588                         put_page(page);
1589                 }
1590
1591                 if (cur < PAGE_SIZE)
1592                         break;
1593                 cur = (index << PAGE_SHIFT) - 1;
1594         }
1595 }
1596
1597 static int z_erofs_read_folio(struct file *file, struct folio *folio)
1598 {
1599         struct page *page = &folio->page;
1600         struct inode *const inode = page->mapping->host;
1601         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1602         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1603         struct page *pagepool = NULL;
1604         int err;
1605
1606         trace_erofs_readpage(page, false);
1607         f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1608
1609         z_erofs_pcluster_readmore(&f, NULL, f.headoffset + PAGE_SIZE - 1,
1610                                   &pagepool, true);
1611         err = z_erofs_do_read_page(&f, page, &pagepool);
1612         z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);
1613
1614         (void)z_erofs_collector_end(&f);
1615
1616         /* if some compressed cluster ready, need submit them anyway */
1617         z_erofs_runqueue(&f, &pagepool,
1618                          z_erofs_get_sync_decompress_policy(sbi, 0));
1619
1620         if (err)
1621                 erofs_err(inode->i_sb, "failed to read, err [%d]", err);
1622
1623         erofs_put_metabuf(&f.map.buf);
1624         erofs_release_pages(&pagepool);
1625         return err;
1626 }
1627
1628 static void z_erofs_readahead(struct readahead_control *rac)
1629 {
1630         struct inode *const inode = rac->mapping->host;
1631         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1632         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1633         struct page *pagepool = NULL, *head = NULL, *page;
1634         unsigned int nr_pages;
1635
1636         f.readahead = true;
1637         f.headoffset = readahead_pos(rac);
1638
1639         z_erofs_pcluster_readmore(&f, rac, f.headoffset +
1640                                   readahead_length(rac) - 1, &pagepool, true);
1641         nr_pages = readahead_count(rac);
1642         trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
1643
1644         while ((page = readahead_page(rac))) {
1645                 set_page_private(page, (unsigned long)head);
1646                 head = page;
1647         }
1648
1649         while (head) {
1650                 struct page *page = head;
1651                 int err;
1652
1653                 /* traversal in reverse order */
1654                 head = (void *)page_private(page);
1655
1656                 err = z_erofs_do_read_page(&f, page, &pagepool);
1657                 if (err)
1658                         erofs_err(inode->i_sb,
1659                                   "readahead error at page %lu @ nid %llu",
1660                                   page->index, EROFS_I(inode)->nid);
1661                 put_page(page);
1662         }
1663         z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
1664         (void)z_erofs_collector_end(&f);
1665
1666         z_erofs_runqueue(&f, &pagepool,
1667                          z_erofs_get_sync_decompress_policy(sbi, nr_pages));
1668         erofs_put_metabuf(&f.map.buf);
1669         erofs_release_pages(&pagepool);
1670 }
1671
1672 const struct address_space_operations z_erofs_aops = {
1673         .read_folio = z_erofs_read_folio,
1674         .readahead = z_erofs_readahead,
1675 };