// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 */
#include <linux/fscache.h>
#include "internal.h"

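/*
 * EROFS borrows struct netfs_io_request / netfs_io_subrequest as plain
 * containers and drives them by hand rather than through the netfs library
 * helpers. A request is allocated with a single reference held by the
 * caller; every subrequest attached to it later pins one more reference.
 */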
static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
					     loff_t start, size_t len)
{
	struct netfs_io_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start   = start;
	rreq->len     = len;
	rreq->mapping = mapping;
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);
	return rreq;
}

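/*
 * Dropping the last reference on a request ends the cache read operation
 * (if one was begun) and frees the request; dropping the last reference on
 * a subrequest also releases the request reference it pinned.
 */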
static void erofs_fscache_put_request(struct netfs_io_request *rreq)
{
	if (!refcount_dec_and_test(&rreq->ref))
		return;
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
}

static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
{
	if (!refcount_dec_and_test(&subreq->ref))
		return;
	erofs_fscache_put_request(subreq->rreq);
	kfree(subreq);
}

static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
				struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		erofs_fscache_put_subrequest(subreq);
	}
}

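/*
 * Walk every folio covered by @rreq and check it against the ordered list
 * of subrequests: a folio is marked uptodate only if all subrequests
 * overlapping it completed successfully. Each folio is then unlocked so
 * blocked readers can proceed.
 */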
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos =
			(folio_index(folio) - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + folio_size(folio);
		bool pg_failed = false;

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}

			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link,
					  &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
		}

		if (!pg_failed)
			folio_mark_uptodate(folio);

		folio_unlock(folio);
	}
	rcu_read_unlock();
}

static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}

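/*
 * Completion callback passed to fscache_read(). It records a negative
 * result on the subrequest and completes the whole request once the last
 * outstanding subrequest has finished.
 */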
static void erofs_fscache_subreq_complete(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error))
		subreq->error = transferred_or_error;

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	erofs_fscache_put_subrequest(subreq);
}

/*
 * Read data from fscache and fill it into the page cache described by
 * @rreq, whose start and length shall both be aligned with PAGE_SIZE.
 * @pstart describes the start physical address in the cache file.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
				struct netfs_io_request *rreq, loff_t pstart)
{
	enum netfs_io_source source;
	struct super_block *sb = rreq->mapping->host->i_sb;
	struct netfs_io_subrequest *subreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;
	loff_t start = rreq->start;
	size_t len = rreq->len;
	size_t done = 0;
	int ret;

	atomic_set(&rreq->nr_outstanding, 1);

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		goto out;

	while (done < len) {
		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
				 GFP_KERNEL);
		if (subreq) {
			INIT_LIST_HEAD(&subreq->rreq_link);
			refcount_set(&subreq->ref, 2);
			subreq->rreq = rreq;
			refcount_inc(&rreq->ref);
		} else {
			ret = -ENOMEM;
			goto out;
		}

		subreq->start = pstart + done;
		subreq->len   = len - done;
		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

		list_add_tail(&subreq->rreq_link, &rreq->subrequests);

		source = cres->ops->prepare_read(subreq, LLONG_MAX);
		if (WARN_ON(subreq->len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(sb, "failed to fscache prepare_read (source %d)",
				  source);
			ret = -EIO;
			subreq->error = ret;
			erofs_fscache_put_subrequest(subreq);
			goto out;
		}

		atomic_inc(&rreq->nr_outstanding);

		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
				start + done, subreq->len);

		ret = fscache_read(cres, subreq->start, &iter,
				   NETFS_READ_HOLE_FAIL,
				   erofs_fscache_subreq_complete, subreq);
		if (ret == -EIOCBQUEUED)
			ret = 0;
		if (ret) {
			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
			goto out;
		}

		done += subreq->len;
	}
out:
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	return ret;
}

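/*
 * .readpage for the anonymous metadata inode. Metadata folios map 1:1 to
 * physical addresses in the primary cache file, so only the target device
 * needs to be resolved before issuing the asynchronous read.
 */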
static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
{
	int ret;
	struct folio *folio = page_folio(page);
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_dev mdev = {
		.m_deviceid = 0,
		.m_pa = folio_pos(folio),
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out;
	}

	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, mdev.m_pa);
out:
	folio_unlock(folio);
	return ret;
}

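/*
 * Serve inline (tail-packed) data synchronously: copy the valid bytes out
 * of the metadata buffer and zero the remainder of the page.
 */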
static int erofs_fscache_readpage_inline(struct folio *folio,
					 struct erofs_map_blocks *map)
{
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	erofs_blk_t blknr;
	size_t offset, len;
	void *src, *dst;

	/* For tail packing layout, the offset may be non-zero. */
	offset = erofs_blkoff(map->m_pa);
	blknr = erofs_blknr(map->m_pa);
	len = map->m_llen;

	src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
	if (IS_ERR(src))
		return PTR_ERR(src);

	dst = kmap_local_folio(folio, 0);
	memcpy(dst, src + offset, len);
	memset(dst + len, 0, PAGE_SIZE - len);
	kunmap_local(dst);

	erofs_put_metabuf(&buf);
	return 0;
}

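/*
 * .readpage for regular file data: map the logical offset of the folio to
 * an extent, then either zero it (hole), copy it (inline data), or read it
 * asynchronously from the corresponding cache file.
 */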
static int erofs_fscache_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_mapping(folio)->host;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	struct netfs_io_request *rreq;
	erofs_off_t pos;
	loff_t pstart;
	int ret;

	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);

	pos = folio_pos(folio);
	map.m_la = pos;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret)
		goto out_unlock;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		folio_zero_range(folio, 0, folio_size(folio));
		goto out_uptodate;
	}

	if (map.m_flags & EROFS_MAP_META) {
		ret = erofs_fscache_readpage_inline(folio, &map);
		goto out_uptodate;
	}

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out_unlock;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out_unlock;
	}

	pstart = mdev.m_pa + (pos - map.m_la);
	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
			rreq, pstart);

out_uptodate:
	if (!ret)
		folio_mark_uptodate(folio);
out_unlock:
	folio_unlock(folio);
	return ret;
}

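/*
 * Consume @len bytes worth of folios from the readahead window. With
 * @unlock set the folios are already populated and can be marked uptodate
 * and unlocked right away; otherwise they stay locked until the
 * asynchronous read completes.
 */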
static void erofs_fscache_advance_folios(struct readahead_control *rac,
					 size_t len, bool unlock)
{
	while (len) {
		struct folio *folio = readahead_folio(rac);
		len -= folio_size(folio);
		if (unlock) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
	}
}

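/*
 * .readahead: carve the readahead window into extents returned by
 * erofs_map_blocks() and handle each extent the way .readpage does, but
 * possibly spanning multiple folios at a time.
 */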
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct super_block *sb = inode->i_sb;
	size_t len, count, done = 0;
	erofs_off_t pos;
	loff_t start, offset;
	int ret;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		struct erofs_map_blocks map;
		struct erofs_map_dev mdev;
		struct netfs_io_request *rreq;

		pos = start + done;
		map.m_la = pos;

		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (ret)
			return;

		offset = start + done;
		count = min_t(size_t, map.m_llen - (pos - map.m_la),
			      len - done);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			struct iov_iter iter;

			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
					offset, count);
			iov_iter_zero(count, &iter);

			erofs_fscache_advance_folios(rac, count, true);
			ret = count;
			continue;
		}

		if (map.m_flags & EROFS_MAP_META) {
			struct folio *folio = readahead_folio(rac);

			ret = erofs_fscache_readpage_inline(folio, &map);
			if (!ret) {
				folio_mark_uptodate(folio);
				ret = folio_size(folio);
			}

			folio_unlock(folio);
			continue;
		}

		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(sb, &mdev);
		if (ret)
			return;

		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
		if (IS_ERR(rreq))
			return;
		/*
		 * Drop the refs of folios here. Unlock them in
		 * rreq_unlock_folios() when the rreq completes.
		 */
		erofs_fscache_advance_folios(rac, count, false);
		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
					rreq, mdev.m_pa + (pos - map.m_la));
		if (!ret)
			ret = count;
	} while (ret > 0 && ((done += ret) < len));
}

static const struct address_space_operations erofs_fscache_meta_aops = {
	.readpage = erofs_fscache_meta_readpage,
};

const struct address_space_operations erofs_fscache_access_aops = {
	.readpage = erofs_fscache_readpage,
	.readahead = erofs_fscache_readahead,
};

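/*
 * Acquire a cookie in the per-filesystem volume for the blob @name. With
 * @need_inode set, also set up an anonymous inode whose page cache holds
 * metadata read through erofs_fscache_meta_aops.
 */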
int erofs_fscache_register_cookie(struct super_block *sb,
				  struct erofs_fscache **fscache,
				  char *name, bool need_inode)
{
	struct fscache_volume *volume = EROFS_SB(sb)->volume;
	struct erofs_fscache *ctx;
	struct fscache_cookie *cookie;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
					name, strlen(name), NULL, 0, 0);
	if (!cookie) {
		erofs_err(sb, "failed to get cookie for %s", name);
		ret = -EINVAL;
		goto err;
	}

	fscache_use_cookie(cookie, false);
	ctx->cookie = cookie;

	if (need_inode) {
		struct inode *const inode = new_inode(sb);

		if (!inode) {
			erofs_err(sb, "failed to get anon inode for %s", name);
			ret = -ENOMEM;
			goto err_cookie;
		}

		set_nlink(inode, 1);
		inode->i_size = OFFSET_MAX;
		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

		ctx->inode = inode;
	}

	*fscache = ctx;
	return 0;

err_cookie:
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;
err:
	kfree(ctx);
	return ret;
}

void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
{
	struct erofs_fscache *ctx = *fscache;

	if (!ctx)
		return;

	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;

	iput(ctx->inode);
	ctx->inode = NULL;

	kfree(ctx);
	*fscache = NULL;
}

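/*
 * Register a volume for this filesystem keyed by the fsid mount option,
 * under which all cookies of this filesystem are acquired.
 */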
int erofs_fscache_register_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct fscache_volume *volume;
	char *name;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
	if (!name)
		return -ENOMEM;

	volume = fscache_acquire_volume(name, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(volume)) {
		erofs_err(sb, "failed to register volume for %s", name);
		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
		volume = NULL;
	}

	sbi->volume = volume;
	kfree(name);
	return ret;
}

void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	fscache_relinquish_volume(sbi->volume, NULL, false);
	sbi->volume = NULL;
}