fs/erofs/data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

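/*
 * Completion callback for raw read bios: mark each page uptodate on
 * success (or PageError on failure) and unlock it so that waiting
 * readers can proceed.
 */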
static void erofs_readendio(struct bio *bio)
{
        struct bio_vec *bvec;
        blk_status_t err = bio->bi_status;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;

                /* page is already locked */
                DBG_BUGON(PageUptodate(page));

                if (err)
                        SetPageError(page);
                else
                        SetPageUptodate(page);

                unlock_page(page);
                /* page could be reclaimed now */
        }
        bio_put(bio);
}

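/*
 * Read one metadata block through the block device's page cache and
 * return the page locked; callers unlock_page() and put_page() it when
 * done. The GFP constraint (~__GFP_FS) avoids recursing into filesystem
 * reclaim from this path.
 */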
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
        struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
        struct page *page;

        page = read_cache_page_gfp(mapping, blkaddr,
                                   mapping_gfp_constraint(mapping, ~__GFP_FS));
        /* should already be PageUptodate */
        if (!IS_ERR(page))
                lock_page(page);
        return page;
}

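/*
 * Map a logical offset to its on-disk location for flat (uncompressed)
 * inodes. Plain flat inodes are a single contiguous extent starting at
 * vi->raw_blkaddr; FLAT_INLINE inodes keep the last block inline, right
 * after the inode and xattrs in the metadata area:
 *
 *   iloc(sbi, nid): | inode | [xattrs] | inline last block |
 */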
static int erofs_map_blocks_flatmode(struct inode *inode,
                                     struct erofs_map_blocks *map,
                                     int flags)
{
        int err = 0;
        erofs_blk_t nblocks, lastblk;
        u64 offset = map->m_la;
        struct erofs_inode *vi = EROFS_I(inode);
        bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

        trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

        nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
        lastblk = nblocks - tailendpacking;

        if (offset >= inode->i_size) {
                /* leave out-of-bounds access unmapped */
                map->m_flags = 0;
                map->m_plen = 0;
                goto out;
        }

        /* there is no hole in flatmode */
        map->m_flags = EROFS_MAP_MAPPED;

        if (offset < blknr_to_addr(lastblk)) {
                map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
                map->m_plen = blknr_to_addr(lastblk) - offset;
        } else if (tailendpacking) {
                /* 2 - inode inline B: inode, [xattrs], inline last blk... */
                struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

                map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
                        vi->xattr_isize + erofs_blkoff(map->m_la);
                map->m_plen = inode->i_size - offset;

                /* inline data should be located in one meta block */
                if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
                        erofs_err(inode->i_sb,
                                  "inline data cross block boundary @ nid %llu",
                                  vi->nid);
                        DBG_BUGON(1);
                        err = -EFSCORRUPTED;
                        goto err_out;
                }

                map->m_flags |= EROFS_MAP_META;
        } else {
                erofs_err(inode->i_sb,
                          "internal error @ nid: %llu (size %llu), m_la 0x%llx",
                          vi->nid, inode->i_size, map->m_la);
                DBG_BUGON(1);
                err = -EIO;
                goto err_out;
        }

out:
        map->m_llen = map->m_plen;

err_out:
        trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
        return err;
}

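/*
 * Read one uncompressed page. Physically contiguous pages are batched
 * into @bio: if @page does not directly follow *@last_block, or the bio
 * has no segments left (*@eblks == 0), the pending bio is submitted and
 * a new one is allocated. Holes and inline (tail-packed) data are
 * filled in place without any I/O. Returns the bio to continue with,
 * NULL if the page was completed here, or an ERR_PTR on failure.
 */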
static inline struct bio *erofs_read_raw_page(struct bio *bio,
                                              struct address_space *mapping,
                                              struct page *page,
                                              erofs_off_t *last_block,
                                              unsigned int nblocks,
                                              unsigned int *eblks,
                                              bool ra)
{
        struct inode *const inode = mapping->host;
        struct super_block *const sb = inode->i_sb;
        erofs_off_t current_block = (erofs_off_t)page->index;
        int err;

        DBG_BUGON(!nblocks);

        if (PageUptodate(page)) {
                err = 0;
                goto has_updated;
        }

        /* note that bio is also NULL for the readpage case */
        if (bio &&
            (*last_block + 1 != current_block || !*eblks)) {
submit_bio_retry:
                submit_bio(bio);
                bio = NULL;
        }

        if (!bio) {
                struct erofs_map_blocks map = {
                        .m_la = blknr_to_addr(current_block),
                };
                erofs_blk_t blknr;
                unsigned int blkoff;

                err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
                if (err)
                        goto err_out;

                /* zero out the page for unmapped (hole) extents */
                if (!(map.m_flags & EROFS_MAP_MAPPED)) {
                        zero_user_segment(page, 0, PAGE_SIZE);
                        SetPageUptodate(page);

                        /* imply err = 0, see erofs_map_blocks */
                        goto has_updated;
                }

                /* for RAW access mode, m_plen must be equal to m_llen */
                DBG_BUGON(map.m_plen != map.m_llen);

                blknr = erofs_blknr(map.m_pa);
                blkoff = erofs_blkoff(map.m_pa);

                /* deal with inline page */
                if (map.m_flags & EROFS_MAP_META) {
                        void *vsrc, *vto;
                        struct page *ipage;

                        DBG_BUGON(map.m_plen > PAGE_SIZE);

                        ipage = erofs_get_meta_page(inode->i_sb, blknr);

                        if (IS_ERR(ipage)) {
                                err = PTR_ERR(ipage);
                                goto err_out;
                        }

                        vsrc = kmap_atomic(ipage);
                        vto = kmap_atomic(page);
                        memcpy(vto, vsrc + blkoff, map.m_plen);
                        memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
                        kunmap_atomic(vto);
                        kunmap_atomic(vsrc);
                        flush_dcache_page(page);

                        SetPageUptodate(page);
                        /* TODO: could we unlock the page earlier? */
                        unlock_page(ipage);
                        put_page(ipage);

                        /* imply err = 0, see erofs_map_blocks */
                        goto has_updated;
                }

                /* pa must be block-aligned for raw reading */
                DBG_BUGON(erofs_blkoff(map.m_pa));

                /* max # of contiguous pages */
                if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
                        nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);

                *eblks = bio_max_segs(nblocks);
                bio = bio_alloc(GFP_NOIO, *eblks);

                bio->bi_end_io = erofs_readendio;
                bio_set_dev(bio, sb->s_bdev);
                bio->bi_iter.bi_sector = (sector_t)blknr <<
                        LOG_SECTORS_PER_BLOCK;
                bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
        }

        err = bio_add_page(bio, page, PAGE_SIZE, 0);
        /* out of the extent or the bio is full */
        if (err < PAGE_SIZE)
                goto submit_bio_retry;
        --*eblks;
        *last_block = current_block;
        return bio;

err_out:
        /* for sync reading, set page error immediately */
        if (!ra) {
                SetPageError(page);
                ClearPageUptodate(page);
        }
has_updated:
        unlock_page(page);

        /* if updated manually, contiguous pages have a gap */
        if (bio)
                submit_bio(bio);
        return err ? ERR_PTR(err) : NULL;
}

/*
 * Since there are no write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
        erofs_off_t last_block;
        unsigned int eblks;
        struct bio *bio;

        trace_erofs_readpage(page, true);

        bio = erofs_read_raw_page(NULL, page->mapping,
                                  page, &last_block, 1, &eblks, false);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio)
                submit_bio(bio);
        return 0;
}

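/*
 * Readahead entry point: walk the pages queued by the VM and merge
 * physically contiguous ones into a single bio; any partially built
 * bio left over is submitted at the end.
 */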
static void erofs_raw_access_readahead(struct readahead_control *rac)
{
        erofs_off_t last_block;
        unsigned int eblks;
        struct bio *bio = NULL;
        struct page *page;

        trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
                        readahead_count(rac), true);

        while ((page = readahead_page(rac))) {
                prefetchw(&page->flags);

                bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
                                readahead_count(rac), &eblks, true);

                /* all page errors are ignored during readahead */
                if (IS_ERR(bio)) {
                        pr_err("%s, readahead error at page %lu of nid %llu\n",
                               __func__, page->index,
                               EROFS_I(rac->mapping->host)->nid);

                        bio = NULL;
                }

                put_page(page);
        }

        if (bio)
                submit_bio(bio);
}

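/*
 * For FLAT_INLINE inodes the tail block lives inside the inode's
 * metadata area, so it has no standalone block address; report 0 to
 * bmap callers (e.g. the FIBMAP ioctl) for that block.
 */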
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        struct erofs_map_blocks map = {
                .m_la = blknr_to_addr(block),
        };

        if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
                erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

                if (block >> LOG_SECTORS_PER_BLOCK >= blks)
                        return 0;
        }

        if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
                return erofs_blknr(map.m_pa);

        return 0;
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
        .readpage = erofs_raw_access_readpage,
        .readahead = erofs_raw_access_readahead,
        .bmap = erofs_bmap,
};