fs/erofs/data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/iomap.h>

#include <trace/events/erofs.h>

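/*
 * Bio completion handler for raw (uncompressed) reads: mark each page
 * up-to-date on success or flag it with an error, then unlock it so
 * waiting readers can proceed.
 */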
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

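/*
 * Read one metadata block through the backing block device's page cache
 * and return it locked; on success the page is already up-to-date.
 */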
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

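/*
 * Map a logical extent of a flat-layout inode to its physical location.
 * Plain flat inodes are block-mapped from vi->raw_blkaddr onwards, while
 * EROFS_INODE_FLAT_INLINE inodes keep the file tail inline, right after
 * the on-disk inode and xattrs:
 *
 *   |<- on-disk inode ->|<- xattrs ->|<- inline tail data ->|
 *   ^ iloc(sbi, nid)
 */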
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bounds access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* FLAT_INLINE: on-disk inode, [xattrs], then inline tail data */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data must reside within a single metadata block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data crosses block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

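/*
 * Read one uncompressed page as part of a (possibly chained) raw read.
 * The page is appended to @bio when it directly follows *@last_block and
 * the bio still has room; otherwise the pending bio is submitted and a
 * fresh one is allocated.  Returns the bio to keep chaining with, NULL
 * if the page was completed in place (hole, inline tail or already
 * up-to-date), or an ERR_PTR() on failure.
 */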
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      unsigned int *eblks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that bio is also NULL in the readpage case */
	if (bio &&
	    (*last_block + 1 != current_block || !*eblks)) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the page within the hole */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* err is implicitly 0 here, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with the inline (tail-packing) page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* err is implicitly 0 here, see erofs_map_blocks */
			goto has_updated;
		}

		/* m_pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* cap nblocks to the # of contiguous pages left in the extent */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);

		*eblks = bio_max_segs(nblocks);
		bio = bio_alloc(GFP_NOIO, *eblks);

		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
		bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* the page falls outside the extent or the bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;
	--*eblks;
	*last_block = current_block;
	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if the page was updated in place, the contiguous run has a gap */
	if (bio)
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since there are no write or truncate flows, no inode lock needs to be
 * held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	unsigned int eblks;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, &eblks, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio)
		submit_bio(bio);
	return 0;
}

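/*
 * Readahead: chain as many physically contiguous pages as possible into
 * a single bio; per-page errors are logged but otherwise ignored.
 */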
static void erofs_raw_access_readahead(struct readahead_control *rac)
{
	erofs_off_t last_block;
	unsigned int eblks;
	struct bio *bio = NULL;
	struct page *page;

	trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
			readahead_count(rac), true);

	while ((page = readahead_page(rac))) {
		prefetchw(&page->flags);

		bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
				readahead_count(rac), &eblks, true);

		/* all page errors are ignored during readahead */
		if (IS_ERR(bio)) {
			pr_err("%s, readahead error at page %lu of nid %llu\n",
			       __func__, page->index,
			       EROFS_I(rac->mapping->host)->nid);

			bio = NULL;
		}

		put_page(page);
	}

	if (bio)
		submit_bio(bio);
}

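/*
 * bmap: the tail block of a FLAT_INLINE inode lives inside the metadata
 * area and has no standalone block address to report, so return 0 for
 * it, as well as on mapping failure, per the bmap convention.
 */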
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct erofs_map_blocks map = {
		.m_la = blknr_to_addr(block),
	};

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
		return erofs_blknr(map.m_pa);

	return 0;
}

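/*
 * Translate an erofs_map_blocks_flatmode() result into an iomap for
 * iomap-based direct I/O; unmapped ranges are reported as holes.
 */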
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	/* that shouldn't happen for now */
	if (map.m_flags & EROFS_MAP_META) {
		DBG_BUGON(1);
		return -ENOTBLK;
	}
	iomap->type = IOMAP_MAPPED;
	iomap->addr = map.m_pa;
	return 0;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
};

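/*
 * Check whether a direct read can proceed: returns 0 to allow direct
 * I/O, a positive value to request the buffered fallback, or a negative
 * errno.  Position, length and memory alignment must all match the
 * device logical block size (or the fs block size without a bdev).
 */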
static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;

	/*
	 * Temporarily fall back to buffered I/O for tail-packing inline
	 * files, since direct I/O for them relies on an iomap core update.
	 */
	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE &&
	    iocb->ki_pos + iov_iter_count(to) >
			rounddown(inode->i_size, EROFS_BLKSIZ))
		return 1;
	return 0;
}

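/*
 * Direct reads go through iomap_dio_rw(); a positive return from
 * erofs_prepare_dio() (or a plain buffered read) takes the page cache
 * path via filemap_read() instead.
 */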
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0);
		if (err < 0)
			return err;
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readahead = erofs_raw_access_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
};