// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

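/*
 * Read one metadata block at @blkaddr through the block device's page
 * cache.  On success the page is returned locked and uptodate; callers
 * must unlock_page() and put_page() it once done, e.g. (illustrative
 * sketch only):
 *
 *	page = erofs_get_meta_page(sb, blkaddr);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... access page_address(page) ...
 *	unlock_page(page);
 *	put_page(page);
 */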
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

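/*
 * Map a logical offset to a physical address for the flat layouts
 * (FLAT_PLAIN / FLAT_INLINE): blocks before the last one are mapped
 * linearly from vi->raw_blkaddr, while a tail-packed last block lives
 * inline right after the on-disk inode (and its xattrs) and is flagged
 * EROFS_MAP_META.
 */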
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

	map->m_llen = map->m_plen;
err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

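/*
 * Map logical blocks for chunk-based inodes by looking up the on-disk
 * chunk array that follows the inode and its xattrs: either a 4-byte
 * block map entry or a full struct erofs_inode_chunk_index per chunk,
 * with EROFS_NULL_ADDR marking a hole.  Non-chunk layouts are delegated
 * to erofs_map_blocks_flatmode().
 */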
static int erofs_map_blocks(struct inode *inode,
			    struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct page *page;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	int err = 0;

	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
		return erofs_map_blocks_flatmode(inode, map, flags);

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
	if (IS_ERR(page))
		return PTR_ERR(page);

	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = page_address(page) + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = page_address(page) + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		/* only one device is supported for now */
		if (idx->device_id) {
			erofs_err(sb, "invalid device id %u @ %llu for nid %llu",
				  le16_to_cpu(idx->device_id),
				  chunknr, vi->nid);
			err = -EFSCORRUPTED;
			goto out_unlock;
		}
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	unlock_page(page);
	put_page(page);
out:
	map->m_llen = map->m_plen;
	return err;
}

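/*
 * Translate an erofs_map_blocks() result into an iomap so the generic
 * iomap code can drive buffered reads, DAX and direct I/O.  Inline
 * (tail-packed) extents are reported as IOMAP_INLINE with the metadata
 * page kept locked in iomap->private until erofs_iomap_end().
 */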
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->dax_dev = EROFS_I_SB(inode)->dax_dev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		struct page *ipage;

		iomap->type = IOMAP_INLINE;
		ipage = erofs_get_meta_page(inode->i_sb,
					    erofs_blknr(map.m_pa));
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);
		iomap->inline_data = page_address(ipage) +
					erofs_blkoff(map.m_pa);
		iomap->private = ipage;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_pa;
	}
	return 0;
}

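/*
 * Release the metadata page pinned by erofs_iomap_begin() for an
 * IOMAP_INLINE extent; nothing to do for regular mapped extents.
 */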
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct page *ipage = iomap->private;

	if (ipage) {
		DBG_BUGON(iomap->type != IOMAP_INLINE);
		unlock_page(ipage);
		put_page(ipage);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

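/*
 * FIEMAP is implemented on top of iomap, picking the compressed-side
 * report ops for compressed inodes when CONFIG_EROFS_FS_ZIP is enabled.
 */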
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are neither write nor truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_readpage(struct file *file, struct page *page)
{
	return iomap_readpage(page, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

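/*
 * Direct I/O must be aligned to the logical block size of the backing
 * device (or to i_blkbits when there is none); check the position,
 * length and memory alignment of the iterator in one go.
 */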
static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;
	return 0;
}

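/*
 * Read path dispatch: DAX inodes go through dax_iomap_rw(), O_DIRECT
 * requests through iomap_dio_rw() once erofs_prepare_dio() has checked
 * alignment, and everything else through filemap_read().
 */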
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need to take (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0);
		if (err < 0)
			return err;
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_readpage,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};

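/*
 * DAX mmap support: faults of any page-entry size are funneled through
 * dax_iomap_fault() with the same iomap ops used for reads.
 */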
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

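/*
 * Shared writable mappings make no sense on a read-only filesystem:
 * reject VM_SHARED + VM_MAYWRITE for DAX files and fall back to
 * generic_file_readonly_mmap() for non-DAX ones.
 */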
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};