// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>
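/*
 * Read one metadata block through the page cache of the backing block
 * device.  blkaddr is used directly as the page index since the EROFS
 * block size equals PAGE_SIZE; on success the page is returned locked
 * and uptodate, otherwise an ERR_PTR() is returned.
 */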
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
        struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
        struct page *page;

        page = read_cache_page_gfp(mapping, blkaddr,
                                   mapping_gfp_constraint(mapping, ~__GFP_FS));
        /* should already be PageUptodate */
        if (!IS_ERR(page))
                lock_page(page);
        return page;
}
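/*
 * Look up a raw extent for flat-layout (non chunk-based) inodes: either
 * plain blocks starting at vi->raw_blkaddr, or, for FLAT_INLINE inodes,
 * the tail data packed right after the on-disk inode and xattrs.
 */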
static int erofs_map_blocks_flatmode(struct inode *inode,
                                     struct erofs_map_blocks *map,
                                     int flags)
{
        int err = 0;
        erofs_blk_t nblocks, lastblk;
        u64 offset = map->m_la;
        struct erofs_inode *vi = EROFS_I(inode);
        bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

        trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

        nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
        lastblk = nblocks - tailendpacking;

        /* there is no hole in flatmode */
        map->m_flags = EROFS_MAP_MAPPED;

        if (offset < blknr_to_addr(lastblk)) {
                map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
                map->m_plen = blknr_to_addr(lastblk) - offset;
        } else if (tailendpacking) {
                /* 2 - inode inline B: inode, [xattrs], inline last blk... */
                struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

                map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
                        vi->xattr_isize + erofs_blkoff(map->m_la);
                map->m_plen = inode->i_size - offset;

                /* inline data should be located in one meta block */
                if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
                        erofs_err(inode->i_sb,
                                  "inline data cross block boundary @ nid %llu",
                                  vi->nid);
                        DBG_BUGON(1);
                        err = -EFSCORRUPTED;
                        goto err_out;
                }

                map->m_flags |= EROFS_MAP_META;
        } else {
                erofs_err(inode->i_sb,
                          "internal error @ nid: %llu (size %llu), m_la 0x%llx",
                          vi->nid, inode->i_size, map->m_la);
                DBG_BUGON(1);
                err = -EIO;
                goto err_out;
        }

        map->m_llen = map->m_plen;
err_out:
        trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
        return err;
}
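/*
 * Look up a raw extent; chunk-based inodes keep a block map or an array
 * of chunk indexes right after the inode and xattrs, one entry per chunk.
 */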
static int erofs_map_blocks(struct inode *inode,
                            struct erofs_map_blocks *map, int flags)
{
        struct super_block *sb = inode->i_sb;
        struct erofs_inode *vi = EROFS_I(inode);
        struct erofs_inode_chunk_index *idx;
        struct page *page;
        u64 chunknr;
        unsigned int unit;
        erofs_off_t pos;
        int err = 0;

        map->m_deviceid = 0;
        if (map->m_la >= inode->i_size) {
                /* leave out-of-bound access unmapped */
                map->m_flags = 0;
                map->m_plen = 0;
                goto out;
        }

        if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
                return erofs_map_blocks_flatmode(inode, map, flags);

        if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
                unit = sizeof(*idx);                    /* chunk index */
        else
                unit = EROFS_BLOCK_MAP_ENTRY_SIZE;      /* block map */

        chunknr = map->m_la >> vi->chunkbits;
        pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
                    vi->xattr_isize, unit) + unit * chunknr;

        page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
        if (IS_ERR(page))
                return PTR_ERR(page);

        map->m_la = chunknr << vi->chunkbits;
        map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
                            roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

        /* handle block map */
        if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
                __le32 *blkaddr = page_address(page) + erofs_blkoff(pos);

                if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
                        map->m_flags = 0;
                } else {
                        map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
                        map->m_flags = EROFS_MAP_MAPPED;
                }
                goto out_unlock;
        }
        /* parse chunk indexes */
        idx = page_address(page) + erofs_blkoff(pos);
        switch (le32_to_cpu(idx->blkaddr)) {
        case EROFS_NULL_ADDR:
                map->m_flags = 0;
                break;
        default:
                map->m_deviceid = le16_to_cpu(idx->device_id) &
                        EROFS_SB(sb)->device_id_mask;
                map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
                map->m_flags = EROFS_MAP_MAPPED;
                break;
        }
out_unlock:
        unlock_page(page);
        put_page(page);
out:
        map->m_llen = map->m_plen;
        return err;
}
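/*
 * Resolve a physical address to its backing device: either an extra
 * (blob) device selected by an explicit device id from chunk indexes,
 * or by searching the mapped block range of each extra device; the
 * primary block device is used otherwise.
 */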
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
        struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
        struct erofs_device_info *dif;
        int id;

        /* primary device by default */
        map->m_bdev = sb->s_bdev;
        map->m_daxdev = EROFS_SB(sb)->dax_dev;

        if (map->m_deviceid) {
                down_read(&devs->rwsem);
                dif = idr_find(&devs->tree, map->m_deviceid - 1);
                if (!dif) {
                        up_read(&devs->rwsem);
                        return -ENODEV;
                }
                map->m_bdev = dif->bdev;
                map->m_daxdev = dif->dax_dev;
                up_read(&devs->rwsem);
        } else if (devs->extra_devices) {
                down_read(&devs->rwsem);
                idr_for_each_entry(&devs->tree, dif, id) {
                        erofs_off_t startoff, length;

                        if (!dif->mapped_blkaddr)
                                continue;
                        startoff = blknr_to_addr(dif->mapped_blkaddr);
                        length = blknr_to_addr(dif->blocks);

                        if (map->m_pa >= startoff &&
                            map->m_pa < startoff + length) {
                                map->m_pa -= startoff;
                                map->m_bdev = dif->bdev;
                                map->m_daxdev = dif->dax_dev;
                                break;
                        }
                }
                up_read(&devs->rwsem);
        }
        return 0;
}
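/*
 * Translate a file range into a single iomap extent (hole, inline or
 * mapped) on the resolved device; shared by the buffered read, direct
 * I/O and DAX paths below.
 */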
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
        int ret;
        struct erofs_map_blocks map;
        struct erofs_map_dev mdev;

        map.m_la = offset;
        map.m_llen = length;

        ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
        if (ret < 0)
                return ret;

        mdev = (struct erofs_map_dev) {
                .m_deviceid = map.m_deviceid,
                .m_pa = map.m_pa,
        };
        ret = erofs_map_dev(inode->i_sb, &mdev);
        if (ret)
                return ret;

        iomap->bdev = mdev.m_bdev;
        iomap->dax_dev = mdev.m_daxdev;
        iomap->offset = map.m_la;
        iomap->length = map.m_llen;
        iomap->flags = 0;
        iomap->private = NULL;

        if (!(map.m_flags & EROFS_MAP_MAPPED)) {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                if (!iomap->length)
                        iomap->length = length;
                return 0;
        }

        if (map.m_flags & EROFS_MAP_META) {
                struct page *ipage;

                iomap->type = IOMAP_INLINE;
                ipage = erofs_get_meta_page(inode->i_sb,
                                            erofs_blknr(mdev.m_pa));
                if (IS_ERR(ipage))
                        return PTR_ERR(ipage);
                iomap->inline_data = page_address(ipage) +
                                        erofs_blkoff(mdev.m_pa);
                iomap->private = ipage;
        } else {
                iomap->type = IOMAP_MAPPED;
                iomap->addr = mdev.m_pa;
        }
        return 0;
}
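/*
 * Inline (tail-packed) extents keep the locked metadata page in
 * iomap->private across the operation; drop that reference here.
 */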
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
                ssize_t written, unsigned int flags, struct iomap *iomap)
{
        struct page *ipage = iomap->private;

        if (ipage) {
                DBG_BUGON(iomap->type != IOMAP_INLINE);
                unlock_page(ipage);
                put_page(ipage);
        } else {
                DBG_BUGON(iomap->type == IOMAP_INLINE);
        }
        return written;
}

static const struct iomap_ops erofs_iomap_ops = {
        .iomap_begin = erofs_iomap_begin,
        .iomap_end = erofs_iomap_end,
};
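/*
 * fiemap also goes through iomap: compressed inodes are reported via
 * z_erofs_iomap_report_ops when CONFIG_EROFS_FS_ZIP is enabled.
 */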
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 u64 start, u64 len)
{
        if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
                return iomap_fiemap(inode, fieinfo, start, len,
                                    &z_erofs_iomap_report_ops);
#else
                return -EOPNOTSUPP;
#endif
        }
        return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}
/*
 * Since there are no write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_readpage(struct file *file, struct page *page)
{
        return iomap_readpage(page, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
        return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
        return iomap_bmap(mapping, block, &erofs_iomap_ops);
}
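/*
 * Direct I/O is only allowed when the request position, length and user
 * memory are all aligned to the logical block size of the backing block
 * device (falling back to i_blkbits when no such device is present).
 */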
static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        loff_t align = iocb->ki_pos | iov_iter_count(to) |
                iov_iter_alignment(to);
        struct block_device *bdev = inode->i_sb->s_bdev;
        unsigned int blksize_mask;

        if (bdev)
                blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
        else
                blksize_mask = (1 << inode->i_blkbits) - 1;

        if (align & blksize_mask)
                return -EINVAL;
        return 0;
}
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        /* no need to take the (shared) inode lock since it's a read-only filesystem */
        if (!iov_iter_count(to))
                return 0;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(iocb->ki_filp->f_mapping->host))
                return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
        if (iocb->ki_flags & IOCB_DIRECT) {
                int err = erofs_prepare_dio(iocb, to);

                if (!err)
                        return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
                                            NULL, 0, 0);
                if (err < 0)
                        return err;
        }
        return filemap_read(iocb, to, 0);
}
/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
        .readpage = erofs_readpage,
        .readahead = erofs_readahead,
        .bmap = erofs_bmap,
        .direct_IO = noop_direct_IO,
};
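/*
 * DAX mmap support: page faults on DAX-capable devices are served
 * directly through the shared iomap ops above; shared writable mappings
 * are refused since the filesystem is read-only.
 */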
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
        return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
        .fault = erofs_dax_fault,
        .huge_fault = erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!IS_DAX(file_inode(file)))
                return generic_file_readonly_mmap(file, vma);

        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
                return -EINVAL;

        vma->vm_ops = &erofs_dax_vm_ops;
        vma->vm_flags |= VM_HUGEPAGE;
        return 0;
}
#else
#define erofs_file_mmap generic_file_readonly_mmap
#endif
const struct file_operations erofs_file_fops = {
        .llseek = generic_file_llseek,
        .read_iter = erofs_file_read_iter,
        .mmap = erofs_file_mmap,
        .splice_read = generic_file_splice_read,
};