// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

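/*
 * Helpers for accessing on-disk metadata through a struct erofs_buf,
 * which caches a single page-cache page and (optionally) its kmapping.
 */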
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

/*
 * Derive the block size from inode->i_blkbits to be compatible with
 * the anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type)
{
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock the page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

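/* pick the inode that backs metadata I/O: fscache pseudo inode or bdev inode */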
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, blkaddr, type);
}

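/*
 * Flat layouts (plain / tail-packing inline) map logical offsets onto a
 * physically contiguous area; only an inline tail lives in the metadata
 * block right after the on-disk inode.
 */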
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

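/*
 * Map a logical range to its physical extent. Chunk-based inodes look up
 * a 32-bit block map or full chunk indexes in the metadata area; other
 * (flat) layouts are handled by erofs_map_blocks_flatmode().
 */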
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
				  EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

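/*
 * Resolve the backing device for a mapped extent, either by the recorded
 * device id or, when none is set, by searching the mapped block ranges of
 * the extra devices.
 */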
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_handle ?
					      dif->bdev_handle->bdev : NULL;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

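/*
 * iomap_begin() for the uncompressed path: translate (offset, length) into
 * a hole, an inline (tail-packed) extent or a mapped extent on the
 * resolved device.
 */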
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
				erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

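/* release the metadata buffer that iomap_begin() pinned for inline data */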
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

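/*
 * fiemap reporting: compressed inodes use the z_erofs iomap report ops
 * (when CONFIG_EROFS_FS_ZIP is enabled); everything else goes through the
 * plain uncompressed ops above.
 */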
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

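/*
 * Read path dispatch: DAX reads go through dax_iomap_rw(), direct I/O
 * through iomap_dio_rw() after an alignment check, and everything else
 * falls back to buffered reads via filemap_read().
 */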
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

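/*
 * DAX mmap support: faults are served through dax_iomap_fault() and
 * shared writable mappings are rejected since the filesystem is read-only.
 */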
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= filemap_splice_read,
};