// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif
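
/*
 * Mark the inode dirty for writeback, skipping inodes that are still
 * under construction and inodes already queued as dirty.
 */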
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}
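
/* Propagate on-disk F2FS_*_FL flags into the generic VFS S_* inode flags. */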
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}
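
/*
 * For special inodes, the device number lives in the first data block
 * slots (past the extra attribute area): a legacy dev_t in i_addr[0],
 * or zero there with a new-format dev_t in i_addr[1].
 */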
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}
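
/*
 * Return 1 if the first data block was never written, 0 if it was, or
 * -EFSCORRUPTED if its block address is invalid.
 */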
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) {
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}
	return 0;
}

static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}
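
/*
 * Scan the inline data area; if any word is non-zero, the inode does
 * hold inline data, so restore FI_DATA_EXIST in both the in-memory and
 * the raw inode (e.g. after a power-cut lost the flag update).
 */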
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}
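
/*
 * Compute the inode checksum over the whole inode block, substituting a
 * zero placeholder for the checksum field itself, seeded with the
 * filesystem checksum seed, the inode number, and the inode generation.
 */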
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}
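
/*
 * Verify the stored inode checksum. Without CONFIG_F2FS_CHECK_FS, dirty
 * and writeback pages are skipped, since their checksum may not have
 * been refreshed yet.
 */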
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}
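
/*
 * Validate on-disk compression metadata: algorithm, compressed block
 * count, log of cluster size, and the per-algorithm compress level.
 */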
static bool sanity_check_compress_inode(struct inode *inode,
			struct f2fs_inode *ri)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned char clevel;

	if (ri->i_compress_algorithm >= COMPRESS_MAX) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
			__func__, inode->i_ino, ri->i_compress_algorithm);
		goto err;
	}
	if (le64_to_cpu(ri->i_compr_blocks) >
			SECTOR_TO_BLOCK(inode->i_blocks)) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
			__func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
			SECTOR_TO_BLOCK(inode->i_blocks));
		goto err;
	}
	if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
		ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
			__func__, inode->i_ino, ri->i_log_cluster_size);
		goto err;
	}

	clevel = le16_to_cpu(ri->i_compress_flag) >>
				COMPRESS_LEVEL_OFFSET;
	switch (ri->i_compress_algorithm) {
	case COMPRESS_LZO:
#ifdef CONFIG_F2FS_FS_LZO
		if (clevel)
			goto err_level;
#endif
		break;
	case COMPRESS_LZORLE:
#ifdef CONFIG_F2FS_FS_LZORLE
		if (clevel)
			goto err_level;
#endif
		break;
	case COMPRESS_LZ4:
#ifdef CONFIG_F2FS_FS_LZ4
#ifdef CONFIG_F2FS_FS_LZ4HC
		if (clevel &&
		   (clevel < LZ4HC_MIN_CLEVEL || clevel > LZ4HC_MAX_CLEVEL))
			goto err_level;
#else
		if (clevel)
			goto err_level;
#endif
#endif
		break;
	case COMPRESS_ZSTD:
#ifdef CONFIG_F2FS_FS_ZSTD
		if (clevel < zstd_min_clevel() || clevel > zstd_max_clevel())
			goto err_level;
#endif
		break;
	default:
		goto err_level;
	}

	return true;
err_level:
	f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
		  __func__, inode->i_ino, clevel);
err:
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	return false;
}
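
/* Cross-check a freshly read raw inode against the superblock features. */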
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_has_extra_attr(inode)) {
		if (!f2fs_sb_has_extra_attr(sbi)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
				  __func__, inode->i_ino);
			return false;
		}
		if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
				  __func__, inode->i_ino, fi->i_extra_isize,
				  F2FS_TOTAL_EXTRA_ATTR_SIZE);
			return false;
		}
		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			f2fs_has_inline_xattr(inode) &&
			(!fi->i_inline_xattr_size ||
			fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
				  __func__, inode->i_ino, fi->i_inline_xattr_size,
				  MAX_INLINE_XATTR_SIZE);
			return false;
		}
		if (f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_compress_flag)) {
			if (!sanity_check_compress_inode(inode, ri))
				return false;
		}
	} else if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (!f2fs_sb_has_extra_attr(sbi)) {
		if (f2fs_sb_has_project_quota(sbi)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
			return false;
		}
		if (f2fs_sb_has_inode_chksum(sbi)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
			return false;
		}
		if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
			return false;
		}
		if (f2fs_sb_has_inode_crtime(sbi)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
			return false;
		}
		if (f2fs_sb_has_compression(sbi)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
			return false;
		}
	}

	if (f2fs_sanity_check_inline_data(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	return true;
}

static void init_idisk_time(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	fi->i_disk_time[0] = inode->i_atime;
	fi->i_disk_time[1] = inode->i_ctime;
	fi->i_disk_time[2] = inode->i_mtime;
}
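
/*
 * Read the raw inode from its node page and populate the VFS inode and
 * the f2fs-private fields.
 */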
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previously, inline data or directories always reserved
		 * 200 bytes in the inode layout, even with inline_xattr
		 * disabled. To keep the inline_dentry structure for backward
		 * compatibility, we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (err > 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_compress_flag)) {
			unsigned short compress_flag;

			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_compress_level = compress_flag >>
						COMPRESS_LEVEL_OFFSET;
			fi->i_compress_flag = compress_flag &
					GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
			fi->i_cluster_size = BIT(fi->i_log_cluster_size);
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	init_idisk_time(inode);

	/* Need all the flag bits */
	f2fs_init_read_extent_tree(inode, node_page);
	f2fs_init_age_extent_tree(inode);

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	if (!sanity_check_extent_cache(inode)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
{
	return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
		ino == F2FS_COMPRESS_INO(sbi);
}
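
/*
 * Find or load the inode for @ino, reading it from disk on a cache miss
 * and wiring up the per-type operation tables.
 */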
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (is_meta_ino(sbi, ino)) {
			f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			ret = -EFSCORRUPTED;
			trace_f2fs_iget_exit(inode, ret);
			iput(inode);
			f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
			return ERR_PTR(ret);
		}

		trace_f2fs_iget(inode);
		return inode;
	}

	if (is_meta_ino(sbi, ino))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page only truncates pages of regular
		 * inodes
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	if (file_should_truncate(inode) &&
			!is_sbi_flag_set(sbi, SBI_POR_DOING)) {
		ret = f2fs_truncate(inode);
		if (ret)
			goto bad_inode;
		file_dont_truncate(inode);
	}

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iput(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}
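
/* Same as f2fs_iget(), but retry with a brief backoff on transient -ENOMEM. */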
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}
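
/*
 * Copy the in-memory inode state back into its raw node page and mark
 * the page dirty so that node writeback persists it.
 */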
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (!f2fs_is_atomic_file(inode) ||
			is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		ri->i_size = cpu_to_le64(i_size_read(inode));

	if (et) {
		read_lock(&et->lock);
		set_raw_read_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_compress_flag)) {
			unsigned short compress_flag;

			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			compress_flag = F2FS_I(inode)->i_compress_flag |
				F2FS_I(inode)->i_compress_level <<
						COMPRESS_LEVEL_OFFSET;
			ri->i_compress_flag = cpu_to_le16(compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}
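
/*
 * Look up the inode's node page and write the inode into it, retrying on
 * transient failures; if retries are exhausted, stop checkpointing.
 */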
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int count = 0;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		/* The node block was truncated. */
		if (err == -ENOENT)
			return;

		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}
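
/* f2fs's ->write_inode(); node and meta inodes are managed internally. */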
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying the f2fs inode in
	 * lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;
	int err = 0;

	f2fs_abort_atomic_write(inode, true);

	if (fi->cow_inode) {
		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
		iput(fi->cow_inode);
		fi->cow_inode = NULL;
	}

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		err = -EIO;

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the same
			 * block address as this inode's; if that node was
			 * truncated previously, truncating the inode node
			 * will fail.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&fi->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* for the case f2fs_new_inode() failed, .i_ino is zero, skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach an error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to avoid the inode remaining dirty, which would
	 * cause a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't mark this as a bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that this orphan is not lost if a checkpoint
	 * is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}