/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

	FAULT_ALLOC_BIO,	/* obsolete: bio_alloc() can no longer fail */
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE	((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif
/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define	F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
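/*
 * Illustrative sketch, not part of the original header: how the mount-option
 * macros above are typically exercised.  It assumes struct f2fs_sb_info
 * (defined later in this file) is visible; the function itself is a
 * hypothetical example, not an f2fs API.
 */
static inline void example_apply_opts(struct f2fs_sb_info *sbi)
{
	set_opt(sbi, INLINE_DATA);		/* sets F2FS_MOUNT_INLINE_DATA */
	if (test_opt(sbi, DISCARD))		/* tests F2FS_MOUNT_DISCARD */
		clear_opt(sbi, NOBARRIER);	/* clears F2FS_MOUNT_NOBARRIER */
}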
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM	16
struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy;	/* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/*
					 * Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};
#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
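/*
 * Illustrative sketch, not part of the original header: feature bits live in
 * the on-disk super block in little-endian form, which is why the helpers
 * above convert the mask with cpu_to_le32() before testing or updating it.
 * struct f2fs_sb_info is defined later in this file; the function is a
 * hypothetical example only.
 */
static inline bool example_supports_compression(struct f2fs_sb_info *sbi)
{
	/* true when the COMPRESSION feature bit is set in the super block */
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION);
}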
/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0
/*
 * For checkpoint manager
 */

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080
#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if candidates exist */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device is busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* issue more discard over 80% utilization */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */
/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};
/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};
struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};
struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};
/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
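/*
 * Worked example, not part of the original header: pending discard commands
 * are bucketed by length, one list per block count.  plist_idx(1) == 0,
 * plist_idx(16) == 15, and any request of MAX_PLIST_NUM (512) blocks or more
 * lands in the last bucket: plist_idx(512) == plist_idx(4096) == 511.
 * Callers are expected to pass blk_num >= 1.
 */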
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};
struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */
	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};
struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used when candidates exist */
	unsigned int mid_interval;	/* used when device is busy */
	unsigned int max_interval;	/* used when no candidates exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity of discard that is not I/O aware */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};
struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};
/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};
#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
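/*
 * Illustrative sketch, not part of the original header: before stashing a
 * dirty NAT entry in the current summary journal, callers check that the
 * journal still has room and fall back to writing the NAT block otherwise.
 * NAT_JOURNAL comes from the raw-format header (linux/f2fs_fs.h); the
 * function below is a hypothetical example.
 */
static inline bool example_can_journal_one_nat(struct f2fs_journal *journal)
{
	/* room for one more NAT entry in the in-place journal? */
	return __has_cursum_space(journal, 1, NAT_JOURNAL);
}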
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) -	\
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) *	\
				NR_INLINE_DENTRY(inode) +		\
				INLINE_DENTRY_BITMAP_SIZE(inode)))
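/*
 * Worked example, not from the original header; figures assume the common
 * default layout (923 i_addr slots, a 50-word inline xattr area, one
 * reserved word): MAX_INLINE_DATA = 4 * (923 - 50 - 1) = 3488 bytes.  With
 * SIZE_OF_DIR_ENTRY = 11 and F2FS_SLOT_LEN = 8, NR_INLINE_DENTRY =
 * 3488 * 8 / ((11 + 8) * 8 + 1) = 27904 / 153 = 182 entries, the bitmap
 * takes DIV_ROUND_UP(182, 8) = 23 bytes, and INLINE_RESERVED_SIZE =
 * 3488 - (19 * 182 + 23) = 7 bytes of slack.
 */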
/*
 * For INODE and NODE manager
 */
/* for directory operations */
struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or
	 * if the filesystem is doing an internal operation where usr_fname is
	 * also NULL.  In all these cases we fall back to treating the name as
	 * an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};
struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}
static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
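/*
 * Illustrative sketch, not part of the original header: both helpers above
 * normalize a dentry region into one cursor, so directory code can walk
 * block-backed and inline dentries uniformly via d->bitmap, d->dentry and
 * d->max.  The function below is a hypothetical example of such a walk.
 */
static inline int example_first_used_slot(struct f2fs_dentry_ptr *d)
{
	int bit_pos;

	/* the dentry bitmap marks which slots hold (parts of) entries */
	for (bit_pos = 0; bit_pos < d->max; bit_pos++)
		if (test_bit_le(bit_pos, d->bitmap))
			return bit_pos;
	return -1;	/* dentry region is empty */
}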
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};
#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128
struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bit key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int c_len;		/* physical extent length of compressed blocks */
#endif
};
struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent nodes in rb-tree */
	bool largest_updated;		/* largest extent updated */
};
/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};
/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
/*
 * i_advise uses FADVISE_XXX_BIT.  We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)
#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
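/*
 * Illustrative sketch, not part of the original header: the is_file()/
 * set_file()/clear_file() helpers used by the macros above are defined
 * later in f2fs.h.  They are assumed to test and update the i_advise hint
 * byte kept in f2fs_inode_info roughly as below (F2FS_I() is the inode
 * container-of helper, also defined later); this is an example, not the
 * actual implementation.
 */
static inline bool example_is_file(struct inode *inode, int advise_bit)
{
	/* i_advise mirrors the on-disk file attribute hints */
	return F2FS_I(inode)->i_advise & advise_bit;
}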
#define DEF_DIR_LEVEL		0
/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be GCed */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_MAX,			/* max flag, never be used */
};
struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;	/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};
static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	ei->c_len = 0;
#endif
}
static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}
static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (back->c_len && back->len != back->c_len)
		return false;
	if (front->c_len && front->len != front->c_len)
		return false;
#endif
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}
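/*
 * Worked example, not part of the original header: an extent of length 8 at
 * file offset 0 mapped to block 100 and an extent of length 4 at offset 8
 * mapped to block 108 satisfy both tests above (0 + 8 == 8 and
 * 100 + 8 == 108), so they merge into one extent of length 12.  Extents
 * whose compressed physical length (c_len) differs from their logical
 * length are never merged.
 */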
extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};
struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be read ahead */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE];	/* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node ids */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for building free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};
/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};
static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
			struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
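/*
 * Illustrative sketch, not part of the original header: the typical calling
 * pattern is to zero the cursor with set_new_dnode() and then resolve it
 * with f2fs_get_dnode_of_data() (declared later in this file); passing NULL
 * for inode_page/node_page asks the lookup to fetch the pages itself.  The
 * function below is a hypothetical example.
 */
static inline int example_lookup_block(struct inode *inode, pgoff_t index,
					struct dnode_of_data *dn)
{
	set_new_dnode(dn, inode, NULL, NULL, 0);
	return f2fs_get_dnode_of_data(dn, index, LOOKUP_NODE);
}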
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
enum {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent logs */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block addresses */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem logs */
};
struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};
struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			with waiting the bio's completion
 * ...			Only can be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA = 0,
	NODE = 1,	/* should not change this */
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};
enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};
enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};
enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};
struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};
#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};
#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};
enum inode_type {
	DIR_INODE,		/* for dirty dir inode */
	FILE_INODE,		/* for dirty regular/symlink inode */
	DIRTY_META,		/* for all dirtied inode metadata */
	ATOMIC_FILE,		/* for all atomic files */
	NR_INODE_TYPE,
};
/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};
/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,			/* dirty flag for checkpoint */
	SBI_IS_CLOSE,			/* specify unmounting */
	SBI_NEED_FSCK,			/* need fsck.f2fs to fix */
	SBI_POR_DOING,			/* recovery is doing or not */
	SBI_NEED_SB_WRITE,		/* need to recover superblock */
	SBI_NEED_CP,			/* need to checkpoint */
	SBI_IS_SHUTDOWN,		/* shutdown by ioctl */
	SBI_IS_RECOVERED,		/* recovered orphan/data */
	SBI_CP_DISABLED,		/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,		/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,		/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,		/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,		/* quota file may be corrupted */
	SBI_IS_RESIZEFS,		/* resizefs is in process */
};
enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};
enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};
enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression-enabled
				 * files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};
static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);
/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};
#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return PagePrivate(page) && \
		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
		set_page_private(page, 0); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		} \
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
		set_page_private(page, 0);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}
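/*
 * Worked example, not part of the original header: with PAGE_PRIVATE_MAX
 * == 6, storing the value 3 via set_page_private_data() yields page.private
 * == (3 << 6) | (1 << PAGE_PRIVATE_NOT_POINTER) == 0xc1, from which
 * get_page_private_data() recovers 3.  The set low bit is what tells
 * Layout A apart from a wrapped pointer (Layout B), since pointers stored
 * in page.private are at least word-aligned and so have bit 0 clear.
 */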
/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_WATERMARK	20
#define	COMPRESS_PERCENT	20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
#define COMPRESS_LEVEL_OFFSET	8
/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages storing compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};
/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};
/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages storing compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages.  It is decremented by 1 each time a page
	 * has been read (or failed to be read).  When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0.  In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};
#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
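/*
 * Worked example, not part of the original header: with 4 KiB pages, the
 * cluster log size is bounded to [2, 8], i.e. clusters of 4 to 256 pages,
 * so MAX_COMPRESS_WINDOW_SIZE() spans 16 KiB (log_size == 2) up to 1 MiB
 * (log_size == 8).  COMPRESS_HEADER_SIZE is sizeof(struct compress_data)
 * == 24 bytes (clen + chksum + 4 reserved words), carved out of each
 * cluster's compressed payload budget.
 */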
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif
	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */
	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;		/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */
	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */
	u64 max_io_bytes;			/* max io bytes to merge IOs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfiles */
	struct rw_semaphore quota_sem;		/* blocking cp for flags */
	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */
	/* for cleaning operations */
	struct rw_semaphore gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread *gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic files */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;		/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct rw_semaphore pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;
	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace updates */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic64_t compr_blocks;		/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */
	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long rw_iostat[NR_IO_TYPE];
	unsigned long long prev_rw_iostat[NR_IO_TYPE];
	bool iostat_enable;
	unsigned long iostat_next_period;
	unsigned int iostat_period_ms;

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;
	/* For sysfs support */
	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
	struct completion s_kobj_unregister;

	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
	struct completion s_stat_kobj_unregister;

	struct kobject s_feature_list_kobj;	/* /sys/fs/f2fs/<devname>/feature_list */
	struct completion s_feature_list_kobj_unregister;
	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;
	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */

	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */

	/* For reclaimed segs statistics per each GC mode */
	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */

	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq. files in fadvise */
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct kmem_cache *page_array_slab;	/* page array entry */
	unsigned int page_array_slab_size;	/* default page array slab size */

	/* For runtime compression statistics */
	u64 compr_written_block;
	u64 compr_saved_block;
	u32 compr_new_inode;

	/* For compressed block cache */
	struct inode *compress_inode;		/* cache compressed blocks */
	unsigned int compress_percent;		/* cache page percentage */
	unsigned int compress_watermark;	/* cache page watermark */
	atomic_t compress_page_hit;		/* cache hit count */
#endif
};

struct f2fs_private_dio {
	struct inode *inode;
	void *orig_private;
	bio_end_io_t *orig_end_io;
	bool write;
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type)				\
	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
		KERN_INFO, sbi->sb->s_id,				\
		f2fs_fault_name[type],					\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	return false;
}
#endif
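
/*
 * Illustrative sketch (not part of the original header): callers pair
 * time_to_inject() with f2fs_show_injection_info() and then fail as if
 * the real error had happened, e.g.
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 *
 * so the same call sites compile down to "never inject" when
 * CONFIG_F2FS_FAULT_INJECTION is disabled.
 */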
/*
 * Test if the mounted volume is a multi-device volume.
 *   - For a single regular disk volume, sbi->s_ndevs is 0.
 *   - For a single zoned disk volume, sbi->s_ndevs is 1.
 *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
 */
static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
{
	return sbi->s_ndevs > 1;
}
static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	unsigned long now = jiffies;

	sbi->last_time[type] = now;

	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
	if (type == REQ_TIME) {
		sbi->last_time[DISCARD_TIME] = now;
		sbi->last_time[GC_TIME] = now;
	}
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;

	return time_after(jiffies, sbi->last_time[type] + interval);
}

static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
						int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;
	unsigned int wait_ms = 0;
	long delta;

	delta = (sbi->last_time[type] + interval) - jiffies;
	if (delta > 0)
		wait_ms = jiffies_to_msecs(delta);

	return wait_ms;
}
static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}

static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
}

static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	return __f2fs_crc32(sbi, crc, address, length);
}
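
/*
 * Illustrative sketch (not part of the original header): f2fs_crc32()
 * seeds the CRC with F2FS_SUPER_MAGIC, while f2fs_chksum() chains an
 * arbitrary seed. A typical pattern is precomputing a per-filesystem
 * seed from the UUID and then checksumming metadata with it:
 *
 *	sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
 *					 sizeof(raw_super->uuid));
 */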
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page_file_mapping(page));
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}
static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
{
	if (type < F2FS_MAX_QUOTAS)
		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
	return 0;
}

static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
	size_t crc_offset = le32_to_cpu(cp->checksum_offset);

	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
}
static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);

	return ckpt_flags & f;
}

static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}

static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__set_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
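
/*
 * Illustrative note (not part of the original header): the locked
 * wrappers above serialize the read-modify-write of cp->ckpt_flags
 * under cp_lock, e.g.
 *
 *	set_ckpt_flags(sbi, CP_ERROR_FLAG);
 *
 * while the __-prefixed variants assume the caller already holds
 * cp_lock, as disable_nat_bits() below does.
 */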
static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
{
	unsigned long flags;
	unsigned char *nat_bits;

	/*
	 * In order to re-enable nat_bits we need to call fsck.f2fs by
	 * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost,
	 * so let's rely on regular fsck or unclean shutdown.
	 */

	if (lock)
		spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
	nat_bits = NM_I(sbi)->nat_bits;
	NM_I(sbi)->nat_bits = NULL;
	if (lock)
		spin_unlock_irqrestore(&sbi->cp_lock, flags);

	kvfree(nat_bits);
}

static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
					struct cp_control *cpc)
{
	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);

	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
}
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);
}

static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
	return down_read_trylock(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->cp_rwsem);
}
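
/*
 * Illustrative sketch (not part of the original header): cp_rwsem is
 * taken for read around any metadata operation that must not race
 * with checkpointing, and for write by the checkpoint path itself:
 *
 *	f2fs_lock_op(sbi);
 *	err = f2fs_reserve_block(&dn, index);
 *	f2fs_unlock_op(sbi);
 *
 * f2fs_lock_all()/f2fs_unlock_all() form the writer side used while a
 * checkpoint drains such operations.
 */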
static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
	int reason = CP_SYNC;

	if (test_opt(sbi, FASTBOOT))
		reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;
	return reason;
}

static inline bool __remain_node_summaries(int reason)
{
	return (reason & (CP_UMOUNT | CP_FASTBOOT));
}

static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;

	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}
static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
					struct inode *inode, bool cap)
{
	if (!inode)
		return true;
	if (!test_opt(sbi, RESERVE_ROOT))
		return false;
	if (IS_NOQUOTA(inode))
		return true;
	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
		return true;
	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
					in_group_p(F2FS_OPTION(sbi).s_resgid))
		return true;
	if (cap && capable(CAP_SYS_RESOURCE))
		return true;
	return false;
}
static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t *count)
{
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		release = *count;
		goto release_quota;
	}

	/*
	 * let's increase this in prior to actual block count change in order
	 * for f2fs_sync_file to avoid data races when deciding checkpoint.
	 */
	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

	spin_lock(&sbi->stat_lock);
	sbi->total_valid_block_count += (block_t)(*count);
	avail_user_block_count = sbi->user_block_count -
					sbi->current_reserved_blocks;

	if (!__allow_reserved_blocks(sbi, inode, true))
		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (avail_user_block_count > sbi->unusable_block_count)
			avail_user_block_count -= sbi->unusable_block_count;
		else
			avail_user_block_count = 0;
	}
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		if (diff > *count)
			diff = *count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count -= diff;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (unlikely(release)) {
		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
		dquot_release_reservation_block(inode, release);
	}
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
release_quota:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}
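
/*
 * Illustrative sketch (not part of the original header; "needed" is a
 * hypothetical caller variable): on success *count may have been
 * trimmed to what was actually reservable, and when those blocks are
 * later freed, dec_valid_block_count() drops the same accounting:
 *
 *	blkcnt_t count = needed;
 *
 *	if (!inc_valid_block_count(sbi, inode, &count)) {
 *		use "count" reserved blocks, then on free:
 *		dec_valid_block_count(sbi, inode, count);
 *	}
 */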
__printf(2, 3)
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);

#define f2fs_err(sbi, fmt, ...)						\
	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
#define f2fs_warn(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
#define f2fs_notice(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
#define f2fs_info(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
#define f2fs_debug(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						block_t count)
{
	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;

	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
	sbi->total_valid_block_count -= (block_t)count;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
					sbi->current_reserved_blocks + count);
	spin_unlock(&sbi->stat_lock);
	if (unlikely(inode->i_blocks < sectors)) {
		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
			  inode->i_ino,
			  (unsigned long long)inode->i_blocks,
			  (unsigned long long)sectors);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return;
	}
	f2fs_i_blocks_write(inode, count, false, true);
}
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);

	if (count_type == F2FS_DIRTY_DENTS ||
			count_type == F2FS_DIRTY_NODES ||
			count_type == F2FS_DIRTY_META ||
			count_type == F2FS_DIRTY_QDATA ||
			count_type == F2FS_DIRTY_IMETA)
		set_sbi_flag(sbi, SBI_IS_DIRTY);
}

static inline void inode_inc_dirty_pages(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_pages);
	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_pages(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	atomic_dec(&F2FS_I(inode)->dirty_pages);
	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_pages(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_pages);
}
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
						sbi->log_blocks_per_seg;

	return segs / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->discard_blks;
}
static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
}
static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
	int offset;

	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
		offset = (flag == SIT_BITMAP) ?
			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
		/*
		 * if large_nat_bitmap feature is enabled, leave checksum
		 * protection for all nat/sit bitmaps.
		 */
		return tmp_ptr + offset + sizeof(__le32);
	}

	if (__cp_payload(sbi) > 0) {
		if (flag == NAT_BITMAP)
			return &ckpt->sit_nat_version_bitmap;
		else
			return (unsigned char *)ckpt + F2FS_BLKSIZE;
	} else {
		offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
		return tmp_ptr + offset;
	}
}
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
{
	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}
static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	block_t	valid_block_count;
	unsigned int valid_node_count, user_block_count;
	int err;

	if (is_inode) {
		if (inode) {
			err = dquot_alloc_inode(inode);
			if (err)
				return err;
		}
	} else {
		err = dquot_reserve_block(inode, 1);
		if (err)
			return err;
	}

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		goto enospc;
	}

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count +
					sbi->current_reserved_blocks + 1;

	if (!__allow_reserved_blocks(sbi, inode, false))
		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
	user_block_count = sbi->user_block_count;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		user_block_count -= sbi->unusable_block_count;

	if (unlikely(valid_block_count > user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (is_inode) {
		if (inode)
			dquot_free_inode(inode);
	} else {
		dquot_release_reservation_block(inode, 1);
	}
	return -ENOSPC;
}
static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	spin_lock(&sbi->stat_lock);

	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
	f2fs_bug_on(sbi, !sbi->total_valid_node_count);

	sbi->total_valid_node_count--;
	sbi->total_valid_block_count--;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks++;

	spin_unlock(&sbi->stat_lock);

	if (is_inode) {
		dquot_free_inode(inode);
	} else {
		if (unlikely(inode->i_blocks == 0)) {
			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
				  inode->i_ino,
				  (unsigned long long)inode->i_blocks);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return;
		}
		f2fs_i_blocks_write(inode, 1, false, true);
	}
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_inc(&sbi->total_valid_inode_count);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_dec(&sbi->total_valid_inode_count);
}

static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
	struct page *page;

	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
		if (!for_write)
			page = find_get_page_flags(mapping, index,
							FGP_LOCK | FGP_ACCESSED);
		else
			page = find_lock_page(mapping, index);
		if (page)
			return page;

		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
			f2fs_show_injection_info(F2FS_M_SB(mapping),
							FAULT_PAGE_ALLOC);
			return NULL;
		}
	}

	if (!for_write)
		return grab_cache_page(mapping, index);
	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
}

static inline struct page *f2fs_pagecache_get_page(
				struct address_space *mapping, pgoff_t index,
				int fgp_flags, gfp_t gfp_mask)
{
	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
		return NULL;
	}

	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}
static inline void f2fs_copy_page(struct page *src, struct page *dst)
{
	char *src_kaddr = kmap(src);
	char *dst_kaddr = kmap(dst);

	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
	kunmap(dst);
	kunmap(src);
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
		unlock_page(page);
	}
	put_page(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}

static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
{
	if (nofail)
		return f2fs_kmem_cache_alloc_nofail(cachep, flags);

	if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC);
		return NULL;
	}

	return kmem_cache_alloc(cachep, flags);
}
static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
{
	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
		get_pages(sbi, F2FS_WB_CP_DATA) ||
		get_pages(sbi, F2FS_DIO_READ) ||
		get_pages(sbi, F2FS_DIO_WRITE))
		return true;

	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
		return true;

	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
		return true;
	return false;
}

static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;

	if (is_inflight_io(sbi, type))
		return false;

	if (sbi->gc_mode == GC_URGENT_LOW &&
			(type == DISCARD_TIME || type == GC_TIME))
		return true;

	return f2fs_time_over(sbi, type);
}
static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
				unsigned long index, void *item)
{
	while (radix_tree_insert(root, index, item))
		cond_resched();
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);

	return RAW_IS_INODE(p);
}

static inline int offset_in_addr(struct f2fs_inode *i)
{
	return (i->i_inline & F2FS_EXTRA_ATTR) ?
			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}
static inline int f2fs_has_extra_attr(struct inode *inode);
static inline block_t data_blkaddr(struct inode *inode,
			struct page *node_page, unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	if (is_inode) {
		if (!inode)
			/* from GC path only */
			base = offset_in_addr(&raw_node->i);
		else if (f2fs_has_extra_attr(inode))
			base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
}

static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
}
static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline void f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr |= mask;
}

static inline void f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr &= ~mask;
}

static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

static inline void f2fs_change_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr ^= mask;
}
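
/*
 * Illustrative note (not part of the original header): unlike the
 * generic set_bit() family, these helpers address bits MSB-first
 * within each byte to match the on-disk bitmap layout. For example:
 *
 *	char map[2] = { 0, 0 };
 *
 *	f2fs_set_bit(0, map);	sets 0x80 in map[0]
 *	f2fs_set_bit(9, map);	sets 0x40 in map[1]
 *	f2fs_test_bit(9, map);	now returns nonzero
 */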
/*
 * On-disk inode flags (f2fs_inode::i_flags)
 */
#define F2FS_COMPR_FL			0x00000004 /* Compress file */
#define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
#define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
#define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
#define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
#define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
#define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
#define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
#define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */

/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)

/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
				F2FS_CASEFOLD_FL))

/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
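
/*
 * Illustrative sketch (not part of the original header): inherited
 * flags are reduced by mode at create time, so a regular file never
 * inherits directory-only bits such as F2FS_DIRSYNC_FL:
 *
 *	fi->i_flags = f2fs_mask_flags(inode->i_mode,
 *			F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
 */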
static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
	case FI_NEW_INODE:
		if (set)
			return;
		fallthrough;
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
	case FI_COMPRESS_RELEASED:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

static inline void set_inode_flag(struct inode *inode, int flag)
{
	set_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, true);
}

static inline int is_inode_flag_set(struct inode *inode, int flag)
{
	return test_bit(flag, F2FS_I(inode)->flags);
}

static inline void clear_inode_flag(struct inode *inode, int flag)
{
	clear_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, false);
}
static inline bool f2fs_verity_in_progress(struct inode *inode)
{
	return IS_ENABLED(CONFIG_FS_VERITY) &&
	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
}

static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
	F2FS_I(inode)->i_acl_mode = mode;
	set_inode_flag(inode, FI_ACL_MODE);
	f2fs_mark_inode_dirty_sync(inode, false);
}

static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void f2fs_i_blocks_write(struct inode *inode,
					block_t diff, bool add, bool claim)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	/* add = 1, claim = 1 should be dquot_reserve_block in pair */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);
	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}
static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_gc_failures_write(struct inode *inode,
					unsigned int count)
{
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
	F2FS_I(inode)->i_xattr_nid = xnid;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
	F2FS_I(inode)->i_pino = pino;
	f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_bit(FI_INLINE_XATTR, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_bit(FI_INLINE_DATA, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DENTRY)
		set_bit(FI_INLINE_DENTRY, fi->flags);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_bit(FI_DATA_EXIST, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DOTS)
		set_bit(FI_INLINE_DOTS, fi->flags);
	if (ri->i_inline & F2FS_EXTRA_ATTR)
		set_bit(FI_EXTRA_ATTR, fi->flags);
	if (ri->i_inline & F2FS_PIN_FILE)
		set_bit(FI_PIN_FILE, fi->flags);
	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
		set_bit(FI_COMPRESS_RELEASED, fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(inode, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
		ri->i_inline |= F2FS_INLINE_DENTRY;
	if (is_inode_flag_set(inode, FI_DATA_EXIST))
		ri->i_inline |= F2FS_DATA_EXIST;
	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
		ri->i_inline |= F2FS_INLINE_DOTS;
	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
		ri->i_inline |= F2FS_EXTRA_ATTR;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		ri->i_inline |= F2FS_PIN_FILE;
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		ri->i_inline |= F2FS_COMPRESS_RELEASED;
}
static inline int f2fs_has_extra_attr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_XATTR);
}

static inline int f2fs_compressed_file(struct inode *inode)
{
	return S_ISREG(inode->i_mode) &&
		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
}

static inline bool f2fs_need_compress_data(struct inode *inode)
{
	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;

	if (!f2fs_compressed_file(inode))
		return false;

	if (compress_mode == COMPR_MODE_FS)
		return true;
	else if (compress_mode == COMPR_MODE_USER &&
			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
		return true;

	return false;
}
static inline unsigned int addrs_per_inode(struct inode *inode)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
				get_inline_xattr_addrs(inode);

	if (!f2fs_compressed_file(inode))
		return addrs;
	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}

static inline unsigned int addrs_per_block(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
}

static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);

	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode)]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return get_inline_xattr_addrs(inode) * sizeof(__le32);
	return 0;
}
static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DATA);
}

static inline int f2fs_exist_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DATA_EXIST);
}

static inline int f2fs_has_inline_dots(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DOTS);
}

static inline int f2fs_is_mmap_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_MMAP_FILE);
}

static inline bool f2fs_is_pinned_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_PIN_FILE);
}

static inline bool f2fs_is_atomic_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
}

static inline bool f2fs_is_volatile_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}

static inline bool f2fs_is_first_block_written(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}

static inline bool f2fs_is_drop_cache(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DROP_CACHE);
}

static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	int extra_size = get_extra_isize(inode);

	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
}

static inline int f2fs_has_inline_dentry(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}
static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void clear_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}
static inline bool f2fs_is_time_consistent(struct inode *inode)
{
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
						&F2FS_I(inode)->i_crtime))
		return false;
	return true;
}

static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}
	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}
static inline bool f2fs_readonly(struct super_block *sb)
{
	return sb_rdonly(sb);
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;
	}

	return kmalloc(size, flags);
}

static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
		return NULL;
	}

	return kvmalloc(size, flags);
}

static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

static inline int get_inline_xattr_addrs(struct inode *inode)
{
	return F2FS_I(inode)->i_inline_xattr_size;
}
#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
	(offsetof(struct f2fs_inode, i_extra_end) -	\
	offsetof(struct f2fs_inode, i_extra_isize))	\

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
		((offsetof(typeof(*(f2fs_inode)), field) +	\
		sizeof((f2fs_inode)->field))			\
		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
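
/*
 * Illustrative sketch (not part of the original header):
 * F2FS_FITS_IN_INODE() is how readers decide whether an
 * extra-attribute field is actually present for a given
 * i_extra_isize before touching it, e.g.
 *
 *	if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 *		projid = le32_to_cpu(ri->i_projid);
 */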
#define DEFAULT_IOSTAT_PERIOD_MS	3000
#define MIN_IOSTAT_PERIOD_MS		100
/* maximum period of iostat tracing is 1 day */
#define MAX_IOSTAT_PERIOD_MS		8640000

static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	int i;

	spin_lock(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->rw_iostat[i] = 0;
		sbi->prev_rw_iostat[i] = 0;
	}
	spin_unlock(&sbi->iostat_lock);
}

extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);
static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	if (!sbi->iostat_enable)
		return;

	spin_lock(&sbi->iostat_lock);
	sbi->rw_iostat[type] += io_bytes;

	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
		sbi->rw_iostat[APP_BUFFERED_IO] =
			sbi->rw_iostat[APP_WRITE_IO] -
			sbi->rw_iostat[APP_DIRECT_IO];

	if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
		sbi->rw_iostat[APP_BUFFERED_READ_IO] =
			sbi->rw_iostat[APP_READ_IO] -
			sbi->rw_iostat[APP_DIRECT_READ_IO];
	spin_unlock(&sbi->iostat_lock);

	f2fs_record_iostat(sbi);
}
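
/*
 * Illustrative sketch (not part of the original header): IO paths
 * account bytes per iostat_type as they are issued, and the helper
 * derives the buffered totals from the write/direct counters:
 *
 *	f2fs_update_iostat(sbi, APP_WRITE_IO, PAGE_SIZE);
 *
 * f2fs_record_iostat() is then expected to publish the counters at
 * most once per iostat_period_ms.
 */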
#define __is_large_section(sbi)	((sbi)->segs_per_sec > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
		f2fs_bug_on(sbi, 1);
	}
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}
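
/*
 * Illustrative sketch (not part of the original header): block
 * addresses read from disk are validated before being followed, so a
 * corrupted pointer trips SBI_NEED_FSCK instead of doing random IO:
 *
 *	if (__is_valid_data_blkaddr(blkaddr))
 *		verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
 */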
/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
			struct kstat *stat, u32 request_mask, unsigned int flags);
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);
/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);
/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
							bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
/*
 * dir.c
 */
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
int f2fs_init_casefolded_name(const struct inode *dir,
			      struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}
/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
					unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);
/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
void f2fs_register_inmem_page(struct inode *inode, struct page *page);
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end);
void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
					block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
						enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
								block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
			enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);
/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
			struct inode *inode, struct page *page,
			nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
			struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
			struct bio **bio, sector_t *last_block,
			struct writeback_control *wbc,
			enum iostat_type io_type,
			int compr_blocks, bool allow_balance);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);
/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
int __init f2fs_create_recovery_cache(void);
void f2fs_destroy_recovery_cache(void);
/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages, compress_pages;
	int compress_page_hit;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}
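
/*
 * The stat_*() helpers below update in-memory debugging counters.  When
 * CONFIG_F2FS_STAT_FS is disabled they compile away to no-ops (see the
 * #else branch further down), so callers can use them unconditionally.
 */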
#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
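/*
 * stat_inc_meta_count() classifies a meta block by where its address
 * falls in the on-disk layout: addresses below the SIT base belong to
 * the checkpoint area, and the SIT, NAT and SSA areas follow in order,
 * each bounded by the start address of the next region.
 */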
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
		(atomic_inc(&(sbi)->inplace_count))
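/*
 * The two "max" trackers below use an unlocked read-compare-set
 * sequence, so the recorded peaks are best-effort debugging figures
 * rather than exact maxima.
 */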
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)
#define stat_inc_tot_blk_count(si, blks)				\
		((si)->tot_blks += (blks))
#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)
int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif
extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;
/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
			struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
			struct page *page, struct inode *dir,
			struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);
/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
			struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root,
			struct rb_node **parent,
			unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root,
			struct rb_node **parent,
			unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
			struct rb_entry *cached_re, unsigned int ofs,
			struct rb_entry **prev_entry, struct rb_entry **next_entry,
			struct rb_node ***insert_p, struct rb_node **insert_parent,
			bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);
/*
 * sysfs.c
 */
#define MIN_RA_MUL	2
#define MAX_RA_MUL	256

int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
/* verity.c */
extern const struct fsverity_operations f2fs_verityops;
/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}
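
/*
 * Note: when this returns true, direct I/O cannot be used on the inode;
 * f2fs_force_buffered_io() below relies on this to route such reads and
 * writes through the page cache.
 */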
/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
			block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
			int *submitted,
			struct writeback_control *wbc,
			enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_extent_tree_range_compressed(struct inode *inode,
			pgoff_t fofs, block_t blkaddr, unsigned int llen,
			unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
			unsigned nr_pages, sector_t *last_block_in_bio,
			bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
			nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode)					\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		sbi->compr_new_inode++;					\
	} while (0)
#define add_compr_block_stat(inode, blocks)				\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
		sbi->compr_written_block += blocks;			\
		sbi->compr_saved_block += diff;				\
	} while (0)
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* not support compression */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
						bool failed, block_t blkaddr)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
	WARN_ON_ONCE(1);
}
static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
						block_t blkaddr) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
			struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
			struct page *page, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
						nid_t ino) { }
#define inc_compr_inode_stat(inode)		do { } while (0)
static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
			pgoff_t fofs, block_t blkaddr, unsigned int llen,
			unsigned int c_len) { }
#endif
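
/*
 * set_compress_context() packs the per-inode compression state from the
 * current mount options: bit COMPRESS_CHKSUM of i_compress_flag records
 * whether cluster checksums are enabled, and for LZ4/ZSTD a non-zero
 * compression level is stored starting at COMPRESS_LEVEL_OFFSET.
 */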
static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
			F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_flag |=
				F2FS_OPTION(sbi).compress_level <<
					COMPRESS_LEVEL_OFFSET;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	inc_compr_inode_stat(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}
static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) &&
		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}
#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}
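
/*
 * Each invocation below defines one feature predicate.  For example,
 * F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */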
F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
static inline bool f2fs_may_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, EXTENT_CACHE) ||
			is_inode_flag_set(inode, FI_NO_EXTENT) ||
			(is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
			 !f2fs_sb_has_readonly(sbi)))
		return false;

	/*
	 * for recovered files during mount do not create extents
	 * if shrinker is not registered.
	 */
	if (list_empty(&sbi->s_list))
		return false;

	return S_ISREG(inode->i_mode);
}
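
/*
 * On zoned block devices, FDEV(devi).blkz_seq is a bitmap with one bit
 * per zone; f2fs_blkz_is_seq() below converts a block address to its
 * zone number by shifting and tests whether that zone must be written
 * sequentially.
 */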
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif
static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}
static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}
static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}
static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
			f2fs_is_atomic_file(inode) ||
			f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}
static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}
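
/*
 * block_unaligned_IO() ORs the file offset with the iov_iter alignment
 * and masks against the block size.  For a 4KiB block size the mask is
 * 0xfff, so e.g. a 512-byte offset yields a non-zero result and the
 * request counts as unaligned.
 */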
static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}
static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * for blkzoned device, fallback direct IO to buffered IO, so
	 * all IOs can be serialized by log-structured write.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
		return true;

	return false;
}
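
/*
 * Only pages holding data within i_size need fs-verity verification, so
 * f2fs_need_verity() bounds the page index by
 * DIV_ROUND_UP(i_size, PAGE_SIZE); pages wholly beyond EOF are skipped.
 */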
static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif
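
/*
 * Quota is treated as journalled either when the quota_ino feature keeps
 * quota files in hidden inodes, or when journalled quota files were
 * named via the usrjquota/grpjquota/prjjquota mount options.
 */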
static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}
static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
}
#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */