// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "iostat.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
	[FAULT_SLAB_ALLOC]	= "slab alloc",
	[FAULT_DQUOT_INIT]	= "dquot initialize",
	[FAULT_LOCK_OP]		= "lock_op",
};

void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif
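/*
 * Illustrative note (not part of the driver): with
 * CONFIG_F2FS_FAULT_INJECTION enabled, fault injection is typically armed
 * from the mount options parsed below, e.g.:
 *
 *	mount -t f2fs -o fault_injection=1,fault_type=8 /dev/sdb1 /mnt
 *
 * fault_injection=N injects roughly one fault per N eligible operations,
 * and fault_type is a bitmask over the FAULT_* indexes above (the value 8
 * here is only an example bit; which FAULT_* it selects depends on the enum
 * order in f2fs.h). Callers then gate their error paths with
 * time_to_inject(), e.g.:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC))
 *		return NULL;	// simulated allocation failure
 */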
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
	Opt_inline_xattr_size,
	Opt_test_dummy_encryption,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_nocheckpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_nocompress_extension,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_checkpoint_merge, "checkpoint_merge"},
	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_nocompress_extension, "nocompress_extension=%s"},
	{Opt_compress_chksum, "compress_chksum"},
	{Opt_compress_mode, "compress_mode=%s"},
	{Opt_compress_cache, "compress_cache"},
	{Opt_atgc, "atgc"},
	{Opt_gc_merge, "gc_merge"},
	{Opt_nogc_merge, "nogc_merge"},
	{Opt_discard_unit, "discard_unit=%s"},
	{Opt_err, NULL},
};
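/*
 * For illustration only: a mount line exercising several of the tokens above
 * might look like
 *
 *	mount -t f2fs -o background_gc=on,discard,compress_algorithm=lz4,\
 *	compress_extension=log,checkpoint=disable:10% /dev/sdb1 /mnt
 *
 * match_token() splits each comma-separated option against this table and
 * fills args[] with any %s/%u captures, which parse_options() below converts
 * with match_strdup()/match_int().
 */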
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}
#if IS_ENABLED(CONFIG_UNICODE)
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	unsigned int version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};

static const struct f2fs_sb_encodings *
f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			return &f2fs_sb_encoding_map[i];

	return NULL;
}
struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
							F2FS_NAME_LEN);
	if (!f2fs_cf_name_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_casefold_cache(void)
{
	kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count << 1) / 1000,
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 0.2% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}
static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
{
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))
		return 0;

	/* average valid block count in section in worst case */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);
		return -ENOSPC;
	}

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
			 wanted_reserved_segments);

	return 0;
}
static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}
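/*
 * Worked example (illustrative): with user_block_count = 1,000,000 blocks and
 * checkpoint=disable:20%, unusable_cap becomes (1000000 / 100) * 20 = 200,000
 * blocks; with the special case of 100% it is the whole user_block_count.
 * Note the integer division: the cap is computed in whole-percent units.
 */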
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
	int err;

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount. We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}
	err = fscrypt_set_test_dummy_encryption(
		sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
	f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
	return 0;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * 1. The same extension name cannot appear in both the compress and
 *    non-compress extension lists at the same time.
 * 2. If the compress extension specifies all files, the types specified by
 *    the non-compress extension will be treated as special cases and will
 *    not be compressed.
 * 3. Don't allow the non-compress extension to specify all files.
 */
static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
{
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt, index = 0, no_index = 0;

	ext = F2FS_OPTION(sbi).extensions;
	ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
	noext = F2FS_OPTION(sbi).noextensions;
	noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

	if (!noext_cnt)
		return 0;

	for (no_index = 0; no_index < noext_cnt; no_index++) {
		if (!strcasecmp("*", noext[no_index])) {
			f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
			return -EINVAL;
		}
		for (index = 0; index < ext_cnt; index++) {
			if (!strcasecmp(ext[index], noext[no_index])) {
				f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
						ext[index]);
				return -EINVAL;
			}
		}
	}
	return 0;
}
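/*
 * Examples (illustrative): "compress_extension=*,nocompress_extension=log"
 * is accepted by the rules above (everything but *.log gets compressed),
 * while "nocompress_extension=*" or
 * "compress_extension=log,nocompress_extension=log" are rejected by the two
 * checks in this function.
 */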
#ifdef CONFIG_F2FS_FS_LZ4
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	unsigned int level;
#endif

	if (strlen(str) == 3) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

#ifdef CONFIG_F2FS_FS_LZ4HC
	str += 3;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
#else
	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
	return -EINVAL;
#endif
}
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
	unsigned int level;
	int len = 4;

	if (strlen(str) == len) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

	str += len;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (!level || level > zstd_max_clevel()) {
		f2fs_info(sbi, "invalid zstd compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
}
#endif
#endif
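/*
 * For illustration: "compress_algorithm=zstd" keeps compress_level at 0 (use
 * the zstd default), while "compress_algorithm=zstd:10" requests level 10
 * and is rejected if 10 > zstd_max_clevel() on this kernel. The lz4 variant
 * above behaves the same way for "lz4:<level>" when CONFIG_F2FS_FS_LZ4HC is
 * set.
 */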
static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt;
#endif
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
	int ret;

	if (!options)
		goto default_check;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "on")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
			} else if (!strcmp(name, "sync")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			if (!f2fs_hw_support_discard(sbi)) {
				f2fs_warn(sbi, "device does not support discard");
				break;
			}
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_hw_should_discard(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 &&
				arg != NR_CURSEG_PERSIST_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "adaptive")) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
			} else if (!strcmp(name, "lfs")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
			} else if (!strcmp(name, "fragment:segment")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
			} else if (!strcmp(name, "fragment:block")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
				f2fs_warn(sbi, "Not support %d, larger than %d",
					  1 << arg, BIO_MAX_VECS);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;
		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;
		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (!strcmp(name, "default")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (!strcmp(name, "reuse")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "posix")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (!strcmp(name, "strict")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (!strcmp(name, "nobarrier")) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
							     is_remount);
			if (ret)
				return ret;
			break;
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_merge:
			set_opt(sbi, MERGE_CHECKPOINT);
			break;
		case Opt_nocheckpoint_merge:
			clear_opt(sbi, MERGE_CHECKPOINT);
			break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZO;
#else
				f2fs_info(sbi, "kernel doesn't support lzo compression");
#endif
			} else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
				ret = f2fs_set_lz4hc_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZ4;
#else
				f2fs_info(sbi, "kernel doesn't support lz4 compression");
#endif
			} else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
				ret = f2fs_set_zstd_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_ZSTD;
#else
				f2fs_info(sbi, "kernel doesn't support zstd compression");
#endif
			} else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZORLE;
#else
				f2fs_info(sbi, "kernel doesn't support lzorle compression");
#endif
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < MIN_COMPRESS_LOG_SIZE ||
				arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).compress_log_size = arg;
			break;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
			kfree(name);
			break;
		case Opt_nocompress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			noext = F2FS_OPTION(sbi).noextensions;
			noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				noext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			strcpy(noext[noext_cnt], name);
			F2FS_OPTION(sbi).nocompress_ext_cnt++;
			kfree(name);
			break;
		case Opt_compress_chksum:
			F2FS_OPTION(sbi).compress_chksum = true;
			break;
		case Opt_compress_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "fs")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
			} else if (!strcmp(name, "user")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_cache:
			set_opt(sbi, COMPRESS_CACHE);
			break;
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
		case Opt_nocompress_extension:
		case Opt_compress_chksum:
		case Opt_compress_mode:
		case Opt_compress_cache:
			f2fs_info(sbi, "compression options not supported");
			break;
#endif
		case Opt_atgc:
			set_opt(sbi, ATGC);
			break;
		case Opt_gc_merge:
			set_opt(sbi, GC_MERGE);
			break;
		case Opt_nogc_merge:
			clear_opt(sbi, GC_MERGE);
			break;
		case Opt_discard_unit:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "block")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_BLOCK;
			} else if (!strcmp(name, "segment")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_SEGMENT;
			} else if (!strcmp(name, "section")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_SECTION;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
default_check:
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif
#if !IS_ENABLED(CONFIG_UNICODE)
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi)) {
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
	}
#endif
	if (f2fs_sb_has_blkzoned(sbi)) {
		if (F2FS_OPTION(sbi).discard_unit !=
						DISCARD_UNIT_SECTION) {
			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
			F2FS_OPTION(sbi).discard_unit =
					DISCARD_UNIT_SECTION;
		}
	}
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_test_compress_extension(sbi)) {
		f2fs_err(sbi, "invalid compress or nocompress extension");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
		return -EINVAL;
	}

	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Allow to mount readonly mode only");
		return -EROFS;
	}
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC)) {
		f2fs_show_injection_info(F2FS_SB(sb), FAULT_SLAB_ALLOC);
		return NULL;
	}

	fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	atomic_set(&fi->i_compr_blocks, 0);
	init_f2fs_rwsem(&fi->i_sem);
	spin_lock_init(&fi->i_size_lock);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_f2fs_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* fi->extent_tree should remain for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->total_valid_inode_count);
	percpu_counter_destroy(&sbi->rf_node_block_count);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
		kfree(FDEV(i).zone_capacity_blocks);
#endif
	}
	kvfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * Flush all issued checkpoints and stop the checkpoint issue thread.
	 * After that, all checkpoints should be done by each process context.
	 */
	f2fs_stop_ckpt_thread(sbi);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	f2fs_destroy_compress_inode(sbi);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	f2fs_destroy_iostat(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync)
		err = f2fs_issue_checkpoint(sbi);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;

	/* ensure no checkpoint required */
	if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
		return -EINVAL;

	/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
	set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
	return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
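/*
 * Worked example (illustrative): with a 4KB block size, a project block hard
 * limit of 400MB (102400 blocks) and 100MB (25600 blocks) already charged,
 * statfs on a directory in that project reports f_blocks = 102400 and
 * f_bfree = f_bavail = 76800, i.e. df inside the project sees the quota
 * limit rather than the whole device.
 */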
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	spin_lock(&sbi->stat_lock);
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
static inline void f2fs_show_compress_options(struct seq_file *seq,
					      struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *algtype = "";
	int i;

	if (!f2fs_sb_has_compression(sbi))
		return;

	switch (F2FS_OPTION(sbi).compress_algorithm) {
	case COMPRESS_LZO:
		algtype = "lzo";
		break;
	case COMPRESS_LZ4:
		algtype = "lz4";
		break;
	case COMPRESS_ZSTD:
		algtype = "zstd";
		break;
	case COMPRESS_LZORLE:
		algtype = "lzo-rle";
		break;
	}
	seq_printf(seq, ",compress_algorithm=%s", algtype);

	if (F2FS_OPTION(sbi).compress_level)
		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);

	seq_printf(seq, ",compress_log_size=%u",
			F2FS_OPTION(sbi).compress_log_size);

	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
		seq_printf(seq, ",compress_extension=%s",
			F2FS_OPTION(sbi).extensions[i]);
	}

	for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
		seq_printf(seq, ",nocompress_extension=%s",
			F2FS_OPTION(sbi).noextensions[i]);
	}

	if (F2FS_OPTION(sbi).compress_chksum)
		seq_puts(seq, ",compress_chksum");

	if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
		seq_printf(seq, ",compress_mode=%s", "fs");
	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
		seq_printf(seq, ",compress_mode=%s", "user");

	if (test_opt(sbi, COMPRESS_CACHE))
		seq_puts(seq, ",compress_cache");
}
#endif
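/*
 * For illustration: a filesystem mounted with
 * "compress_algorithm=zstd:6,compress_extension=log,compress_chksum" would
 * surface in /proc/mounts (via f2fs_show_options() below) as fragments like
 * ",compress_algorithm=zstd:6,compress_log_size=2,compress_extension=log,
 * compress_chksum,compress_mode=fs", assuming the default cluster log size
 * of 2 and the default "fs" compress mode.
 */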
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, GC_MERGE))
		seq_puts(seq, ",gc_merge");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	else
		seq_puts(seq, ",nodiscard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
		seq_puts(seq, "fragment:segment");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
		seq_puts(seq, "fragment:block");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (test_opt(sbi, MERGE_CHECKPOINT))
		seq_puts(seq, ",checkpoint_merge");
	else
		seq_puts(seq, ",nocheckpoint_merge");
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	f2fs_show_compress_options(seq, sbi->sb);
#endif

	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");

	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
		seq_printf(seq, ",discard_unit=%s", "block");
	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
		seq_printf(seq, ",discard_unit=%s", "segment");
	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
		seq_printf(seq, ",discard_unit=%s", "section");

	return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	if (f2fs_sb_has_readonly(sbi))
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
	else
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;

	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
	F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
	F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
	F2FS_OPTION(sbi).compress_ext_cnt = 0;
	F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;

	sbi->sb->s_flags &= ~SB_INLINECRYPT;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	set_opt(sbi, MERGE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
		set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi)) {
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
	} else {
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	f2fs_build_fault_attr(sbi, 0, 0);
}
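/*
 * Illustrative summary: on a conventional (non-zoned) device that supports
 * discard, the defaults above are roughly equivalent to mounting with
 * "background_gc=on,discard,inline_xattr,inline_data,inline_dentry,
 * extent_cache,no_heap,flush_merge,checkpoint_merge,lazytime,mode=adaptive,
 * discard_unit=block,alloc_mode=default,fsync_mode=posix".
 */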
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	unsigned int gc_mode;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	gc_mode = sbi->gc_mode;
	sbi->gc_mode = GC_URGENT_HIGH;

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		f2fs_down_write(&sbi->gc_lock);
		err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

	f2fs_down_write(&sbi->gc_lock);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	f2fs_up_write(&sbi->gc_lock);
restore_flag:
	sbi->gc_mode = gc_mode;
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	return err;
}
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	int retry = DEFAULT_RETRY_IO_COUNT;

	/* we should flush all the data to keep data consistency */
	do {
		sync_inodes_sb(sbi->sb);
		f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);

	if (unlikely(retry < 0))
		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");

	f2fs_down_write(&sbi->gc_lock);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	f2fs_up_write(&sbi->gc_lock);

	f2fs_sync_fs(sbi->sb, 1);
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false, need_stop_gc = false;
	bool need_restart_ckpt = false, need_stop_ckpt = false;
	bool need_restart_flush = false, need_stop_flush = false;
	bool need_restart_discard = false, need_stop_discard = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
	bool no_atgc = !test_opt(sbi, ATGC);
	bool no_discard = !test_opt(sbi, DISCARD);
	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
	bool block_unit_discard = f2fs_block_unit_discard(sbi);
	struct discard_cmd_control *dcc;
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data, true);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

	if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
		err = -EROFS;
		goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable atgc dynamically */
	if (no_atgc == !!test_opt(sbi, ATGC)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch atgc option is not allowed");
		goto restore_opts;
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
		goto restore_opts;
	}

	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch io_bits option is not allowed");
		goto restore_opts;
	}

	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch compress_cache option is not allowed");
		goto restore_opts;
	}

	if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch discard_unit option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) ||
			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
			!test_opt(sbi, GC_MERGE))) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY) {
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
			!test_opt(sbi, MERGE_CHECKPOINT)) {
		f2fs_stop_ckpt_thread(sbi);
		need_restart_ckpt = true;
	} else {
		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
			    "Failed to start F2FS issue_checkpoint_thread (%d)",
			    err);
			goto restore_gc;
		}
		need_stop_ckpt = true;
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
		need_restart_flush = true;
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_ckpt;
		need_stop_flush = true;
	}

	if (no_discard == !!test_opt(sbi, DISCARD)) {
		if (test_opt(sbi, DISCARD)) {
			err = f2fs_start_discard_thread(sbi);
			if (err)
				goto restore_flush;
			need_stop_discard = true;
		} else {
			dcc = SM_I(sbi)->dcc_info;
			f2fs_stop_discard_thread(sbi);
			if (atomic_read(&dcc->discard_cmd_cnt))
				f2fs_issue_discard_timeout(sbi);
			need_restart_discard = true;
		}
	}

	if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_discard;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}

skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
restore_discard:
	if (need_restart_discard) {
		if (f2fs_start_discard_thread(sbi))
			f2fs_warn(sbi, "discard has been stopped");
	} else if (need_stop_discard) {
		f2fs_stop_discard_thread(sbi);
	}
restore_flush:
	if (need_restart_flush) {
		if (f2fs_create_flush_cmd_control(sbi))
			f2fs_warn(sbi, "background flush thread has stopped");
	} else if (need_stop_flush) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	}
restore_ckpt:
	if (need_restart_ckpt) {
		if (f2fs_start_ckpt_thread(sbi))
			f2fs_warn(sbi, "background ckpt thread has stopped");
	} else if (need_stop_ckpt) {
		f2fs_stop_ckpt_thread(sbi);
	}
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_warn(sbi, "background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
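/*
 * Note on the read path above: -ENOMEM from the page cache is retried
 * indefinitely after a short backoff (memalloc_retry_wait()), while any
 * other failure is treated as quota file corruption and recorded via
 * SBI_QUOTA_NEED_REPAIR so a later fsck can repair it.
 */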
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	void *fsdata = NULL;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
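/*
 * As with the quota_write helpers in other filesystems, a failure after
 * some blocks were copied is reported as a short write (len - towrite);
 * the error code itself is returned only when nothing was written.
 */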
int f2fs_dquot_initialize(struct inode *inode)
{
	if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_DQUOT_INIT);
		return -ESRCH;
	}

	return dquot_initialize(inode);
}
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
		return 0;
	}

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, i);
		}
	}
	return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
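/*
 * With the quota_ino feature, quota metadata lives in hidden inodes
 * reserved at mkfs time rather than in user-visible files, which is why
 * DQUOT_QUOTA_SYS_FILE is set above and why f2fs_quota_on() refuses
 * file-based quota when the feature is enabled.
 */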
static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
{
	struct quota_info *dqopt = sb_dqopt(sbi->sb);
	struct address_space *mapping = dqopt->files[type]->i_mapping;
	int ret = 0;

	ret = dquot_writeback_dquots(sbi->sb, type);
	if (ret)
		goto out;

	ret = filemap_fdatawrite(mapping);
	if (ret)
		goto out;

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		goto out;

	ret = filemap_fdatawait(mapping);

	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
out:
	if (ret)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}
int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret = 0;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {

		if (type != -1 && cnt != type)
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		inode_lock(dqopt->files[cnt]);

		/*
		 * do_quotactl
		 *  f2fs_quota_sync
		 *   f2fs_down_read(quota_sem)
		 *   dquot_writeback_dquots()
		 *    f2fs_dquot_commit
		 *			block_operation
		 *			f2fs_down_read(quota_sem)
		 */
		f2fs_lock_op(sbi);
		f2fs_down_read(&sbi->quota_sem);

		ret = f2fs_quota_sync_file(sbi, cnt);

		f2fs_up_read(&sbi->quota_sem);
		f2fs_unlock_op(sbi);

		inode_unlock(dqopt->files[cnt]);

		if (ret)
			break;
	}
	return ret;
}
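/*
 * The resulting lock order is: quota file inode lock -> f2fs_lock_op()
 * (cp_rwsem) -> quota_sem. Taking cp_rwsem before quota_sem is what
 * avoids the checkpoint deadlock sketched in the comment inside the
 * loop above.
 */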
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	struct inode *inode;
	int err;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err;

	err = __f2fs_quota_off(sb, type);

	/*
	 * quotactl can shut down journalled quota; subsequent updates can
	 * then leave quota records inconsistent with fs data, so tag the
	 * flag to let fsck be aware of it.
	 */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = __f2fs_quota_off(sb, type);
		if (err) {
			int ret = dquot_quota_off(sb, type);

			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
				 type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush quota blocks.
	 * This can cause NULL exception for node_inode in end_io, since
	 * put_super already dropped it.
	 */
	sync_filesystem(sb);
}
static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}
static int f2fs_dquot_commit(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_acquire(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read(&sbi->quota_sem);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_release(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret = dquot_release(dquot);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_mark_dquot_dirty(dquot);

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);

	return ret;
}

static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_commit_info(sb, type);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot = f2fs_dquot_commit,
	.acquire_dquot = f2fs_dquot_acquire,
	.release_dquot = f2fs_dquot_release,
	.mark_dirty = f2fs_dquot_mark_dquot_dirty,
	.write_info = f2fs_dquot_commit_info,
	.alloc_dquot = dquot_alloc,
	.destroy_dquot = dquot_destroy,
	.get_projid = f2fs_get_projid,
	.get_next_id = dquot_get_next_id,
};
static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on = f2fs_quota_on,
	.quota_off = f2fs_quota_off,
	.quota_sync = f2fs_quota_sync,
	.get_state = dquot_get_state,
	.set_info = dquot_set_dqinfo,
	.get_dqblk = dquot_get_dqblk,
	.set_dqblk = dquot_set_dqblk,
	.get_nextdqblk = dquot_get_next_dqblk,
};
#else
int f2fs_dquot_initialize(struct inode *inode)
{
	return 0;
}

int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode = f2fs_alloc_inode,
	.free_inode = f2fs_free_inode,
	.drop_inode = f2fs_drop_inode,
	.write_inode = f2fs_write_inode,
	.dirty_inode = f2fs_dirty_inode,
	.show_options = f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read = f2fs_quota_read,
	.quota_write = f2fs_quota_write,
	.get_dquots = f2fs_get_dquots,
#endif
	.evict_inode = f2fs_evict_inode,
	.put_super = f2fs_put_super,
	.sync_fs = f2fs_sync_fs,
	.freeze_fs = f2fs_freeze,
	.unfreeze_fs = f2fs_unfreeze,
	.statfs = f2fs_statfs,
	.remount_fs = f2fs_remount,
};
#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
{
	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
}

static bool f2fs_has_stable_inodes(struct super_block *sb)
{
	return true;
}

static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(nid_t);
	*lblk_bits_ret = 8 * sizeof(block_t);
}

static int f2fs_get_num_devices(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (f2fs_is_multi_device(sbi))
		return sbi->s_ndevs;
	return 1;
}

static void f2fs_get_devices(struct super_block *sb,
			     struct request_queue **devs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		devs[i] = bdev_get_queue(FDEV(i).bdev);
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix = "f2fs:",
	.get_context = f2fs_get_context,
	.set_context = f2fs_set_context,
	.get_dummy_policy = f2fs_get_dummy_policy,
	.empty_dir = f2fs_empty_dir,
	.has_stable_inodes = f2fs_has_stable_inodes,
	.get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
	.get_num_devices = f2fs_get_num_devices,
	.get_devices = f2fs_get_devices,
};
#endif
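/*
 * The "f2fs:" key_prefix above exists only for compatibility with old
 * userspace that added keys under a filesystem-specific prefix; newer
 * fscrypt userspace uses the generic "fscrypt:" prefix instead.
 */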
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
loff_t max_file_blocks(struct inode *inode)
{
	loff_t result = 0;
	loff_t leaf_count;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign
	 * result as zero.
	 */

	if (inode && f2fs_compressed_file(inode))
		leaf_count = ADDRS_PER_BLOCK(inode);
	else
		leaf_count = DEF_ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
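/*
 * Worked example (non-compressed inode, 4KB blocks, assuming the usual
 * constants DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): the sum
 * above is 2*1018 + 2*1018^2 + 1018^3, roughly 1.06 billion blocks,
 * i.e. about 4TB of addressable file data beyond the inode's own
 * direct pointers.
 */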
static int __f2fs_commit_super(struct buffer_head *bh,
			       struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can do FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					      struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
			  main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
			  res, main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
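/*
 * The checks above encode the fixed on-disk layout order
 * CP -> SIT -> NAT -> SSA -> MAIN: each area must begin exactly where
 * the previous one ends, and only a MAIN area ending short of the last
 * segment can be fixed up in place.
 */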
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				  struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				  crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* Currently, support only 4KB block size */
	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
			  le32_to_cpu(raw_super->log_blocksize),
			  F2FS_BLKSIZE_BITS);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			  le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			  le32_to_cpu(raw_super->log_sectors_per_block),
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if (segment_count_main != total_sections * segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
			  segment_count_main, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			  segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			  segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (RDEV(0).path[0]) {
		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
		int i = 1;

		while (i < MAX_DEVICES && RDEV(i).path[0]) {
			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
			i++;
		}
		if (segment_count != dev_seg_count) {
			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
				  segment_count, dev_seg_count);
			return -EFSCORRUPTED;
		}
	} else {
		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
					!bdev_is_zoned(sbi->sb->s_bdev)) {
			f2fs_info(sbi, "Zoned block device path is missing");
			return -EFSCORRUPTED;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			  secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			  le32_to_cpu(raw_super->extension_count),
			  raw_super->hot_ext_count,
			  F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	if (le32_to_cpu(raw_super->cp_payload) >=
			(blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_PERSIST_TYPE)) {
		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
			  le32_to_cpu(raw_super->cp_payload),
			  blocks_per_seg - F2FS_CP_PACKS -
			  NR_CURSEG_PERSIST_TYPE);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			  le32_to_cpu(raw_super->node_ino),
			  le32_to_cpu(raw_super->meta_ino),
			  le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return -EFSCORRUPTED;

	return 0;
}
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (!f2fs_sb_has_readonly(sbi) &&
			unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}
	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_err(sbi, "Wrong user_block_count: %u",
			 user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
			 valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
			 valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;

		if (f2fs_sb_has_readonly(sbi))
			goto check_data;

		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
check_data:
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;

		if (f2fs_sb_has_readonly(sbi))
			goto skip_cross;

		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
skip_cross:
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
			 sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_PERSIST_TYPE) {
		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
			 cp_pack_start_sum);
		return 1;
	}

	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
			  le32_to_cpu(ckpt->checksum_offset));
		return 1;
	}

	nat_blocks = nat_segs << log_blocks_per_seg;
	nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
	nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
		(cp_payload + F2FS_CP_PACKS +
		NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
		f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
			  cp_payload, nat_bits_blocks);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_err(sbi, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->gc_mode = GC_NORMAL;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;
	sbi->seq_file_ra_mul = MIN_RA_MUL;
	sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
	sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
	spin_lock_init(&sbi->gc_urgent_high_lock);

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_f2fs_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_f2fs_rwsem(&sbi->sb_lock);
	init_f2fs_rwsem(&sbi->pin_sem);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
	if (err)
		goto err_valid_block;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		goto err_node_block;
	return 0;

err_node_block:
	percpu_counter_destroy(&sbi->rf_node_block_count);
err_valid_block:
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	return err;
}
#ifdef CONFIG_BLK_DEV_ZONED

struct f2fs_report_zones_args {
	struct f2fs_dev_info *dev;
	bool zone_cap_mismatch;
};

static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
			       void *data)
{
	struct f2fs_report_zones_args *rz_args = data;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return 0;

	set_bit(idx, rz_args->dev->blkz_seq);
	rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
						F2FS_LOG_SECTORS_PER_BLOCK;
	if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
		rz_args->zone_cap_mismatch = true;

	return 0;
}

static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev_nr_sectors(bdev);
	struct f2fs_report_zones_args rep_zone_arg;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	/* Get block zones type and zone-capacity */
	FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
					FDEV(devi).nr_blkz * sizeof(block_t),
					GFP_KERNEL);
	if (!FDEV(devi).zone_capacity_blocks)
		return -ENOMEM;

	rep_zone_arg.dev = &FDEV(devi);
	rep_zone_arg.zone_cap_mismatch = false;

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;

	if (!rep_zone_arg.zone_cap_mismatch) {
		kfree(FDEV(devi).zone_capacity_blocks);
		FDEV(devi).zone_capacity_blocks = NULL;
	}

	return 0;
}
#endif
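/*
 * zone_capacity_blocks is kept only when at least one zone reports a
 * usable capacity smaller than its size (as NVMe ZNS devices may);
 * when capacity equals size everywhere, the array is freed above and
 * whole zones are treated as usable.
 */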
/*
 * Read f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either is broken, pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			*recovery = 1;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			*recovery = 1;
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
				sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
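/*
 * The backup copy is committed before the currently-valid one, so a
 * crash in the middle of the update still leaves one superblock that
 * read_raw_super_block() can validate on the next mount.
 */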
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	unsigned int logical_blksize;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
	sbi->aligned_blksize = true;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1 +
					le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

		if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
			sbi->aligned_blksize = false;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_err(sbi, "Zoned block device feature not enabled");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk,
				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				  "Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_info(sbi,
		  "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#if IS_ENABLED(CONFIG_UNICODE)
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
		if (!encoding_info) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%u.%u.%u "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name,
				 unicode_major(encoding_info->version),
				 unicode_minor(encoding_info->version),
				 unicode_rev(encoding_info->version),
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			  "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
			  unicode_major(encoding_info->version),
			  unicode_minor(encoding_info->version),
			  unicode_rev(encoding_info->version),
			  encoding_flags);

		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		if (f2fs_block_unit_discard(sbi))
			sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE |
					1 << F2FS_IPU_HONOR_OPU_WRITE;
	}

	sbi->readdir_ra = 1;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_err(sbi, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_err(sbi, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;
	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						 sizeof(raw_super->uuid));

	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options, false);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_blocks(NULL) <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif
	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	init_f2fs_rwsem(&sbi->gc_lock);
	mutex_init(&sbi->writepages);
	init_f2fs_rwsem(&sbi->cp_global_sem);
	init_f2fs_rwsem(&sbi->node_write);
	init_f2fs_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				     array_size(n,
						sizeof(struct f2fs_bio_info)),
				     GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
		}
	}

	init_f2fs_rwsem(&sbi->cp_rwsem);
	init_f2fs_rwsem(&sbi->quota_sem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = f2fs_init_iostat(sbi);
	if (err)
		goto free_bio_info;

	err = init_percpu_info(sbi);
	if (err)
		goto free_iostat;
	if (F2FS_IO_ALIGNED(sbi)) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}

	/* init per sbi slab cache */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		goto free_io_dummy;
	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_xattr_cache;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_page_array_cache;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}
	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
	mutex_init(&sbi->flush_lock);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup checkpoint request control and start checkpoint issue thread */
	f2fs_init_ckpt_req_control(sbi);
	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
			test_opt(sbi, MERGE_CHECKPOINT)) {
		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
				"Failed to start F2FS issue_checkpoint_thread (%d)",
				err);
			goto stop_ckpt_thread;
		}
	}
	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	err = adjust_reserved_segment(sbi);
	if (err)
		goto free_nm;

	/* For write statistics */
	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_init_compress_inode(sbi);
	if (err)
		goto free_root_inode;

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_compress_inode;
#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = f2fs_recover_fsync_data(sbi, true);
				if (err > 0) {
					err = -EROFS;
					f2fs_err(sbi, "Need to recover fsync data, but "
						"write access unavailable, please try "
						"mount w/ disable_roll_forward or norecovery");
				}
				if (err < 0)
					goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_err(sbi, "Need to recover fsync data");
			goto free_meta;
		}
	}

	/*
	 * If f2fs is not readonly and fsync data recovery succeeds,
	 * check zoned block devices' write pointer consistency.
	 */
	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_check_write_pointer(sbi);
		if (err)
			goto free_meta;
	}
reset_checkpoint:
	f2fs_init_inmem_curseg(sbi);

	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;
sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_compress_inode:
	f2fs_destroy_compress_inode(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	/* stop discard thread before destroying node manager */
	f2fs_stop_discard_thread(sbi);
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
	f2fs_destroy_post_read_wq(sbi);
stop_ckpt_thread:
	f2fs_stop_ckpt_thread(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
	f2fs_destroy_xattr_caches(sbi);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_iostat:
	f2fs_destroy_iostat(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);

#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
	sb->s_encoding = NULL;
#endif
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	kvfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one more chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		/*
		 * so that the later evict_inode() can bypass checking
		 * and invalidating the compress inode cache.
		 */
		if (test_opt(sbi, COMPRESS_CACHE))
			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
#endif

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner = THIS_MODULE,
	.name = "f2fs",
	.mount = f2fs_mount,
	.kill_sb = kill_f2fs_super,
	.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("f2fs");
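/*
 * Example usage (illustrative only; the device and mount point are
 * placeholders):
 *
 *	mkfs.f2fs /dev/sdb1
 *	mount -t f2fs -o background_gc=on,discard /dev/sdb1 /mnt
 */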
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
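/*
 * The rcu_barrier() above matters because inodes are freed through
 * call_rcu(); destroying the kmem cache while RCU callbacks are still
 * pending would be a use-after-free.
 */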
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_recovery_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_recovery_cache;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_iostat_processing();
	if (err)
		goto free_post_read;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_iostat;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	err = f2fs_create_casefold_cache();
	if (err)
		goto free_compress_cache;
	return 0;
free_compress_cache:
	f2fs_destroy_compress_cache();
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_iostat:
	f2fs_destroy_iostat_processing();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_recovery_cache:
	f2fs_destroy_recovery_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_casefold_cache();
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_iostat_processing();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32");