// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
    [FAULT_KMALLOC]     = "kmalloc",
    [FAULT_KVMALLOC]    = "kvmalloc",
    [FAULT_PAGE_ALLOC]  = "page alloc",
    [FAULT_PAGE_GET]    = "page get",
    [FAULT_ALLOC_NID]   = "alloc nid",
    [FAULT_ORPHAN]      = "orphan",
    [FAULT_BLOCK]       = "no more block",
    [FAULT_DIR_DEPTH]   = "too big dir depth",
    [FAULT_EVICT_INODE] = "evict_inode fail",
    [FAULT_TRUNCATE]    = "truncate fail",
    [FAULT_READ_IO]     = "read IO error",
    [FAULT_CHECKPOINT]  = "checkpoint error",
    [FAULT_DISCARD]     = "discard error",
    [FAULT_WRITE_IO]    = "write IO error",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
                           unsigned int type)
{
    struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

    if (rate) {
        atomic_set(&ffi->inject_ops, 0);
        ffi->inject_rate = rate;
    }
    if (type)
        ffi->inject_type = type;
    if (!rate && !type)
        memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif
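/*
 * Illustrative example (not part of the original file): mounting with
 * "-o fault_injection=4,fault_type=2" makes every fourth tracked operation
 * of the selected type fail. fault_type is a bitmask over the indexes of
 * f2fs_fault_name[] above, so the value 2 (bit 1) selects FAULT_KVMALLOC:
 *
 *   mount -t f2fs -o fault_injection=4,fault_type=2 /dev/sdb1 /mnt
 */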
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
    .scan_objects = f2fs_shrink_scan,
    .count_objects = f2fs_shrink_count,
    .seeks = DEFAULT_SEEKS,
};
enum {
    Opt_gc_background,
    Opt_disable_roll_forward,
    Opt_disable_ext_identify,
    Opt_inline_xattr_size,
    Opt_test_dummy_encryption,
    Opt_checkpoint_disable,
    Opt_checkpoint_disable_cap,
    Opt_checkpoint_disable_cap_perc,
    Opt_checkpoint_enable,
    Opt_checkpoint_merge,
    Opt_nocheckpoint_merge,
    Opt_compress_algorithm,
    Opt_compress_log_size,
    Opt_compress_extension,
    Opt_nocompress_extension,
    Opt_err,
};
static match_table_t f2fs_tokens = {
    {Opt_gc_background, "background_gc=%s"},
    {Opt_disable_roll_forward, "disable_roll_forward"},
    {Opt_norecovery, "norecovery"},
    {Opt_discard, "discard"},
    {Opt_nodiscard, "nodiscard"},
    {Opt_noheap, "no_heap"},
    {Opt_user_xattr, "user_xattr"},
    {Opt_nouser_xattr, "nouser_xattr"},
    {Opt_noacl, "noacl"},
    {Opt_active_logs, "active_logs=%u"},
    {Opt_disable_ext_identify, "disable_ext_identify"},
    {Opt_inline_xattr, "inline_xattr"},
    {Opt_noinline_xattr, "noinline_xattr"},
    {Opt_inline_xattr_size, "inline_xattr_size=%u"},
    {Opt_inline_data, "inline_data"},
    {Opt_inline_dentry, "inline_dentry"},
    {Opt_noinline_dentry, "noinline_dentry"},
    {Opt_flush_merge, "flush_merge"},
    {Opt_noflush_merge, "noflush_merge"},
    {Opt_nobarrier, "nobarrier"},
    {Opt_fastboot, "fastboot"},
    {Opt_extent_cache, "extent_cache"},
    {Opt_noextent_cache, "noextent_cache"},
    {Opt_noinline_data, "noinline_data"},
    {Opt_data_flush, "data_flush"},
    {Opt_reserve_root, "reserve_root=%u"},
    {Opt_resgid, "resgid=%u"},
    {Opt_resuid, "resuid=%u"},
    {Opt_mode, "mode=%s"},
    {Opt_io_size_bits, "io_bits=%u"},
    {Opt_fault_injection, "fault_injection=%u"},
    {Opt_fault_type, "fault_type=%u"},
    {Opt_lazytime, "lazytime"},
    {Opt_nolazytime, "nolazytime"},
    {Opt_quota, "quota"},
    {Opt_noquota, "noquota"},
    {Opt_usrquota, "usrquota"},
    {Opt_grpquota, "grpquota"},
    {Opt_prjquota, "prjquota"},
    {Opt_usrjquota, "usrjquota=%s"},
    {Opt_grpjquota, "grpjquota=%s"},
    {Opt_prjjquota, "prjjquota=%s"},
    {Opt_offusrjquota, "usrjquota="},
    {Opt_offgrpjquota, "grpjquota="},
    {Opt_offprjjquota, "prjjquota="},
    {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
    {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
    {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
    {Opt_whint, "whint_mode=%s"},
    {Opt_alloc, "alloc_mode=%s"},
    {Opt_fsync, "fsync_mode=%s"},
    {Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
    {Opt_test_dummy_encryption, "test_dummy_encryption"},
    {Opt_inlinecrypt, "inlinecrypt"},
    {Opt_checkpoint_disable, "checkpoint=disable"},
    {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
    {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
    {Opt_checkpoint_enable, "checkpoint=enable"},
    {Opt_checkpoint_merge, "checkpoint_merge"},
    {Opt_nocheckpoint_merge, "nocheckpoint_merge"},
    {Opt_compress_algorithm, "compress_algorithm=%s"},
    {Opt_compress_log_size, "compress_log_size=%u"},
    {Opt_compress_extension, "compress_extension=%s"},
    {Opt_nocompress_extension, "nocompress_extension=%s"},
    {Opt_compress_chksum, "compress_chksum"},
    {Opt_compress_mode, "compress_mode=%s"},
    {Opt_compress_cache, "compress_cache"},
    {Opt_gc_merge, "gc_merge"},
    {Opt_nogc_merge, "nogc_merge"},
    {Opt_err, NULL},
};
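/*
 * Illustrative example (not part of the original file): a mount line
 * exercising several of the tokens above:
 *
 *   mount -t f2fs -o background_gc=on,discard,compress_algorithm=lz4,\
 *   compress_extension=apk,checkpoint_merge,lazytime /dev/sdb1 /mnt
 */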
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
    struct va_format vaf;
    va_list args;
    int level;

    va_start(args, fmt);
    level = printk_get_level(fmt);
    vaf.fmt = printk_skip_level(fmt);
    vaf.va = &args;
    printk("%c%cF2FS-fs (%s): %pV\n",
           KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
    va_end(args);
}
#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
    __u16 magic;
    char *name;
    char *version;
} f2fs_sb_encoding_map[] = {
    {F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
                 const struct f2fs_sb_encodings **encoding,
                 __u16 *flags)
{
    __u16 magic = le16_to_cpu(sb->s_encoding);
    int i;

    for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
        if (magic == f2fs_sb_encoding_map[i].magic)
            break;

    if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
        return -EINVAL;

    *encoding = &f2fs_sb_encoding_map[i];
    *flags = le16_to_cpu(sb->s_encoding_flags);
    return 0;
}
struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
    f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
                            F2FS_NAME_LEN);
    if (!f2fs_cf_name_slab)
        return -ENOMEM;
    return 0;
}

static void f2fs_destroy_casefold_cache(void)
{
    kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
    block_t limit = min((sbi->user_block_count << 1) / 1000,
            sbi->user_block_count - sbi->reserved_blocks);

    /* limit is 0.2% */
    if (test_opt(sbi, RESERVE_ROOT) &&
            F2FS_OPTION(sbi).root_reserved_blocks > limit) {
        F2FS_OPTION(sbi).root_reserved_blocks = limit;
        f2fs_info(sbi, "Reduce reserved blocks for root = %u",
              F2FS_OPTION(sbi).root_reserved_blocks);
    }
    if (!test_opt(sbi, RESERVE_ROOT) &&
        (!uid_eq(F2FS_OPTION(sbi).s_resuid,
                make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
        !gid_eq(F2FS_OPTION(sbi).s_resgid,
                make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
        f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
              from_kuid_munged(&init_user_ns,
                       F2FS_OPTION(sbi).s_resuid),
              from_kgid_munged(&init_user_ns,
                       F2FS_OPTION(sbi).s_resgid));
}
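/*
 * Worked example (illustrative): with user_block_count = 10,000,000
 * blocks of 4KiB (~38GiB), (count << 1) / 1000 caps reserve_root at
 * 20,000 blocks, i.e. 0.2% of the user blocks (~78MiB).
 */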
static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
    if (!F2FS_OPTION(sbi).unusable_cap_perc)
        return;

    if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
        F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
    else
        F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
                    F2FS_OPTION(sbi).unusable_cap_perc;

    f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
            F2FS_OPTION(sbi).unusable_cap,
            F2FS_OPTION(sbi).unusable_cap_perc);
}
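/*
 * Worked example (illustrative): with user_block_count = 1,000,000 and
 * "checkpoint=disable:30%", unusable_cap becomes (1,000,000 / 100) * 30
 * = 300,000 blocks of unusable space tolerated while checkpointing is
 * disabled.
 */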
static void init_once(void *foo)
{
    struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

    inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
                            substring_t *args)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    char *qname;
    int ret = -EINVAL;

    if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
        f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
        return -EINVAL;
    }
    if (f2fs_sb_has_quota_ino(sbi)) {
        f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
        return 0;
    }

    qname = match_strdup(args);
    if (!qname) {
        f2fs_err(sbi, "Not enough memory for storing quotafile name");
        return -ENOMEM;
    }
    if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
        if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
            ret = 0;
        else
            f2fs_err(sbi, "%s quota file already specified",
                     QTYPE2NAME(qtype));
        goto errout;
    }
    if (strchr(qname, '/')) {
        f2fs_err(sbi, "quotafile must be on filesystem root");
        goto errout;
    }
    F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
    set_opt(sbi, QUOTA);
    return 0;
errout:
    kfree(qname);
    return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);

    if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
        f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
        return -EINVAL;
    }
    kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
    F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
    return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
    /*
     * We do the test below only for project quotas. 'usrquota' and
     * 'grpquota' mount options are allowed even without quota feature
     * to support legacy quotas in quota files.
     */
    if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
        f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
        return -1;
    }
    if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
            F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
            F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
        if (test_opt(sbi, USRQUOTA) &&
                F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
            clear_opt(sbi, USRQUOTA);

        if (test_opt(sbi, GRPQUOTA) &&
                F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
            clear_opt(sbi, GRPQUOTA);

        if (test_opt(sbi, PRJQUOTA) &&
                F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
            clear_opt(sbi, PRJQUOTA);

        if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
                test_opt(sbi, PRJQUOTA)) {
            f2fs_err(sbi, "old and new quota format mixing");
            return -1;
        }

        if (!F2FS_OPTION(sbi).s_jquota_fmt) {
            f2fs_err(sbi, "journaled quota format not specified");
            return -1;
        }
    }

    if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
        f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
        F2FS_OPTION(sbi).s_jquota_fmt = 0;
    }
    return 0;
}
#endif
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
                                          const char *opt,
                                          const substring_t *arg,
                                          bool is_remount)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
    int err;

    if (!f2fs_sb_has_encrypt(sbi)) {
        f2fs_err(sbi, "Encrypt feature is off");
        return -EINVAL;
    }

    /*
     * This mount option is just for testing, and it's not worthwhile to
     * implement the extra complexity (e.g. RCU protection) that would be
     * needed to allow it to be set or changed during remount. We do allow
     * it to be specified during remount, but only if there is no change.
     */
    if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
        f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
        return -EINVAL;
    }
    err = fscrypt_set_test_dummy_encryption(
        sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
    if (err) {
        if (err == -EEXIST)
            f2fs_warn(sbi,
                "Can't change test_dummy_encryption on remount");
        else if (err == -EINVAL)
            f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
                  opt);
        else
            f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
                  opt, err);
        return -EINVAL;
    }
    f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
    f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
    return 0;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * 1. The same extension name must not appear in both the compress and
 *    non-compress extension lists at the same time.
 * 2. If the compress extension covers all files, the types named by the
 *    non-compress extension are treated as special cases and are not
 *    compressed.
 * 3. The non-compress extension must not cover all files.
 */
static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
{
    unsigned char (*ext)[F2FS_EXTENSION_LEN];
    unsigned char (*noext)[F2FS_EXTENSION_LEN];
    int ext_cnt, noext_cnt, index = 0, no_index = 0;

    ext = F2FS_OPTION(sbi).extensions;
    ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
    noext = F2FS_OPTION(sbi).noextensions;
    noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

    if (!noext_cnt)
        return 0;

    for (no_index = 0; no_index < noext_cnt; no_index++) {
        if (!strcasecmp("*", noext[no_index])) {
            f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
            return -EINVAL;
        }
        for (index = 0; index < ext_cnt; index++) {
            if (!strcasecmp(ext[index], noext[no_index])) {
                f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
                        ext[index]);
                return -EINVAL;
            }
        }
    }
    return 0;
}
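/*
 * Illustrative example (not part of the original file):
 * "-o compress_extension=*,nocompress_extension=jpg" compresses every
 * file except *.jpg (rule 2), while "nocompress_extension=*" alone is
 * rejected (rule 3), as is naming the same extension in both lists
 * (rule 1).
 */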
#ifdef CONFIG_F2FS_FS_LZ4
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
    unsigned int level;
#endif
    if (strlen(str) == 3) {
        F2FS_OPTION(sbi).compress_level = 0;
        return 0;
    }
#ifdef CONFIG_F2FS_FS_LZ4HC
    str += 3;
    if (str[0] != ':') {
        f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
        return -EINVAL;
    }
    if (kstrtouint(str + 1, 10, &level))
        return -EINVAL;

    if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
        f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
        return -EINVAL;
    }

    F2FS_OPTION(sbi).compress_level = level;
    return 0;
#else
    f2fs_info(sbi, "kernel doesn't support lz4hc compression");
    return -EINVAL;
#endif
}
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
    unsigned int level;
    int len = 4;

    if (strlen(str) == len) {
        F2FS_OPTION(sbi).compress_level = 0;
        return 0;
    }

    str += len;
    if (str[0] != ':') {
        f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
        return -EINVAL;
    }
    if (kstrtouint(str + 1, 10, &level))
        return -EINVAL;

    if (!level || level > ZSTD_maxCLevel()) {
        f2fs_info(sbi, "invalid zstd compress level: %d", level);
        return -EINVAL;
    }

    F2FS_OPTION(sbi).compress_level = level;
    return 0;
}
#endif
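/*
 * Illustrative example (not part of the original file):
 * "-o compress_algorithm=zstd:6" selects zstd at level 6 and
 * "-o compress_algorithm=lz4hc:9" selects lz4hc at level 9; a bare
 * "lz4"/"zstd" leaves compress_level at 0, meaning the algorithm's
 * built-in default level.
 */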
static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
    unsigned char (*ext)[F2FS_EXTENSION_LEN];
    unsigned char (*noext)[F2FS_EXTENSION_LEN];
    int ext_cnt, noext_cnt;
#endif
    char *p, *name;
    int arg = 0;
    kuid_t uid;
    kgid_t gid;
    int ret;

    if (!options)
        goto default_check;

    while ((p = strsep(&options, ",")) != NULL) {
        int token;

        if (!*p)
            continue;
        /*
         * Initialize args struct so we know whether arg was
         * found; some options take optional arguments.
         */
        args[0].to = args[0].from = NULL;
        token = match_token(p, f2fs_tokens, args);

        switch (token) {
        case Opt_gc_background:
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;
            if (!strcmp(name, "on")) {
                F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
            } else if (!strcmp(name, "off")) {
                F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
            } else if (!strcmp(name, "sync")) {
                F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
            } else {
                kfree(name);
                return -EINVAL;
            }
            kfree(name);
            break;
        case Opt_disable_roll_forward:
            set_opt(sbi, DISABLE_ROLL_FORWARD);
            break;
        case Opt_norecovery:
            /* this option mounts f2fs with ro */
            set_opt(sbi, NORECOVERY);
            if (!f2fs_readonly(sb))
                return -EINVAL;
            break;
        case Opt_discard:
            set_opt(sbi, DISCARD);
            break;
        case Opt_nodiscard:
            if (f2fs_sb_has_blkzoned(sbi)) {
                f2fs_warn(sbi, "discard is required for zoned block devices");
                return -EINVAL;
            }
            clear_opt(sbi, DISCARD);
            break;
        case Opt_noheap:
            set_opt(sbi, NOHEAP);
            break;
        case Opt_heap:
            clear_opt(sbi, NOHEAP);
            break;
#ifdef CONFIG_F2FS_FS_XATTR
        case Opt_user_xattr:
            set_opt(sbi, XATTR_USER);
            break;
        case Opt_nouser_xattr:
            clear_opt(sbi, XATTR_USER);
            break;
        case Opt_inline_xattr:
            set_opt(sbi, INLINE_XATTR);
            break;
        case Opt_noinline_xattr:
            clear_opt(sbi, INLINE_XATTR);
            break;
        case Opt_inline_xattr_size:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            set_opt(sbi, INLINE_XATTR_SIZE);
            F2FS_OPTION(sbi).inline_xattr_size = arg;
            break;
#else
        case Opt_user_xattr:
            f2fs_info(sbi, "user_xattr options not supported");
            break;
        case Opt_nouser_xattr:
            f2fs_info(sbi, "nouser_xattr options not supported");
            break;
        case Opt_inline_xattr:
            f2fs_info(sbi, "inline_xattr options not supported");
            break;
        case Opt_noinline_xattr:
            f2fs_info(sbi, "noinline_xattr options not supported");
            break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
        case Opt_acl:
            set_opt(sbi, POSIX_ACL);
            break;
        case Opt_noacl:
            clear_opt(sbi, POSIX_ACL);
            break;
#else
        case Opt_acl:
            f2fs_info(sbi, "acl options not supported");
            break;
        case Opt_noacl:
            f2fs_info(sbi, "noacl options not supported");
            break;
#endif
        case Opt_active_logs:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            if (arg != 2 && arg != 4 &&
                arg != NR_CURSEG_PERSIST_TYPE)
                return -EINVAL;
            F2FS_OPTION(sbi).active_logs = arg;
            break;
        case Opt_disable_ext_identify:
            set_opt(sbi, DISABLE_EXT_IDENTIFY);
            break;
        case Opt_inline_data:
            set_opt(sbi, INLINE_DATA);
            break;
        case Opt_inline_dentry:
            set_opt(sbi, INLINE_DENTRY);
            break;
        case Opt_noinline_dentry:
            clear_opt(sbi, INLINE_DENTRY);
            break;
        case Opt_flush_merge:
            set_opt(sbi, FLUSH_MERGE);
            break;
        case Opt_noflush_merge:
            clear_opt(sbi, FLUSH_MERGE);
            break;
        case Opt_nobarrier:
            set_opt(sbi, NOBARRIER);
            break;
        case Opt_fastboot:
            set_opt(sbi, FASTBOOT);
            break;
        case Opt_extent_cache:
            set_opt(sbi, EXTENT_CACHE);
            break;
        case Opt_noextent_cache:
            clear_opt(sbi, EXTENT_CACHE);
            break;
        case Opt_noinline_data:
            clear_opt(sbi, INLINE_DATA);
            break;
        case Opt_data_flush:
            set_opt(sbi, DATA_FLUSH);
            break;
        case Opt_reserve_root:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            if (test_opt(sbi, RESERVE_ROOT)) {
                f2fs_info(sbi, "Preserve previous reserve_root=%u",
                      F2FS_OPTION(sbi).root_reserved_blocks);
            } else {
                F2FS_OPTION(sbi).root_reserved_blocks = arg;
                set_opt(sbi, RESERVE_ROOT);
            }
            break;
        case Opt_resuid:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            uid = make_kuid(current_user_ns(), arg);
            if (!uid_valid(uid)) {
                f2fs_err(sbi, "Invalid uid value %d", arg);
                return -EINVAL;
            }
            F2FS_OPTION(sbi).s_resuid = uid;
            break;
        case Opt_resgid:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            gid = make_kgid(current_user_ns(), arg);
            if (!gid_valid(gid)) {
                f2fs_err(sbi, "Invalid gid value %d", arg);
                return -EINVAL;
            }
            F2FS_OPTION(sbi).s_resgid = gid;
            break;
        case Opt_mode:
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;
            if (!strcmp(name, "adaptive")) {
                if (f2fs_sb_has_blkzoned(sbi)) {
                    f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
                    kfree(name);
                    return -EINVAL;
                }
                F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
            } else if (!strcmp(name, "lfs")) {
                F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
            } else {
                kfree(name);
                return -EINVAL;
            }
            kfree(name);
            break;
        case Opt_io_size_bits:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
                f2fs_warn(sbi, "Not support %d, larger than %d",
                      1 << arg, BIO_MAX_VECS);
                return -EINVAL;
            }
            F2FS_OPTION(sbi).write_io_size_bits = arg;
            break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
        case Opt_fault_injection:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
            set_opt(sbi, FAULT_INJECTION);
            break;
        case Opt_fault_type:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            f2fs_build_fault_attr(sbi, 0, arg);
            set_opt(sbi, FAULT_INJECTION);
            break;
#else
        case Opt_fault_injection:
            f2fs_info(sbi, "fault_injection options not supported");
            break;
        case Opt_fault_type:
            f2fs_info(sbi, "fault_type options not supported");
            break;
#endif
        case Opt_lazytime:
            sb->s_flags |= SB_LAZYTIME;
            break;
        case Opt_nolazytime:
            sb->s_flags &= ~SB_LAZYTIME;
            break;
#ifdef CONFIG_QUOTA
        case Opt_quota:
        case Opt_usrquota:
            set_opt(sbi, USRQUOTA);
            break;
        case Opt_grpquota:
            set_opt(sbi, GRPQUOTA);
            break;
        case Opt_prjquota:
            set_opt(sbi, PRJQUOTA);
            break;
        case Opt_usrjquota:
            ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
            if (ret)
                return ret;
            break;
        case Opt_grpjquota:
            ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
            if (ret)
                return ret;
            break;
        case Opt_prjjquota:
            ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
            if (ret)
                return ret;
            break;
        case Opt_offusrjquota:
            ret = f2fs_clear_qf_name(sb, USRQUOTA);
            if (ret)
                return ret;
            break;
        case Opt_offgrpjquota:
            ret = f2fs_clear_qf_name(sb, GRPQUOTA);
            if (ret)
                return ret;
            break;
        case Opt_offprjjquota:
            ret = f2fs_clear_qf_name(sb, PRJQUOTA);
            if (ret)
                return ret;
            break;
        case Opt_jqfmt_vfsold:
            F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
            break;
        case Opt_jqfmt_vfsv0:
            F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
            break;
        case Opt_jqfmt_vfsv1:
            F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
            break;
        case Opt_noquota:
            clear_opt(sbi, QUOTA);
            clear_opt(sbi, USRQUOTA);
            clear_opt(sbi, GRPQUOTA);
            clear_opt(sbi, PRJQUOTA);
            break;
#else
        case Opt_quota:
        case Opt_usrquota:
        case Opt_grpquota:
        case Opt_prjquota:
        case Opt_usrjquota:
        case Opt_grpjquota:
        case Opt_prjjquota:
        case Opt_offusrjquota:
        case Opt_offgrpjquota:
        case Opt_offprjjquota:
        case Opt_jqfmt_vfsold:
        case Opt_jqfmt_vfsv0:
        case Opt_jqfmt_vfsv1:
        case Opt_noquota:
            f2fs_info(sbi, "quota operations not supported");
            break;
#endif
        case Opt_whint:
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;
            if (!strcmp(name, "user-based")) {
                F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
            } else if (!strcmp(name, "off")) {
                F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
            } else if (!strcmp(name, "fs-based")) {
                F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
            } else {
                kfree(name);
                return -EINVAL;
            }
            kfree(name);
            break;
        case Opt_alloc:
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;
            if (!strcmp(name, "default")) {
                F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
            } else if (!strcmp(name, "reuse")) {
                F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
            } else {
                kfree(name);
                return -EINVAL;
            }
            kfree(name);
            break;
        case Opt_fsync:
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;
            if (!strcmp(name, "posix")) {
                F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
            } else if (!strcmp(name, "strict")) {
                F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
            } else if (!strcmp(name, "nobarrier")) {
                F2FS_OPTION(sbi).fsync_mode =
                            FSYNC_MODE_NOBARRIER;
            } else {
                kfree(name);
                return -EINVAL;
            }
            kfree(name);
            break;
        case Opt_test_dummy_encryption:
            ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
                                 is_remount);
            if (ret)
                return ret;
            break;
        case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
            sb->s_flags |= SB_INLINECRYPT;
#else
            f2fs_info(sbi, "inline encryption not supported");
#endif
            break;
        case Opt_checkpoint_disable_cap_perc:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            if (arg < 0 || arg > 100)
                return -EINVAL;
            F2FS_OPTION(sbi).unusable_cap_perc = arg;
            set_opt(sbi, DISABLE_CHECKPOINT);
            break;
        case Opt_checkpoint_disable_cap:
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            F2FS_OPTION(sbi).unusable_cap = arg;
            set_opt(sbi, DISABLE_CHECKPOINT);
            break;
        case Opt_checkpoint_disable:
            set_opt(sbi, DISABLE_CHECKPOINT);
            break;
        case Opt_checkpoint_enable:
            clear_opt(sbi, DISABLE_CHECKPOINT);
            break;
        case Opt_checkpoint_merge:
            set_opt(sbi, MERGE_CHECKPOINT);
            break;
        case Opt_nocheckpoint_merge:
            clear_opt(sbi, MERGE_CHECKPOINT);
            break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
        case Opt_compress_algorithm:
            if (!f2fs_sb_has_compression(sbi)) {
                f2fs_info(sbi, "Image doesn't support compression");
                break;
            }
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;
            if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
                F2FS_OPTION(sbi).compress_level = 0;
                F2FS_OPTION(sbi).compress_algorithm =
                                COMPRESS_LZO;
#else
                f2fs_info(sbi, "kernel doesn't support lzo compression");
#endif
            } else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
                ret = f2fs_set_lz4hc_level(sbi, name);
                if (ret) {
                    kfree(name);
                    return -EINVAL;
                }
                F2FS_OPTION(sbi).compress_algorithm =
                                COMPRESS_LZ4;
#else
                f2fs_info(sbi, "kernel doesn't support lz4 compression");
#endif
            } else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
                ret = f2fs_set_zstd_level(sbi, name);
                if (ret) {
                    kfree(name);
                    return -EINVAL;
                }
                F2FS_OPTION(sbi).compress_algorithm =
                                COMPRESS_ZSTD;
#else
                f2fs_info(sbi, "kernel doesn't support zstd compression");
#endif
            } else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
                F2FS_OPTION(sbi).compress_level = 0;
                F2FS_OPTION(sbi).compress_algorithm =
                                COMPRESS_LZORLE;
#else
                f2fs_info(sbi, "kernel doesn't support lzorle compression");
#endif
            } else {
                kfree(name);
                return -EINVAL;
            }
            kfree(name);
            break;
        case Opt_compress_log_size:
            if (!f2fs_sb_has_compression(sbi)) {
                f2fs_info(sbi, "Image doesn't support compression");
                break;
            }
            if (args->from && match_int(args, &arg))
                return -EINVAL;
            if (arg < MIN_COMPRESS_LOG_SIZE ||
                arg > MAX_COMPRESS_LOG_SIZE) {
                f2fs_err(sbi,
                    "Compress cluster log size is out of range");
                return -EINVAL;
            }
            F2FS_OPTION(sbi).compress_log_size = arg;
            break;
        case Opt_compress_extension:
            if (!f2fs_sb_has_compression(sbi)) {
                f2fs_info(sbi, "Image doesn't support compression");
                break;
            }
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;

            ext = F2FS_OPTION(sbi).extensions;
            ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

            if (strlen(name) >= F2FS_EXTENSION_LEN ||
                ext_cnt >= COMPRESS_EXT_NUM) {
                f2fs_err(sbi,
                    "invalid extension length/number");
                kfree(name);
                return -EINVAL;
            }

            strcpy(ext[ext_cnt], name);
            F2FS_OPTION(sbi).compress_ext_cnt++;
            kfree(name);
            break;
        case Opt_nocompress_extension:
            if (!f2fs_sb_has_compression(sbi)) {
                f2fs_info(sbi, "Image doesn't support compression");
                break;
            }
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;

            noext = F2FS_OPTION(sbi).noextensions;
            noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

            if (strlen(name) >= F2FS_EXTENSION_LEN ||
                noext_cnt >= COMPRESS_EXT_NUM) {
                f2fs_err(sbi,
                    "invalid extension length/number");
                kfree(name);
                return -EINVAL;
            }

            strcpy(noext[noext_cnt], name);
            F2FS_OPTION(sbi).nocompress_ext_cnt++;
            kfree(name);
            break;
        case Opt_compress_chksum:
            F2FS_OPTION(sbi).compress_chksum = true;
            break;
        case Opt_compress_mode:
            name = match_strdup(&args[0]);
            if (!name)
                return -ENOMEM;
            if (!strcmp(name, "fs")) {
                F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
            } else if (!strcmp(name, "user")) {
                F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
            } else {
                kfree(name);
                return -EINVAL;
            }
            kfree(name);
            break;
        case Opt_compress_cache:
            set_opt(sbi, COMPRESS_CACHE);
            break;
#else
        case Opt_compress_algorithm:
        case Opt_compress_log_size:
        case Opt_compress_extension:
        case Opt_nocompress_extension:
        case Opt_compress_chksum:
        case Opt_compress_mode:
        case Opt_compress_cache:
            f2fs_info(sbi, "compression options not supported");
            break;
#endif
        case Opt_atgc:
            set_opt(sbi, ATGC);
            break;
        case Opt_gc_merge:
            set_opt(sbi, GC_MERGE);
            break;
        case Opt_nogc_merge:
            clear_opt(sbi, GC_MERGE);
            break;
        default:
            f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
                 p);
            return -EINVAL;
        }
    }
default_check:
#ifdef CONFIG_QUOTA
    if (f2fs_check_quota_options(sbi))
        return -EINVAL;
#else
    if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
        f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
        return -EINVAL;
    }
    if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
        f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
        return -EINVAL;
    }
#endif
#ifndef CONFIG_UNICODE
    if (f2fs_sb_has_casefold(sbi)) {
        f2fs_err(sbi,
            "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
        return -EINVAL;
    }
#endif
    /*
     * The BLKZONED feature indicates that the drive was formatted with
     * zone alignment optimization. This is optional for host-aware
     * devices, but mandatory for host-managed zoned block devices.
     */
#ifndef CONFIG_BLK_DEV_ZONED
    if (f2fs_sb_has_blkzoned(sbi)) {
        f2fs_err(sbi, "Zoned block device support is not enabled");
        return -EINVAL;
    }
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
    if (f2fs_test_compress_extension(sbi)) {
        f2fs_err(sbi, "invalid compress or nocompress extension");
        return -EINVAL;
    }
#endif

    if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
        f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
             F2FS_IO_SIZE_KB(sbi));
        return -EINVAL;
    }
    if (test_opt(sbi, INLINE_XATTR_SIZE)) {
        int min_size, max_size;

        if (!f2fs_sb_has_extra_attr(sbi) ||
            !f2fs_sb_has_flexible_inline_xattr(sbi)) {
            f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
            return -EINVAL;
        }
        if (!test_opt(sbi, INLINE_XATTR)) {
            f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
            return -EINVAL;
        }

        min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
        max_size = MAX_INLINE_XATTR_SIZE;

        if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
                F2FS_OPTION(sbi).inline_xattr_size > max_size) {
            f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
                 min_size, max_size);
            return -EINVAL;
        }
    }
    if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
        f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
        return -EINVAL;
    }

    /* Do not pass down write hints if the number of active logs is
     * less than NR_CURSEG_PERSIST_TYPE.
     */
    if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
        F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;

    if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
        f2fs_err(sbi, "Allow to mount readonly mode only");
        return -EROFS;
    }
    return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
    struct f2fs_inode_info *fi;

    fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
    if (!fi)
        return NULL;

    init_once((void *) fi);

    /* Initialize f2fs-specific inode info */
    atomic_set(&fi->dirty_pages, 0);
    atomic_set(&fi->i_compr_blocks, 0);
    init_rwsem(&fi->i_sem);
    spin_lock_init(&fi->i_size_lock);
    INIT_LIST_HEAD(&fi->dirty_list);
    INIT_LIST_HEAD(&fi->gdirty_list);
    INIT_LIST_HEAD(&fi->inmem_ilist);
    INIT_LIST_HEAD(&fi->inmem_pages);
    mutex_init(&fi->inmem_lock);
    init_rwsem(&fi->i_gc_rwsem[READ]);
    init_rwsem(&fi->i_gc_rwsem[WRITE]);
    init_rwsem(&fi->i_mmap_sem);
    init_rwsem(&fi->i_xattr_sem);

    /* Will be used by directory only */
    fi->i_dir_level = F2FS_SB(sb)->dir_level;

    return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    int ret;

    /*
     * during filesystem shutdown, if checkpoint is disabled,
     * drop useless meta/node dirty pages.
     */
    if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
            inode->i_ino == F2FS_META_INO(sbi)) {
            trace_f2fs_drop_inode(inode, 1);
            return 1;
        }
    }

    /*
     * This is to avoid a deadlock condition like below.
     * writeback_single_inode(inode)
     *  - f2fs_write_data_page
     *    - f2fs_gc -> iput -> evict
     *       - inode_wait_for_writeback(inode)
     */
    if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
        if (!inode->i_nlink && !is_bad_inode(inode)) {
            /* to avoid evict_inode call simultaneously */
            atomic_inc(&inode->i_count);
            spin_unlock(&inode->i_lock);

            /* any remaining atomic pages should be discarded */
            if (f2fs_is_atomic_file(inode))
                f2fs_drop_inmem_pages(inode);

            /* should remain fi->extent_tree for writepage */
            f2fs_destroy_extent_node(inode);

            sb_start_intwrite(inode->i_sb);
            f2fs_i_size_write(inode, 0);

            f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
                    inode, NULL, 0, DATA);
            truncate_inode_pages_final(inode->i_mapping);

            if (F2FS_HAS_BLOCKS(inode))
                f2fs_truncate(inode);

            sb_end_intwrite(inode->i_sb);

            spin_lock(&inode->i_lock);
            atomic_dec(&inode->i_count);
        }
        trace_f2fs_drop_inode(inode, 0);
        return 0;
    }
    ret = generic_drop_inode(inode);
    if (!ret)
        ret = fscrypt_drop_inode(inode);
    trace_f2fs_drop_inode(inode, ret);
    return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    int ret = 0;

    spin_lock(&sbi->inode_lock[DIRTY_META]);
    if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
        ret = 1;
    } else {
        set_inode_flag(inode, FI_DIRTY_INODE);
        stat_inc_dirty_inode(sbi, DIRTY_META);
    }
    if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
        list_add_tail(&F2FS_I(inode)->gdirty_list,
                &sbi->inode_list[DIRTY_META]);
        inc_page_count(sbi, F2FS_DIRTY_IMETA);
    }
    spin_unlock(&sbi->inode_lock[DIRTY_META]);
    return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

    spin_lock(&sbi->inode_lock[DIRTY_META]);
    if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
        spin_unlock(&sbi->inode_lock[DIRTY_META]);
        return;
    }
    if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
        list_del_init(&F2FS_I(inode)->gdirty_list);
        dec_page_count(sbi, F2FS_DIRTY_IMETA);
    }
    clear_inode_flag(inode, FI_DIRTY_INODE);
    clear_inode_flag(inode, FI_AUTO_RECOVER);
    stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
    spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We mark the inode dirty here so that it is written back later through
 * ->write_inode().
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

    if (inode->i_ino == F2FS_NODE_INO(sbi) ||
            inode->i_ino == F2FS_META_INO(sbi))
        return;

    if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
        clear_inode_flag(inode, FI_AUTO_RECOVER);

    f2fs_inode_dirtied(inode, false);
}
static void f2fs_free_inode(struct inode *inode)
{
    fscrypt_free_inode(inode);
    kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
    percpu_counter_destroy(&sbi->alloc_valid_block_count);
    percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
    int i;

    for (i = 0; i < sbi->s_ndevs; i++) {
        blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
        kvfree(FDEV(i).blkz_seq);
        kfree(FDEV(i).zone_capacity_blocks);
#endif
    }
    kvfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    int i;
    bool dropped;

    /* unregister procfs/sysfs entries in advance to avoid race case */
    f2fs_unregister_sysfs(sbi);

    f2fs_quota_off_umount(sb);

    /* prevent remaining shrinker jobs */
    mutex_lock(&sbi->umount_mutex);

    /*
     * flush all issued checkpoints and stop the checkpoint issue thread.
     * After that, all checkpoints are done by each process context.
     */
    f2fs_stop_ckpt_thread(sbi);

    /*
     * We don't need to do a checkpoint when the superblock is clean.
     * But if the previous checkpoint was not done by a clean umount,
     * we need to do a clean checkpoint again.
     */
    if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
            !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
        struct cp_control cpc = {
            .reason = CP_UMOUNT,
        };
        f2fs_write_checkpoint(sbi, &cpc);
    }

    /* be sure to wait for any on-going discard commands */
    dropped = f2fs_issue_discard_timeout(sbi);

    if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
            !sbi->discard_blks && !dropped) {
        struct cp_control cpc = {
            .reason = CP_UMOUNT | CP_TRIMMED,
        };
        f2fs_write_checkpoint(sbi, &cpc);
    }

    /*
     * Normally the superblock is clean, so we need to release the ino
     * entries here. In addition, an EIO error skips the checkpoint, so
     * we need this as well.
     */
    f2fs_release_ino_entry(sbi, true);

    f2fs_leave_shrinker(sbi);
    mutex_unlock(&sbi->umount_mutex);

    /* in our cp_error case, we can wait for any writeback pages */
    f2fs_flush_merged_writes(sbi);

    f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

    f2fs_bug_on(sbi, sbi->fsync_node_num);

    f2fs_destroy_compress_inode(sbi);

    iput(sbi->node_inode);
    sbi->node_inode = NULL;

    iput(sbi->meta_inode);
    sbi->meta_inode = NULL;

    /*
     * iput() can update stat information if f2fs_write_checkpoint()
     * above failed with an error.
     */
    f2fs_destroy_stats(sbi);

    /* destroy f2fs internal modules */
    f2fs_destroy_node_manager(sbi);
    f2fs_destroy_segment_manager(sbi);

    f2fs_destroy_post_read_wq(sbi);

    sb->s_fs_info = NULL;
    if (sbi->s_chksum_driver)
        crypto_free_shash(sbi->s_chksum_driver);
    kfree(sbi->raw_super);

    destroy_device_list(sbi);
    f2fs_destroy_page_array_cache(sbi);
    f2fs_destroy_xattr_caches(sbi);
    mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
    for (i = 0; i < MAXQUOTAS; i++)
        kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
    fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
    destroy_percpu_info(sbi);
    for (i = 0; i < NR_PAGE_TYPE; i++)
        kvfree(sbi->write_io[i]);
#ifdef CONFIG_UNICODE
    utf8_unload(sb->s_encoding);
#endif
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    int err = 0;

    if (unlikely(f2fs_cp_error(sbi)))
        return 0;
    if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
        return 0;

    trace_f2fs_sync_fs(sb, sync);

    if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
        return -EAGAIN;

    if (sync)
        err = f2fs_issue_checkpoint(sbi);

    return err;
}
static int f2fs_freeze(struct super_block *sb)
{
    if (f2fs_readonly(sb))
        return 0;

    /* IO error happened before */
    if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
        return -EIO;

    /* must be clean, since sync_filesystem() was already called */
    if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
        return -EINVAL;

    /* ensure no checkpoint required */
    if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
        return -EINVAL;
    return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
    return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
                kprojid_t projid, struct kstatfs *buf)
{
    struct kqid qid;
    struct dquot *dquot;
    u64 limit;
    u64 curblock;

    qid = make_kqid_projid(projid);
    dquot = dqget(sb, qid);
    if (IS_ERR(dquot))
        return PTR_ERR(dquot);
    spin_lock(&dquot->dq_dqb_lock);

    limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
                    dquot->dq_dqb.dqb_bhardlimit);
    if (limit)
        limit >>= sb->s_blocksize_bits;

    if (limit && buf->f_blocks > limit) {
        curblock = (dquot->dq_dqb.dqb_curspace +
                dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
        buf->f_blocks = limit;
        buf->f_bfree = buf->f_bavail =
            (buf->f_blocks > curblock) ?
             (buf->f_blocks - curblock) : 0;
    }

    limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
                    dquot->dq_dqb.dqb_ihardlimit);
    if (limit && buf->f_files > limit) {
        buf->f_files = limit;
        buf->f_ffree =
            (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
             (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
    }

    spin_unlock(&dquot->dq_dqb_lock);
    dqput(dquot);
    return 0;
}
#endif
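/*
 * Worked example (illustrative): a project block limit of 1GiB is held
 * in bytes in dqb_bhardlimit; with 4KiB blocks (s_blocksize_bits = 12),
 * "limit >>= 12" yields 262144 blocks, which then caps the f_blocks /
 * f_bfree values that statfs() reports for that project.
 */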
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
    struct super_block *sb = dentry->d_sb;
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
    block_t total_count, user_block_count, start_count;
    u64 avail_node_count;

    total_count = le64_to_cpu(sbi->raw_super->block_count);
    user_block_count = sbi->user_block_count;
    start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
    buf->f_type = F2FS_SUPER_MAGIC;
    buf->f_bsize = sbi->blocksize;

    buf->f_blocks = total_count - start_count;
    buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
                        sbi->current_reserved_blocks;

    spin_lock(&sbi->stat_lock);
    if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
        buf->f_bfree = 0;
    else
        buf->f_bfree -= sbi->unusable_block_count;
    spin_unlock(&sbi->stat_lock);

    if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
        buf->f_bavail = buf->f_bfree -
                F2FS_OPTION(sbi).root_reserved_blocks;
    else
        buf->f_bavail = 0;

    avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

    if (avail_node_count > user_block_count) {
        buf->f_files = user_block_count;
        buf->f_ffree = buf->f_bavail;
    } else {
        buf->f_files = avail_node_count;
        buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
                    buf->f_bavail);
    }

    buf->f_namelen = F2FS_NAME_LEN;
    buf->f_fsid = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
    if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
            sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
        f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
    }
#endif
    return 0;
}
static inline void f2fs_show_quota_options(struct seq_file *seq,
                                           struct super_block *sb)
{
#ifdef CONFIG_QUOTA
    struct f2fs_sb_info *sbi = F2FS_SB(sb);

    if (F2FS_OPTION(sbi).s_jquota_fmt) {
        char *fmtname = "";

        switch (F2FS_OPTION(sbi).s_jquota_fmt) {
        case QFMT_VFS_OLD:
            fmtname = "vfsold";
            break;
        case QFMT_VFS_V0:
            fmtname = "vfsv0";
            break;
        case QFMT_VFS_V1:
            fmtname = "vfsv1";
            break;
        }
        seq_printf(seq, ",jqfmt=%s", fmtname);
    }

    if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
        seq_show_option(seq, "usrjquota",
            F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

    if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
        seq_show_option(seq, "grpjquota",
            F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

    if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
        seq_show_option(seq, "prjjquota",
            F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
static inline void f2fs_show_compress_options(struct seq_file *seq,
                                              struct super_block *sb)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    char *algtype = "";
    int i;

    if (!f2fs_sb_has_compression(sbi))
        return;

    switch (F2FS_OPTION(sbi).compress_algorithm) {
    case COMPRESS_LZO:
        algtype = "lzo";
        break;
    case COMPRESS_LZ4:
        algtype = "lz4";
        break;
    case COMPRESS_ZSTD:
        algtype = "zstd";
        break;
    case COMPRESS_LZORLE:
        algtype = "lzo-rle";
        break;
    }
    seq_printf(seq, ",compress_algorithm=%s", algtype);

    if (F2FS_OPTION(sbi).compress_level)
        seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);

    seq_printf(seq, ",compress_log_size=%u",
            F2FS_OPTION(sbi).compress_log_size);

    for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
        seq_printf(seq, ",compress_extension=%s",
            F2FS_OPTION(sbi).extensions[i]);
    }

    for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
        seq_printf(seq, ",nocompress_extension=%s",
            F2FS_OPTION(sbi).noextensions[i]);
    }

    if (F2FS_OPTION(sbi).compress_chksum)
        seq_puts(seq, ",compress_chksum");

    if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
        seq_printf(seq, ",compress_mode=%s", "fs");
    else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
        seq_printf(seq, ",compress_mode=%s", "user");

    if (test_opt(sbi, COMPRESS_CACHE))
        seq_puts(seq, ",compress_cache");
}
#endif
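/*
 * Illustrative example (not part of the original file): for a zstd:6
 * setup the helpers above contribute a /proc/mounts fragment such as
 *
 *   ...,compress_algorithm=zstd:6,compress_log_size=2,
 *   compress_extension=apk,compress_mode=fs,...
 */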
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
    struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

    if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
        seq_printf(seq, ",background_gc=%s", "sync");
    else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
        seq_printf(seq, ",background_gc=%s", "on");
    else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
        seq_printf(seq, ",background_gc=%s", "off");

    if (test_opt(sbi, GC_MERGE))
        seq_puts(seq, ",gc_merge");

    if (test_opt(sbi, DISABLE_ROLL_FORWARD))
        seq_puts(seq, ",disable_roll_forward");
    if (test_opt(sbi, NORECOVERY))
        seq_puts(seq, ",norecovery");
    if (test_opt(sbi, DISCARD))
        seq_puts(seq, ",discard");
    else
        seq_puts(seq, ",nodiscard");
    if (test_opt(sbi, NOHEAP))
        seq_puts(seq, ",no_heap");
    else
        seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
    if (test_opt(sbi, XATTR_USER))
        seq_puts(seq, ",user_xattr");
    else
        seq_puts(seq, ",nouser_xattr");
    if (test_opt(sbi, INLINE_XATTR))
        seq_puts(seq, ",inline_xattr");
    else
        seq_puts(seq, ",noinline_xattr");
    if (test_opt(sbi, INLINE_XATTR_SIZE))
        seq_printf(seq, ",inline_xattr_size=%u",
                F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
    if (test_opt(sbi, POSIX_ACL))
        seq_puts(seq, ",acl");
    else
        seq_puts(seq, ",noacl");
#endif
    if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
        seq_puts(seq, ",disable_ext_identify");
    if (test_opt(sbi, INLINE_DATA))
        seq_puts(seq, ",inline_data");
    else
        seq_puts(seq, ",noinline_data");
    if (test_opt(sbi, INLINE_DENTRY))
        seq_puts(seq, ",inline_dentry");
    else
        seq_puts(seq, ",noinline_dentry");
    if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
        seq_puts(seq, ",flush_merge");
    if (test_opt(sbi, NOBARRIER))
        seq_puts(seq, ",nobarrier");
    if (test_opt(sbi, FASTBOOT))
        seq_puts(seq, ",fastboot");
    if (test_opt(sbi, EXTENT_CACHE))
        seq_puts(seq, ",extent_cache");
    else
        seq_puts(seq, ",noextent_cache");
    if (test_opt(sbi, DATA_FLUSH))
        seq_puts(seq, ",data_flush");

    seq_puts(seq, ",mode=");
    if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
        seq_puts(seq, "adaptive");
    else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
        seq_puts(seq, "lfs");
    seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
    if (test_opt(sbi, RESERVE_ROOT))
        seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
                F2FS_OPTION(sbi).root_reserved_blocks,
                from_kuid_munged(&init_user_ns,
                    F2FS_OPTION(sbi).s_resuid),
                from_kgid_munged(&init_user_ns,
                    F2FS_OPTION(sbi).s_resgid));
    if (F2FS_IO_SIZE_BITS(sbi))
        seq_printf(seq, ",io_bits=%u",
                F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
    if (test_opt(sbi, FAULT_INJECTION)) {
        seq_printf(seq, ",fault_injection=%u",
                F2FS_OPTION(sbi).fault_info.inject_rate);
        seq_printf(seq, ",fault_type=%u",
                F2FS_OPTION(sbi).fault_info.inject_type);
    }
#endif
#ifdef CONFIG_QUOTA
    if (test_opt(sbi, QUOTA))
        seq_puts(seq, ",quota");
    if (test_opt(sbi, USRQUOTA))
        seq_puts(seq, ",usrquota");
    if (test_opt(sbi, GRPQUOTA))
        seq_puts(seq, ",grpquota");
    if (test_opt(sbi, PRJQUOTA))
        seq_puts(seq, ",prjquota");
#endif
    f2fs_show_quota_options(seq, sbi->sb);
    if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
        seq_printf(seq, ",whint_mode=%s", "user-based");
    else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
        seq_printf(seq, ",whint_mode=%s", "fs-based");

    fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

    if (sbi->sb->s_flags & SB_INLINECRYPT)
        seq_puts(seq, ",inlinecrypt");

    if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
        seq_printf(seq, ",alloc_mode=%s", "default");
    else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
        seq_printf(seq, ",alloc_mode=%s", "reuse");

    if (test_opt(sbi, DISABLE_CHECKPOINT))
        seq_printf(seq, ",checkpoint=disable:%u",
                F2FS_OPTION(sbi).unusable_cap);
    if (test_opt(sbi, MERGE_CHECKPOINT))
        seq_puts(seq, ",checkpoint_merge");
    else
        seq_puts(seq, ",nocheckpoint_merge");
    if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
        seq_printf(seq, ",fsync_mode=%s", "posix");
    else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
        seq_printf(seq, ",fsync_mode=%s", "strict");
    else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
        seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
    f2fs_show_compress_options(seq, sbi->sb);
#endif

    if (test_opt(sbi, ATGC))
        seq_puts(seq, ",atgc");
    return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
    /* init some FS parameters */
    if (f2fs_sb_has_readonly(sbi))
        F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
    else
        F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;

    F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
    F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
    F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
    F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
    F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
    F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
    F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
    F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
    F2FS_OPTION(sbi).compress_ext_cnt = 0;
    F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
    F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;

    sbi->sb->s_flags &= ~SB_INLINECRYPT;

    set_opt(sbi, INLINE_XATTR);
    set_opt(sbi, INLINE_DATA);
    set_opt(sbi, INLINE_DENTRY);
    set_opt(sbi, EXTENT_CACHE);
    set_opt(sbi, NOHEAP);
    clear_opt(sbi, DISABLE_CHECKPOINT);
    set_opt(sbi, MERGE_CHECKPOINT);
    F2FS_OPTION(sbi).unusable_cap = 0;
    sbi->sb->s_flags |= SB_LAZYTIME;
    set_opt(sbi, FLUSH_MERGE);
    set_opt(sbi, DISCARD);
    if (f2fs_sb_has_blkzoned(sbi))
        F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
    else
        F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;

#ifdef CONFIG_F2FS_FS_XATTR
    set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
    set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
    f2fs_build_fault_attr(sbi, 0, 0);
#endif
}
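/*
 * Illustrative example (not part of the original file): with no options
 * given, the defaults above roughly correspond to mounting with
 *
 *   -o background_gc=on,discard,flush_merge,checkpoint_merge,lazytime,
 *      inline_xattr,inline_data,inline_dentry,extent_cache,no_heap,
 *      mode=adaptive (or mode=lfs on zoned devices)
 */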
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif

static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
    unsigned int s_flags = sbi->sb->s_flags;
    struct cp_control cpc;
    int err = 0;
    int ret;
    block_t unusable;

    if (s_flags & SB_RDONLY) {
        f2fs_err(sbi, "checkpoint=disable on readonly fs");
        return -EINVAL;
    }
    sbi->sb->s_flags |= SB_ACTIVE;

    f2fs_update_time(sbi, DISABLE_TIME);

    while (!f2fs_time_over(sbi, DISABLE_TIME)) {
        down_write(&sbi->gc_lock);
        err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
        if (err == -ENODATA) {
            err = 0;
            break;
        }
        if (err && err != -EAGAIN)
            break;
    }

    ret = sync_filesystem(sbi->sb);
    if (ret || err) {
        err = ret ? ret : err;
        goto restore_flag;
    }

    unusable = f2fs_get_unusable_blocks(sbi);
    if (f2fs_disable_cp_again(sbi, unusable)) {
        err = -EAGAIN;
        goto restore_flag;
    }

    down_write(&sbi->gc_lock);
    cpc.reason = CP_PAUSE;
    set_sbi_flag(sbi, SBI_CP_DISABLED);
    err = f2fs_write_checkpoint(sbi, &cpc);
    if (err)
        goto out_unlock;

    spin_lock(&sbi->stat_lock);
    sbi->unusable_block_count = unusable;
    spin_unlock(&sbi->stat_lock);

out_unlock:
    up_write(&sbi->gc_lock);
restore_flag:
    sbi->sb->s_flags = s_flags;    /* Restore SB_RDONLY status */
    return err;
}
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
    /* we should flush all the data to keep data consistency */
    sync_inodes_sb(sbi->sb);

    down_write(&sbi->gc_lock);
    f2fs_dirty_to_prefree(sbi);

    clear_sbi_flag(sbi, SBI_CP_DISABLED);
    set_sbi_flag(sbi, SBI_IS_DIRTY);
    up_write(&sbi->gc_lock);

    f2fs_sync_fs(sbi->sb, 1);
}
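/*
 * Illustrative example (not part of the original file): the pair above
 * backs remount cycles such as
 *
 *   mount -o remount,checkpoint=disable:10% /mnt   (GC, then pause
 *                                                   checkpointing)
 *   mount -o remount,checkpoint=enable /mnt        (flush and resume)
 */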
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    struct f2fs_mount_info org_mount_opt;
    unsigned long old_sb_flags;
    int err;
    bool need_restart_gc = false, need_stop_gc = false;
    bool need_restart_ckpt = false, need_stop_ckpt = false;
    bool need_restart_flush = false, need_stop_flush = false;
    bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
    bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
    bool no_io_align = !F2FS_IO_ALIGNED(sbi);
    bool no_atgc = !test_opt(sbi, ATGC);
    bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
    bool checkpoint_changed;
#ifdef CONFIG_QUOTA
    int i, j;
#endif

    /*
     * Save the old mount options in case we
     * need to restore them.
     */
    org_mount_opt = sbi->mount_opt;
    old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
    org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
    for (i = 0; i < MAXQUOTAS; i++) {
        if (F2FS_OPTION(sbi).s_qf_names[i]) {
            org_mount_opt.s_qf_names[i] =
                kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
                GFP_KERNEL);
            if (!org_mount_opt.s_qf_names[i]) {
                for (j = 0; j < i; j++)
                    kfree(org_mount_opt.s_qf_names[j]);
                return -ENOMEM;
            }
        } else {
            org_mount_opt.s_qf_names[i] = NULL;
        }
    }
#endif

    /* recover superblocks we couldn't write due to previous RO mount */
    if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
        err = f2fs_commit_super(sbi, false);
        f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
              err);
        if (!err)
            clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
    }

    default_options(sbi);

    /* parse mount options */
    err = parse_options(sb, data, true);
    if (err)
        goto restore_opts;
    checkpoint_changed =
            disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);

    /*
     * Previous and new state of filesystem is RO,
     * so skip checking GC and FLUSH_MERGE conditions.
     */
    if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
        goto skip;

    if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
        err = -EROFS;
        goto restore_opts;
    }

#ifdef CONFIG_QUOTA
    if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
        err = dquot_suspend(sb, -1);
        if (err < 0)
            goto restore_opts;
    } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
        /* dquot_resume needs RW */
        sb->s_flags &= ~SB_RDONLY;
        if (sb_any_quota_suspended(sb)) {
            dquot_resume(sb, -1);
        } else if (f2fs_sb_has_quota_ino(sbi)) {
            err = f2fs_enable_quotas(sb);
            if (err)
                goto restore_opts;
        }
    }
#endif
    /* disallow enable atgc dynamically */
    if (no_atgc == !!test_opt(sbi, ATGC)) {
        err = -EINVAL;
        f2fs_warn(sbi, "switch atgc option is not allowed");
        goto restore_opts;
    }

    /* disallow enable/disable extent_cache dynamically */
    if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
        err = -EINVAL;
        f2fs_warn(sbi, "switch extent_cache option is not allowed");
        goto restore_opts;
    }

    if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
        err = -EINVAL;
        f2fs_warn(sbi, "switch io_bits option is not allowed");
        goto restore_opts;
    }

    if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
        err = -EINVAL;
        f2fs_warn(sbi, "switch compress_cache option is not allowed");
        goto restore_opts;
    }

    if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
        err = -EINVAL;
        f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
        goto restore_opts;
    }

    /*
     * We stop the GC thread if FS is mounted as RO
     * or if background_gc = off is passed in mount
     * option. Also sync the filesystem.
     */
    if ((*flags & SB_RDONLY) ||
            (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
            !test_opt(sbi, GC_MERGE))) {
        if (sbi->gc_thread) {
            f2fs_stop_gc_thread(sbi);
            need_restart_gc = true;
        }
    } else if (!sbi->gc_thread) {
        err = f2fs_start_gc_thread(sbi);
        if (err)
            goto restore_opts;
        need_stop_gc = true;
    }

    if (*flags & SB_RDONLY ||
        F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
        sync_inodes_sb(sb);

        set_sbi_flag(sbi, SBI_IS_DIRTY);
        set_sbi_flag(sbi, SBI_IS_CLOSE);
        f2fs_sync_fs(sb, 1);
        clear_sbi_flag(sbi, SBI_IS_CLOSE);
    }

    if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
            !test_opt(sbi, MERGE_CHECKPOINT)) {
        f2fs_stop_ckpt_thread(sbi);
        need_restart_ckpt = true;
    } else {
        err = f2fs_start_ckpt_thread(sbi);
        if (err) {
            f2fs_err(sbi,
                "Failed to start F2FS issue_checkpoint_thread (%d)",
                err);
            goto restore_gc;
        }
        need_stop_ckpt = true;
    }

    /*
     * We stop issue flush thread if FS is mounted as RO
     * or if flush_merge is not passed in mount option.
     */
    if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
        clear_opt(sbi, FLUSH_MERGE);
        f2fs_destroy_flush_cmd_control(sbi, false);
        need_restart_flush = true;
    } else {
        err = f2fs_create_flush_cmd_control(sbi);
        if (err)
            goto restore_ckpt;
        need_stop_flush = true;
    }

    if (checkpoint_changed) {
        if (test_opt(sbi, DISABLE_CHECKPOINT)) {
            err = f2fs_disable_checkpoint(sbi);
            if (err)
                goto restore_flush;
        } else {
            f2fs_enable_checkpoint(sbi);
        }
    }
skip:
#ifdef CONFIG_QUOTA
    /* Release old quota file names */
    for (i = 0; i < MAXQUOTAS; i++)
        kfree(org_mount_opt.s_qf_names[i]);
#endif
    /* Update the POSIXACL Flag */
    sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
        (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

    limit_reserve_root(sbi);
    adjust_unusable_cap_perc(sbi);
    *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
    return 0;
restore_flush:
    if (need_restart_flush) {
        if (f2fs_create_flush_cmd_control(sbi))
            f2fs_warn(sbi, "background flush thread has stopped");
    } else if (need_stop_flush) {
        clear_opt(sbi, FLUSH_MERGE);
        f2fs_destroy_flush_cmd_control(sbi, false);
    }
restore_ckpt:
    if (need_restart_ckpt) {
        if (f2fs_start_ckpt_thread(sbi))
            f2fs_warn(sbi, "background ckpt thread has stopped");
    } else if (need_stop_ckpt) {
        f2fs_stop_ckpt_thread(sbi);
    }
restore_gc:
    if (need_restart_gc) {
        if (f2fs_start_gc_thread(sbi))
            f2fs_warn(sbi, "background gc thread has stopped");
    } else if (need_stop_gc) {
        f2fs_stop_gc_thread(sbi);
    }
restore_opts:
#ifdef CONFIG_QUOTA
    F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
    for (i = 0; i < MAXQUOTAS; i++) {
        kfree(F2FS_OPTION(sbi).s_qf_names[i]);
        F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
    }
#endif
    sbi->mount_opt = org_mount_opt;
    sb->s_flags = old_sb_flags;
    return err;
}
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
                   size_t len, loff_t off)
{
    struct inode *inode = sb_dqopt(sb)->files[type];
    struct address_space *mapping = inode->i_mapping;
    block_t blkidx = F2FS_BYTES_TO_BLK(off);
    int offset = off & (sb->s_blocksize - 1);
    int tocopy;
    size_t toread;
    loff_t i_size = i_size_read(inode);
    struct page *page;
    char *kaddr;

    if (off > i_size)
        return 0;

    if (off + len > i_size)
        len = i_size - off;
    toread = len;
    while (toread > 0) {
        tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
        page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
        if (IS_ERR(page)) {
            if (PTR_ERR(page) == -ENOMEM) {
                congestion_wait(BLK_RW_ASYNC,
                        DEFAULT_IO_TIMEOUT);
                goto repeat;
            }
            set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
            return PTR_ERR(page);
        }

        lock_page(page);

        if (unlikely(page->mapping != mapping)) {
            f2fs_put_page(page, 1);
            goto repeat;
        }
        if (unlikely(!PageUptodate(page))) {
            f2fs_put_page(page, 1);
            set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
            return -EIO;
        }

        kaddr = kmap_atomic(page);
        memcpy(data, kaddr + offset, tocopy);
        kunmap_atomic(kaddr);
        f2fs_put_page(page, 1);

        offset = 0;
        blkidx++;
        toread -= tocopy;
        data += tocopy;
    }
    return len;
}
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
                const char *data, size_t len, loff_t off)
{
    struct inode *inode = sb_dqopt(sb)->files[type];
    struct address_space *mapping = inode->i_mapping;
    const struct address_space_operations *a_ops = mapping->a_ops;
    int offset = off & (sb->s_blocksize - 1);
    size_t towrite = len;
    struct page *page;
    void *fsdata = NULL;
    char *kaddr;
    int err = 0;
    int tocopy;

    while (towrite > 0) {
        tocopy = min_t(unsigned long, sb->s_blocksize - offset,
                                towrite);
retry:
        err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
                            &page, &fsdata);
        if (unlikely(err)) {
            if (err == -ENOMEM) {
                congestion_wait(BLK_RW_ASYNC,
                        DEFAULT_IO_TIMEOUT);
                goto retry;
            }
            set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
            break;
        }

        kaddr = kmap_atomic(page);
        memcpy(kaddr + offset, data, tocopy);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);

        a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
                        page, fsdata);
        offset = 0;
        towrite -= tocopy;
        off += tocopy;
        data += tocopy;
        cond_resched();
    }

    if (len == towrite)
        return err;
    inode->i_mtime = inode->i_ctime = current_time(inode);
    f2fs_mark_inode_dirty_sync(inode, false);
    return len - towrite;
}
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
    return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
    return &F2FS_I(inode)->i_reserved_quota;
}
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
    if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
        f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
        return 0;
    }

    return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
                    F2FS_OPTION(sbi).s_jquota_fmt, type);
}
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
    int enabled = 0;
    int i, err;

    if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
        err = f2fs_enable_quotas(sbi->sb);
        if (err) {
            f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
            return 0;
        }
        return 1;
    }

    for (i = 0; i < MAXQUOTAS; i++) {
        if (F2FS_OPTION(sbi).s_qf_names[i]) {
            err = f2fs_quota_on_mount(sbi, i);
            if (!err) {
                enabled = 1;
                continue;
            }
            f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
                 err, i);
        }
    }
    return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
                 unsigned int flags)
{
    struct inode *qf_inode;
    unsigned long qf_inum;
    int err;

    BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

    qf_inum = f2fs_qf_ino(sb, type);
    if (!qf_inum)
        return -EPERM;

    qf_inode = f2fs_iget(sb, qf_inum);
    if (IS_ERR(qf_inode)) {
        f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
        return PTR_ERR(qf_inode);
    }

    /* Don't account quota for quota files to avoid recursion */
    qf_inode->i_flags |= S_NOQUOTA;
    err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
    iput(qf_inode);
    return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    int type, err = 0;
    unsigned long qf_inum;
    bool quota_mopt[MAXQUOTAS] = {
        test_opt(sbi, USRQUOTA),
        test_opt(sbi, GRPQUOTA),
        test_opt(sbi, PRJQUOTA),
    };

    if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
        f2fs_err(sbi, "quota file may be corrupted, skip loading it");
        return 0;
    }

    sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

    for (type = 0; type < MAXQUOTAS; type++) {
        qf_inum = f2fs_qf_ino(sb, type);
        if (qf_inum) {
            err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
                DQUOT_USAGE_ENABLED |
                (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
            if (err) {
                f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
                     type, err);
                for (type--; type >= 0; type--)
                    dquot_quota_off(sb, type);
                set_sbi_flag(F2FS_SB(sb),
                        SBI_QUOTA_NEED_REPAIR);
                return err;
            }
        }
    }
    return 0;
}
2521 int f2fs_quota_sync(struct super_block *sb, int type)
2523 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2524 struct quota_info *dqopt = sb_dqopt(sb);
2529 * do_quotactl
2530 *  f2fs_quota_sync
2531 *  down_read(quota_sem)
2532 *  dquot_writeback_dquots()
2533 *  f2fs_dquot_commit
2534 *                           block_operation
2535 *                           down_read(quota_sem)
2538 f2fs_lock_op(sbi);
2539 down_read(&sbi->quota_sem);
2540 ret = dquot_writeback_dquots(sb, type);
2545 * Now when everything is written we can discard the pagecache so
2546 * that userspace sees the changes.
2548 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2549 struct address_space *mapping;
2551 if (type != -1 && cnt != type)
2552 continue;
2553 if (!sb_has_quota_active(sb, cnt))
2554 continue;
2556 mapping = dqopt->files[cnt]->i_mapping;
2558 ret = filemap_fdatawrite(mapping);
2562 /* if we are using journalled quota */
2563 if (is_journalled_quota(sbi))
2564 continue;
2566 ret = filemap_fdatawait(mapping);
2567 if (ret)
2568 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2570 inode_lock(dqopt->files[cnt]);
2571 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
2572 inode_unlock(dqopt->files[cnt]);
2576 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2577 up_read(&sbi->quota_sem);
2578 f2fs_unlock_op(sbi);
2582 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2583 const struct path *path)
2585 struct inode *inode;
2588 /* if quota sysfile exists, deny enabling quota with specific file */
2589 if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2590 f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2594 err = f2fs_quota_sync(sb, type);
2598 err = dquot_quota_on(sb, type, format_id, path);
2602 inode = d_inode(path->dentry);
2605 F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
2606 f2fs_set_inode_flags(inode);
2607 inode_unlock(inode);
2608 f2fs_mark_inode_dirty_sync(inode, false);
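/*
 * Userspace counterpart (illustrative only; the device and paths are
 * made up): f2fs_quota_on() above services Q_QUOTAON requests, e.g.
 * from quotaon(8). A minimal caller for an external vfsv1 quota file:
 *
 *	#include <sys/quota.h>	(QFMT_VFS_V1 comes from <linux/quota.h>)
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sdb1", QFMT_VFS_V1,
 *		 (caddr_t)"/mnt/aquota.user");
 *
 * When the quota_ino feature is present this path is rejected above,
 * and quotas are managed through the internal quota inodes instead.
 */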
2613 static int __f2fs_quota_off(struct super_block *sb, int type)
2615 struct inode *inode = sb_dqopt(sb)->files[type];
2618 if (!inode || !igrab(inode))
2619 return dquot_quota_off(sb, type);
2621 err = f2fs_quota_sync(sb, type);
2625 err = dquot_quota_off(sb, type);
2626 if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
2630 F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
2631 f2fs_set_inode_flags(inode);
2632 inode_unlock(inode);
2633 f2fs_mark_inode_dirty_sync(inode, false);
2639 static int f2fs_quota_off(struct super_block *sb, int type)
2641 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2644 err = __f2fs_quota_off(sb, type);
2647 * quotactl can shut down journalled quota, resulting in inconsistency
2648 * between the quota records and fs data after subsequent updates; tag the
2649 * flag so that a later fsck is aware of it.
2651 if (is_journalled_quota(sbi))
2652 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2656 void f2fs_quota_off_umount(struct super_block *sb)
2661 for (type = 0; type < MAXQUOTAS; type++) {
2662 err = __f2fs_quota_off(sb, type);
2664 int ret = dquot_quota_off(sb, type);
2666 f2fs_err(F2FS_SB(sb), "Failed to turn off disk quota (type: %d, err: %d, ret: %d). Please run fsck to fix it.",
2668 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2672 * In case of checkpoint=disable, we must flush quota blocks here;
2673 * flushing them later could hit a NULL node_inode in end_io, since
2674 * put_super has already dropped it.
2676 sync_filesystem(sb);
2679 static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2681 struct quota_info *dqopt = sb_dqopt(sb);
2684 for (type = 0; type < MAXQUOTAS; type++) {
2685 if (!dqopt->files[type])
2687 f2fs_inode_synced(dqopt->files[type]);
2691 static int f2fs_dquot_commit(struct dquot *dquot)
2693 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2696 down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
2697 ret = dquot_commit(dquot);
2699 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2700 up_read(&sbi->quota_sem);
2704 static int f2fs_dquot_acquire(struct dquot *dquot)
2706 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2709 down_read(&sbi->quota_sem);
2710 ret = dquot_acquire(dquot);
2712 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2713 up_read(&sbi->quota_sem);
2717 static int f2fs_dquot_release(struct dquot *dquot)
2719 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2720 int ret = dquot_release(dquot);
2723 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2727 static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
2729 struct super_block *sb = dquot->dq_sb;
2730 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2731 int ret = dquot_mark_dquot_dirty(dquot);
2733 /* if we are using journalled quota */
2734 if (is_journalled_quota(sbi))
2735 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
2740 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
2742 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2743 int ret = dquot_commit_info(sb, type);
2746 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2750 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
2752 *projid = F2FS_I(inode)->i_projid;
2756 static const struct dquot_operations f2fs_quota_operations = {
2757 .get_reserved_space = f2fs_get_reserved_space,
2758 .write_dquot = f2fs_dquot_commit,
2759 .acquire_dquot = f2fs_dquot_acquire,
2760 .release_dquot = f2fs_dquot_release,
2761 .mark_dirty = f2fs_dquot_mark_dquot_dirty,
2762 .write_info = f2fs_dquot_commit_info,
2763 .alloc_dquot = dquot_alloc,
2764 .destroy_dquot = dquot_destroy,
2765 .get_projid = f2fs_get_projid,
2766 .get_next_id = dquot_get_next_id,
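/*
 * All of the dquot hooks above share one shape: take sbi->quota_sem
 * shared around the generic dquot_*() helper and, on failure, tag the
 * image with SBI_QUOTA_NEED_REPAIR so a later fsck knows the quota
 * records may no longer match the filesystem. A sketch of the pattern,
 * where dquot_<op>() stands for any generic helper:
 *
 *	down_read(&sbi->quota_sem);
 *	ret = dquot_<op>(dquot);
 *	if (ret < 0)
 *		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
 *	up_read(&sbi->quota_sem);
 *	return ret;
 *
 * f2fs_dquot_commit() uses down_read_nested() because it can be reached
 * from dquot_writeback_dquots() with quota_sem already held for read
 * (see the ordering comment in f2fs_quota_sync()).
 */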
2769 static const struct quotactl_ops f2fs_quotactl_ops = {
2770 .quota_on = f2fs_quota_on,
2771 .quota_off = f2fs_quota_off,
2772 .quota_sync = f2fs_quota_sync,
2773 .get_state = dquot_get_state,
2774 .set_info = dquot_set_dqinfo,
2775 .get_dqblk = dquot_get_dqblk,
2776 .set_dqblk = dquot_set_dqblk,
2777 .get_nextdqblk = dquot_get_next_dqblk,
2780 int f2fs_quota_sync(struct super_block *sb, int type)
2785 void f2fs_quota_off_umount(struct super_block *sb)
2790 static const struct super_operations f2fs_sops = {
2791 .alloc_inode = f2fs_alloc_inode,
2792 .free_inode = f2fs_free_inode,
2793 .drop_inode = f2fs_drop_inode,
2794 .write_inode = f2fs_write_inode,
2795 .dirty_inode = f2fs_dirty_inode,
2796 .show_options = f2fs_show_options,
2798 .quota_read = f2fs_quota_read,
2799 .quota_write = f2fs_quota_write,
2800 .get_dquots = f2fs_get_dquots,
2802 .evict_inode = f2fs_evict_inode,
2803 .put_super = f2fs_put_super,
2804 .sync_fs = f2fs_sync_fs,
2805 .freeze_fs = f2fs_freeze,
2806 .unfreeze_fs = f2fs_unfreeze,
2807 .statfs = f2fs_statfs,
2808 .remount_fs = f2fs_remount,
2811 #ifdef CONFIG_FS_ENCRYPTION
2812 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
2814 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2815 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2819 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
2822 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2825 * Encrypting the root directory is not allowed because fsck
2826 * expects the lost+found directory to exist and remain unencrypted
2827 * if the LOST_FOUND feature is enabled.
2830 if (f2fs_sb_has_lost_found(sbi) &&
2831 inode->i_ino == F2FS_ROOT_INO(sbi))
2834 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2835 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2836 ctx, len, fs_data, XATTR_CREATE);
2839 static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
2841 return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
2844 static bool f2fs_has_stable_inodes(struct super_block *sb)
2849 static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
2850 int *ino_bits_ret, int *lblk_bits_ret)
2852 *ino_bits_ret = 8 * sizeof(nid_t);
2853 *lblk_bits_ret = 8 * sizeof(block_t);
2856 static int f2fs_get_num_devices(struct super_block *sb)
2858 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2860 if (f2fs_is_multi_device(sbi))
2861 return sbi->s_ndevs;
2865 static void f2fs_get_devices(struct super_block *sb,
2866 struct request_queue **devs)
2868 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2871 for (i = 0; i < sbi->s_ndevs; i++)
2872 devs[i] = bdev_get_queue(FDEV(i).bdev);
2875 static const struct fscrypt_operations f2fs_cryptops = {
2876 .key_prefix = "f2fs:",
2877 .get_context = f2fs_get_context,
2878 .set_context = f2fs_set_context,
2879 .get_dummy_policy = f2fs_get_dummy_policy,
2880 .empty_dir = f2fs_empty_dir,
2881 .max_namelen = F2FS_NAME_LEN,
2882 .has_stable_inodes = f2fs_has_stable_inodes,
2883 .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
2884 .get_num_devices = f2fs_get_num_devices,
2885 .get_devices = f2fs_get_devices,
2889 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
2890 u64 ino, u32 generation)
2892 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2893 struct inode *inode;
2895 if (f2fs_check_nid_range(sbi, ino))
2896 return ERR_PTR(-ESTALE);
2899 * f2fs_iget isn't quite right if the inode is currently unallocated!
2900 * However, f2fs_iget currently does appropriate checks to handle stale
2901 * inodes, so everything is OK.
2903 inode = f2fs_iget(sb, ino);
2905 return ERR_CAST(inode);
2906 if (unlikely(generation && inode->i_generation != generation)) {
2907 /* we didn't find the right inode. */
2909 return ERR_PTR(-ESTALE);
2914 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
2915 int fh_len, int fh_type)
2917 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
2918 f2fs_nfs_get_inode);
2921 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
2922 int fh_len, int fh_type)
2924 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
2925 f2fs_nfs_get_inode);
2928 static const struct export_operations f2fs_export_ops = {
2929 .fh_to_dentry = f2fs_fh_to_dentry,
2930 .fh_to_parent = f2fs_fh_to_parent,
2931 .get_parent = f2fs_get_parent,
2934 loff_t max_file_blocks(struct inode *inode)
2940 * Note: previously, result was equal to (DEF_ADDRS_PER_INODE -
2941 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
2942 * space in inode.i_addr, so it is safer to reassign
2943 * result as zero.
2946 if (inode && f2fs_compressed_file(inode))
2947 leaf_count = ADDRS_PER_BLOCK(inode);
2949 leaf_count = DEF_ADDRS_PER_BLOCK;
2951 /* two direct node blocks */
2952 result += (leaf_count * 2);
2954 /* two indirect node blocks */
2955 leaf_count *= NIDS_PER_BLOCK;
2956 result += (leaf_count * 2);
2958 /* one double indirect node block */
2959 leaf_count *= NIDS_PER_BLOCK;
2960 result += leaf_count;
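/*
 * Worked example (illustrative; assumes the usual 4KB-block defaults
 * DEF_ADDRS_PER_BLOCK = 1018 and NIDS_PER_BLOCK = 1018, with the
 * inode's direct pointers excluded per the note above):
 *
 *	two direct node blocks:     2 * 1018        =         2,036
 *	two indirect node blocks:   2 * 1018 * 1018 =     2,072,648
 *	one double indirect block:  1018^3          = 1,054,977,832
 *	total                                       ~ 1,057,052,516 blocks
 *
 * i.e. roughly 3.94 TiB per file with 4KB blocks.
 */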
2965 static int __f2fs_commit_super(struct buffer_head *bh,
2966 struct f2fs_super_block *super)
2970 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
2971 set_buffer_dirty(bh);
2974 /* it's a rare case, so we can do FUA all the time */
2975 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
2978 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2979 struct buffer_head *bh)
2981 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2982 (bh->b_data + F2FS_SUPER_OFFSET);
2983 struct super_block *sb = sbi->sb;
2984 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2985 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
2986 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
2987 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
2988 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2989 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2990 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
2991 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
2992 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
2993 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
2994 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2995 u32 segment_count = le32_to_cpu(raw_super->segment_count);
2996 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2997 u64 main_end_blkaddr = main_blkaddr +
2998 (segment_count_main << log_blocks_per_seg);
2999 u64 seg_end_blkaddr = segment0_blkaddr +
3000 (segment_count << log_blocks_per_seg);
3002 if (segment0_blkaddr != cp_blkaddr) {
3003 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
3004 segment0_blkaddr, cp_blkaddr);
3008 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
3010 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
3011 cp_blkaddr, sit_blkaddr,
3012 segment_count_ckpt << log_blocks_per_seg);
3016 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
3018 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
3019 sit_blkaddr, nat_blkaddr,
3020 segment_count_sit << log_blocks_per_seg);
3024 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
3026 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
3027 nat_blkaddr, ssa_blkaddr,
3028 segment_count_nat << log_blocks_per_seg);
3032 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
3034 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
3035 ssa_blkaddr, main_blkaddr,
3036 segment_count_ssa << log_blocks_per_seg);
3040 if (main_end_blkaddr > seg_end_blkaddr) {
3041 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
3042 main_blkaddr, seg_end_blkaddr,
3043 segment_count_main << log_blocks_per_seg);
3045 } else if (main_end_blkaddr < seg_end_blkaddr) {
3049 /* fix in-memory information all the time */
3050 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
3051 segment0_blkaddr) >> log_blocks_per_seg);
3053 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
3054 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3057 err = __f2fs_commit_super(bh, NULL);
3058 res = err ? "failed" : "done";
3060 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
3061 res, main_blkaddr, seg_end_blkaddr,
3062 segment_count_main << log_blocks_per_seg);
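/*
 * Taken together, the checks above pin down the fixed on-disk layout:
 * every metadata area must end exactly where the next one begins,
 *
 *	seg0 == CP | SIT | NAT | SSA | MAIN
 *
 * with each area spanning segment_count_<area> << log_blocks_per_seg
 * blocks. A MAIN area that overruns the last segment is rejected, while
 * one that ends early is repaired in place by shrinking segment_count.
 */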
3069 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
3070 struct buffer_head *bh)
3072 block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
3073 block_t total_sections, blocks_per_seg;
3074 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3075 (bh->b_data + F2FS_SUPER_OFFSET);
3076 size_t crc_offset = 0;
3079 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
3080 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
3081 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
3085 /* Check checksum_offset and crc in superblock */
3086 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
3087 crc_offset = le32_to_cpu(raw_super->checksum_offset);
3089 offsetof(struct f2fs_super_block, crc)) {
3090 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
3092 return -EFSCORRUPTED;
3094 crc = le32_to_cpu(raw_super->crc);
3095 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
3096 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
3097 return -EFSCORRUPTED;
3101 /* Currently, support only 4KB block size */
3102 if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
3103 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
3104 le32_to_cpu(raw_super->log_blocksize),
3106 return -EFSCORRUPTED;
3109 /* check log blocks per segment */
3110 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
3111 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
3112 le32_to_cpu(raw_super->log_blocks_per_seg));
3113 return -EFSCORRUPTED;
3116 /* Currently, support 512/1024/2048/4096-byte sector sizes */
3117 if (le32_to_cpu(raw_super->log_sectorsize) >
3118 F2FS_MAX_LOG_SECTOR_SIZE ||
3119 le32_to_cpu(raw_super->log_sectorsize) <
3120 F2FS_MIN_LOG_SECTOR_SIZE) {
3121 f2fs_info(sbi, "Invalid log sectorsize (%u)",
3122 le32_to_cpu(raw_super->log_sectorsize));
3123 return -EFSCORRUPTED;
3125 if (le32_to_cpu(raw_super->log_sectors_per_block) +
3126 le32_to_cpu(raw_super->log_sectorsize) !=
3127 F2FS_MAX_LOG_SECTOR_SIZE) {
3128 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
3129 le32_to_cpu(raw_super->log_sectors_per_block),
3130 le32_to_cpu(raw_super->log_sectorsize));
3131 return -EFSCORRUPTED;
3134 segment_count = le32_to_cpu(raw_super->segment_count);
3135 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3136 segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3137 secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3138 total_sections = le32_to_cpu(raw_super->section_count);
3140 /* blocks_per_seg should be 512, given the above check */
3141 blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
3143 if (segment_count > F2FS_MAX_SEGMENT ||
3144 segment_count < F2FS_MIN_SEGMENTS) {
3145 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
3146 return -EFSCORRUPTED;
3149 if (total_sections > segment_count_main || total_sections < 1 ||
3150 segs_per_sec > segment_count || !segs_per_sec) {
3151 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
3152 segment_count, total_sections, segs_per_sec);
3153 return -EFSCORRUPTED;
3156 if (segment_count_main != total_sections * segs_per_sec) {
3157 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
3158 segment_count_main, total_sections, segs_per_sec);
3159 return -EFSCORRUPTED;
3162 if ((segment_count / segs_per_sec) < total_sections) {
3163 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
3164 segment_count, segs_per_sec, total_sections);
3165 return -EFSCORRUPTED;
3168 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
3169 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
3170 segment_count, le64_to_cpu(raw_super->block_count));
3171 return -EFSCORRUPTED;
3174 if (RDEV(0).path[0]) {
3175 block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
3178 while (i < MAX_DEVICES && RDEV(i).path[0]) {
3179 dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
3182 if (segment_count != dev_seg_count) {
3183 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
3184 segment_count, dev_seg_count);
3185 return -EFSCORRUPTED;
3188 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
3189 !bdev_is_zoned(sbi->sb->s_bdev)) {
3190 f2fs_info(sbi, "Zoned block device path is missing");
3191 return -EFSCORRUPTED;
3195 if (secs_per_zone > total_sections || !secs_per_zone) {
3196 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
3197 secs_per_zone, total_sections);
3198 return -EFSCORRUPTED;
3200 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
3201 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
3202 (le32_to_cpu(raw_super->extension_count) +
3203 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
3204 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
3205 le32_to_cpu(raw_super->extension_count),
3206 raw_super->hot_ext_count,
3207 F2FS_MAX_EXTENSION);
3208 return -EFSCORRUPTED;
3211 if (le32_to_cpu(raw_super->cp_payload) >
3212 (blocks_per_seg - F2FS_CP_PACKS)) {
3213 f2fs_info(sbi, "Insane cp_payload (%u > %u)",
3214 le32_to_cpu(raw_super->cp_payload),
3215 blocks_per_seg - F2FS_CP_PACKS);
3216 return -EFSCORRUPTED;
3219 /* check reserved ino info */
3220 if (le32_to_cpu(raw_super->node_ino) != 1 ||
3221 le32_to_cpu(raw_super->meta_ino) != 2 ||
3222 le32_to_cpu(raw_super->root_ino) != 3) {
3223 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
3224 le32_to_cpu(raw_super->node_ino),
3225 le32_to_cpu(raw_super->meta_ino),
3226 le32_to_cpu(raw_super->root_ino));
3227 return -EFSCORRUPTED;
3230 /* check CP/SIT/NAT/SSA/MAIN area boundaries */
3231 if (sanity_check_area_boundary(sbi, bh))
3232 return -EFSCORRUPTED;
3237 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
3239 unsigned int total, fsmeta;
3240 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3241 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3242 unsigned int ovp_segments, reserved_segments;
3243 unsigned int main_segs, blocks_per_seg;
3244 unsigned int sit_segs, nat_segs;
3245 unsigned int sit_bitmap_size, nat_bitmap_size;
3246 unsigned int log_blocks_per_seg;
3247 unsigned int segment_count_main;
3248 unsigned int cp_pack_start_sum, cp_payload;
3249 block_t user_block_count, valid_user_blocks;
3250 block_t avail_node_count, valid_node_count;
3253 total = le32_to_cpu(raw_super->segment_count);
3254 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
3255 sit_segs = le32_to_cpu(raw_super->segment_count_sit);
3257 nat_segs = le32_to_cpu(raw_super->segment_count_nat);
3259 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
3260 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
3262 if (unlikely(fsmeta >= total))
3265 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3266 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3268 if (!f2fs_sb_has_readonly(sbi) &&
3269 unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
3270 ovp_segments == 0 || reserved_segments == 0)) {
3271 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
3274 user_block_count = le64_to_cpu(ckpt->user_block_count);
3275 segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
3276 (f2fs_sb_has_readonly(sbi) ? 1 : 0);
3277 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3278 if (!user_block_count || user_block_count >=
3279 segment_count_main << log_blocks_per_seg) {
3280 f2fs_err(sbi, "Wrong user_block_count: %u",
3285 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
3286 if (valid_user_blocks > user_block_count) {
3287 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
3288 valid_user_blocks, user_block_count);
3292 valid_node_count = le32_to_cpu(ckpt->valid_node_count);
3293 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
3294 if (valid_node_count > avail_node_count) {
3295 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
3296 valid_node_count, avail_node_count);
3300 main_segs = le32_to_cpu(raw_super->segment_count_main);
3301 blocks_per_seg = sbi->blocks_per_seg;
3303 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3304 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
3305 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
3308 if (f2fs_sb_has_readonly(sbi))
3311 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
3312 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3313 le32_to_cpu(ckpt->cur_node_segno[j])) {
3314 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
3316 le32_to_cpu(ckpt->cur_node_segno[i]));
3322 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
3323 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
3324 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
3327 if (f2fs_sb_has_readonly(sbi))
3330 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
3331 if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
3332 le32_to_cpu(ckpt->cur_data_segno[j])) {
3333 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
3335 le32_to_cpu(ckpt->cur_data_segno[i]));
3340 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3341 for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
3342 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3343 le32_to_cpu(ckpt->cur_data_segno[j])) {
3344 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
3346 le32_to_cpu(ckpt->cur_node_segno[i]));
3352 sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3353 nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
3355 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3356 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3357 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
3358 sit_bitmap_size, nat_bitmap_size);
3362 cp_pack_start_sum = __start_sum_addr(sbi);
3363 cp_payload = __cp_payload(sbi);
3364 if (cp_pack_start_sum < cp_payload + 1 ||
3365 cp_pack_start_sum > blocks_per_seg - 1 -
3366 NR_CURSEG_PERSIST_TYPE) {
3367 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
3372 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
3373 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
3374 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
3375 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3376 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3377 le32_to_cpu(ckpt->checksum_offset));
3381 if (unlikely(f2fs_cp_error(sbi))) {
3382 f2fs_err(sbi, "A bug case: need to run fsck");
3388 static void init_sb_info(struct f2fs_sb_info *sbi)
3390 struct f2fs_super_block *raw_super = sbi->raw_super;
3393 sbi->log_sectors_per_block =
3394 le32_to_cpu(raw_super->log_sectors_per_block);
3395 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
3396 sbi->blocksize = 1 << sbi->log_blocksize;
3397 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3398 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
3399 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3400 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3401 sbi->total_sections = le32_to_cpu(raw_super->section_count);
3402 sbi->total_node_count =
3403 (le32_to_cpu(raw_super->segment_count_nat) / 2)
3404 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
3405 F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
3406 F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
3407 F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
3408 sbi->cur_victim_sec = NULL_SECNO;
3409 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
3410 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
3411 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
3412 sbi->migration_granularity = sbi->segs_per_sec;
3414 sbi->dir_level = DEF_DIR_LEVEL;
3415 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
3416 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
3417 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
3418 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
3419 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
3420 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
3421 DEF_UMOUNT_DISCARD_TIMEOUT;
3422 clear_sbi_flag(sbi, SBI_NEED_FSCK);
3424 for (i = 0; i < NR_COUNT_TYPE; i++)
3425 atomic_set(&sbi->nr_pages[i], 0);
3427 for (i = 0; i < META; i++)
3428 atomic_set(&sbi->wb_sync_req[i], 0);
3430 INIT_LIST_HEAD(&sbi->s_list);
3431 mutex_init(&sbi->umount_mutex);
3432 init_rwsem(&sbi->io_order_lock);
3433 spin_lock_init(&sbi->cp_lock);
3435 sbi->dirty_device = 0;
3436 spin_lock_init(&sbi->dev_lock);
3438 init_rwsem(&sbi->sb_lock);
3439 init_rwsem(&sbi->pin_sem);
3442 static int init_percpu_info(struct f2fs_sb_info *sbi)
3446 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
3450 err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3453 percpu_counter_destroy(&sbi->alloc_valid_block_count);
3458 #ifdef CONFIG_BLK_DEV_ZONED
3460 struct f2fs_report_zones_args {
3461 struct f2fs_dev_info *dev;
3462 bool zone_cap_mismatch;
3465 static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
3468 struct f2fs_report_zones_args *rz_args = data;
3470 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
3473 set_bit(idx, rz_args->dev->blkz_seq);
3474 rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
3475 F2FS_LOG_SECTORS_PER_BLOCK;
3476 if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
3477 rz_args->zone_cap_mismatch = true;
3482 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
3484 struct block_device *bdev = FDEV(devi).bdev;
3485 sector_t nr_sectors = bdev_nr_sectors(bdev);
3486 struct f2fs_report_zones_args rep_zone_arg;
3489 if (!f2fs_sb_has_blkzoned(sbi))
3492 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
3493 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
3495 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
3496 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
3497 __ilog2_u32(sbi->blocks_per_blkz))
3499 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
3500 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
3501 sbi->log_blocks_per_blkz;
3502 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
3503 FDEV(devi).nr_blkz++;
3505 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
3506 BITS_TO_LONGS(FDEV(devi).nr_blkz)
3507 * sizeof(unsigned long),
3509 if (!FDEV(devi).blkz_seq)
3512 /* Get block zone types and zone capacities */
3513 FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
3514 FDEV(devi).nr_blkz * sizeof(block_t),
3516 if (!FDEV(devi).zone_capacity_blocks)
3519 rep_zone_arg.dev = &FDEV(devi);
3520 rep_zone_arg.zone_cap_mismatch = false;
3522 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
3527 if (!rep_zone_arg.zone_cap_mismatch) {
3528 kfree(FDEV(devi).zone_capacity_blocks);
3529 FDEV(devi).zone_capacity_blocks = NULL;
3537 * Read the f2fs raw super block.
3538 * Because we keep two copies of the super block, read both of them
3539 * to get the first valid one. If either of them is broken, we pass the
3540 * recovery flag back to the caller.
3542 static int read_raw_super_block(struct f2fs_sb_info *sbi,
3543 struct f2fs_super_block **raw_super,
3544 int *valid_super_block, int *recovery)
3546 struct super_block *sb = sbi->sb;
3548 struct buffer_head *bh;
3549 struct f2fs_super_block *super;
3552 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
3556 for (block = 0; block < 2; block++) {
3557 bh = sb_bread(sb, block);
3559 f2fs_err(sbi, "Unable to read %dth superblock",
3566 /* sanity checking of raw super */
3567 err = sanity_check_raw_super(sbi, bh);
3569 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
3577 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
3579 *valid_super_block = block;
3585 /* No valid superblock */
3594 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
3596 struct buffer_head *bh;
3600 if ((recover && f2fs_readonly(sbi->sb)) ||
3601 bdev_read_only(sbi->sb->s_bdev)) {
3602 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3606 /* we should update superblock crc here */
3607 if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
3608 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
3609 offsetof(struct f2fs_super_block, crc));
3610 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
3613 /* write back-up superblock first */
3614 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
3617 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3620 /* if we are in recovery path, skip writing valid superblock */
3624 /* write current valid superblock */
3625 bh = sb_bread(sbi->sb, sbi->valid_super_block);
3628 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
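/*
 * Note the write order above: the backup copy is committed first, and
 * only then the currently-valid copy. If power is lost between the two
 * FUA writes, the copy that read_raw_super_block() last validated is
 * still intact, so the pair can never end up with both copies invalid.
 */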
3633 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
3635 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3636 unsigned int max_devices = MAX_DEVICES;
3639 /* Initialize single device information */
3640 if (!RDEV(0).path[0]) {
3641 if (!bdev_is_zoned(sbi->sb->s_bdev))
3647 * Initialize information for multiple devices, or for a single
3648 * zoned block device.
3650 sbi->devs = f2fs_kzalloc(sbi,
3651 array_size(max_devices,
3652 sizeof(struct f2fs_dev_info)),
3657 for (i = 0; i < max_devices; i++) {
3659 if (i > 0 && !RDEV(i).path[0])
3662 if (max_devices == 1) {
3663 /* Single zoned block device mount */
3665 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
3666 sbi->sb->s_mode, sbi->sb->s_type);
3668 /* Multi-device mount */
3669 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
3670 FDEV(i).total_segments =
3671 le32_to_cpu(RDEV(i).total_segments);
3673 FDEV(i).start_blk = 0;
3674 FDEV(i).end_blk = FDEV(i).start_blk +
3675 (FDEV(i).total_segments <<
3676 sbi->log_blocks_per_seg) - 1 +
3677 le32_to_cpu(raw_super->segment0_blkaddr);
3679 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
3680 FDEV(i).end_blk = FDEV(i).start_blk +
3681 (FDEV(i).total_segments <<
3682 sbi->log_blocks_per_seg) - 1;
3684 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
3685 sbi->sb->s_mode, sbi->sb->s_type);
3687 if (IS_ERR(FDEV(i).bdev))
3688 return PTR_ERR(FDEV(i).bdev);
3690 /* to release errored devices */
3691 sbi->s_ndevs = i + 1;
3693 #ifdef CONFIG_BLK_DEV_ZONED
3694 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
3695 !f2fs_sb_has_blkzoned(sbi)) {
3696 f2fs_err(sbi, "Zoned block device feature not enabled");
3699 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
3700 if (init_blkz_info(sbi, i)) {
3701 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
3704 if (max_devices == 1)
3706 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
3708 FDEV(i).total_segments,
3709 FDEV(i).start_blk, FDEV(i).end_blk,
3710 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
3711 "Host-aware" : "Host-managed");
3715 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
3717 FDEV(i).total_segments,
3718 FDEV(i).start_blk, FDEV(i).end_blk);
3721 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
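/*
 * Illustrative geometry (hypothetical numbers): two devices of 1024
 * segments each, 512 blocks per segment, segment0_blkaddr == 512. The
 * loop above then assigns contiguous global block ranges:
 *
 *	FDEV(0): start_blk = 0
 *	         end_blk   = (1024 << 9) - 1 + 512    =  524799
 *	FDEV(1): start_blk = 524800
 *	         end_blk   = 524800 + (1024 << 9) - 1 = 1049087
 *
 * so any global block address maps to exactly one device by range.
 */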
3725 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
3727 #ifdef CONFIG_UNICODE
3728 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
3729 const struct f2fs_sb_encodings *encoding_info;
3730 struct unicode_map *encoding;
3731 __u16 encoding_flags;
3733 if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
3736 "Encoding requested by superblock is unknown");
3740 encoding = utf8_load(encoding_info->version);
3741 if (IS_ERR(encoding)) {
3743 "can't mount with superblock charset: %s-%s "
3744 "not supported by the kernel. flags: 0x%x.",
3745 encoding_info->name, encoding_info->version,
3747 return PTR_ERR(encoding);
3749 f2fs_info(sbi, "Using encoding defined by superblock: "
3750 "%s-%s with flags 0x%hx", encoding_info->name,
3751 encoding_info->version?:"\b", encoding_flags);
3753 sbi->sb->s_encoding = encoding;
3754 sbi->sb->s_encoding_flags = encoding_flags;
3757 if (f2fs_sb_has_casefold(sbi)) {
3758 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
3765 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
3767 struct f2fs_sm_info *sm_i = SM_I(sbi);
3769 /* adjust parameters according to the volume size */
3770 if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
3771 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
3772 sm_i->dcc_info->discard_granularity = 1;
3773 sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
3776 sbi->readdir_ra = 1;
3779 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
3781 struct f2fs_sb_info *sbi;
3782 struct f2fs_super_block *raw_super;
3785 bool skip_recovery = false, need_fsck = false;
3786 char *options = NULL;
3787 int recovery, i, valid_super_block;
3788 struct curseg_info *seg_i;
3794 valid_super_block = -1;
3797 /* allocate memory for f2fs-specific super block info */
3798 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
3804 /* Load the checksum driver */
3805 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
3806 if (IS_ERR(sbi->s_chksum_driver)) {
3807 f2fs_err(sbi, "Cannot load crc32 driver.");
3808 err = PTR_ERR(sbi->s_chksum_driver);
3809 sbi->s_chksum_driver = NULL;
3813 /* set a block size */
3814 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3815 f2fs_err(sbi, "unable to set blocksize");
3819 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
3824 sb->s_fs_info = sbi;
3825 sbi->raw_super = raw_super;
3827 /* precompute checksum seed for metadata */
3828 if (f2fs_sb_has_inode_chksum(sbi))
3829 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
3830 sizeof(raw_super->uuid));
3832 default_options(sbi);
3833 /* parse mount options */
3834 options = kstrdup((const char *)data, GFP_KERNEL);
3835 if (data && !options) {
3840 err = parse_options(sb, options, false);
3844 sb->s_maxbytes = max_file_blocks(NULL) <<
3845 le32_to_cpu(raw_super->log_blocksize);
3846 sb->s_max_links = F2FS_LINK_MAX;
3848 err = f2fs_setup_casefold(sbi);
3853 sb->dq_op = &f2fs_quota_operations;
3854 sb->s_qcop = &f2fs_quotactl_ops;
3855 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3857 if (f2fs_sb_has_quota_ino(sbi)) {
3858 for (i = 0; i < MAXQUOTAS; i++) {
3859 if (f2fs_qf_ino(sbi->sb, i))
3860 sbi->nquota_files++;
3865 sb->s_op = &f2fs_sops;
3866 #ifdef CONFIG_FS_ENCRYPTION
3867 sb->s_cop = &f2fs_cryptops;
3869 #ifdef CONFIG_FS_VERITY
3870 sb->s_vop = &f2fs_verityops;
3872 sb->s_xattr = f2fs_xattr_handlers;
3873 sb->s_export_op = &f2fs_export_ops;
3874 sb->s_magic = F2FS_SUPER_MAGIC;
3875 sb->s_time_gran = 1;
3876 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
3877 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
3878 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
3879 sb->s_iflags |= SB_I_CGROUPWB;
3881 /* init f2fs-specific super block info */
3882 sbi->valid_super_block = valid_super_block;
3883 init_rwsem(&sbi->gc_lock);
3884 mutex_init(&sbi->writepages);
3885 init_rwsem(&sbi->cp_global_sem);
3886 init_rwsem(&sbi->node_write);
3887 init_rwsem(&sbi->node_change);
3889 /* disallow all the data/node/meta page writes */
3890 set_sbi_flag(sbi, SBI_POR_DOING);
3891 spin_lock_init(&sbi->stat_lock);
3893 /* init iostat info */
3894 spin_lock_init(&sbi->iostat_lock);
3895 sbi->iostat_enable = false;
3896 sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
3898 for (i = 0; i < NR_PAGE_TYPE; i++) {
3899 int n = (i == META) ? 1 : NR_TEMP_TYPE;
3905 sizeof(struct f2fs_bio_info)),
3907 if (!sbi->write_io[i]) {
3912 for (j = HOT; j < n; j++) {
3913 init_rwsem(&sbi->write_io[i][j].io_rwsem);
3914 sbi->write_io[i][j].sbi = sbi;
3915 sbi->write_io[i][j].bio = NULL;
3916 spin_lock_init(&sbi->write_io[i][j].io_lock);
3917 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
3918 INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
3919 init_rwsem(&sbi->write_io[i][j].bio_list_lock);
3923 init_rwsem(&sbi->cp_rwsem);
3924 init_rwsem(&sbi->quota_sem);
3925 init_waitqueue_head(&sbi->cp_wait);
3928 err = init_percpu_info(sbi);
3932 if (F2FS_IO_ALIGNED(sbi)) {
3933 sbi->write_io_dummy =
3934 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
3935 if (!sbi->write_io_dummy) {
3941 /* init per sbi slab cache */
3942 err = f2fs_init_xattr_caches(sbi);
3945 err = f2fs_init_page_array_cache(sbi);
3947 goto free_xattr_cache;
3949 /* get an inode for meta space */
3950 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
3951 if (IS_ERR(sbi->meta_inode)) {
3952 f2fs_err(sbi, "Failed to read F2FS meta data inode");
3953 err = PTR_ERR(sbi->meta_inode);
3954 goto free_page_array_cache;
3957 err = f2fs_get_valid_checkpoint(sbi);
3959 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
3960 goto free_meta_inode;
3963 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
3964 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3965 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
3966 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
3967 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
3970 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
3971 set_sbi_flag(sbi, SBI_NEED_FSCK);
3973 /* Initialize device list */
3974 err = f2fs_scan_devices(sbi);
3976 f2fs_err(sbi, "Failed to find devices");
3980 err = f2fs_init_post_read_wq(sbi);
3982 f2fs_err(sbi, "Failed to initialize post read workqueue");
3986 sbi->total_valid_node_count =
3987 le32_to_cpu(sbi->ckpt->valid_node_count);
3988 percpu_counter_set(&sbi->total_valid_inode_count,
3989 le32_to_cpu(sbi->ckpt->valid_inode_count));
3990 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
3991 sbi->total_valid_block_count =
3992 le64_to_cpu(sbi->ckpt->valid_block_count);
3993 sbi->last_valid_block_count = sbi->total_valid_block_count;
3994 sbi->reserved_blocks = 0;
3995 sbi->current_reserved_blocks = 0;
3996 limit_reserve_root(sbi);
3997 adjust_unusable_cap_perc(sbi);
3999 for (i = 0; i < NR_INODE_TYPE; i++) {
4000 INIT_LIST_HEAD(&sbi->inode_list[i]);
4001 spin_lock_init(&sbi->inode_lock[i]);
4003 mutex_init(&sbi->flush_lock);
4005 f2fs_init_extent_cache_info(sbi);
4007 f2fs_init_ino_entry_info(sbi);
4009 f2fs_init_fsync_node_info(sbi);
4011 /* setup checkpoint request control and start checkpoint issue thread */
4012 f2fs_init_ckpt_req_control(sbi);
4013 if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
4014 test_opt(sbi, MERGE_CHECKPOINT)) {
4015 err = f2fs_start_ckpt_thread(sbi);
4018 "Failed to start F2FS issue_checkpoint_thread (%d)",
4020 goto stop_ckpt_thread;
4024 /* setup f2fs internal modules */
4025 err = f2fs_build_segment_manager(sbi);
4027 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
4031 err = f2fs_build_node_manager(sbi);
4033 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
4038 /* For write statistics */
4039 sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
4041 /* Read accumulated write IO statistics if they exist */
4042 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
4043 if (__exist_node_summaries(sbi))
4044 sbi->kbytes_written =
4045 le64_to_cpu(seg_i->journal->info.kbytes_written);
4047 f2fs_build_gc_manager(sbi);
4049 err = f2fs_build_stats(sbi);
4053 /* get an inode for node space */
4054 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
4055 if (IS_ERR(sbi->node_inode)) {
4056 f2fs_err(sbi, "Failed to read node inode");
4057 err = PTR_ERR(sbi->node_inode);
4061 /* read root inode and dentry */
4062 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
4064 f2fs_err(sbi, "Failed to read root inode");
4065 err = PTR_ERR(root);
4066 goto free_node_inode;
4068 if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
4069 !root->i_size || !root->i_nlink) {
4072 goto free_node_inode;
4075 sb->s_root = d_make_root(root); /* allocate root dentry */
4078 goto free_node_inode;
4081 err = f2fs_init_compress_inode(sbi);
4083 goto free_root_inode;
4085 err = f2fs_register_sysfs(sbi);
4087 goto free_compress_inode;
4090 /* Enable quota usage during mount */
4091 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
4092 err = f2fs_enable_quotas(sb);
4094 f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
4097 /* if there are any orphan inodes, free them */
4098 err = f2fs_recover_orphan_inodes(sbi);
4102 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
4103 goto reset_checkpoint;
4105 /* recover fsynced data */
4106 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
4107 !test_opt(sbi, NORECOVERY)) {
4109 * The mount should fail when the device is read-only and the
4110 * previous checkpoint was not done by a clean system shutdown.
4112 if (f2fs_hw_is_readonly(sbi)) {
4113 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4114 err = f2fs_recover_fsync_data(sbi, true);
4117 f2fs_err(sbi, "Need to recover fsync data, but "
4118 "write access unavailable, please try "
4119 "mount w/ disable_roll_forward or norecovery");
4124 f2fs_info(sbi, "write access unavailable, skipping recovery");
4125 goto reset_checkpoint;
4129 set_sbi_flag(sbi, SBI_NEED_FSCK);
4132 goto reset_checkpoint;
4134 err = f2fs_recover_fsync_data(sbi, false);
4137 skip_recovery = true;
4139 f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
4144 err = f2fs_recover_fsync_data(sbi, true);
4146 if (!f2fs_readonly(sb) && err > 0) {
4148 f2fs_err(sbi, "Need to recover fsync data");
4154 * If f2fs is not read-only and fsync data recovery succeeds,
4155 * check zoned block devices' write pointer consistency.
4157 if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
4158 err = f2fs_check_write_pointer(sbi);
4164 f2fs_init_inmem_curseg(sbi);
4166 /* f2fs_recover_fsync_data() cleared this already */
4167 clear_sbi_flag(sbi, SBI_POR_DOING);
4169 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
4170 err = f2fs_disable_checkpoint(sbi);
4172 goto sync_free_meta;
4173 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
4174 f2fs_enable_checkpoint(sbi);
4178 * If the filesystem is not mounted read-only, then
4179 * start the GC thread.
4181 if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
4182 test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
4183 /* After POR, we can run background GC thread. */
4184 err = f2fs_start_gc_thread(sbi);
4186 goto sync_free_meta;
4190 /* recover broken superblock */
4192 err = f2fs_commit_super(sbi, true);
4193 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
4194 sbi->valid_super_block ? 1 : 2, err);
4197 f2fs_join_shrinker(sbi);
4199 f2fs_tuning_parameters(sbi);
4201 f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
4202 cur_cp_version(F2FS_CKPT(sbi)));
4203 f2fs_update_time(sbi, CP_TIME);
4204 f2fs_update_time(sbi, REQ_TIME);
4205 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4209 /* safe to flush all the data */
4210 sync_filesystem(sbi->sb);
4215 f2fs_truncate_quota_inode_pages(sb);
4216 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
4217 f2fs_quota_off_umount(sbi->sb);
4220 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
4221 * when it fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
4222 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
4223 * falls into an infinite loop in f2fs_sync_meta_pages().
4225 truncate_inode_pages_final(META_MAPPING(sbi));
4226 /* evict some inodes being cached by GC */
4228 f2fs_unregister_sysfs(sbi);
4229 free_compress_inode:
4230 f2fs_destroy_compress_inode(sbi);
4235 f2fs_release_ino_entry(sbi, true);
4236 truncate_inode_pages_final(NODE_MAPPING(sbi));
4237 iput(sbi->node_inode);
4238 sbi->node_inode = NULL;
4240 f2fs_destroy_stats(sbi);
4242 f2fs_destroy_node_manager(sbi);
4244 f2fs_destroy_segment_manager(sbi);
4245 f2fs_destroy_post_read_wq(sbi);
4247 f2fs_stop_ckpt_thread(sbi);
4249 destroy_device_list(sbi);
4252 make_bad_inode(sbi->meta_inode);
4253 iput(sbi->meta_inode);
4254 sbi->meta_inode = NULL;
4255 free_page_array_cache:
4256 f2fs_destroy_page_array_cache(sbi);
4258 f2fs_destroy_xattr_caches(sbi);
4260 mempool_destroy(sbi->write_io_dummy);
4262 destroy_percpu_info(sbi);
4264 for (i = 0; i < NR_PAGE_TYPE; i++)
4265 kvfree(sbi->write_io[i]);
4267 #ifdef CONFIG_UNICODE
4268 utf8_unload(sb->s_encoding);
4269 sb->s_encoding = NULL;
4273 for (i = 0; i < MAXQUOTAS; i++)
4274 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
4276 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
4281 if (sbi->s_chksum_driver)
4282 crypto_free_shash(sbi->s_chksum_driver);
4285 /* give only one more chance */
4286 if (retry_cnt > 0 && skip_recovery) {
4288 shrink_dcache_sb(sb);
4294 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
4295 const char *dev_name, void *data)
4297 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
4300 static void kill_f2fs_super(struct super_block *sb)
4303 struct f2fs_sb_info *sbi = F2FS_SB(sb);
4305 set_sbi_flag(sbi, SBI_IS_CLOSE);
4306 f2fs_stop_gc_thread(sbi);
4307 f2fs_stop_discard_thread(sbi);
4309 #ifdef CONFIG_F2FS_FS_COMPRESSION
4311 * Truncate the compress inode cache pages here, so that the later
4312 * evict_inode() can bypass checking and invalidating them.
4314 if (test_opt(sbi, COMPRESS_CACHE))
4315 truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
4318 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
4319 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4320 struct cp_control cpc = {
4321 .reason = CP_UMOUNT,
4323 f2fs_write_checkpoint(sbi, &cpc);
4326 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
4327 sb->s_flags &= ~SB_RDONLY;
4329 kill_block_super(sb);
4332 static struct file_system_type f2fs_fs_type = {
4333 .owner = THIS_MODULE,
4335 .mount = f2fs_mount,
4336 .kill_sb = kill_f2fs_super,
4337 .fs_flags = FS_REQUIRES_DEV,
4339 MODULE_ALIAS_FS("f2fs");
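/*
 * Userspace usage (illustrative; the device path is made up): once the
 * type above is registered, an f2fs image mounts like any other block
 * filesystem:
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	if (mount("/dev/vdb", "/mnt/f2fs", "f2fs", 0, "discard"))
 *		perror("mount");
 *
 * mount(8) can load the module on demand by resolving the "f2fs" type
 * through the MODULE_ALIAS_FS() declaration above.
 */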
4341 static int __init init_inodecache(void)
4343 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
4344 sizeof(struct f2fs_inode_info), 0,
4345 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
4346 if (!f2fs_inode_cachep)
4351 static void destroy_inodecache(void)
4354 * Make sure all delayed rcu free inodes are flushed before we
4355 * destroy the cache.
4357 rcu_barrier();
4358 kmem_cache_destroy(f2fs_inode_cachep);
4361 static int __init init_f2fs_fs(void)
4365 if (PAGE_SIZE != F2FS_BLKSIZE) {
4366 printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
4367 PAGE_SIZE, F2FS_BLKSIZE);
4371 err = init_inodecache();
4374 err = f2fs_create_node_manager_caches();
4376 goto free_inodecache;
4377 err = f2fs_create_segment_manager_caches();
4379 goto free_node_manager_caches;
4380 err = f2fs_create_checkpoint_caches();
4382 goto free_segment_manager_caches;
4383 err = f2fs_create_recovery_cache();
4385 goto free_checkpoint_caches;
4386 err = f2fs_create_extent_cache();
4388 goto free_recovery_cache;
4389 err = f2fs_create_garbage_collection_cache();
4391 goto free_extent_cache;
4392 err = f2fs_init_sysfs();
4394 goto free_garbage_collection_cache;
4395 err = register_shrinker(&f2fs_shrinker_info);
4398 err = register_filesystem(&f2fs_fs_type);
4401 f2fs_create_root_stats();
4402 err = f2fs_init_post_read_processing();
4404 goto free_root_stats;
4405 err = f2fs_init_bio_entry_cache();
4407 goto free_post_read;
4408 err = f2fs_init_bioset();
4410 goto free_bio_entry_cache;
4411 err = f2fs_init_compress_mempool();
4414 err = f2fs_init_compress_cache();
4416 goto free_compress_mempool;
4417 err = f2fs_create_casefold_cache();
4419 goto free_compress_cache;
4421 free_compress_cache:
4422 f2fs_destroy_compress_cache();
4423 free_compress_mempool:
4424 f2fs_destroy_compress_mempool();
4426 f2fs_destroy_bioset();
4427 free_bio_entry_cache:
4428 f2fs_destroy_bio_entry_cache();
4430 f2fs_destroy_post_read_processing();
4432 f2fs_destroy_root_stats();
4433 unregister_filesystem(&f2fs_fs_type);
4435 unregister_shrinker(&f2fs_shrinker_info);
4438 free_garbage_collection_cache:
4439 f2fs_destroy_garbage_collection_cache();
4441 f2fs_destroy_extent_cache();
4442 free_recovery_cache:
4443 f2fs_destroy_recovery_cache();
4444 free_checkpoint_caches:
4445 f2fs_destroy_checkpoint_caches();
4446 free_segment_manager_caches:
4447 f2fs_destroy_segment_manager_caches();
4448 free_node_manager_caches:
4449 f2fs_destroy_node_manager_caches();
4451 destroy_inodecache();
4456 static void __exit exit_f2fs_fs(void)
4458 f2fs_destroy_casefold_cache();
4459 f2fs_destroy_compress_cache();
4460 f2fs_destroy_compress_mempool();
4461 f2fs_destroy_bioset();
4462 f2fs_destroy_bio_entry_cache();
4463 f2fs_destroy_post_read_processing();
4464 f2fs_destroy_root_stats();
4465 unregister_filesystem(&f2fs_fs_type);
4466 unregister_shrinker(&f2fs_shrinker_info);
4468 f2fs_destroy_garbage_collection_cache();
4469 f2fs_destroy_extent_cache();
4470 f2fs_destroy_recovery_cache();
4471 f2fs_destroy_checkpoint_caches();
4472 f2fs_destroy_segment_manager_caches();
4473 f2fs_destroy_node_manager_caches();
4474 destroy_inodecache();
4477 module_init(init_f2fs_fs)
4478 module_exit(exit_f2fs_fs)
4480 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
4481 MODULE_DESCRIPTION("Flash Friendly File System");
4482 MODULE_LICENSE("GPL");
4483 MODULE_SOFTDEP("pre: crc32");