f2fs: multidevice: support direct IO
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "iostat.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
        [FAULT_KMALLOC]         = "kmalloc",
        [FAULT_KVMALLOC]        = "kvmalloc",
        [FAULT_PAGE_ALLOC]      = "page alloc",
        [FAULT_PAGE_GET]        = "page get",
        [FAULT_ALLOC_NID]       = "alloc nid",
        [FAULT_ORPHAN]          = "orphan",
        [FAULT_BLOCK]           = "no more block",
        [FAULT_DIR_DEPTH]       = "too big dir depth",
        [FAULT_EVICT_INODE]     = "evict_inode fail",
        [FAULT_TRUNCATE]        = "truncate fail",
        [FAULT_READ_IO]         = "read IO error",
        [FAULT_CHECKPOINT]      = "checkpoint error",
        [FAULT_DISCARD]         = "discard error",
        [FAULT_WRITE_IO]        = "write IO error",
        [FAULT_SLAB_ALLOC]      = "slab alloc",
};

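/*
 * Update the fault-injection attributes: a non-zero @rate resets the op
 * counter and installs a new injection rate, a non-zero @type replaces the
 * injectable fault mask, and passing zero for both clears the fault_info.
 */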
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
                                                        unsigned int type)
{
        struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

        if (rate) {
                atomic_set(&ffi->inject_ops, 0);
                ffi->inject_rate = rate;
        }

        if (type)
                ffi->inject_type = type;

        if (!rate && !type)
                memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
        .scan_objects = f2fs_shrink_scan,
        .count_objects = f2fs_shrink_count,
        .seeks = DEFAULT_SEEKS,
};

enum {
        Opt_gc_background,
        Opt_disable_roll_forward,
        Opt_norecovery,
        Opt_discard,
        Opt_nodiscard,
        Opt_noheap,
        Opt_heap,
        Opt_user_xattr,
        Opt_nouser_xattr,
        Opt_acl,
        Opt_noacl,
        Opt_active_logs,
        Opt_disable_ext_identify,
        Opt_inline_xattr,
        Opt_noinline_xattr,
        Opt_inline_xattr_size,
        Opt_inline_data,
        Opt_inline_dentry,
        Opt_noinline_dentry,
        Opt_flush_merge,
        Opt_noflush_merge,
        Opt_nobarrier,
        Opt_fastboot,
        Opt_extent_cache,
        Opt_noextent_cache,
        Opt_noinline_data,
        Opt_data_flush,
        Opt_reserve_root,
        Opt_resgid,
        Opt_resuid,
        Opt_mode,
        Opt_io_size_bits,
        Opt_fault_injection,
        Opt_fault_type,
        Opt_lazytime,
        Opt_nolazytime,
        Opt_quota,
        Opt_noquota,
        Opt_usrquota,
        Opt_grpquota,
        Opt_prjquota,
        Opt_usrjquota,
        Opt_grpjquota,
        Opt_prjjquota,
        Opt_offusrjquota,
        Opt_offgrpjquota,
        Opt_offprjjquota,
        Opt_jqfmt_vfsold,
        Opt_jqfmt_vfsv0,
        Opt_jqfmt_vfsv1,
        Opt_whint,
        Opt_alloc,
        Opt_fsync,
        Opt_test_dummy_encryption,
        Opt_inlinecrypt,
        Opt_checkpoint_disable,
        Opt_checkpoint_disable_cap,
        Opt_checkpoint_disable_cap_perc,
        Opt_checkpoint_enable,
        Opt_checkpoint_merge,
        Opt_nocheckpoint_merge,
        Opt_compress_algorithm,
        Opt_compress_log_size,
        Opt_compress_extension,
        Opt_nocompress_extension,
        Opt_compress_chksum,
        Opt_compress_mode,
        Opt_compress_cache,
        Opt_atgc,
        Opt_gc_merge,
        Opt_nogc_merge,
        Opt_discard_unit,
        Opt_err,
};

static match_table_t f2fs_tokens = {
        {Opt_gc_background, "background_gc=%s"},
        {Opt_disable_roll_forward, "disable_roll_forward"},
        {Opt_norecovery, "norecovery"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_noheap, "no_heap"},
        {Opt_heap, "heap"},
        {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_active_logs, "active_logs=%u"},
        {Opt_disable_ext_identify, "disable_ext_identify"},
        {Opt_inline_xattr, "inline_xattr"},
        {Opt_noinline_xattr, "noinline_xattr"},
        {Opt_inline_xattr_size, "inline_xattr_size=%u"},
        {Opt_inline_data, "inline_data"},
        {Opt_inline_dentry, "inline_dentry"},
        {Opt_noinline_dentry, "noinline_dentry"},
        {Opt_flush_merge, "flush_merge"},
        {Opt_noflush_merge, "noflush_merge"},
        {Opt_nobarrier, "nobarrier"},
        {Opt_fastboot, "fastboot"},
        {Opt_extent_cache, "extent_cache"},
        {Opt_noextent_cache, "noextent_cache"},
        {Opt_noinline_data, "noinline_data"},
        {Opt_data_flush, "data_flush"},
        {Opt_reserve_root, "reserve_root=%u"},
        {Opt_resgid, "resgid=%u"},
        {Opt_resuid, "resuid=%u"},
        {Opt_mode, "mode=%s"},
        {Opt_io_size_bits, "io_bits=%u"},
        {Opt_fault_injection, "fault_injection=%u"},
        {Opt_fault_type, "fault_type=%u"},
        {Opt_lazytime, "lazytime"},
        {Opt_nolazytime, "nolazytime"},
        {Opt_quota, "quota"},
        {Opt_noquota, "noquota"},
        {Opt_usrquota, "usrquota"},
        {Opt_grpquota, "grpquota"},
        {Opt_prjquota, "prjquota"},
        {Opt_usrjquota, "usrjquota=%s"},
        {Opt_grpjquota, "grpjquota=%s"},
        {Opt_prjjquota, "prjjquota=%s"},
        {Opt_offusrjquota, "usrjquota="},
        {Opt_offgrpjquota, "grpjquota="},
        {Opt_offprjjquota, "prjjquota="},
        {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
        {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
        {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
        {Opt_whint, "whint_mode=%s"},
        {Opt_alloc, "alloc_mode=%s"},
        {Opt_fsync, "fsync_mode=%s"},
        {Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
        {Opt_test_dummy_encryption, "test_dummy_encryption"},
        {Opt_inlinecrypt, "inlinecrypt"},
        {Opt_checkpoint_disable, "checkpoint=disable"},
        {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
        {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
        {Opt_checkpoint_enable, "checkpoint=enable"},
        {Opt_checkpoint_merge, "checkpoint_merge"},
        {Opt_nocheckpoint_merge, "nocheckpoint_merge"},
        {Opt_compress_algorithm, "compress_algorithm=%s"},
        {Opt_compress_log_size, "compress_log_size=%u"},
        {Opt_compress_extension, "compress_extension=%s"},
        {Opt_nocompress_extension, "nocompress_extension=%s"},
        {Opt_compress_chksum, "compress_chksum"},
        {Opt_compress_mode, "compress_mode=%s"},
        {Opt_compress_cache, "compress_cache"},
        {Opt_atgc, "atgc"},
        {Opt_gc_merge, "gc_merge"},
        {Opt_nogc_merge, "nogc_merge"},
        {Opt_discard_unit, "discard_unit=%s"},
        {Opt_err, NULL},
};

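/*
 * Example: "-o background_gc=sync,discard" is split by strsep() into
 * "background_gc=sync" and "discard"; match_token() maps these against
 * f2fs_tokens to Opt_gc_background (args[0] spanning "sync") and
 * Opt_discard for the switch in parse_options() below.
 */

/*
 * printf-style logging helper: strips the KERN_<LEVEL> prefix from @fmt
 * and re-emits the message as "F2FS-fs (<device>): ...".
 */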
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;
        int level;

        va_start(args, fmt);

        level = printk_get_level(fmt);
        vaf.fmt = printk_skip_level(fmt);
        vaf.va = &args;
        printk("%c%cF2FS-fs (%s): %pV\n",
               KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

        va_end(args);
}

#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
        __u16 magic;
        char *name;
        char *version;
} f2fs_sb_encoding_map[] = {
        {F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

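/*
 * Look up the on-disk encoding magic from the superblock in
 * f2fs_sb_encoding_map; returns -EINVAL for an unknown magic.
 */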
static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
                                 const struct f2fs_sb_encodings **encoding,
                                 __u16 *flags)
{
        __u16 magic = le16_to_cpu(sb->s_encoding);
        int i;

        for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
                if (magic == f2fs_sb_encoding_map[i].magic)
                        break;

        if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
                return -EINVAL;

        *encoding = &f2fs_sb_encoding_map[i];
        *flags = le16_to_cpu(sb->s_encoding_flags);

        return 0;
}

struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
        f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
                                                        F2FS_NAME_LEN);
        if (!f2fs_cf_name_slab)
                return -ENOMEM;
        return 0;
}

static void f2fs_destroy_casefold_cache(void)
{
        kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif

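/*
 * Cap root-reserved blocks at 0.2% of user blocks (and never above the
 * space left over the fixed reserve), and warn when resuid/resgid are
 * given without reserve_root, in which case they are ignored.
 */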
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
        /* limit is 0.2% */
        block_t limit = min((sbi->user_block_count << 1) / 1000,
                        sbi->user_block_count - sbi->reserved_blocks);

        if (test_opt(sbi, RESERVE_ROOT) &&
                        F2FS_OPTION(sbi).root_reserved_blocks > limit) {
                F2FS_OPTION(sbi).root_reserved_blocks = limit;
                f2fs_info(sbi, "Reduce reserved blocks for root = %u",
                          F2FS_OPTION(sbi).root_reserved_blocks);
        }
        if (!test_opt(sbi, RESERVE_ROOT) &&
                (!uid_eq(F2FS_OPTION(sbi).s_resuid,
                                make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
                !gid_eq(F2FS_OPTION(sbi).s_resgid,
                                make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
                f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
                          from_kuid_munged(&init_user_ns,
                                           F2FS_OPTION(sbi).s_resuid),
                          from_kgid_munged(&init_user_ns,
                                           F2FS_OPTION(sbi).s_resgid));
}

static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
        if (!F2FS_OPTION(sbi).unusable_cap_perc)
                return;

        if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
                F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
        else
                F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
                                        F2FS_OPTION(sbi).unusable_cap_perc;

        f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
                        F2FS_OPTION(sbi).unusable_cap,
                        F2FS_OPTION(sbi).unusable_cap_perc);
}

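/* one-time initialization of the VFS inode embedded in f2fs_inode_info */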
static void init_once(void *foo)
{
        struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

        inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
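/*
 * Remember the journaled quota file name for @qtype from the mount
 * options; rejects changes while quota is loaded, and ignores the name
 * when the quota_ino feature makes external quota files unnecessary.
 */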
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
                                                        substring_t *args)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        char *qname;
        int ret = -EINVAL;

        if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
                f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
                return -EINVAL;
        }
        if (f2fs_sb_has_quota_ino(sbi)) {
                f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
                return 0;
        }

        qname = match_strdup(args);
        if (!qname) {
                f2fs_err(sbi, "Not enough memory for storing quotafile name");
                return -ENOMEM;
        }
        if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
                if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
                        ret = 0;
                else
                        f2fs_err(sbi, "%s quota file already specified",
                                 QTYPE2NAME(qtype));
                goto errout;
        }
        if (strchr(qname, '/')) {
                f2fs_err(sbi, "quotafile must be on filesystem root");
                goto errout;
        }
        F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
        set_opt(sbi, QUOTA);
        return 0;
errout:
        kfree(qname);
        return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);

        if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
                f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
                return -EINVAL;
        }
        kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
        F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
        return 0;
}

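/*
 * Cross-check the quota mount options: project quota needs the on-disk
 * feature, journaled quota files must come with a journaled format, and
 * limits-style and journaled options must not be mixed per quota type.
 */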
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
        /*
         * We do the test below only for project quotas. 'usrquota' and
         * 'grpquota' mount options are allowed even without quota feature
         * to support legacy quotas in quota files.
         */
        if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
                f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
                return -1;
        }
        if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
                        F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
                        F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
                if (test_opt(sbi, USRQUOTA) &&
                                F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
                        clear_opt(sbi, USRQUOTA);

                if (test_opt(sbi, GRPQUOTA) &&
                                F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
                        clear_opt(sbi, GRPQUOTA);

                if (test_opt(sbi, PRJQUOTA) &&
                                F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
                        clear_opt(sbi, PRJQUOTA);

                if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
                                test_opt(sbi, PRJQUOTA)) {
                        f2fs_err(sbi, "old and new quota format mixing");
                        return -1;
                }

                if (!F2FS_OPTION(sbi).s_jquota_fmt) {
                        f2fs_err(sbi, "journaled quota format not specified");
                        return -1;
                }
        }

        if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
                f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
                F2FS_OPTION(sbi).s_jquota_fmt = 0;
        }
        return 0;
}
#endif

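/*
 * Handle the test_dummy_encryption option. It is only meaningful with
 * CONFIG_FS_ENCRYPTION; on remount it is accepted only when it does not
 * change the already-installed dummy policy.
 */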
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
                                          const char *opt,
                                          const substring_t *arg,
                                          bool is_remount)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
        int err;

        if (!f2fs_sb_has_encrypt(sbi)) {
                f2fs_err(sbi, "Encrypt feature is off");
                return -EINVAL;
        }

        /*
         * This mount option is just for testing, and it's not worthwhile to
         * implement the extra complexity (e.g. RCU protection) that would be
         * needed to allow it to be set or changed during remount.  We do allow
         * it to be specified during remount, but only if there is no change.
         */
        if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
                f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
                return -EINVAL;
        }
        err = fscrypt_set_test_dummy_encryption(
                sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
        if (err) {
                if (err == -EEXIST)
                        f2fs_warn(sbi,
                                  "Can't change test_dummy_encryption on remount");
                else if (err == -EINVAL)
                        f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
                                  opt);
                else
                        f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
                                  opt, err);
                return -EINVAL;
        }
        f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
        f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
        return 0;
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * 1. The same extension name must not appear in both the compress and
 * the non-compress extension lists at the same time.
 * 2. If the compress extension covers all files, the types named by the
 * non-compress extension are treated as special cases and are not
 * compressed.
 * 3. The non-compress extension must not cover all files.
 */
static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
{
        unsigned char (*ext)[F2FS_EXTENSION_LEN];
        unsigned char (*noext)[F2FS_EXTENSION_LEN];
        int ext_cnt, noext_cnt, index = 0, no_index = 0;

        ext = F2FS_OPTION(sbi).extensions;
        ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
        noext = F2FS_OPTION(sbi).noextensions;
        noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

        if (!noext_cnt)
                return 0;

        for (no_index = 0; no_index < noext_cnt; no_index++) {
                if (!strcasecmp("*", noext[no_index])) {
                        f2fs_info(sbi, "Don't allow the nocompress extension to cover all files");
                        return -EINVAL;
                }
                for (index = 0; index < ext_cnt; index++) {
                        if (!strcasecmp(ext[index], noext[no_index])) {
                                f2fs_info(sbi, "Don't allow the same extension %s to appear in both compress and nocompress extensions",
                                                ext[index]);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

#ifdef CONFIG_F2FS_FS_LZ4
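/*
 * Accept "lz4" (compress_level 0) or, with CONFIG_F2FS_FS_LZ4HC,
 * "lz4:<level>" with <level> in [LZ4HC_MIN_CLEVEL, LZ4HC_MAX_CLEVEL].
 */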
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
        unsigned int level;
#endif

        if (strlen(str) == 3) {
                F2FS_OPTION(sbi).compress_level = 0;
                return 0;
        }

#ifdef CONFIG_F2FS_FS_LZ4HC
        str += 3;

        if (str[0] != ':') {
                f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
                return -EINVAL;
        }
        if (kstrtouint(str + 1, 10, &level))
                return -EINVAL;

        if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
                f2fs_info(sbi, "invalid lz4hc compress level: %u", level);
                return -EINVAL;
        }

        F2FS_OPTION(sbi).compress_level = level;
        return 0;
#else
        f2fs_info(sbi, "kernel doesn't support lz4hc compression");
        return -EINVAL;
#endif
}
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
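/*
 * Accept "zstd" (compress_level 0, i.e. the zstd default) or
 * "zstd:<level>" with <level> in [1, ZSTD_maxCLevel()].
 */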
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
        unsigned int level;
        int len = 4;

        if (strlen(str) == len) {
                F2FS_OPTION(sbi).compress_level = 0;
                return 0;
        }

        str += len;

        if (str[0] != ':') {
                f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
                return -EINVAL;
        }
        if (kstrtouint(str + 1, 10, &level))
                return -EINVAL;

        if (!level || level > ZSTD_maxCLevel()) {
                f2fs_info(sbi, "invalid zstd compress level: %u", level);
                return -EINVAL;
        }

        F2FS_OPTION(sbi).compress_level = level;
        return 0;
}
#endif
#endif

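/*
 * Parse the comma-separated mount option string into F2FS_OPTION(sbi);
 * the default_check label below then validates the combined result
 * (quota, zoned, compression, io_bits and inline_xattr constraints).
 */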
static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
        unsigned char (*ext)[F2FS_EXTENSION_LEN];
        unsigned char (*noext)[F2FS_EXTENSION_LEN];
        int ext_cnt, noext_cnt;
#endif
        char *p, *name;
        int arg = 0;
        kuid_t uid;
        kgid_t gid;
        int ret;

        if (!options)
                goto default_check;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;

                if (!*p)
                        continue;
                /*
                 * Initialize args struct so we know whether arg was
                 * found; some options take optional arguments.
                 */
                args[0].to = args[0].from = NULL;
                token = match_token(p, f2fs_tokens, args);

                switch (token) {
                case Opt_gc_background:
                        name = match_strdup(&args[0]);

                        if (!name)
                                return -ENOMEM;
                        if (!strcmp(name, "on")) {
                                F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
                        } else if (!strcmp(name, "off")) {
                                F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
                        } else if (!strcmp(name, "sync")) {
                                F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
                        } else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_disable_roll_forward:
                        set_opt(sbi, DISABLE_ROLL_FORWARD);
                        break;
                case Opt_norecovery:
                        /* this option requires a read-only mount */
                        set_opt(sbi, NORECOVERY);
                        if (!f2fs_readonly(sb))
                                return -EINVAL;
                        break;
                case Opt_discard:
                        if (!f2fs_hw_support_discard(sbi)) {
                                f2fs_warn(sbi, "device does not support discard");
                                break;
                        }
                        set_opt(sbi, DISCARD);
                        break;
                case Opt_nodiscard:
                        if (f2fs_hw_should_discard(sbi)) {
                                f2fs_warn(sbi, "discard is required for zoned block devices");
                                return -EINVAL;
                        }
                        clear_opt(sbi, DISCARD);
                        break;
                case Opt_noheap:
                        set_opt(sbi, NOHEAP);
                        break;
                case Opt_heap:
                        clear_opt(sbi, NOHEAP);
                        break;
#ifdef CONFIG_F2FS_FS_XATTR
                case Opt_user_xattr:
                        set_opt(sbi, XATTR_USER);
                        break;
                case Opt_nouser_xattr:
                        clear_opt(sbi, XATTR_USER);
                        break;
                case Opt_inline_xattr:
                        set_opt(sbi, INLINE_XATTR);
                        break;
                case Opt_noinline_xattr:
                        clear_opt(sbi, INLINE_XATTR);
                        break;
                case Opt_inline_xattr_size:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        set_opt(sbi, INLINE_XATTR_SIZE);
                        F2FS_OPTION(sbi).inline_xattr_size = arg;
                        break;
#else
                case Opt_user_xattr:
                        f2fs_info(sbi, "user_xattr options not supported");
                        break;
                case Opt_nouser_xattr:
                        f2fs_info(sbi, "nouser_xattr options not supported");
                        break;
                case Opt_inline_xattr:
                        f2fs_info(sbi, "inline_xattr options not supported");
                        break;
                case Opt_noinline_xattr:
                        f2fs_info(sbi, "noinline_xattr options not supported");
                        break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
                case Opt_acl:
                        set_opt(sbi, POSIX_ACL);
                        break;
                case Opt_noacl:
                        clear_opt(sbi, POSIX_ACL);
                        break;
#else
                case Opt_acl:
                        f2fs_info(sbi, "acl options not supported");
                        break;
                case Opt_noacl:
                        f2fs_info(sbi, "noacl options not supported");
                        break;
#endif
                case Opt_active_logs:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (arg != 2 && arg != 4 &&
                                arg != NR_CURSEG_PERSIST_TYPE)
                                return -EINVAL;
                        F2FS_OPTION(sbi).active_logs = arg;
                        break;
                case Opt_disable_ext_identify:
                        set_opt(sbi, DISABLE_EXT_IDENTIFY);
                        break;
                case Opt_inline_data:
                        set_opt(sbi, INLINE_DATA);
                        break;
                case Opt_inline_dentry:
                        set_opt(sbi, INLINE_DENTRY);
                        break;
                case Opt_noinline_dentry:
                        clear_opt(sbi, INLINE_DENTRY);
                        break;
                case Opt_flush_merge:
                        set_opt(sbi, FLUSH_MERGE);
                        break;
                case Opt_noflush_merge:
                        clear_opt(sbi, FLUSH_MERGE);
                        break;
                case Opt_nobarrier:
                        set_opt(sbi, NOBARRIER);
                        break;
                case Opt_fastboot:
                        set_opt(sbi, FASTBOOT);
                        break;
                case Opt_extent_cache:
                        set_opt(sbi, EXTENT_CACHE);
                        break;
                case Opt_noextent_cache:
                        clear_opt(sbi, EXTENT_CACHE);
                        break;
                case Opt_noinline_data:
                        clear_opt(sbi, INLINE_DATA);
                        break;
                case Opt_data_flush:
                        set_opt(sbi, DATA_FLUSH);
                        break;
                case Opt_reserve_root:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (test_opt(sbi, RESERVE_ROOT)) {
                                f2fs_info(sbi, "Preserve previous reserve_root=%u",
                                          F2FS_OPTION(sbi).root_reserved_blocks);
                        } else {
                                F2FS_OPTION(sbi).root_reserved_blocks = arg;
                                set_opt(sbi, RESERVE_ROOT);
                        }
                        break;
                case Opt_resuid:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        uid = make_kuid(current_user_ns(), arg);
                        if (!uid_valid(uid)) {
                                f2fs_err(sbi, "Invalid uid value %d", arg);
                                return -EINVAL;
                        }
                        F2FS_OPTION(sbi).s_resuid = uid;
                        break;
                case Opt_resgid:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        gid = make_kgid(current_user_ns(), arg);
                        if (!gid_valid(gid)) {
                                f2fs_err(sbi, "Invalid gid value %d", arg);
                                return -EINVAL;
                        }
                        F2FS_OPTION(sbi).s_resgid = gid;
                        break;
                case Opt_mode:
                        name = match_strdup(&args[0]);

                        if (!name)
                                return -ENOMEM;
                        if (!strcmp(name, "adaptive")) {
                                if (f2fs_sb_has_blkzoned(sbi)) {
                                        f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
                                        kfree(name);
                                        return -EINVAL;
                                }
                                F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
                        } else if (!strcmp(name, "lfs")) {
                                F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
                        } else if (!strcmp(name, "fragment:segment")) {
                                F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
                        } else if (!strcmp(name, "fragment:block")) {
                                F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
                        } else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_io_size_bits:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
                                f2fs_warn(sbi, "Not supported: %d is larger than %d",
                                          1 << arg, BIO_MAX_VECS);
                                return -EINVAL;
                        }
                        F2FS_OPTION(sbi).write_io_size_bits = arg;
                        break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
                case Opt_fault_injection:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
                        set_opt(sbi, FAULT_INJECTION);
                        break;

                case Opt_fault_type:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        f2fs_build_fault_attr(sbi, 0, arg);
                        set_opt(sbi, FAULT_INJECTION);
                        break;
#else
                case Opt_fault_injection:
                        f2fs_info(sbi, "fault_injection options not supported");
                        break;

                case Opt_fault_type:
                        f2fs_info(sbi, "fault_type options not supported");
                        break;
#endif
                case Opt_lazytime:
                        sb->s_flags |= SB_LAZYTIME;
                        break;
                case Opt_nolazytime:
                        sb->s_flags &= ~SB_LAZYTIME;
                        break;
#ifdef CONFIG_QUOTA
                case Opt_quota:
                case Opt_usrquota:
                        set_opt(sbi, USRQUOTA);
                        break;
                case Opt_grpquota:
                        set_opt(sbi, GRPQUOTA);
                        break;
                case Opt_prjquota:
                        set_opt(sbi, PRJQUOTA);
                        break;
                case Opt_usrjquota:
                        ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
                        if (ret)
                                return ret;
                        break;
                case Opt_grpjquota:
                        ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
                        if (ret)
                                return ret;
                        break;
                case Opt_prjjquota:
                        ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
                        if (ret)
                                return ret;
                        break;
                case Opt_offusrjquota:
                        ret = f2fs_clear_qf_name(sb, USRQUOTA);
                        if (ret)
                                return ret;
                        break;
                case Opt_offgrpjquota:
                        ret = f2fs_clear_qf_name(sb, GRPQUOTA);
                        if (ret)
                                return ret;
                        break;
                case Opt_offprjjquota:
                        ret = f2fs_clear_qf_name(sb, PRJQUOTA);
                        if (ret)
                                return ret;
                        break;
                case Opt_jqfmt_vfsold:
                        F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
                        break;
                case Opt_jqfmt_vfsv0:
                        F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
                        break;
                case Opt_jqfmt_vfsv1:
                        F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
                        break;
                case Opt_noquota:
                        clear_opt(sbi, QUOTA);
                        clear_opt(sbi, USRQUOTA);
                        clear_opt(sbi, GRPQUOTA);
                        clear_opt(sbi, PRJQUOTA);
                        break;
#else
                case Opt_quota:
                case Opt_usrquota:
                case Opt_grpquota:
                case Opt_prjquota:
                case Opt_usrjquota:
                case Opt_grpjquota:
                case Opt_prjjquota:
                case Opt_offusrjquota:
                case Opt_offgrpjquota:
                case Opt_offprjjquota:
                case Opt_jqfmt_vfsold:
                case Opt_jqfmt_vfsv0:
                case Opt_jqfmt_vfsv1:
                case Opt_noquota:
                        f2fs_info(sbi, "quota operations not supported");
                        break;
#endif
                case Opt_whint:
                        name = match_strdup(&args[0]);
                        if (!name)
                                return -ENOMEM;
                        if (!strcmp(name, "user-based")) {
                                F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
                        } else if (!strcmp(name, "off")) {
                                F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
                        } else if (!strcmp(name, "fs-based")) {
                                F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
                        } else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_alloc:
                        name = match_strdup(&args[0]);
                        if (!name)
                                return -ENOMEM;

                        if (!strcmp(name, "default")) {
                                F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
                        } else if (!strcmp(name, "reuse")) {
                                F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
                        } else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_fsync:
                        name = match_strdup(&args[0]);
                        if (!name)
                                return -ENOMEM;
                        if (!strcmp(name, "posix")) {
                                F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
                        } else if (!strcmp(name, "strict")) {
                                F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
                        } else if (!strcmp(name, "nobarrier")) {
                                F2FS_OPTION(sbi).fsync_mode =
                                                        FSYNC_MODE_NOBARRIER;
                        } else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_test_dummy_encryption:
                        ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
                                                             is_remount);
                        if (ret)
                                return ret;
                        break;
                case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
                        sb->s_flags |= SB_INLINECRYPT;
#else
                        f2fs_info(sbi, "inline encryption not supported");
#endif
                        break;
                case Opt_checkpoint_disable_cap_perc:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (arg < 0 || arg > 100)
                                return -EINVAL;
                        F2FS_OPTION(sbi).unusable_cap_perc = arg;
                        set_opt(sbi, DISABLE_CHECKPOINT);
                        break;
                case Opt_checkpoint_disable_cap:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        F2FS_OPTION(sbi).unusable_cap = arg;
                        set_opt(sbi, DISABLE_CHECKPOINT);
                        break;
                case Opt_checkpoint_disable:
                        set_opt(sbi, DISABLE_CHECKPOINT);
                        break;
                case Opt_checkpoint_enable:
                        clear_opt(sbi, DISABLE_CHECKPOINT);
                        break;
                case Opt_checkpoint_merge:
                        set_opt(sbi, MERGE_CHECKPOINT);
                        break;
                case Opt_nocheckpoint_merge:
                        clear_opt(sbi, MERGE_CHECKPOINT);
                        break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
                case Opt_compress_algorithm:
                        if (!f2fs_sb_has_compression(sbi)) {
                                f2fs_info(sbi, "Image doesn't support compression");
                                break;
                        }
                        name = match_strdup(&args[0]);
                        if (!name)
                                return -ENOMEM;
                        if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
                                F2FS_OPTION(sbi).compress_level = 0;
                                F2FS_OPTION(sbi).compress_algorithm =
                                                                COMPRESS_LZO;
#else
                                f2fs_info(sbi, "kernel doesn't support lzo compression");
#endif
                        } else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
                                ret = f2fs_set_lz4hc_level(sbi, name);
                                if (ret) {
                                        kfree(name);
                                        return -EINVAL;
                                }
                                F2FS_OPTION(sbi).compress_algorithm =
                                                                COMPRESS_LZ4;
#else
                                f2fs_info(sbi, "kernel doesn't support lz4 compression");
#endif
                        } else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
                                ret = f2fs_set_zstd_level(sbi, name);
                                if (ret) {
                                        kfree(name);
                                        return -EINVAL;
                                }
                                F2FS_OPTION(sbi).compress_algorithm =
                                                                COMPRESS_ZSTD;
#else
                                f2fs_info(sbi, "kernel doesn't support zstd compression");
#endif
                        } else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
                                F2FS_OPTION(sbi).compress_level = 0;
                                F2FS_OPTION(sbi).compress_algorithm =
                                                                COMPRESS_LZORLE;
#else
                                f2fs_info(sbi, "kernel doesn't support lzorle compression");
#endif
                        } else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_compress_log_size:
                        if (!f2fs_sb_has_compression(sbi)) {
                                f2fs_info(sbi, "Image doesn't support compression");
                                break;
                        }
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (arg < MIN_COMPRESS_LOG_SIZE ||
                                arg > MAX_COMPRESS_LOG_SIZE) {
                                f2fs_err(sbi,
                                        "Compress cluster log size is out of range");
                                return -EINVAL;
                        }
                        F2FS_OPTION(sbi).compress_log_size = arg;
                        break;
                case Opt_compress_extension:
                        if (!f2fs_sb_has_compression(sbi)) {
                                f2fs_info(sbi, "Image doesn't support compression");
                                break;
                        }
                        name = match_strdup(&args[0]);
                        if (!name)
                                return -ENOMEM;

                        ext = F2FS_OPTION(sbi).extensions;
                        ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

                        if (strlen(name) >= F2FS_EXTENSION_LEN ||
                                ext_cnt >= COMPRESS_EXT_NUM) {
                                f2fs_err(sbi,
                                        "invalid extension length/number");
                                kfree(name);
                                return -EINVAL;
                        }

                        strcpy(ext[ext_cnt], name);
                        F2FS_OPTION(sbi).compress_ext_cnt++;
                        kfree(name);
                        break;
                case Opt_nocompress_extension:
                        if (!f2fs_sb_has_compression(sbi)) {
                                f2fs_info(sbi, "Image doesn't support compression");
                                break;
                        }
                        name = match_strdup(&args[0]);
                        if (!name)
                                return -ENOMEM;

                        noext = F2FS_OPTION(sbi).noextensions;
                        noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

                        if (strlen(name) >= F2FS_EXTENSION_LEN ||
                                noext_cnt >= COMPRESS_EXT_NUM) {
                                f2fs_err(sbi,
                                        "invalid extension length/number");
                                kfree(name);
                                return -EINVAL;
                        }

                        strcpy(noext[noext_cnt], name);
                        F2FS_OPTION(sbi).nocompress_ext_cnt++;
                        kfree(name);
                        break;
                case Opt_compress_chksum:
                        F2FS_OPTION(sbi).compress_chksum = true;
                        break;
                case Opt_compress_mode:
                        name = match_strdup(&args[0]);
                        if (!name)
                                return -ENOMEM;
                        if (!strcmp(name, "fs")) {
                                F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
                        } else if (!strcmp(name, "user")) {
                                F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
                        } else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_compress_cache:
                        set_opt(sbi, COMPRESS_CACHE);
                        break;
#else
                case Opt_compress_algorithm:
                case Opt_compress_log_size:
                case Opt_compress_extension:
                case Opt_nocompress_extension:
                case Opt_compress_chksum:
                case Opt_compress_mode:
                case Opt_compress_cache:
                        f2fs_info(sbi, "compression options not supported");
                        break;
#endif
                case Opt_atgc:
                        set_opt(sbi, ATGC);
                        break;
                case Opt_gc_merge:
                        set_opt(sbi, GC_MERGE);
                        break;
                case Opt_nogc_merge:
                        clear_opt(sbi, GC_MERGE);
                        break;
                case Opt_discard_unit:
                        name = match_strdup(&args[0]);
                        if (!name)
                                return -ENOMEM;
                        if (!strcmp(name, "block")) {
                                F2FS_OPTION(sbi).discard_unit =
                                                DISCARD_UNIT_BLOCK;
                        } else if (!strcmp(name, "segment")) {
                                F2FS_OPTION(sbi).discard_unit =
                                                DISCARD_UNIT_SEGMENT;
                        } else if (!strcmp(name, "section")) {
                                F2FS_OPTION(sbi).discard_unit =
                                                DISCARD_UNIT_SECTION;
                        } else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                default:
                        f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
                                 p);
                        return -EINVAL;
                }
        }
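        /* cross-option sanity checks; also reached when no options were given */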
default_check:
#ifdef CONFIG_QUOTA
        if (f2fs_check_quota_options(sbi))
                return -EINVAL;
#else
        if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
                f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
                return -EINVAL;
        }
        if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
                f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
                return -EINVAL;
        }
#endif
#ifndef CONFIG_UNICODE
        if (f2fs_sb_has_casefold(sbi)) {
                f2fs_err(sbi,
                        "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
                return -EINVAL;
        }
#endif
        /*
         * The BLKZONED feature indicates that the drive was formatted with
         * zone alignment optimization. This is optional for host-aware
         * devices, but mandatory for host-managed zoned block devices.
         */
#ifndef CONFIG_BLK_DEV_ZONED
        if (f2fs_sb_has_blkzoned(sbi)) {
                f2fs_err(sbi, "Zoned block device support is not enabled");
                return -EINVAL;
        }
#endif
        if (f2fs_sb_has_blkzoned(sbi)) {
                if (F2FS_OPTION(sbi).discard_unit !=
                                                DISCARD_UNIT_SECTION) {
                        f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
                        F2FS_OPTION(sbi).discard_unit =
                                        DISCARD_UNIT_SECTION;
                }
        }

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (f2fs_test_compress_extension(sbi)) {
                f2fs_err(sbi, "invalid compress or nocompress extension");
                return -EINVAL;
        }
#endif

        if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
                f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
                         F2FS_IO_SIZE_KB(sbi));
                return -EINVAL;
        }

        if (test_opt(sbi, INLINE_XATTR_SIZE)) {
                int min_size, max_size;

                if (!f2fs_sb_has_extra_attr(sbi) ||
                        !f2fs_sb_has_flexible_inline_xattr(sbi)) {
                        f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
                        return -EINVAL;
                }
                if (!test_opt(sbi, INLINE_XATTR)) {
                        f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
                        return -EINVAL;
                }

                min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
                max_size = MAX_INLINE_XATTR_SIZE;

                if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
                                F2FS_OPTION(sbi).inline_xattr_size > max_size) {
                        f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
                                 min_size, max_size);
                        return -EINVAL;
                }
        }

        if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
                f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
                return -EINVAL;
        }

        /*
         * Don't pass down write hints if the number of active logs is
         * less than NR_CURSEG_PERSIST_TYPE.
         */
        if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
                F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;

        if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
                f2fs_err(sbi, "Only read-only mount is allowed");
                return -EROFS;
        }
        return 0;
}

1309 static struct inode *f2fs_alloc_inode(struct super_block *sb)
1310 {
1311         struct f2fs_inode_info *fi;
1312
1313         fi = f2fs_kmem_cache_alloc(f2fs_inode_cachep,
1314                                 GFP_F2FS_ZERO, false, F2FS_SB(sb));
1315         if (!fi)
1316                 return NULL;
1317
1318         init_once((void *) fi);
1319
1320         /* Initialize f2fs-specific inode info */
1321         atomic_set(&fi->dirty_pages, 0);
1322         atomic_set(&fi->i_compr_blocks, 0);
1323         init_rwsem(&fi->i_sem);
1324         spin_lock_init(&fi->i_size_lock);
1325         INIT_LIST_HEAD(&fi->dirty_list);
1326         INIT_LIST_HEAD(&fi->gdirty_list);
1327         INIT_LIST_HEAD(&fi->inmem_ilist);
1328         INIT_LIST_HEAD(&fi->inmem_pages);
1329         mutex_init(&fi->inmem_lock);
1330         init_rwsem(&fi->i_gc_rwsem[READ]);
1331         init_rwsem(&fi->i_gc_rwsem[WRITE]);
1332         init_rwsem(&fi->i_xattr_sem);
1333
1334         /* Will be used by directory only */
1335         fi->i_dir_level = F2FS_SB(sb)->dir_level;
1336
1337         return &fi->vfs_inode;
1338 }
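/*
 * Illustrative call path (simplified sketch, not from the source): the VFS
 * reaches the allocator above through the super_operations table, e.g.
 *
 *      inode = sb->s_op->alloc_inode(sb);      // -> f2fs_alloc_inode()
 *
 * and container_of(inode, struct f2fs_inode_info, vfs_inode) recovers the
 * embedding f2fs_inode_info, which is what F2FS_I() does elsewhere.
 */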
1339
1340 static int f2fs_drop_inode(struct inode *inode)
1341 {
1342         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1343         int ret;
1344
1345         /*
1346          * during filesystem shutdown, if checkpoint is disabled,
1347          * drop useless meta/node dirty pages.
1348          */
1349         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1350                 if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1351                         inode->i_ino == F2FS_META_INO(sbi)) {
1352                         trace_f2fs_drop_inode(inode, 1);
1353                         return 1;
1354                 }
1355         }
1356
1357         /*
1358          * This is to avoid a deadlock condition like below.
1359          * writeback_single_inode(inode)
1360          *  - f2fs_write_data_page
1361          *    - f2fs_gc -> iput -> evict
1362          *       - inode_wait_for_writeback(inode)
1363          */
1364         if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
1365                 if (!inode->i_nlink && !is_bad_inode(inode)) {
1366                         /* to avoid evict_inode call simultaneously */
1367                         atomic_inc(&inode->i_count);
1368                         spin_unlock(&inode->i_lock);
1369
1370                         /* some remaining atomic pages should be discarded */
1371                         if (f2fs_is_atomic_file(inode))
1372                                 f2fs_drop_inmem_pages(inode);
1373
1374                         /* fi->extent_tree should remain for writepage */
1375                         f2fs_destroy_extent_node(inode);
1376
1377                         sb_start_intwrite(inode->i_sb);
1378                         f2fs_i_size_write(inode, 0);
1379
1380                         f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
1381                                         inode, NULL, 0, DATA);
1382                         truncate_inode_pages_final(inode->i_mapping);
1383
1384                         if (F2FS_HAS_BLOCKS(inode))
1385                                 f2fs_truncate(inode);
1386
1387                         sb_end_intwrite(inode->i_sb);
1388
1389                         spin_lock(&inode->i_lock);
1390                         atomic_dec(&inode->i_count);
1391                 }
1392                 trace_f2fs_drop_inode(inode, 0);
1393                 return 0;
1394         }
1395         ret = generic_drop_inode(inode);
1396         if (!ret)
1397                 ret = fscrypt_drop_inode(inode);
1398         trace_f2fs_drop_inode(inode, ret);
1399         return ret;
1400 }
1401
1402 int f2fs_inode_dirtied(struct inode *inode, bool sync)
1403 {
1404         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1405         int ret = 0;
1406
1407         spin_lock(&sbi->inode_lock[DIRTY_META]);
1408         if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1409                 ret = 1;
1410         } else {
1411                 set_inode_flag(inode, FI_DIRTY_INODE);
1412                 stat_inc_dirty_inode(sbi, DIRTY_META);
1413         }
1414         if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
1415                 list_add_tail(&F2FS_I(inode)->gdirty_list,
1416                                 &sbi->inode_list[DIRTY_META]);
1417                 inc_page_count(sbi, F2FS_DIRTY_IMETA);
1418         }
1419         spin_unlock(&sbi->inode_lock[DIRTY_META]);
1420         return ret;
1421 }
1422
1423 void f2fs_inode_synced(struct inode *inode)
1424 {
1425         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1426
1427         spin_lock(&sbi->inode_lock[DIRTY_META]);
1428         if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1429                 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1430                 return;
1431         }
1432         if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
1433                 list_del_init(&F2FS_I(inode)->gdirty_list);
1434                 dec_page_count(sbi, F2FS_DIRTY_IMETA);
1435         }
1436         clear_inode_flag(inode, FI_DIRTY_INODE);
1437         clear_inode_flag(inode, FI_AUTO_RECOVER);
1438         stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
1439         spin_unlock(&sbi->inode_lock[DIRTY_META]);
1440 }
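/*
 * Illustrative pairing (sketch, not from the source): a typical metadata
 * update marks the inode dirty and write-out later clears it, e.g.
 *
 *      f2fs_inode_dirtied(inode, false);       // sets FI_DIRTY_INODE
 *      ...                                     // update and write back inode
 *      f2fs_inode_synced(inode);               // clears FI_DIRTY_INODE again
 */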
1441
1442 /*
1443  * f2fs_dirty_inode() is called from __mark_inode_dirty()
1444  *
1445  * We should record the dirty state here so the inode is written back through write_inode.
1446  */
1447 static void f2fs_dirty_inode(struct inode *inode, int flags)
1448 {
1449         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1450
1451         if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1452                         inode->i_ino == F2FS_META_INO(sbi))
1453                 return;
1454
1455         if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
1456                 clear_inode_flag(inode, FI_AUTO_RECOVER);
1457
1458         f2fs_inode_dirtied(inode, false);
1459 }
1460
1461 static void f2fs_free_inode(struct inode *inode)
1462 {
1463         fscrypt_free_inode(inode);
1464         kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
1465 }
1466
1467 static void destroy_percpu_info(struct f2fs_sb_info *sbi)
1468 {
1469         percpu_counter_destroy(&sbi->alloc_valid_block_count);
1470         percpu_counter_destroy(&sbi->total_valid_inode_count);
1471 }
1472
1473 static void destroy_device_list(struct f2fs_sb_info *sbi)
1474 {
1475         int i;
1476
1477         for (i = 0; i < sbi->s_ndevs; i++) {
1478                 blkdev_put(FDEV(i).bdev, FMODE_EXCL);
1479 #ifdef CONFIG_BLK_DEV_ZONED
1480                 kvfree(FDEV(i).blkz_seq);
1481                 kfree(FDEV(i).zone_capacity_blocks);
1482 #endif
1483         }
1484         kvfree(sbi->devs);
1485 }
1486
1487 static void f2fs_put_super(struct super_block *sb)
1488 {
1489         struct f2fs_sb_info *sbi = F2FS_SB(sb);
1490         int i;
1491         bool dropped;
1492
1493         /* unregister procfs/sysfs entries in advance to avoid a race */
1494         f2fs_unregister_sysfs(sbi);
1495
1496         f2fs_quota_off_umount(sb);
1497
1498         /* prevent remaining shrinker jobs */
1499         mutex_lock(&sbi->umount_mutex);
1500
1501         /*
1502          * Flush all issued checkpoints and stop the checkpoint issue thread.
1503          * After that, all checkpoints should be done by each process context.
1504          */
1505         f2fs_stop_ckpt_thread(sbi);
1506
1507         /*
1508          * We don't need to do a checkpoint when the superblock is clean.
1509          * But if the previous checkpoint was not done by umount, we need
1510          * to do a clean checkpoint again.
1511          */
1512         if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
1513                         !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
1514                 struct cp_control cpc = {
1515                         .reason = CP_UMOUNT,
1516                 };
1517                 f2fs_write_checkpoint(sbi, &cpc);
1518         }
1519
1520         /* be sure to wait for any on-going discard commands */
1521         dropped = f2fs_issue_discard_timeout(sbi);
1522
1523         if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
1524                                         !sbi->discard_blks && !dropped) {
1525                 struct cp_control cpc = {
1526                         .reason = CP_UMOUNT | CP_TRIMMED,
1527                 };
1528                 f2fs_write_checkpoint(sbi, &cpc);
1529         }
1530
1531         /*
1532          * Normally the superblock is clean, so we need to release ino entries here.
1533          * In addition, EIO will skip doing the checkpoint, so we need this as well.
1534          */
1535         f2fs_release_ino_entry(sbi, true);
1536
1537         f2fs_leave_shrinker(sbi);
1538         mutex_unlock(&sbi->umount_mutex);
1539
1540         /* in our cp_error case, we can wait for any writeback page */
1541         f2fs_flush_merged_writes(sbi);
1542
1543         f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
1544
1545         f2fs_bug_on(sbi, sbi->fsync_node_num);
1546
1547         f2fs_destroy_compress_inode(sbi);
1548
1549         iput(sbi->node_inode);
1550         sbi->node_inode = NULL;
1551
1552         iput(sbi->meta_inode);
1553         sbi->meta_inode = NULL;
1554
1555         /*
1556          * iput() can update stat information if f2fs_write_checkpoint()
1557          * above failed with an error.
1558          */
1559         f2fs_destroy_stats(sbi);
1560
1561         /* destroy f2fs internal modules */
1562         f2fs_destroy_node_manager(sbi);
1563         f2fs_destroy_segment_manager(sbi);
1564
1565         f2fs_destroy_post_read_wq(sbi);
1566
1567         kvfree(sbi->ckpt);
1568
1569         sb->s_fs_info = NULL;
1570         if (sbi->s_chksum_driver)
1571                 crypto_free_shash(sbi->s_chksum_driver);
1572         kfree(sbi->raw_super);
1573
1574         destroy_device_list(sbi);
1575         f2fs_destroy_page_array_cache(sbi);
1576         f2fs_destroy_xattr_caches(sbi);
1577         mempool_destroy(sbi->write_io_dummy);
1578 #ifdef CONFIG_QUOTA
1579         for (i = 0; i < MAXQUOTAS; i++)
1580                 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
1581 #endif
1582         fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
1583         destroy_percpu_info(sbi);
1584         f2fs_destroy_iostat(sbi);
1585         for (i = 0; i < NR_PAGE_TYPE; i++)
1586                 kvfree(sbi->write_io[i]);
1587 #ifdef CONFIG_UNICODE
1588         utf8_unload(sb->s_encoding);
1589 #endif
1590         kfree(sbi);
1591 }
1592
1593 int f2fs_sync_fs(struct super_block *sb, int sync)
1594 {
1595         struct f2fs_sb_info *sbi = F2FS_SB(sb);
1596         int err = 0;
1597
1598         if (unlikely(f2fs_cp_error(sbi)))
1599                 return 0;
1600         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
1601                 return 0;
1602
1603         trace_f2fs_sync_fs(sb, sync);
1604
1605         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1606                 return -EAGAIN;
1607
1608         if (sync)
1609                 err = f2fs_issue_checkpoint(sbi);
1610
1611         return err;
1612 }
1613
1614 static int f2fs_freeze(struct super_block *sb)
1615 {
1616         if (f2fs_readonly(sb))
1617                 return 0;
1618
1619         /* IO error happened before */
1620         if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
1621                 return -EIO;
1622
1623         /* must be clean, since sync_filesystem() was already called */
1624         if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
1625                 return -EINVAL;
1626
1627         /* ensure no checkpoint required */
1628         if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
1629                 return -EINVAL;
1630         return 0;
1631 }
1632
1633 static int f2fs_unfreeze(struct super_block *sb)
1634 {
1635         return 0;
1636 }
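/*
 * Illustrative trigger (assumption): these two hooks back the filesystem
 * freeze ioctls, e.g. from userspace:
 *
 *      fsfreeze -f /mnt/f2fs   # freeze_super() -> f2fs_freeze()
 *      fsfreeze -u /mnt/f2fs   # thaw_super()   -> f2fs_unfreeze()
 *
 * f2fs_freeze() only has to verify that no checkpoint work is pending,
 * since sync_filesystem() has already run by the time it is called.
 */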
1637
1638 #ifdef CONFIG_QUOTA
1639 static int f2fs_statfs_project(struct super_block *sb,
1640                                 kprojid_t projid, struct kstatfs *buf)
1641 {
1642         struct kqid qid;
1643         struct dquot *dquot;
1644         u64 limit;
1645         u64 curblock;
1646
1647         qid = make_kqid_projid(projid);
1648         dquot = dqget(sb, qid);
1649         if (IS_ERR(dquot))
1650                 return PTR_ERR(dquot);
1651         spin_lock(&dquot->dq_dqb_lock);
1652
1653         limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
1654                                         dquot->dq_dqb.dqb_bhardlimit);
1655         if (limit)
1656                 limit >>= sb->s_blocksize_bits;
1657
1658         if (limit && buf->f_blocks > limit) {
1659                 curblock = (dquot->dq_dqb.dqb_curspace +
1660                             dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
1661                 buf->f_blocks = limit;
1662                 buf->f_bfree = buf->f_bavail =
1663                         (buf->f_blocks > curblock) ?
1664                          (buf->f_blocks - curblock) : 0;
1665         }
1666
1667         limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
1668                                         dquot->dq_dqb.dqb_ihardlimit);
1669
1670         if (limit && buf->f_files > limit) {
1671                 buf->f_files = limit;
1672                 buf->f_ffree =
1673                         (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
1674                          (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
1675         }
1676
1677         spin_unlock(&dquot->dq_dqb_lock);
1678         dqput(dquot);
1679         return 0;
1680 }
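/*
 * Worked example (illustrative numbers only): with 4KiB blocks
 * (s_blocksize_bits == 12), a project block limit of 1GiB becomes
 * 1GiB >> 12 == 262144 blocks; if curspace + rsvspace totals 512MiB
 * (131072 blocks), the clamped result is f_blocks = 262144 and
 * f_bfree = f_bavail = 262144 - 131072 = 131072.
 */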
1681 #endif
1682
1683 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
1684 {
1685         struct super_block *sb = dentry->d_sb;
1686         struct f2fs_sb_info *sbi = F2FS_SB(sb);
1687         u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
1688         block_t total_count, user_block_count, start_count;
1689         u64 avail_node_count;
1690
1691         total_count = le64_to_cpu(sbi->raw_super->block_count);
1692         user_block_count = sbi->user_block_count;
1693         start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
1694         buf->f_type = F2FS_SUPER_MAGIC;
1695         buf->f_bsize = sbi->blocksize;
1696
1697         buf->f_blocks = total_count - start_count;
1698         buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
1699                                                 sbi->current_reserved_blocks;
1700
1701         spin_lock(&sbi->stat_lock);
1702         if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
1703                 buf->f_bfree = 0;
1704         else
1705                 buf->f_bfree -= sbi->unusable_block_count;
1706         spin_unlock(&sbi->stat_lock);
1707
1708         if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
1709                 buf->f_bavail = buf->f_bfree -
1710                                 F2FS_OPTION(sbi).root_reserved_blocks;
1711         else
1712                 buf->f_bavail = 0;
1713
1714         avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
1715
1716         if (avail_node_count > user_block_count) {
1717                 buf->f_files = user_block_count;
1718                 buf->f_ffree = buf->f_bavail;
1719         } else {
1720                 buf->f_files = avail_node_count;
1721                 buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
1722                                         buf->f_bavail);
1723         }
1724
1725         buf->f_namelen = F2FS_NAME_LEN;
1726         buf->f_fsid    = u64_to_fsid(id);
1727
1728 #ifdef CONFIG_QUOTA
1729         if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
1730                         sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
1731                 f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
1732         }
1733 #endif
1734         return 0;
1735 }
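/*
 * Minimal userspace sketch (assumption, for illustration) of what the
 * callback above serves:
 *
 *      struct statfs st;
 *
 *      if (statfs("/mnt/f2fs", &st) == 0)
 *              printf("%llu free of %llu blocks (%lu bytes each)\n",
 *                     (unsigned long long)st.f_bfree,
 *                     (unsigned long long)st.f_blocks,
 *                     (unsigned long)st.f_bsize);
 */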
1736
1737 static inline void f2fs_show_quota_options(struct seq_file *seq,
1738                                            struct super_block *sb)
1739 {
1740 #ifdef CONFIG_QUOTA
1741         struct f2fs_sb_info *sbi = F2FS_SB(sb);
1742
1743         if (F2FS_OPTION(sbi).s_jquota_fmt) {
1744                 char *fmtname = "";
1745
1746                 switch (F2FS_OPTION(sbi).s_jquota_fmt) {
1747                 case QFMT_VFS_OLD:
1748                         fmtname = "vfsold";
1749                         break;
1750                 case QFMT_VFS_V0:
1751                         fmtname = "vfsv0";
1752                         break;
1753                 case QFMT_VFS_V1:
1754                         fmtname = "vfsv1";
1755                         break;
1756                 }
1757                 seq_printf(seq, ",jqfmt=%s", fmtname);
1758         }
1759
1760         if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
1761                 seq_show_option(seq, "usrjquota",
1762                         F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
1763
1764         if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
1765                 seq_show_option(seq, "grpjquota",
1766                         F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
1767
1768         if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
1769                 seq_show_option(seq, "prjjquota",
1770                         F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
1771 #endif
1772 }
1773
1774 #ifdef CONFIG_F2FS_FS_COMPRESSION
1775 static inline void f2fs_show_compress_options(struct seq_file *seq,
1776                                                         struct super_block *sb)
1777 {
1778         struct f2fs_sb_info *sbi = F2FS_SB(sb);
1779         char *algtype = "";
1780         int i;
1781
1782         if (!f2fs_sb_has_compression(sbi))
1783                 return;
1784
1785         switch (F2FS_OPTION(sbi).compress_algorithm) {
1786         case COMPRESS_LZO:
1787                 algtype = "lzo";
1788                 break;
1789         case COMPRESS_LZ4:
1790                 algtype = "lz4";
1791                 break;
1792         case COMPRESS_ZSTD:
1793                 algtype = "zstd";
1794                 break;
1795         case COMPRESS_LZORLE:
1796                 algtype = "lzo-rle";
1797                 break;
1798         }
1799         seq_printf(seq, ",compress_algorithm=%s", algtype);
1800
1801         if (F2FS_OPTION(sbi).compress_level)
1802                 seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
1803
1804         seq_printf(seq, ",compress_log_size=%u",
1805                         F2FS_OPTION(sbi).compress_log_size);
1806
1807         for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
1808                 seq_printf(seq, ",compress_extension=%s",
1809                         F2FS_OPTION(sbi).extensions[i]);
1810         }
1811
1812         for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
1813                 seq_printf(seq, ",nocompress_extension=%s",
1814                         F2FS_OPTION(sbi).noextensions[i]);
1815         }
1816
1817         if (F2FS_OPTION(sbi).compress_chksum)
1818                 seq_puts(seq, ",compress_chksum");
1819
1820         if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
1821                 seq_printf(seq, ",compress_mode=%s", "fs");
1822         else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
1823                 seq_printf(seq, ",compress_mode=%s", "user");
1824
1825         if (test_opt(sbi, COMPRESS_CACHE))
1826                 seq_puts(seq, ",compress_cache");
1827 }
1828 #endif
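/*
 * Example output fragment (illustrative): with zstd at level 3 and one
 * tracked extension, the helper above would append something like
 *
 *      ,compress_algorithm=zstd:3,compress_log_size=2,compress_extension=txt
 *
 * to the options shown in /proc/mounts.
 */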
1829
1830 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
1831 {
1832         struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
1833
1834         if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
1835                 seq_printf(seq, ",background_gc=%s", "sync");
1836         else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
1837                 seq_printf(seq, ",background_gc=%s", "on");
1838         else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
1839                 seq_printf(seq, ",background_gc=%s", "off");
1840
1841         if (test_opt(sbi, GC_MERGE))
1842                 seq_puts(seq, ",gc_merge");
1843
1844         if (test_opt(sbi, DISABLE_ROLL_FORWARD))
1845                 seq_puts(seq, ",disable_roll_forward");
1846         if (test_opt(sbi, NORECOVERY))
1847                 seq_puts(seq, ",norecovery");
1848         if (test_opt(sbi, DISCARD))
1849                 seq_puts(seq, ",discard");
1850         else
1851                 seq_puts(seq, ",nodiscard");
1852         if (test_opt(sbi, NOHEAP))
1853                 seq_puts(seq, ",no_heap");
1854         else
1855                 seq_puts(seq, ",heap");
1856 #ifdef CONFIG_F2FS_FS_XATTR
1857         if (test_opt(sbi, XATTR_USER))
1858                 seq_puts(seq, ",user_xattr");
1859         else
1860                 seq_puts(seq, ",nouser_xattr");
1861         if (test_opt(sbi, INLINE_XATTR))
1862                 seq_puts(seq, ",inline_xattr");
1863         else
1864                 seq_puts(seq, ",noinline_xattr");
1865         if (test_opt(sbi, INLINE_XATTR_SIZE))
1866                 seq_printf(seq, ",inline_xattr_size=%u",
1867                                         F2FS_OPTION(sbi).inline_xattr_size);
1868 #endif
1869 #ifdef CONFIG_F2FS_FS_POSIX_ACL
1870         if (test_opt(sbi, POSIX_ACL))
1871                 seq_puts(seq, ",acl");
1872         else
1873                 seq_puts(seq, ",noacl");
1874 #endif
1875         if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
1876                 seq_puts(seq, ",disable_ext_identify");
1877         if (test_opt(sbi, INLINE_DATA))
1878                 seq_puts(seq, ",inline_data");
1879         else
1880                 seq_puts(seq, ",noinline_data");
1881         if (test_opt(sbi, INLINE_DENTRY))
1882                 seq_puts(seq, ",inline_dentry");
1883         else
1884                 seq_puts(seq, ",noinline_dentry");
1885         if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
1886                 seq_puts(seq, ",flush_merge");
1887         if (test_opt(sbi, NOBARRIER))
1888                 seq_puts(seq, ",nobarrier");
1889         if (test_opt(sbi, FASTBOOT))
1890                 seq_puts(seq, ",fastboot");
1891         if (test_opt(sbi, EXTENT_CACHE))
1892                 seq_puts(seq, ",extent_cache");
1893         else
1894                 seq_puts(seq, ",noextent_cache");
1895         if (test_opt(sbi, DATA_FLUSH))
1896                 seq_puts(seq, ",data_flush");
1897
1898         seq_puts(seq, ",mode=");
1899         if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
1900                 seq_puts(seq, "adaptive");
1901         else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
1902                 seq_puts(seq, "lfs");
1903         else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
1904                 seq_puts(seq, "fragment:segment");
1905         else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
1906                 seq_puts(seq, "fragment:block");
1907         seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
1908         if (test_opt(sbi, RESERVE_ROOT))
1909                 seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
1910                                 F2FS_OPTION(sbi).root_reserved_blocks,
1911                                 from_kuid_munged(&init_user_ns,
1912                                         F2FS_OPTION(sbi).s_resuid),
1913                                 from_kgid_munged(&init_user_ns,
1914                                         F2FS_OPTION(sbi).s_resgid));
1915         if (F2FS_IO_SIZE_BITS(sbi))
1916                 seq_printf(seq, ",io_bits=%u",
1917                                 F2FS_OPTION(sbi).write_io_size_bits);
1918 #ifdef CONFIG_F2FS_FAULT_INJECTION
1919         if (test_opt(sbi, FAULT_INJECTION)) {
1920                 seq_printf(seq, ",fault_injection=%u",
1921                                 F2FS_OPTION(sbi).fault_info.inject_rate);
1922                 seq_printf(seq, ",fault_type=%u",
1923                                 F2FS_OPTION(sbi).fault_info.inject_type);
1924         }
1925 #endif
1926 #ifdef CONFIG_QUOTA
1927         if (test_opt(sbi, QUOTA))
1928                 seq_puts(seq, ",quota");
1929         if (test_opt(sbi, USRQUOTA))
1930                 seq_puts(seq, ",usrquota");
1931         if (test_opt(sbi, GRPQUOTA))
1932                 seq_puts(seq, ",grpquota");
1933         if (test_opt(sbi, PRJQUOTA))
1934                 seq_puts(seq, ",prjquota");
1935 #endif
1936         f2fs_show_quota_options(seq, sbi->sb);
1937         if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
1938                 seq_printf(seq, ",whint_mode=%s", "user-based");
1939         else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
1940                 seq_printf(seq, ",whint_mode=%s", "fs-based");
1941
1942         fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
1943
1944         if (sbi->sb->s_flags & SB_INLINECRYPT)
1945                 seq_puts(seq, ",inlinecrypt");
1946
1947         if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
1948                 seq_printf(seq, ",alloc_mode=%s", "default");
1949         else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
1950                 seq_printf(seq, ",alloc_mode=%s", "reuse");
1951
1952         if (test_opt(sbi, DISABLE_CHECKPOINT))
1953                 seq_printf(seq, ",checkpoint=disable:%u",
1954                                 F2FS_OPTION(sbi).unusable_cap);
1955         if (test_opt(sbi, MERGE_CHECKPOINT))
1956                 seq_puts(seq, ",checkpoint_merge");
1957         else
1958                 seq_puts(seq, ",nocheckpoint_merge");
1959         if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
1960                 seq_printf(seq, ",fsync_mode=%s", "posix");
1961         else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
1962                 seq_printf(seq, ",fsync_mode=%s", "strict");
1963         else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
1964                 seq_printf(seq, ",fsync_mode=%s", "nobarrier");
1965
1966 #ifdef CONFIG_F2FS_FS_COMPRESSION
1967         f2fs_show_compress_options(seq, sbi->sb);
1968 #endif
1969
1970         if (test_opt(sbi, ATGC))
1971                 seq_puts(seq, ",atgc");
1972
1973         if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
1974                 seq_printf(seq, ",discard_unit=%s", "block");
1975         else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
1976                 seq_printf(seq, ",discard_unit=%s", "segment");
1977         else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
1978                 seq_printf(seq, ",discard_unit=%s", "section");
1979
1980         return 0;
1981 }
1982
1983 static void default_options(struct f2fs_sb_info *sbi)
1984 {
1985         /* init some FS parameters */
1986         if (f2fs_sb_has_readonly(sbi))
1987                 F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
1988         else
1989                 F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
1990
1991         F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
1992         F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
1993         F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
1994         F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
1995         F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
1996         F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
1997         F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
1998         F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
1999         F2FS_OPTION(sbi).compress_ext_cnt = 0;
2000         F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
2001         F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
2002
2003         sbi->sb->s_flags &= ~SB_INLINECRYPT;
2004
2005         set_opt(sbi, INLINE_XATTR);
2006         set_opt(sbi, INLINE_DATA);
2007         set_opt(sbi, INLINE_DENTRY);
2008         set_opt(sbi, EXTENT_CACHE);
2009         set_opt(sbi, NOHEAP);
2010         clear_opt(sbi, DISABLE_CHECKPOINT);
2011         set_opt(sbi, MERGE_CHECKPOINT);
2012         F2FS_OPTION(sbi).unusable_cap = 0;
2013         sbi->sb->s_flags |= SB_LAZYTIME;
2014         set_opt(sbi, FLUSH_MERGE);
2015         if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
2016                 set_opt(sbi, DISCARD);
2017         if (f2fs_sb_has_blkzoned(sbi)) {
2018                 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
2019                 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
2020         } else {
2021                 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
2022                 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
2023         }
2024
2025 #ifdef CONFIG_F2FS_FS_XATTR
2026         set_opt(sbi, XATTR_USER);
2027 #endif
2028 #ifdef CONFIG_F2FS_FS_POSIX_ACL
2029         set_opt(sbi, POSIX_ACL);
2030 #endif
2031
2032         f2fs_build_fault_attr(sbi, 0, 0);
2033 }
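/*
 * Net effect (illustrative, assuming a non-zoned device with discard
 * support): a plain `mount -t f2fs /dev/sdX /mnt` starts from roughly
 *
 *      background_gc=on,discard,no_heap,user_xattr,inline_xattr,acl,
 *      inline_data,inline_dentry,flush_merge,extent_cache,mode=adaptive,
 *      active_logs=6,...
 *
 * before the user's own mount options are parsed on top of it.
 */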
2034
2035 #ifdef CONFIG_QUOTA
2036 static int f2fs_enable_quotas(struct super_block *sb);
2037 #endif
2038
2039 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2040 {
2041         unsigned int s_flags = sbi->sb->s_flags;
2042         struct cp_control cpc;
2043         int err = 0;
2044         int ret;
2045         block_t unusable;
2046
2047         if (s_flags & SB_RDONLY) {
2048                 f2fs_err(sbi, "checkpoint=disable on readonly fs");
2049                 return -EINVAL;
2050         }
2051         sbi->sb->s_flags |= SB_ACTIVE;
2052
2053         f2fs_update_time(sbi, DISABLE_TIME);
2054
2055         while (!f2fs_time_over(sbi, DISABLE_TIME)) {
2056                 down_write(&sbi->gc_lock);
2057                 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
2058                 if (err == -ENODATA) {
2059                         err = 0;
2060                         break;
2061                 }
2062                 if (err && err != -EAGAIN)
2063                         break;
2064         }
2065
2066         ret = sync_filesystem(sbi->sb);
2067         if (ret || err) {
2068                 err = ret ? ret : err;
2069                 goto restore_flag;
2070         }
2071
2072         unusable = f2fs_get_unusable_blocks(sbi);
2073         if (f2fs_disable_cp_again(sbi, unusable)) {
2074                 err = -EAGAIN;
2075                 goto restore_flag;
2076         }
2077
2078         down_write(&sbi->gc_lock);
2079         cpc.reason = CP_PAUSE;
2080         set_sbi_flag(sbi, SBI_CP_DISABLED);
2081         err = f2fs_write_checkpoint(sbi, &cpc);
2082         if (err)
2083                 goto out_unlock;
2084
2085         spin_lock(&sbi->stat_lock);
2086         sbi->unusable_block_count = unusable;
2087         spin_unlock(&sbi->stat_lock);
2088
2089 out_unlock:
2090         up_write(&sbi->gc_lock);
2091 restore_flag:
2092         sbi->sb->s_flags = s_flags;     /* Restore SB_RDONLY status */
2093         return err;
2094 }
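/*
 * Illustrative trigger (assumption): this path runs for remounts such as
 *
 *      mount -o remount,checkpoint=disable:10% /mnt/f2fs
 *
 * It garbage-collects until the unusable block count fits under the
 * configured cap, writes one CP_PAUSE checkpoint, and then suspends
 * checkpointing until a later remount re-enables it.
 */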
2095
2096 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
2097 {
2098         int retry = DEFAULT_RETRY_IO_COUNT;
2099
2100         /* we should flush all the data to keep data consistency */
2101         do {
2102                 sync_inodes_sb(sbi->sb);
2103                 cond_resched();
2104                 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2105         } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
2106
2107         if (unlikely(retry < 0))
2108                 f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
2109
2110         down_write(&sbi->gc_lock);
2111         f2fs_dirty_to_prefree(sbi);
2112
2113         clear_sbi_flag(sbi, SBI_CP_DISABLED);
2114         set_sbi_flag(sbi, SBI_IS_DIRTY);
2115         up_write(&sbi->gc_lock);
2116
2117         f2fs_sync_fs(sbi->sb, 1);
2118 }
2119
2120 static int f2fs_remount(struct super_block *sb, int *flags, char *data)
2121 {
2122         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2123         struct f2fs_mount_info org_mount_opt;
2124         unsigned long old_sb_flags;
2125         int err;
2126         bool need_restart_gc = false, need_stop_gc = false;
2127         bool need_restart_ckpt = false, need_stop_ckpt = false;
2128         bool need_restart_flush = false, need_stop_flush = false;
2129         bool need_restart_discard = false, need_stop_discard = false;
2130         bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
2131         bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
2132         bool no_io_align = !F2FS_IO_ALIGNED(sbi);
2133         bool no_atgc = !test_opt(sbi, ATGC);
2134         bool no_discard = !test_opt(sbi, DISCARD);
2135         bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
2136         bool block_unit_discard = f2fs_block_unit_discard(sbi);
2137         struct discard_cmd_control *dcc;
2138 #ifdef CONFIG_QUOTA
2139         int i, j;
2140 #endif
2141
2142         /*
2143          * Save the old mount options in case we
2144          * need to restore them.
2145          */
2146         org_mount_opt = sbi->mount_opt;
2147         old_sb_flags = sb->s_flags;
2148
2149 #ifdef CONFIG_QUOTA
2150         org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
2151         for (i = 0; i < MAXQUOTAS; i++) {
2152                 if (F2FS_OPTION(sbi).s_qf_names[i]) {
2153                         org_mount_opt.s_qf_names[i] =
2154                                 kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
2155                                 GFP_KERNEL);
2156                         if (!org_mount_opt.s_qf_names[i]) {
2157                                 for (j = 0; j < i; j++)
2158                                         kfree(org_mount_opt.s_qf_names[j]);
2159                                 return -ENOMEM;
2160                         }
2161                 } else {
2162                         org_mount_opt.s_qf_names[i] = NULL;
2163                 }
2164         }
2165 #endif
2166
2167         /* recover superblocks we couldn't write due to previous RO mount */
2168         if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
2169                 err = f2fs_commit_super(sbi, false);
2170                 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
2171                           err);
2172                 if (!err)
2173                         clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2174         }
2175
2176         default_options(sbi);
2177
2178         /* parse mount options */
2179         err = parse_options(sb, data, true);
2180         if (err)
2181                 goto restore_opts;
2182
2183         /*
2184          * Both the previous and new state of the filesystem are RO,
2185          * so skip checking GC and FLUSH_MERGE conditions.
2186          */
2187         if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
2188                 goto skip;
2189
2190         if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
2191                 err = -EROFS;
2192                 goto restore_opts;
2193         }
2194
2195 #ifdef CONFIG_QUOTA
2196         if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
2197                 err = dquot_suspend(sb, -1);
2198                 if (err < 0)
2199                         goto restore_opts;
2200         } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
2201                 /* dquot_resume needs RW */
2202                 sb->s_flags &= ~SB_RDONLY;
2203                 if (sb_any_quota_suspended(sb)) {
2204                         dquot_resume(sb, -1);
2205                 } else if (f2fs_sb_has_quota_ino(sbi)) {
2206                         err = f2fs_enable_quotas(sb);
2207                         if (err)
2208                                 goto restore_opts;
2209                 }
2210         }
2211 #endif
2212         /* disallow enabling atgc dynamically */
2213         if (no_atgc == !!test_opt(sbi, ATGC)) {
2214                 err = -EINVAL;
2215                 f2fs_warn(sbi, "switch atgc option is not allowed");
2216                 goto restore_opts;
2217         }
2218
2219         /* disallow enabling/disabling extent_cache dynamically */
2220         if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
2221                 err = -EINVAL;
2222                 f2fs_warn(sbi, "switch extent_cache option is not allowed");
2223                 goto restore_opts;
2224         }
2225
2226         if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
2227                 err = -EINVAL;
2228                 f2fs_warn(sbi, "switch io_bits option is not allowed");
2229                 goto restore_opts;
2230         }
2231
2232         if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
2233                 err = -EINVAL;
2234                 f2fs_warn(sbi, "switch compress_cache option is not allowed");
2235                 goto restore_opts;
2236         }
2237
2238         if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
2239                 err = -EINVAL;
2240                 f2fs_warn(sbi, "switch discard_unit option is not allowed");
2241                 goto restore_opts;
2242         }
2243
2244         if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
2245                 err = -EINVAL;
2246                 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
2247                 goto restore_opts;
2248         }
2249
2250         /*
2251          * We stop the GC thread if FS is mounted as RO
2252          * or if background_gc=off is passed in the mount
2253          * options. Also sync the filesystem.
2254          */
2255         if ((*flags & SB_RDONLY) ||
2256                         (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
2257                         !test_opt(sbi, GC_MERGE))) {
2258                 if (sbi->gc_thread) {
2259                         f2fs_stop_gc_thread(sbi);
2260                         need_restart_gc = true;
2261                 }
2262         } else if (!sbi->gc_thread) {
2263                 err = f2fs_start_gc_thread(sbi);
2264                 if (err)
2265                         goto restore_opts;
2266                 need_stop_gc = true;
2267         }
2268
2269         if (*flags & SB_RDONLY ||
2270                 F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
2271                 sync_inodes_sb(sb);
2272
2273                 set_sbi_flag(sbi, SBI_IS_DIRTY);
2274                 set_sbi_flag(sbi, SBI_IS_CLOSE);
2275                 f2fs_sync_fs(sb, 1);
2276                 clear_sbi_flag(sbi, SBI_IS_CLOSE);
2277         }
2278
2279         if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
2280                         !test_opt(sbi, MERGE_CHECKPOINT)) {
2281                 f2fs_stop_ckpt_thread(sbi);
2282                 need_restart_ckpt = true;
2283         } else {
2284                 err = f2fs_start_ckpt_thread(sbi);
2285                 if (err) {
2286                         f2fs_err(sbi,
2287                             "Failed to start F2FS issue_checkpoint_thread (%d)",
2288                             err);
2289                         goto restore_gc;
2290                 }
2291                 need_stop_ckpt = true;
2292         }
2293
2294         /*
2295          * We stop the issue-flush thread if FS is mounted as RO
2296          * or if flush_merge is not passed in the mount options.
2297          */
2298         if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
2299                 clear_opt(sbi, FLUSH_MERGE);
2300                 f2fs_destroy_flush_cmd_control(sbi, false);
2301                 need_restart_flush = true;
2302         } else {
2303                 err = f2fs_create_flush_cmd_control(sbi);
2304                 if (err)
2305                         goto restore_ckpt;
2306                 need_stop_flush = true;
2307         }
2308
2309         if (no_discard == !!test_opt(sbi, DISCARD)) {
2310                 if (test_opt(sbi, DISCARD)) {
2311                         err = f2fs_start_discard_thread(sbi);
2312                         if (err)
2313                                 goto restore_flush;
2314                         need_stop_discard = true;
2315                 } else {
2316                         dcc = SM_I(sbi)->dcc_info;
2317                         f2fs_stop_discard_thread(sbi);
2318                         if (atomic_read(&dcc->discard_cmd_cnt))
2319                                 f2fs_issue_discard_timeout(sbi);
2320                         need_restart_discard = true;
2321                 }
2322         }
2323
2324         if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
2325                 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2326                         err = f2fs_disable_checkpoint(sbi);
2327                         if (err)
2328                                 goto restore_discard;
2329                 } else {
2330                         f2fs_enable_checkpoint(sbi);
2331                 }
2332         }
2333
2334 skip:
2335 #ifdef CONFIG_QUOTA
2336         /* Release old quota file names */
2337         for (i = 0; i < MAXQUOTAS; i++)
2338                 kfree(org_mount_opt.s_qf_names[i]);
2339 #endif
2340         /* Update the POSIXACL Flag */
2341         sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
2342                 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
2343
2344         limit_reserve_root(sbi);
2345         adjust_unusable_cap_perc(sbi);
2346         *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
2347         return 0;
2348 restore_discard:
2349         if (need_restart_discard) {
2350                 if (f2fs_start_discard_thread(sbi))
2351                         f2fs_warn(sbi, "discard has been stopped");
2352         } else if (need_stop_discard) {
2353                 f2fs_stop_discard_thread(sbi);
2354         }
2355 restore_flush:
2356         if (need_restart_flush) {
2357                 if (f2fs_create_flush_cmd_control(sbi))
2358                         f2fs_warn(sbi, "background flush thread has stopped");
2359         } else if (need_stop_flush) {
2360                 clear_opt(sbi, FLUSH_MERGE);
2361                 f2fs_destroy_flush_cmd_control(sbi, false);
2362         }
2363 restore_ckpt:
2364         if (need_restart_ckpt) {
2365                 if (f2fs_start_ckpt_thread(sbi))
2366                         f2fs_warn(sbi, "background ckpt thread has stopped");
2367         } else if (need_stop_ckpt) {
2368                 f2fs_stop_ckpt_thread(sbi);
2369         }
2370 restore_gc:
2371         if (need_restart_gc) {
2372                 if (f2fs_start_gc_thread(sbi))
2373                         f2fs_warn(sbi, "background gc thread has stopped");
2374         } else if (need_stop_gc) {
2375                 f2fs_stop_gc_thread(sbi);
2376         }
2377 restore_opts:
2378 #ifdef CONFIG_QUOTA
2379         F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
2380         for (i = 0; i < MAXQUOTAS; i++) {
2381                 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
2382                 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
2383         }
2384 #endif
2385         sbi->mount_opt = org_mount_opt;
2386         sb->s_flags = old_sb_flags;
2387         return err;
2388 }
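/*
 * Illustrative consequences of the remount checks above (not from the
 * source):
 *
 *      mount -o remount,rw /mnt/f2fs              # ok, unless RO feature set
 *      mount -o remount,noextent_cache /mnt/f2fs  # -EINVAL: cannot toggle
 *      mount -o remount,atgc /mnt/f2fs            # -EINVAL: cannot toggle
 */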
2389
2390 #ifdef CONFIG_QUOTA
2391 /* Read data from quotafile */
2392 static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
2393                                size_t len, loff_t off)
2394 {
2395         struct inode *inode = sb_dqopt(sb)->files[type];
2396         struct address_space *mapping = inode->i_mapping;
2397         block_t blkidx = F2FS_BYTES_TO_BLK(off);
2398         int offset = off & (sb->s_blocksize - 1);
2399         int tocopy;
2400         size_t toread;
2401         loff_t i_size = i_size_read(inode);
2402         struct page *page;
2403         char *kaddr;
2404
2405         if (off > i_size)
2406                 return 0;
2407
2408         if (off + len > i_size)
2409                 len = i_size - off;
2410         toread = len;
2411         while (toread > 0) {
2412                 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
2413 repeat:
2414                 page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
2415                 if (IS_ERR(page)) {
2416                         if (PTR_ERR(page) == -ENOMEM) {
2417                                 congestion_wait(BLK_RW_ASYNC,
2418                                                 DEFAULT_IO_TIMEOUT);
2419                                 goto repeat;
2420                         }
2421                         set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2422                         return PTR_ERR(page);
2423                 }
2424
2425                 lock_page(page);
2426
2427                 if (unlikely(page->mapping != mapping)) {
2428                         f2fs_put_page(page, 1);
2429                         goto repeat;
2430                 }
2431                 if (unlikely(!PageUptodate(page))) {
2432                         f2fs_put_page(page, 1);
2433                         set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2434                         return -EIO;
2435                 }
2436
2437                 kaddr = kmap_atomic(page);
2438                 memcpy(data, kaddr + offset, tocopy);
2439                 kunmap_atomic(kaddr);
2440                 f2fs_put_page(page, 1);
2441
2442                 offset = 0;
2443                 toread -= tocopy;
2444                 data += tocopy;
2445                 blkidx++;
2446         }
2447         return len;
2448 }
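/*
 * Worked example (illustrative): with 4KiB blocks, a quota read of
 * len = 6000 at off = 5000 starts at blkidx = 5000 >> 12 = 1 with
 * offset = 5000 & 4095 = 904; the first iteration copies
 * tocopy = 4096 - 904 = 3192 bytes, the second copies the remaining
 * 2808 bytes from block 2.
 */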
2449
2450 /* Write to quotafile */
2451 static ssize_t f2fs_quota_write(struct super_block *sb, int type,
2452                                 const char *data, size_t len, loff_t off)
2453 {
2454         struct inode *inode = sb_dqopt(sb)->files[type];
2455         struct address_space *mapping = inode->i_mapping;
2456         const struct address_space_operations *a_ops = mapping->a_ops;
2457         int offset = off & (sb->s_blocksize - 1);
2458         size_t towrite = len;
2459         struct page *page;
2460         void *fsdata = NULL;
2461         char *kaddr;
2462         int err = 0;
2463         int tocopy;
2464
2465         while (towrite > 0) {
2466                 tocopy = min_t(unsigned long, sb->s_blocksize - offset,
2467                                                                 towrite);
2468 retry:
2469                 err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
2470                                                         &page, &fsdata);
2471                 if (unlikely(err)) {
2472                         if (err == -ENOMEM) {
2473                                 congestion_wait(BLK_RW_ASYNC,
2474                                                 DEFAULT_IO_TIMEOUT);
2475                                 goto retry;
2476                         }
2477                         set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2478                         break;
2479                 }
2480
2481                 kaddr = kmap_atomic(page);
2482                 memcpy(kaddr + offset, data, tocopy);
2483                 kunmap_atomic(kaddr);
2484                 flush_dcache_page(page);
2485
2486                 a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
2487                                                 page, fsdata);
2488                 offset = 0;
2489                 towrite -= tocopy;
2490                 off += tocopy;
2491                 data += tocopy;
2492                 cond_resched();
2493         }
2494
2495         if (len == towrite)
2496                 return err;
2497         inode->i_mtime = inode->i_ctime = current_time(inode);
2498         f2fs_mark_inode_dirty_sync(inode, false);
2499         return len - towrite;
2500 }
2501
2502 static struct dquot **f2fs_get_dquots(struct inode *inode)
2503 {
2504         return F2FS_I(inode)->i_dquot;
2505 }
2506
2507 static qsize_t *f2fs_get_reserved_space(struct inode *inode)
2508 {
2509         return &F2FS_I(inode)->i_reserved_quota;
2510 }
2511
2512 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
2513 {
2514         if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
2515                 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
2516                 return 0;
2517         }
2518
2519         return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
2520                                         F2FS_OPTION(sbi).s_jquota_fmt, type);
2521 }
2522
2523 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
2524 {
2525         int enabled = 0;
2526         int i, err;
2527
2528         if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
2529                 err = f2fs_enable_quotas(sbi->sb);
2530                 if (err) {
2531                         f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
2532                         return 0;
2533                 }
2534                 return 1;
2535         }
2536
2537         for (i = 0; i < MAXQUOTAS; i++) {
2538                 if (F2FS_OPTION(sbi).s_qf_names[i]) {
2539                         err = f2fs_quota_on_mount(sbi, i);
2540                         if (!err) {
2541                                 enabled = 1;
2542                                 continue;
2543                         }
2544                         f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
2545                                  err, i);
2546                 }
2547         }
2548         return enabled;
2549 }
2550
2551 static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
2552                              unsigned int flags)
2553 {
2554         struct inode *qf_inode;
2555         unsigned long qf_inum;
2556         int err;
2557
2558         BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
2559
2560         qf_inum = f2fs_qf_ino(sb, type);
2561         if (!qf_inum)
2562                 return -EPERM;
2563
2564         qf_inode = f2fs_iget(sb, qf_inum);
2565         if (IS_ERR(qf_inode)) {
2566                 f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
2567                 return PTR_ERR(qf_inode);
2568         }
2569
2570         /* Don't account quota for quota files to avoid recursion */
2571         qf_inode->i_flags |= S_NOQUOTA;
2572         err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
2573         iput(qf_inode);
2574         return err;
2575 }
2576
2577 static int f2fs_enable_quotas(struct super_block *sb)
2578 {
2579         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2580         int type, err = 0;
2581         unsigned long qf_inum;
2582         bool quota_mopt[MAXQUOTAS] = {
2583                 test_opt(sbi, USRQUOTA),
2584                 test_opt(sbi, GRPQUOTA),
2585                 test_opt(sbi, PRJQUOTA),
2586         };
2587
2588         if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
2589                 f2fs_err(sbi, "quota file may be corrupted, skip loading it");
2590                 return 0;
2591         }
2592
2593         sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
2594
2595         for (type = 0; type < MAXQUOTAS; type++) {
2596                 qf_inum = f2fs_qf_ino(sb, type);
2597                 if (qf_inum) {
2598                         err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
2599                                 DQUOT_USAGE_ENABLED |
2600                                 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
2601                         if (err) {
2602                                 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
2603                                          type, err);
2604                                 for (type--; type >= 0; type--)
2605                                         dquot_quota_off(sb, type);
2606                                 set_sbi_flag(F2FS_SB(sb),
2607                                                 SBI_QUOTA_NEED_REPAIR);
2608                                 return err;
2609                         }
2610                 }
2611         }
2612         return 0;
2613 }
2614
2615 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
2616 {
2617         struct quota_info *dqopt = sb_dqopt(sbi->sb);
2618         struct address_space *mapping = dqopt->files[type]->i_mapping;
2619         int ret = 0;
2620
2621         ret = dquot_writeback_dquots(sbi->sb, type);
2622         if (ret)
2623                 goto out;
2624
2625         ret = filemap_fdatawrite(mapping);
2626         if (ret)
2627                 goto out;
2628
2629         /* if we are using journalled quota */
2630         if (is_journalled_quota(sbi))
2631                 goto out;
2632
2633         ret = filemap_fdatawait(mapping);
2634
2635         truncate_inode_pages(&dqopt->files[type]->i_data, 0);
2636 out:
2637         if (ret)
2638                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2639         return ret;
2640 }
2641
2642 int f2fs_quota_sync(struct super_block *sb, int type)
2643 {
2644         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2645         struct quota_info *dqopt = sb_dqopt(sb);
2646         int cnt;
2647         int ret = 0;
2648
2649         /*
2650          * Now that everything is written, we can discard the pagecache so
2651          * that userspace sees the changes.
2652          */
2653         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2654
2655                 if (type != -1 && cnt != type)
2656                         continue;
2657
2658                 if (!sb_has_quota_active(sb, cnt))
2659                         continue;
2660
2661                 inode_lock(dqopt->files[cnt]);
2662
2663                 /*
2664                  * do_quotactl
2665                  *  f2fs_quota_sync
2666                  *  down_read(quota_sem)
2667                  *  dquot_writeback_dquots()
2668                  *  f2fs_dquot_commit
2669                  *                            block_operation
2670                  *                            down_read(quota_sem)
2671                  */
2672                 f2fs_lock_op(sbi);
2673                 down_read(&sbi->quota_sem);
2674
2675                 ret = f2fs_quota_sync_file(sbi, cnt);
2676
2677                 up_read(&sbi->quota_sem);
2678                 f2fs_unlock_op(sbi);
2679
2680                 inode_unlock(dqopt->files[cnt]);
2681
2682                 if (ret)
2683                         break;
2684         }
2685         return ret;
2686 }
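/*
 * Userspace trigger sketch (assumption): Q_SYNC is one way to reach the
 * function above, e.g.
 *
 *      quotactl(QCMD(Q_SYNC, USRQUOTA), "/dev/sdX", 0, NULL);
 *
 * Taking f2fs_lock_op() before quota_sem mirrors the checkpoint path and
 * avoids the AB-BA deadlock sketched in the comment inside the loop.
 */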
2687
2688 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2689                                                         const struct path *path)
2690 {
2691         struct inode *inode;
2692         int err;
2693
2694         /* if quota sysfile exists, deny enabling quota with specific file */
2695         if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2696                 f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2697                 return -EBUSY;
2698         }
2699
2700         err = f2fs_quota_sync(sb, type);
2701         if (err)
2702                 return err;
2703
2704         err = dquot_quota_on(sb, type, format_id, path);
2705         if (err)
2706                 return err;
2707
2708         inode = d_inode(path->dentry);
2709
2710         inode_lock(inode);
2711         F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
2712         f2fs_set_inode_flags(inode);
2713         inode_unlock(inode);
2714         f2fs_mark_inode_dirty_sync(inode, false);
2715
2716         return 0;
2717 }
2718
2719 static int __f2fs_quota_off(struct super_block *sb, int type)
2720 {
2721         struct inode *inode = sb_dqopt(sb)->files[type];
2722         int err;
2723
2724         if (!inode || !igrab(inode))
2725                 return dquot_quota_off(sb, type);
2726
2727         err = f2fs_quota_sync(sb, type);
2728         if (err)
2729                 goto out_put;
2730
2731         err = dquot_quota_off(sb, type);
2732         if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
2733                 goto out_put;
2734
2735         inode_lock(inode);
2736         F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
2737         f2fs_set_inode_flags(inode);
2738         inode_unlock(inode);
2739         f2fs_mark_inode_dirty_sync(inode, false);
2740 out_put:
2741         iput(inode);
2742         return err;
2743 }
2744
2745 static int f2fs_quota_off(struct super_block *sb, int type)
2746 {
2747         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2748         int err;
2749
2750         err = __f2fs_quota_off(sb, type);
2751
2752         /*
2753          * quotactl can shut down journalled quota, resulting in inconsistency
2754          * between quota records and fs data due to subsequent updates; set the
2755          * flag to let fsck be aware of it.
2756          */
2757         if (is_journalled_quota(sbi))
2758                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2759         return err;
2760 }
2761
2762 void f2fs_quota_off_umount(struct super_block *sb)
2763 {
2764         int type;
2765         int err;
2766
2767         for (type = 0; type < MAXQUOTAS; type++) {
2768                 err = __f2fs_quota_off(sb, type);
2769                 if (err) {
2770                         int ret = dquot_quota_off(sb, type);
2771
2772                         f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2773                                  type, err, ret);
2774                         set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2775                 }
2776         }
2777         /*
2778          * In case of checkpoint=disable, we must flush quota blocks here;
2779          * otherwise a later end_io would dereference node_inode, which
2780          * put_super has already dropped, and hit a NULL pointer.
2781          */
2782         sync_filesystem(sb);
2783 }
2784
2785 static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2786 {
2787         struct quota_info *dqopt = sb_dqopt(sb);
2788         int type;
2789
2790         for (type = 0; type < MAXQUOTAS; type++) {
2791                 if (!dqopt->files[type])
2792                         continue;
2793                 f2fs_inode_synced(dqopt->files[type]);
2794         }
2795 }
2796
2797 static int f2fs_dquot_commit(struct dquot *dquot)
2798 {
2799         struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2800         int ret;
2801
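             /*
              * dquot_writeback_dquots() can call into here while
              * f2fs_quota_sync() already holds quota_sem for read (see the
              * call graph there), so annotate the nested read acquisition
              * for lockdep.
              */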
2802         down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
2803         ret = dquot_commit(dquot);
2804         if (ret < 0)
2805                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2806         up_read(&sbi->quota_sem);
2807         return ret;
2808 }
2809
2810 static int f2fs_dquot_acquire(struct dquot *dquot)
2811 {
2812         struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2813         int ret;
2814
2815         down_read(&sbi->quota_sem);
2816         ret = dquot_acquire(dquot);
2817         if (ret < 0)
2818                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2819         up_read(&sbi->quota_sem);
2820         return ret;
2821 }
2822
2823 static int f2fs_dquot_release(struct dquot *dquot)
2824 {
2825         struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2826         int ret = dquot_release(dquot);
2827
2828         if (ret < 0)
2829                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2830         return ret;
2831 }
2832
2833 static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
2834 {
2835         struct super_block *sb = dquot->dq_sb;
2836         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2837         int ret = dquot_mark_dquot_dirty(dquot);
2838
2839         /* if we are using journalled quota, ask checkpoint to flush it */
2840         if (is_journalled_quota(sbi))
2841                 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
2842
2843         return ret;
2844 }
2845
2846 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
2847 {
2848         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2849         int ret = dquot_commit_info(sb, type);
2850
2851         if (ret < 0)
2852                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2853         return ret;
2854 }
2855
2856 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
2857 {
2858         *projid = F2FS_I(inode)->i_projid;
2859         return 0;
2860 }
2861
2862 static const struct dquot_operations f2fs_quota_operations = {
2863         .get_reserved_space = f2fs_get_reserved_space,
2864         .write_dquot    = f2fs_dquot_commit,
2865         .acquire_dquot  = f2fs_dquot_acquire,
2866         .release_dquot  = f2fs_dquot_release,
2867         .mark_dirty     = f2fs_dquot_mark_dquot_dirty,
2868         .write_info     = f2fs_dquot_commit_info,
2869         .alloc_dquot    = dquot_alloc,
2870         .destroy_dquot  = dquot_destroy,
2871         .get_projid     = f2fs_get_projid,
2872         .get_next_id    = dquot_get_next_id,
2873 };
2874
2875 static const struct quotactl_ops f2fs_quotactl_ops = {
2876         .quota_on       = f2fs_quota_on,
2877         .quota_off      = f2fs_quota_off,
2878         .quota_sync     = f2fs_quota_sync,
2879         .get_state      = dquot_get_state,
2880         .set_info       = dquot_set_dqinfo,
2881         .get_dqblk      = dquot_get_dqblk,
2882         .set_dqblk      = dquot_set_dqblk,
2883         .get_nextdqblk  = dquot_get_next_dqblk,
2884 };
2885 #else
2886 int f2fs_quota_sync(struct super_block *sb, int type)
2887 {
2888         return 0;
2889 }
2890
2891 void f2fs_quota_off_umount(struct super_block *sb)
2892 {
2893 }
2894 #endif
2895
2896 static const struct super_operations f2fs_sops = {
2897         .alloc_inode    = f2fs_alloc_inode,
2898         .free_inode     = f2fs_free_inode,
2899         .drop_inode     = f2fs_drop_inode,
2900         .write_inode    = f2fs_write_inode,
2901         .dirty_inode    = f2fs_dirty_inode,
2902         .show_options   = f2fs_show_options,
2903 #ifdef CONFIG_QUOTA
2904         .quota_read     = f2fs_quota_read,
2905         .quota_write    = f2fs_quota_write,
2906         .get_dquots     = f2fs_get_dquots,
2907 #endif
2908         .evict_inode    = f2fs_evict_inode,
2909         .put_super      = f2fs_put_super,
2910         .sync_fs        = f2fs_sync_fs,
2911         .freeze_fs      = f2fs_freeze,
2912         .unfreeze_fs    = f2fs_unfreeze,
2913         .statfs         = f2fs_statfs,
2914         .remount_fs     = f2fs_remount,
2915 };
2916
2917 #ifdef CONFIG_FS_ENCRYPTION
2918 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
2919 {
2920         return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2921                                 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2922                                 ctx, len, NULL);
2923 }
2924
2925 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
2926                                                         void *fs_data)
2927 {
2928         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2929
2930         /*
2931          * Encrypting the root directory is not allowed because fsck
2932          * expects the lost+found directory to exist and remain
2933          * unencrypted if the LOST_FOUND feature is enabled.
2934          */
2936         if (f2fs_sb_has_lost_found(sbi) &&
2937                         inode->i_ino == F2FS_ROOT_INO(sbi))
2938                 return -EPERM;
2939
2940         return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2941                                 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2942                                 ctx, len, fs_data, XATTR_CREATE);
2943 }
2944
2945 static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
2946 {
2947         return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
2948 }
2949
2950 static bool f2fs_has_stable_inodes(struct super_block *sb)
2951 {
2952         return true;
2953 }
2954
2955 static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
2956                                        int *ino_bits_ret, int *lblk_bits_ret)
2957 {
2958         *ino_bits_ret = 8 * sizeof(nid_t);
2959         *lblk_bits_ret = 8 * sizeof(block_t);
2960 }
2961
2962 static int f2fs_get_num_devices(struct super_block *sb)
2963 {
2964         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2965
2966         if (f2fs_is_multi_device(sbi))
2967                 return sbi->s_ndevs;
2968         return 1;
2969 }
2970
2971 static void f2fs_get_devices(struct super_block *sb,
2972                              struct request_queue **devs)
2973 {
2974         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2975         int i;
2976
2977         for (i = 0; i < sbi->s_ndevs; i++)
2978                 devs[i] = bdev_get_queue(FDEV(i).bdev);
2979 }
2980
2981 static const struct fscrypt_operations f2fs_cryptops = {
2982         .key_prefix             = "f2fs:",
2983         .get_context            = f2fs_get_context,
2984         .set_context            = f2fs_set_context,
2985         .get_dummy_policy       = f2fs_get_dummy_policy,
2986         .empty_dir              = f2fs_empty_dir,
2987         .max_namelen            = F2FS_NAME_LEN,
2988         .has_stable_inodes      = f2fs_has_stable_inodes,
2989         .get_ino_and_lblk_bits  = f2fs_get_ino_and_lblk_bits,
2990         .get_num_devices        = f2fs_get_num_devices,
2991         .get_devices            = f2fs_get_devices,
2992 };
2993 #endif
2994
2995 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
2996                 u64 ino, u32 generation)
2997 {
2998         struct f2fs_sb_info *sbi = F2FS_SB(sb);
2999         struct inode *inode;
3000
3001         if (f2fs_check_nid_range(sbi, ino))
3002                 return ERR_PTR(-ESTALE);
3003
3004         /*
3005          * f2fs_iget isn't quite right if the inode is currently unallocated!
3006          * However f2fs_iget currently does appropriate checks to handle stale
3007          * inodes so everything is OK.
3008          */
3009         inode = f2fs_iget(sb, ino);
3010         if (IS_ERR(inode))
3011                 return ERR_CAST(inode);
3012         if (unlikely(generation && inode->i_generation != generation)) {
3013                 /* we didn't find the right inode.. */
3014                 iput(inode);
3015                 return ERR_PTR(-ESTALE);
3016         }
3017         return inode;
3018 }
3019
3020 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
3021                 int fh_len, int fh_type)
3022 {
3023         return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
3024                                     f2fs_nfs_get_inode);
3025 }
3026
3027 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
3028                 int fh_len, int fh_type)
3029 {
3030         return generic_fh_to_parent(sb, fid, fh_len, fh_type,
3031                                     f2fs_nfs_get_inode);
3032 }
3033
3034 static const struct export_operations f2fs_export_ops = {
3035         .fh_to_dentry = f2fs_fh_to_dentry,
3036         .fh_to_parent = f2fs_fh_to_parent,
3037         .get_parent = f2fs_get_parent,
3038 };
3039
3040 loff_t max_file_blocks(struct inode *inode)
3041 {
3042         loff_t result = 0;
3043         loff_t leaf_count;
3044
3045         /*
3046          * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
3047          * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
3048          * space in inode.i_addr, so it is safer to leave result at zero.
3049          */
3051
3052         if (inode && f2fs_compressed_file(inode))
3053                 leaf_count = ADDRS_PER_BLOCK(inode);
3054         else
3055                 leaf_count = DEF_ADDRS_PER_BLOCK;
3056
3057         /* two direct node blocks */
3058         result += (leaf_count * 2);
3059
3060         /* two indirect node blocks */
3061         leaf_count *= NIDS_PER_BLOCK;
3062         result += (leaf_count * 2);
3063
3064         /* one double indirect node block */
3065         leaf_count *= NIDS_PER_BLOCK;
3066         result += leaf_count;
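             /* i.e. the initial leaf_count * (2 + 2 * NIDS_PER_BLOCK +
              * NIDS_PER_BLOCK^2) */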
3067
3068         return result;
3069 }
3070
3071 static int __f2fs_commit_super(struct buffer_head *bh,
3072                         struct f2fs_super_block *super)
3073 {
3074         lock_buffer(bh);
3075         if (super)
3076                 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
3077         set_buffer_dirty(bh);
3078         unlock_buffer(bh);
3079
3080         /* it's a rare case, so we can always use FUA */
3081         return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
3082 }
3083
3084 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
3085                                         struct buffer_head *bh)
3086 {
3087         struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3088                                         (bh->b_data + F2FS_SUPER_OFFSET);
3089         struct super_block *sb = sbi->sb;
3090         u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3091         u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
3092         u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
3093         u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
3094         u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3095         u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3096         u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
3097         u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
3098         u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
3099         u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
3100         u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3101         u32 segment_count = le32_to_cpu(raw_super->segment_count);
3102         u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3103         u64 main_end_blkaddr = main_blkaddr +
3104                                 (segment_count_main << log_blocks_per_seg);
3105         u64 seg_end_blkaddr = segment0_blkaddr +
3106                                 (segment_count << log_blocks_per_seg);
3107
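             /*
              * The metadata areas must be laid out contiguously in the
              * order CP -> SIT -> NAT -> SSA -> MAIN, with each area
              * starting exactly where the previous one ends.
              */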
3108         if (segment0_blkaddr != cp_blkaddr) {
3109                 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
3110                           segment0_blkaddr, cp_blkaddr);
3111                 return true;
3112         }
3113
3114         if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
3115                                                         sit_blkaddr) {
3116                 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
3117                           cp_blkaddr, sit_blkaddr,
3118                           segment_count_ckpt << log_blocks_per_seg);
3119                 return true;
3120         }
3121
3122         if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
3123                                                         nat_blkaddr) {
3124                 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
3125                           sit_blkaddr, nat_blkaddr,
3126                           segment_count_sit << log_blocks_per_seg);
3127                 return true;
3128         }
3129
3130         if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
3131                                                         ssa_blkaddr) {
3132                 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
3133                           nat_blkaddr, ssa_blkaddr,
3134                           segment_count_nat << log_blocks_per_seg);
3135                 return true;
3136         }
3137
3138         if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
3139                                                         main_blkaddr) {
3140                 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
3141                           ssa_blkaddr, main_blkaddr,
3142                           segment_count_ssa << log_blocks_per_seg);
3143                 return true;
3144         }
3145
3146         if (main_end_blkaddr > seg_end_blkaddr) {
3147                 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
3148                           main_blkaddr, seg_end_blkaddr,
3149                           segment_count_main << log_blocks_per_seg);
3150                 return true;
3151         } else if (main_end_blkaddr < seg_end_blkaddr) {
3152                 int err = 0;
3153                 char *res;
3154
3155                 /* fix in-memory information all the time */
3156                 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
3157                                 segment0_blkaddr) >> log_blocks_per_seg);
3158
3159                 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
3160                         set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3161                         res = "internally";
3162                 } else {
3163                         err = __f2fs_commit_super(bh, NULL);
3164                         res = err ? "failed" : "done";
3165                 }
3166                 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
3167                           res, main_blkaddr, seg_end_blkaddr,
3168                           segment_count_main << log_blocks_per_seg);
3169                 if (err)
3170                         return true;
3171         }
3172         return false;
3173 }
3174
3175 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
3176                                 struct buffer_head *bh)
3177 {
3178         block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
3179         block_t total_sections, blocks_per_seg;
3180         struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3181                                         (bh->b_data + F2FS_SUPER_OFFSET);
3182         size_t crc_offset = 0;
3183         __u32 crc = 0;
3184
3185         if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
3186                 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
3187                           F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
3188                 return -EINVAL;
3189         }
3190
3191         /* Check checksum_offset and crc in superblock */
3192         if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
3193                 crc_offset = le32_to_cpu(raw_super->checksum_offset);
3194                 if (crc_offset !=
3195                         offsetof(struct f2fs_super_block, crc)) {
3196                         f2fs_info(sbi, "Invalid SB checksum offset: %zu",
3197                                   crc_offset);
3198                         return -EFSCORRUPTED;
3199                 }
3200                 crc = le32_to_cpu(raw_super->crc);
3201                 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
3202                         f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
3203                         return -EFSCORRUPTED;
3204                 }
3205         }
3206
3207         /* Currently, support only 4KB block size */
3208         if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
3209                 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
3210                           le32_to_cpu(raw_super->log_blocksize),
3211                           F2FS_BLKSIZE_BITS);
3212                 return -EFSCORRUPTED;
3213         }
3214
3215         /* check log blocks per segment */
3216         if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
3217                 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
3218                           le32_to_cpu(raw_super->log_blocks_per_seg));
3219                 return -EFSCORRUPTED;
3220         }
3221
3222         /* Currently, support 512/1024/2048/4096-byte sector sizes */
3223         if (le32_to_cpu(raw_super->log_sectorsize) >
3224                                 F2FS_MAX_LOG_SECTOR_SIZE ||
3225                 le32_to_cpu(raw_super->log_sectorsize) <
3226                                 F2FS_MIN_LOG_SECTOR_SIZE) {
3227                 f2fs_info(sbi, "Invalid log sectorsize (%u)",
3228                           le32_to_cpu(raw_super->log_sectorsize));
3229                 return -EFSCORRUPTED;
3230         }
3231         if (le32_to_cpu(raw_super->log_sectors_per_block) +
3232                 le32_to_cpu(raw_super->log_sectorsize) !=
3233                         F2FS_MAX_LOG_SECTOR_SIZE) {
3234                 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
3235                           le32_to_cpu(raw_super->log_sectors_per_block),
3236                           le32_to_cpu(raw_super->log_sectorsize));
3237                 return -EFSCORRUPTED;
3238         }
3239
3240         segment_count = le32_to_cpu(raw_super->segment_count);
3241         segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3242         segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3243         secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3244         total_sections = le32_to_cpu(raw_super->section_count);
3245
3246         /* blocks_per_seg should be 512, given the above check */
3247         blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
3248
3249         if (segment_count > F2FS_MAX_SEGMENT ||
3250                                 segment_count < F2FS_MIN_SEGMENTS) {
3251                 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
3252                 return -EFSCORRUPTED;
3253         }
3254
3255         if (total_sections > segment_count_main || total_sections < 1 ||
3256                         segs_per_sec > segment_count || !segs_per_sec) {
3257                 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
3258                           segment_count, total_sections, segs_per_sec);
3259                 return -EFSCORRUPTED;
3260         }
3261
3262         if (segment_count_main != total_sections * segs_per_sec) {
3263                 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
3264                           segment_count_main, total_sections, segs_per_sec);
3265                 return -EFSCORRUPTED;
3266         }
3267
3268         if ((segment_count / segs_per_sec) < total_sections) {
3269                 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
3270                           segment_count, segs_per_sec, total_sections);
3271                 return -EFSCORRUPTED;
3272         }
3273
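             /* log_blocks_per_seg was verified to be 9 above, i.e. 512
              * blocks per segment */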
3274         if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
3275                 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
3276                           segment_count, le64_to_cpu(raw_super->block_count));
3277                 return -EFSCORRUPTED;
3278         }
3279
3280         if (RDEV(0).path[0]) {
3281                 block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
3282                 int i = 1;
3283
3284                 while (i < MAX_DEVICES && RDEV(i).path[0]) {
3285                         dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
3286                         i++;
3287                 }
3288                 if (segment_count != dev_seg_count) {
3289                         f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
3290                                         segment_count, dev_seg_count);
3291                         return -EFSCORRUPTED;
3292                 }
3293         } else {
3294                 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
3295                                         !bdev_is_zoned(sbi->sb->s_bdev)) {
3296                         f2fs_info(sbi, "Zoned block device path is missing");
3297                         return -EFSCORRUPTED;
3298                 }
3299         }
3300
3301         if (secs_per_zone > total_sections || !secs_per_zone) {
3302                 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
3303                           secs_per_zone, total_sections);
3304                 return -EFSCORRUPTED;
3305         }
3306         if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
3307                         raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
3308                         (le32_to_cpu(raw_super->extension_count) +
3309                         raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
3310                 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
3311                           le32_to_cpu(raw_super->extension_count),
3312                           raw_super->hot_ext_count,
3313                           F2FS_MAX_EXTENSION);
3314                 return -EFSCORRUPTED;
3315         }
3316
3317         if (le32_to_cpu(raw_super->cp_payload) >=
3318                                 (blocks_per_seg - F2FS_CP_PACKS -
3319                                 NR_CURSEG_PERSIST_TYPE)) {
3320                 f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
3321                           le32_to_cpu(raw_super->cp_payload),
3322                           blocks_per_seg - F2FS_CP_PACKS -
3323                           NR_CURSEG_PERSIST_TYPE);
3324                 return -EFSCORRUPTED;
3325         }
3326
3327         /* check reserved ino info */
3328         if (le32_to_cpu(raw_super->node_ino) != 1 ||
3329                 le32_to_cpu(raw_super->meta_ino) != 2 ||
3330                 le32_to_cpu(raw_super->root_ino) != 3) {
3331                 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
3332                           le32_to_cpu(raw_super->node_ino),
3333                           le32_to_cpu(raw_super->meta_ino),
3334                           le32_to_cpu(raw_super->root_ino));
3335                 return -EFSCORRUPTED;
3336         }
3337
3338         /* check CP/SIT/NAT/SSA/MAIN area boundaries */
3339         if (sanity_check_area_boundary(sbi, bh))
3340                 return -EFSCORRUPTED;
3341
3342         return 0;
3343 }
3344
3345 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
3346 {
3347         unsigned int total, fsmeta;
3348         struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3349         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3350         unsigned int ovp_segments, reserved_segments;
3351         unsigned int main_segs, blocks_per_seg;
3352         unsigned int sit_segs, nat_segs;
3353         unsigned int sit_bitmap_size, nat_bitmap_size;
3354         unsigned int log_blocks_per_seg;
3355         unsigned int segment_count_main;
3356         unsigned int cp_pack_start_sum, cp_payload;
3357         block_t user_block_count, valid_user_blocks;
3358         block_t avail_node_count, valid_node_count;
3359         unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
3360         int i, j;
3361
3362         total = le32_to_cpu(raw_super->segment_count);
3363         fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
3364         sit_segs = le32_to_cpu(raw_super->segment_count_sit);
3365         fsmeta += sit_segs;
3366         nat_segs = le32_to_cpu(raw_super->segment_count_nat);
3367         fsmeta += nat_segs;
3368         fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
3369         fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
3370
3371         if (unlikely(fsmeta >= total))
3372                 return 1;
3373
3374         ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3375         reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3376
3377         if (!f2fs_sb_has_readonly(sbi) &&
3378                         unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
3379                         ovp_segments == 0 || reserved_segments == 0)) {
3380                 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
3381                 return 1;
3382         }
3383         user_block_count = le64_to_cpu(ckpt->user_block_count);
3384         segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
3385                         (f2fs_sb_has_readonly(sbi) ? 1 : 0);
3386         log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3387         if (!user_block_count || user_block_count >=
3388                         segment_count_main << log_blocks_per_seg) {
3389                 f2fs_err(sbi, "Wrong user_block_count: %u",
3390                          user_block_count);
3391                 return 1;
3392         }
3393
3394         valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
3395         if (valid_user_blocks > user_block_count) {
3396                 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
3397                          valid_user_blocks, user_block_count);
3398                 return 1;
3399         }
3400
3401         valid_node_count = le32_to_cpu(ckpt->valid_node_count);
3402         avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
3403         if (valid_node_count > avail_node_count) {
3404                 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
3405                          valid_node_count, avail_node_count);
3406                 return 1;
3407         }
3408
3409         main_segs = le32_to_cpu(raw_super->segment_count_main);
3410         blocks_per_seg = sbi->blocks_per_seg;
3411
3412         for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3413                 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
3414                         le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
3415                         return 1;
3416
3417                 if (f2fs_sb_has_readonly(sbi))
3418                         goto check_data;
3419
3420                 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
3421                         if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3422                                 le32_to_cpu(ckpt->cur_node_segno[j])) {
3423                                 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
3424                                          i, j,
3425                                          le32_to_cpu(ckpt->cur_node_segno[i]));
3426                                 return 1;
3427                         }
3428                 }
3429         }
3430 check_data:
3431         for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
3432                 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
3433                         le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
3434                         return 1;
3435
3436                 if (f2fs_sb_has_readonly(sbi))
3437                         goto skip_cross;
3438
3439                 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
3440                         if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
3441                                 le32_to_cpu(ckpt->cur_data_segno[j])) {
3442                                 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
3443                                          i, j,
3444                                          le32_to_cpu(ckpt->cur_data_segno[i]));
3445                                 return 1;
3446                         }
3447                 }
3448         }
3449         for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3450                 for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
3451                         if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3452                                 le32_to_cpu(ckpt->cur_data_segno[j])) {
3453                                 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
3454                                          i, j,
3455                                          le32_to_cpu(ckpt->cur_node_segno[i]));
3456                                 return 1;
3457                         }
3458                 }
3459         }
3460 skip_cross:
3461         sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3462         nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
3463
3464         if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3465                 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3466                 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
3467                          sit_bitmap_size, nat_bitmap_size);
3468                 return 1;
3469         }
3470
3471         cp_pack_start_sum = __start_sum_addr(sbi);
3472         cp_payload = __cp_payload(sbi);
3473         if (cp_pack_start_sum < cp_payload + 1 ||
3474                 cp_pack_start_sum > blocks_per_seg - 1 -
3475                         NR_CURSEG_PERSIST_TYPE) {
3476                 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
3477                          cp_pack_start_sum);
3478                 return 1;
3479         }
3480
3481         if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
3482                 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
3483                 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
3484                           "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3485                           "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3486                           le32_to_cpu(ckpt->checksum_offset));
3487                 return 1;
3488         }
3489
3490         nat_blocks = nat_segs << log_blocks_per_seg;
3491         nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
3492         nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3493         if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
3494                 (cp_payload + F2FS_CP_PACKS +
3495                 NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
3496                 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
3497                           cp_payload, nat_bits_blocks);
3498                 return -EFSCORRUPTED;
3499         }
3500
3501         if (unlikely(f2fs_cp_error(sbi))) {
3502                 f2fs_err(sbi, "A bug case: need to run fsck");
3503                 return 1;
3504         }
3505         return 0;
3506 }
3507
3508 static void init_sb_info(struct f2fs_sb_info *sbi)
3509 {
3510         struct f2fs_super_block *raw_super = sbi->raw_super;
3511         int i;
3512
3513         sbi->log_sectors_per_block =
3514                 le32_to_cpu(raw_super->log_sectors_per_block);
3515         sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
3516         sbi->blocksize = 1 << sbi->log_blocksize;
3517         sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3518         sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
3519         sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3520         sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3521         sbi->total_sections = le32_to_cpu(raw_super->section_count);
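             /* the NAT area keeps two copies of each block, hence the / 2 */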
3522         sbi->total_node_count =
3523                 (le32_to_cpu(raw_super->segment_count_nat) / 2)
3524                         * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
3525         F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
3526         F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
3527         F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
3528         sbi->cur_victim_sec = NULL_SECNO;
3529         sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
3530         sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
3531         sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
3532         sbi->migration_granularity = sbi->segs_per_sec;
3533         sbi->seq_file_ra_mul = MIN_RA_MUL;
3534         sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
3535         sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
3536
3537         sbi->dir_level = DEF_DIR_LEVEL;
3538         sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
3539         sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
3540         sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
3541         sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
3542         sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
3543         sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
3544                                 DEF_UMOUNT_DISCARD_TIMEOUT;
3545         clear_sbi_flag(sbi, SBI_NEED_FSCK);
3546
3547         for (i = 0; i < NR_COUNT_TYPE; i++)
3548                 atomic_set(&sbi->nr_pages[i], 0);
3549
3550         for (i = 0; i < META; i++)
3551                 atomic_set(&sbi->wb_sync_req[i], 0);
3552
3553         INIT_LIST_HEAD(&sbi->s_list);
3554         mutex_init(&sbi->umount_mutex);
3555         init_rwsem(&sbi->io_order_lock);
3556         spin_lock_init(&sbi->cp_lock);
3557
3558         sbi->dirty_device = 0;
3559         spin_lock_init(&sbi->dev_lock);
3560
3561         init_rwsem(&sbi->sb_lock);
3562         init_rwsem(&sbi->pin_sem);
3563 }
3564
3565 static int init_percpu_info(struct f2fs_sb_info *sbi)
3566 {
3567         int err;
3568
3569         err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
3570         if (err)
3571                 return err;
3572
3573         err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3574                                                                 GFP_KERNEL);
3575         if (err)
3576                 percpu_counter_destroy(&sbi->alloc_valid_block_count);
3577
3578         return err;
3579 }
3580
3581 #ifdef CONFIG_BLK_DEV_ZONED
3582
3583 struct f2fs_report_zones_args {
3584         struct f2fs_dev_info *dev;
3585         bool zone_cap_mismatch;
3586 };
3587
3588 static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
3589                               void *data)
3590 {
3591         struct f2fs_report_zones_args *rz_args = data;
3592
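             /* conventional zones are randomly writable; only sequential
              * zones need the write-pointer bitmap and capacity tracking */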
3593         if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
3594                 return 0;
3595
3596         set_bit(idx, rz_args->dev->blkz_seq);
3597         rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
3598                                                 F2FS_LOG_SECTORS_PER_BLOCK;
3599         if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
3600                 rz_args->zone_cap_mismatch = true;
3601
3602         return 0;
3603 }
3604
3605 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
3606 {
3607         struct block_device *bdev = FDEV(devi).bdev;
3608         sector_t nr_sectors = bdev_nr_sectors(bdev);
3609         struct f2fs_report_zones_args rep_zone_arg;
3610         int ret;
3611
3612         if (!f2fs_sb_has_blkzoned(sbi))
3613                 return 0;
3614
3615         if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
3616                                 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
3617                 return -EINVAL;
3618         sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
3619         if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
3620                                 __ilog2_u32(sbi->blocks_per_blkz))
3621                 return -EINVAL;
3622         sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
3623         FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
3624                                         sbi->log_blocks_per_blkz;
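             /* round up to cover a final zone smaller than the zone size */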
3625         if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
3626                 FDEV(devi).nr_blkz++;
3627
3628         FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
3629                                         BITS_TO_LONGS(FDEV(devi).nr_blkz)
3630                                         * sizeof(unsigned long),
3631                                         GFP_KERNEL);
3632         if (!FDEV(devi).blkz_seq)
3633                 return -ENOMEM;
3634
3635         /* Get block zone types and zone capacities */
3636         FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
3637                                         FDEV(devi).nr_blkz * sizeof(block_t),
3638                                         GFP_KERNEL);
3639         if (!FDEV(devi).zone_capacity_blocks)
3640                 return -ENOMEM;
3641
3642         rep_zone_arg.dev = &FDEV(devi);
3643         rep_zone_arg.zone_cap_mismatch = false;
3644
3645         ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
3646                                   &rep_zone_arg);
3647         if (ret < 0)
3648                 return ret;
3649
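             /* no mismatch: every zone's capacity equals the zone size, so
              * the per-zone capacity array is unnecessary */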
3650         if (!rep_zone_arg.zone_cap_mismatch) {
3651                 kfree(FDEV(devi).zone_capacity_blocks);
3652                 FDEV(devi).zone_capacity_blocks = NULL;
3653         }
3654
3655         return 0;
3656 }
3657 #endif
3658
3659 /*
3660  * Read the f2fs raw super block.
3661  * Since we keep two copies of the super block, read both of them
3662  * to get the first valid one. If either is broken, we pass the
3663  * recovery flag back to the caller.
3664  */
3665 static int read_raw_super_block(struct f2fs_sb_info *sbi,
3666                         struct f2fs_super_block **raw_super,
3667                         int *valid_super_block, int *recovery)
3668 {
3669         struct super_block *sb = sbi->sb;
3670         int block;
3671         struct buffer_head *bh;
3672         struct f2fs_super_block *super;
3673         int err = 0;
3674
3675         super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
3676         if (!super)
3677                 return -ENOMEM;
3678
3679         for (block = 0; block < 2; block++) {
3680                 bh = sb_bread(sb, block);
3681                 if (!bh) {
3682                         f2fs_err(sbi, "Unable to read %dth superblock",
3683                                  block + 1);
3684                         err = -EIO;
3685                         *recovery = 1;
3686                         continue;
3687                 }
3688
3689                 /* sanity checking of raw super */
3690                 err = sanity_check_raw_super(sbi, bh);
3691                 if (err) {
3692                         f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
3693                                  block + 1);
3694                         brelse(bh);
3695                         *recovery = 1;
3696                         continue;
3697                 }
3698
3699                 if (!*raw_super) {
3700                         memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
3701                                                         sizeof(*super));
3702                         *valid_super_block = block;
3703                         *raw_super = super;
3704                 }
3705                 brelse(bh);
3706         }
3707
3708         /* No valid superblock */
3709         if (!*raw_super)
3710                 kfree(super);
3711         else
3712                 err = 0;
3713
3714         return err;
3715 }
3716
3717 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
3718 {
3719         struct buffer_head *bh;
3720         __u32 crc = 0;
3721         int err;
3722
3723         if ((recover && f2fs_readonly(sbi->sb)) ||
3724                                 bdev_read_only(sbi->sb->s_bdev)) {
3725                 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3726                 return -EROFS;
3727         }
3728
3729         /* we should update superblock crc here */
3730         if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
3731                 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
3732                                 offsetof(struct f2fs_super_block, crc));
3733                 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
3734         }
3735
3736         /* write back-up superblock first; if we crash mid-update, the current valid copy stays intact */
3737         bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
3738         if (!bh)
3739                 return -EIO;
3740         err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3741         brelse(bh);
3742
3743         /* if we are in recovery path, skip writing valid superblock */
3744         if (recover || err)
3745                 return err;
3746
3747         /* write current valid superblock */
3748         bh = sb_bread(sbi->sb, sbi->valid_super_block);
3749         if (!bh)
3750                 return -EIO;
3751         err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3752         brelse(bh);
3753         return err;
3754 }
3755
3756 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
3757 {
3758         struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3759         unsigned int max_devices = MAX_DEVICES;
3760         unsigned int logical_blksize;
3761         int i;
3762
3763         /* Initialize single device information */
3764         if (!RDEV(0).path[0]) {
3765                 if (!bdev_is_zoned(sbi->sb->s_bdev))
3766                         return 0;
3767                 max_devices = 1;
3768         }
3769
3770         /*
3771          * Initialize information for multiple devices, or for a
3772          * single zoned block device.
3773          */
3774         sbi->devs = f2fs_kzalloc(sbi,
3775                                  array_size(max_devices,
3776                                             sizeof(struct f2fs_dev_info)),
3777                                  GFP_KERNEL);
3778         if (!sbi->devs)
3779                 return -ENOMEM;
3780
3781         logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
3782         sbi->aligned_blksize = true;
3783
3784         for (i = 0; i < max_devices; i++) {
3785
3786                 if (i > 0 && !RDEV(i).path[0])
3787                         break;
3788
3789                 if (max_devices == 1) {
3790                         /* Single zoned block device mount */
3791                         FDEV(0).bdev =
3792                                 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
3793                                         sbi->sb->s_mode, sbi->sb->s_type);
3794                 } else {
3795                         /* Multi-device mount */
3796                         memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
3797                         FDEV(i).total_segments =
3798                                 le32_to_cpu(RDEV(i).total_segments);
3799                         if (i == 0) {
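                                 /*
                                  * Note: the first device also covers the
                                  * blocks in front of segment #0 (the
                                  * superblock area), hence the extra
                                  * segment0_blkaddr term in end_blk.
                                  */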
3800                                 FDEV(i).start_blk = 0;
3801                                 FDEV(i).end_blk = FDEV(i).start_blk +
3802                                     (FDEV(i).total_segments <<
3803                                     sbi->log_blocks_per_seg) - 1 +
3804                                     le32_to_cpu(raw_super->segment0_blkaddr);
3805                         } else {
3806                                 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
3807                                 FDEV(i).end_blk = FDEV(i).start_blk +
3808                                         (FDEV(i).total_segments <<
3809                                         sbi->log_blocks_per_seg) - 1;
3810                         }
3811                         FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
3812                                         sbi->sb->s_mode, sbi->sb->s_type);
3813                 }
3814                 if (IS_ERR(FDEV(i).bdev))
3815                         return PTR_ERR(FDEV(i).bdev);
3816
3817                 /* to release errored devices */
3818                 sbi->s_ndevs = i + 1;
3819
3820                 if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
3821                         sbi->aligned_blksize = false;
3822
3823 #ifdef CONFIG_BLK_DEV_ZONED
3824                 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
3825                                 !f2fs_sb_has_blkzoned(sbi)) {
3826                         f2fs_err(sbi, "Zoned block device feature not enabled");
3827                         return -EINVAL;
3828                 }
3829                 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
3830                         if (init_blkz_info(sbi, i)) {
3831                                 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
3832                                 return -EINVAL;
3833                         }
3834                         if (max_devices == 1)
3835                                 break;
3836                         f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
3837                                   i, FDEV(i).path,
3838                                   FDEV(i).total_segments,
3839                                   FDEV(i).start_blk, FDEV(i).end_blk,
3840                                   bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
3841                                   "Host-aware" : "Host-managed");
3842                         continue;
3843                 }
3844 #endif
3845                 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
3846                           i, FDEV(i).path,
3847                           FDEV(i).total_segments,
3848                           FDEV(i).start_blk, FDEV(i).end_blk);
3849         }
3850         f2fs_info(sbi,
3851                   "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3852         return 0;
3853 }
3854
3855 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
3856 {
3857 #ifdef CONFIG_UNICODE
3858         if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
3859                 const struct f2fs_sb_encodings *encoding_info;
3860                 struct unicode_map *encoding;
3861                 __u16 encoding_flags;
3862
3863                 if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
3864                                           &encoding_flags)) {
3865                         f2fs_err(sbi,
3866                                  "Encoding requested by superblock is unknown");
3867                         return -EINVAL;
3868                 }
3869
3870                 encoding = utf8_load(encoding_info->version);
3871                 if (IS_ERR(encoding)) {
3872                         f2fs_err(sbi,
3873                                  "can't mount with superblock charset: %s-%s "
3874                                  "not supported by the kernel. flags: 0x%x.",
3875                                  encoding_info->name, encoding_info->version,
3876                                  encoding_flags);
3877                         return PTR_ERR(encoding);
3878                 }
3879                 f2fs_info(sbi, "Using encoding defined by superblock: "
3880                          "%s-%s with flags 0x%hx", encoding_info->name,
3881                          encoding_info->version?:"\b", encoding_flags);
3882
3883                 sbi->sb->s_encoding = encoding;
3884                 sbi->sb->s_encoding_flags = encoding_flags;
3885         }
3886 #else
3887         if (f2fs_sb_has_casefold(sbi)) {
3888                 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
3889                 return -EINVAL;
3890         }
3891 #endif
3892         return 0;
3893 }
3894
3895 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
3896 {
3897         struct f2fs_sm_info *sm_i = SM_I(sbi);
3898
3899         /* adjust parameters according to the volume size */
3900         if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
3901                 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
3902                 if (f2fs_block_unit_discard(sbi))
3903                         sm_i->dcc_info->discard_granularity = 1;
3904                 sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
3905         }
3906
3907         sbi->readdir_ra = 1;
3908 }
3909
3910 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
3911 {
3912         struct f2fs_sb_info *sbi;
3913         struct f2fs_super_block *raw_super;
3914         struct inode *root;
3915         int err;
3916         bool skip_recovery = false, need_fsck = false;
3917         char *options = NULL;
3918         int recovery, i, valid_super_block;
3919         struct curseg_info *seg_i;
3920         int retry_cnt = 1;
3921
3922 try_onemore:
3923         err = -EINVAL;
3924         raw_super = NULL;
3925         valid_super_block = -1;
3926         recovery = 0;
3927
3928         /* allocate memory for f2fs-specific super block info */
3929         sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
3930         if (!sbi)
3931                 return -ENOMEM;
3932
3933         sbi->sb = sb;
3934
3935         /* Load the checksum driver */
3936         sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
3937         if (IS_ERR(sbi->s_chksum_driver)) {
3938                 f2fs_err(sbi, "Cannot load crc32 driver.");
3939                 err = PTR_ERR(sbi->s_chksum_driver);
3940                 sbi->s_chksum_driver = NULL;
3941                 goto free_sbi;
3942         }
3943
3944         /* set a block size */
3945         if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3946                 f2fs_err(sbi, "unable to set blocksize");
3947                 goto free_sbi;
3948         }
3949
3950         err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
3951                                                                 &recovery);
3952         if (err)
3953                 goto free_sbi;
3954
3955         sb->s_fs_info = sbi;
3956         sbi->raw_super = raw_super;
3957
3958         /* precompute checksum seed for metadata */
3959         if (f2fs_sb_has_inode_chksum(sbi))
3960                 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
3961                                                 sizeof(raw_super->uuid));
3962
3963         default_options(sbi);
3964         /* parse mount options */
3965         options = kstrdup((const char *)data, GFP_KERNEL);
3966         if (data && !options) {
3967                 err = -ENOMEM;
3968                 goto free_sb_buf;
3969         }
3970
3971         err = parse_options(sb, options, false);
3972         if (err)
3973                 goto free_options;
3974
3975         sb->s_maxbytes = max_file_blocks(NULL) <<
3976                                 le32_to_cpu(raw_super->log_blocksize);
3977         sb->s_max_links = F2FS_LINK_MAX;
3978
3979         err = f2fs_setup_casefold(sbi);
3980         if (err)
3981                 goto free_options;
3982
3983 #ifdef CONFIG_QUOTA
3984         sb->dq_op = &f2fs_quota_operations;
3985         sb->s_qcop = &f2fs_quotactl_ops;
3986         sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3987
3988         if (f2fs_sb_has_quota_ino(sbi)) {
3989                 for (i = 0; i < MAXQUOTAS; i++) {
3990                         if (f2fs_qf_ino(sbi->sb, i))
3991                                 sbi->nquota_files++;
3992                 }
3993         }
3994 #endif
3995
3996         sb->s_op = &f2fs_sops;
3997 #ifdef CONFIG_FS_ENCRYPTION
3998         sb->s_cop = &f2fs_cryptops;
3999 #endif
4000 #ifdef CONFIG_FS_VERITY
4001         sb->s_vop = &f2fs_verityops;
4002 #endif
4003         sb->s_xattr = f2fs_xattr_handlers;
4004         sb->s_export_op = &f2fs_export_ops;
4005         sb->s_magic = F2FS_SUPER_MAGIC;
4006         sb->s_time_gran = 1;
4007         sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
4008                 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
4009         memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
4010         sb->s_iflags |= SB_I_CGROUPWB;
4011
4012         /* init f2fs-specific super block info */
4013         sbi->valid_super_block = valid_super_block;
4014         init_rwsem(&sbi->gc_lock);
4015         mutex_init(&sbi->writepages);
4016         init_rwsem(&sbi->cp_global_sem);
4017         init_rwsem(&sbi->node_write);
4018         init_rwsem(&sbi->node_change);
4019
4020         /* disallow all the data/node/meta page writes */
4021         set_sbi_flag(sbi, SBI_POR_DOING);
4022         spin_lock_init(&sbi->stat_lock);
4023
4024         for (i = 0; i < NR_PAGE_TYPE; i++) {
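                 /* only data/node IOs are split by temperature (hot/warm/
                  * cold); meta always uses a single queue */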
4025                 int n = (i == META) ? 1 : NR_TEMP_TYPE;
4026                 int j;
4027
4028                 sbi->write_io[i] =
4029                         f2fs_kmalloc(sbi,
4030                                      array_size(n,
4031                                                 sizeof(struct f2fs_bio_info)),
4032                                      GFP_KERNEL);
4033                 if (!sbi->write_io[i]) {
4034                         err = -ENOMEM;
4035                         goto free_bio_info;
4036                 }
4037
4038                 for (j = HOT; j < n; j++) {
4039                         init_rwsem(&sbi->write_io[i][j].io_rwsem);
4040                         sbi->write_io[i][j].sbi = sbi;
4041                         sbi->write_io[i][j].bio = NULL;
4042                         spin_lock_init(&sbi->write_io[i][j].io_lock);
4043                         INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
4044                         INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
4045                         init_rwsem(&sbi->write_io[i][j].bio_list_lock);
4046                 }
4047         }
4048
4049         init_rwsem(&sbi->cp_rwsem);
4050         init_rwsem(&sbi->quota_sem);
4051         init_waitqueue_head(&sbi->cp_wait);
4052         init_sb_info(sbi);
4053
4054         err = f2fs_init_iostat(sbi);
4055         if (err)
4056                 goto free_bio_info;
4057
4058         err = init_percpu_info(sbi);
4059         if (err)
4060                 goto free_iostat;
4061
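             /*
              * For aligned I/O (the io_bits mount option), pre-allocate a
              * pool of dummy pages used to pad bios up to the configured
              * I/O unit size.
              */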
4062         if (F2FS_IO_ALIGNED(sbi)) {
4063                 sbi->write_io_dummy =
4064                         mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
4065                 if (!sbi->write_io_dummy) {
4066                         err = -ENOMEM;
4067                         goto free_percpu;
4068                 }
4069         }
4070
4071         /* init per sbi slab cache */
4072         err = f2fs_init_xattr_caches(sbi);
4073         if (err)
4074                 goto free_io_dummy;
4075         err = f2fs_init_page_array_cache(sbi);
4076         if (err)
4077                 goto free_xattr_cache;
4078
4079         /* get an inode for meta space */
4080         sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
4081         if (IS_ERR(sbi->meta_inode)) {
4082                 f2fs_err(sbi, "Failed to read F2FS meta data inode");
4083                 err = PTR_ERR(sbi->meta_inode);
4084                 goto free_page_array_cache;
4085         }
4086
4087         err = f2fs_get_valid_checkpoint(sbi);
4088         if (err) {
4089                 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
4090                 goto free_meta_inode;
4091         }
4092
4093         if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
4094                 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
4095         if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
4096                 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4097                 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
4098         }
4099
4100         if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
4101                 set_sbi_flag(sbi, SBI_NEED_FSCK);
4102
4103         /* Initialize device list */
4104         err = f2fs_scan_devices(sbi);
4105         if (err) {
4106                 f2fs_err(sbi, "Failed to find devices");
4107                 goto free_devices;
4108         }
4109
4110         err = f2fs_init_post_read_wq(sbi);
4111         if (err) {
4112                 f2fs_err(sbi, "Failed to initialize post read workqueue");
4113                 goto free_devices;
4114         }
4115
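             /* Seed the in-memory usage counters from the checkpoint image. */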
4116         sbi->total_valid_node_count =
4117                                 le32_to_cpu(sbi->ckpt->valid_node_count);
4118         percpu_counter_set(&sbi->total_valid_inode_count,
4119                                 le32_to_cpu(sbi->ckpt->valid_inode_count));
4120         sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
4121         sbi->total_valid_block_count =
4122                                 le64_to_cpu(sbi->ckpt->valid_block_count);
4123         sbi->last_valid_block_count = sbi->total_valid_block_count;
4124         sbi->reserved_blocks = 0;
4125         sbi->current_reserved_blocks = 0;
4126         limit_reserve_root(sbi);
4127         adjust_unusable_cap_perc(sbi);
4128
4129         for (i = 0; i < NR_INODE_TYPE; i++) {
4130                 INIT_LIST_HEAD(&sbi->inode_list[i]);
4131                 spin_lock_init(&sbi->inode_lock[i]);
4132         }
4133         mutex_init(&sbi->flush_lock);
4134
4135         f2fs_init_extent_cache_info(sbi);
4136
4137         f2fs_init_ino_entry_info(sbi);
4138
4139         f2fs_init_fsync_node_info(sbi);
4140
4141         /* setup checkpoint request control and start checkpoint issue thread */
4142         f2fs_init_ckpt_req_control(sbi);
4143         if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
4144                         test_opt(sbi, MERGE_CHECKPOINT)) {
4145                 err = f2fs_start_ckpt_thread(sbi);
4146                 if (err) {
4147                         f2fs_err(sbi,
4148                             "Failed to start F2FS issue_checkpoint_thread (%d)",
4149                             err);
4150                         goto stop_ckpt_thread;
4151                 }
4152         }
4153
4154         /* setup f2fs internal modules */
4155         err = f2fs_build_segment_manager(sbi);
4156         if (err) {
4157                 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
4158                          err);
4159                 goto free_sm;
4160         }
4161         err = f2fs_build_node_manager(sbi);
4162         if (err) {
4163                 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
4164                          err);
4165                 goto free_nm;
4166         }
4167
4168         /* For write statistics */
4169         sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
4170
4171         /* Read accumulated write IO statistics if they exist */
4172         seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
4173         if (__exist_node_summaries(sbi))
4174                 sbi->kbytes_written =
4175                         le64_to_cpu(seg_i->journal->info.kbytes_written);
4176
4177         f2fs_build_gc_manager(sbi);
4178
4179         err = f2fs_build_stats(sbi);
4180         if (err)
4181                 goto free_nm;
4182
4183         /* get an inode for node space */
4184         sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
4185         if (IS_ERR(sbi->node_inode)) {
4186                 f2fs_err(sbi, "Failed to read node inode");
4187                 err = PTR_ERR(sbi->node_inode);
4188                 goto free_stats;
4189         }
4190
4191         /* read root inode and dentry */
4192         root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
4193         if (IS_ERR(root)) {
4194                 f2fs_err(sbi, "Failed to read root inode");
4195                 err = PTR_ERR(root);
4196                 goto free_node_inode;
4197         }
4198         if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
4199                         !root->i_size || !root->i_nlink) {
4200                 iput(root);
4201                 err = -EINVAL;
4202                 goto free_node_inode;
4203         }
4204
4205         sb->s_root = d_make_root(root); /* allocate root dentry */
4206         if (!sb->s_root) {
4207                 err = -ENOMEM;
4208                 goto free_node_inode;
4209         }
4210
4211         err = f2fs_init_compress_inode(sbi);
4212         if (err)
4213                 goto free_root_inode;
4214
4215         err = f2fs_register_sysfs(sbi);
4216         if (err)
4217                 goto free_compress_inode;
4218
4219 #ifdef CONFIG_QUOTA
4220         /* Enable quota usage during mount */
4221         if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
4222                 err = f2fs_enable_quotas(sb);
4223                 if (err)
4224                         f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
4225         }
4226 #endif
4227         /* if there are any orphan inodes, free them */
4228         err = f2fs_recover_orphan_inodes(sbi);
4229         if (err)
4230                 goto free_meta;
4231
4232         if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
4233                 goto reset_checkpoint;
4234
4235         /* recover fsynced data */
4236         if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
4237                         !test_opt(sbi, NORECOVERY)) {
4238                 /*
4239                  * The mount should fail when the device is read-only and
4240                  * the previous checkpoint was not written by a clean shutdown.
4241                  */
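                     /*
                      * Note: f2fs_recover_fsync_data(sbi, true) only checks
                      * whether there is fsync'd data to recover; a positive
                      * return value means recovery is needed but nothing has
                      * been replayed yet.
                      */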
4242                 if (f2fs_hw_is_readonly(sbi)) {
4243                         if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4244                                 err = f2fs_recover_fsync_data(sbi, true);
4245                                 if (err > 0) {
4246                                         err = -EROFS;
4247                                         f2fs_err(sbi, "Need to recover fsync data, but "
4248                                                 "write access unavailable, please try "
4249                                                 "mount w/ disable_roll_forward or norecovery");
4250                                 }
4251                                 if (err < 0)
4252                                         goto free_meta;
4253                         }
4254                         f2fs_info(sbi, "write access unavailable, skipping recovery");
4255                         goto reset_checkpoint;
4256                 }
4257
4258                 if (need_fsck)
4259                         set_sbi_flag(sbi, SBI_NEED_FSCK);
4260
4261                 if (skip_recovery)
4262                         goto reset_checkpoint;
4263
4264                 err = f2fs_recover_fsync_data(sbi, false);
4265                 if (err < 0) {
4266                         if (err != -ENOMEM)
4267                                 skip_recovery = true;
4268                         need_fsck = true;
4269                         f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
4270                                  err);
4271                         goto free_meta;
4272                 }
4273         } else {
4274                 err = f2fs_recover_fsync_data(sbi, true);
4275
4276                 if (!f2fs_readonly(sb) && err > 0) {
4277                         err = -EINVAL;
4278                         f2fs_err(sbi, "Need to recover fsync data");
4279                         goto free_meta;
4280                 }
4281         }
4282
4283         /*
4284          * If f2fs is not read-only and fsync data recovery succeeded,
4285          * check the zoned block devices' write-pointer consistency.
4286          */
4287         if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
4288                 err = f2fs_check_write_pointer(sbi);
4289                 if (err)
4290                         goto free_meta;
4291         }
4292
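     /*
      * Common resume point: recovery has completed, was skipped, or the
      * image was left with checkpointing disabled on the previous mount.
      */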
4293 reset_checkpoint:
4294         f2fs_init_inmem_curseg(sbi);
4295
4296         /* f2fs_recover_fsync_data() cleared this already */
4297         clear_sbi_flag(sbi, SBI_POR_DOING);
4298
4299         if (test_opt(sbi, DISABLE_CHECKPOINT)) {
4300                 err = f2fs_disable_checkpoint(sbi);
4301                 if (err)
4302                         goto sync_free_meta;
4303         } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
4304                 f2fs_enable_checkpoint(sbi);
4305         }
4306
4307         /*
4308          * If the filesystem is not mounted read-only,
4309          * start the gc_thread.
4310          */
4311         if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
4312                 test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
4313                 /* After POR, we can run the background GC thread. */
4314                 err = f2fs_start_gc_thread(sbi);
4315                 if (err)
4316                         goto sync_free_meta;
4317         }
4318         kvfree(options);
4319
4320         /* recover broken superblock */
4321         if (recovery) {
4322                 err = f2fs_commit_super(sbi, true);
4323                 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
4324                           sbi->valid_super_block ? 1 : 2, err);
4325         }
4326
4327         f2fs_join_shrinker(sbi);
4328
4329         f2fs_tuning_parameters(sbi);
4330
4331         f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
4332                     cur_cp_version(F2FS_CKPT(sbi)));
4333         f2fs_update_time(sbi, CP_TIME);
4334         f2fs_update_time(sbi, REQ_TIME);
4335         clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4336         return 0;
4337
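     /*
      * Error paths: each label below undoes the setup performed before the
      * corresponding failure point, in reverse order of initialization.
      */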
4338 sync_free_meta:
4339         /* safe to flush all the data */
4340         sync_filesystem(sbi->sb);
4341         retry_cnt = 0;
4342
4343 free_meta:
4344 #ifdef CONFIG_QUOTA
4345         f2fs_truncate_quota_inode_pages(sb);
4346         if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
4347                 f2fs_quota_off_umount(sbi->sb);
4348 #endif
4349         /*
4350          * Some dirty meta pages can be left behind when f2fs_recover_orphan_inodes()
4351          * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
4352          * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(),
4353          * which falls into an infinite loop in f2fs_sync_meta_pages().
4354          */
4355         truncate_inode_pages_final(META_MAPPING(sbi));
4356         /* evict some inodes being cached by GC */
4357         evict_inodes(sb);
4358         f2fs_unregister_sysfs(sbi);
4359 free_compress_inode:
4360         f2fs_destroy_compress_inode(sbi);
4361 free_root_inode:
4362         dput(sb->s_root);
4363         sb->s_root = NULL;
4364 free_node_inode:
4365         f2fs_release_ino_entry(sbi, true);
4366         truncate_inode_pages_final(NODE_MAPPING(sbi));
4367         iput(sbi->node_inode);
4368         sbi->node_inode = NULL;
4369 free_stats:
4370         f2fs_destroy_stats(sbi);
4371 free_nm:
4372         f2fs_destroy_node_manager(sbi);
4373 free_sm:
4374         f2fs_destroy_segment_manager(sbi);
4375         f2fs_destroy_post_read_wq(sbi);
4376 stop_ckpt_thread:
4377         f2fs_stop_ckpt_thread(sbi);
4378 free_devices:
4379         destroy_device_list(sbi);
4380         kvfree(sbi->ckpt);
4381 free_meta_inode:
4382         make_bad_inode(sbi->meta_inode);
4383         iput(sbi->meta_inode);
4384         sbi->meta_inode = NULL;
4385 free_page_array_cache:
4386         f2fs_destroy_page_array_cache(sbi);
4387 free_xattr_cache:
4388         f2fs_destroy_xattr_caches(sbi);
4389 free_io_dummy:
4390         mempool_destroy(sbi->write_io_dummy);
4391 free_percpu:
4392         destroy_percpu_info(sbi);
4393 free_iostat:
4394         f2fs_destroy_iostat(sbi);
4395 free_bio_info:
4396         for (i = 0; i < NR_PAGE_TYPE; i++)
4397                 kvfree(sbi->write_io[i]);
4398
4399 #ifdef CONFIG_UNICODE
4400         utf8_unload(sb->s_encoding);
4401         sb->s_encoding = NULL;
4402 #endif
4403 free_options:
4404 #ifdef CONFIG_QUOTA
4405         for (i = 0; i < MAXQUOTAS; i++)
4406                 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
4407 #endif
4408         fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
4409         kvfree(options);
4410 free_sb_buf:
4411         kfree(raw_super);
4412 free_sbi:
4413         if (sbi->s_chksum_driver)
4414                 crypto_free_shash(sbi->s_chksum_driver);
4415         kfree(sbi);
4416
4417         /* give only one more chance */
4418         if (retry_cnt > 0 && skip_recovery) {
4419                 retry_cnt--;
4420                 shrink_dcache_sb(sb);
4421                 goto try_onemore;
4422         }
4423         return err;
4424 }
4425
4426 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
4427                         const char *dev_name, void *data)
4428 {
4429         return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
4430 }
4431
4432 static void kill_f2fs_super(struct super_block *sb)
4433 {
4434         if (sb->s_root) {
4435                 struct f2fs_sb_info *sbi = F2FS_SB(sb);
4436
4437                 set_sbi_flag(sbi, SBI_IS_CLOSE);
4438                 f2fs_stop_gc_thread(sbi);
4439                 f2fs_stop_discard_thread(sbi);
4440
4441 #ifdef CONFIG_F2FS_FS_COMPRESSION
4442                 /*
4443                  * A later evict_inode() can bypass checking and invalidating
4444                  * the compress inode cache.
4445                  */
4446                 if (test_opt(sbi, COMPRESS_CACHE))
4447                         truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
4448 #endif
4449
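                     /*
                      * If the image is dirty or lacks a valid umount checkpoint,
                      * write a CP_UMOUNT checkpoint so the next mount sees a
                      * cleanly unmounted image and can skip roll-forward recovery.
                      */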
4450                 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
4451                                 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4452                         struct cp_control cpc = {
4453                                 .reason = CP_UMOUNT,
4454                         };
4455                         f2fs_write_checkpoint(sbi, &cpc);
4456                 }
4457
4458                 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
4459                         sb->s_flags &= ~SB_RDONLY;
4460         }
4461         kill_block_super(sb);
4462 }
4463
4464 static struct file_system_type f2fs_fs_type = {
4465         .owner          = THIS_MODULE,
4466         .name           = "f2fs",
4467         .mount          = f2fs_mount,
4468         .kill_sb        = kill_f2fs_super,
4469         .fs_flags       = FS_REQUIRES_DEV,
4470 };
4471 MODULE_ALIAS_FS("f2fs");
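     /*
      * The alias above lets mount(8) auto-load this module on demand, e.g.:
      *
      *         mount -t f2fs /dev/sdX /mnt
      */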
4472
4473 static int __init init_inodecache(void)
4474 {
4475         f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
4476                         sizeof(struct f2fs_inode_info), 0,
4477                         SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
4478         if (!f2fs_inode_cachep)
4479                 return -ENOMEM;
4480         return 0;
4481 }
4482
4483 static void destroy_inodecache(void)
4484 {
4485         /*
4486          * Make sure all delayed RCU-freed inodes are flushed before we
4487          * destroy the cache.
4488          */
4489         rcu_barrier();
4490         kmem_cache_destroy(f2fs_inode_cachep);
4491 }
4492
4493 static int __init init_f2fs_fs(void)
4494 {
4495         int err;
4496
4497         if (PAGE_SIZE != F2FS_BLKSIZE) {
4498                 printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
4499                                 PAGE_SIZE, F2FS_BLKSIZE);
4500                 return -EINVAL;
4501         }
4502
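             /*
              * Bring up caches and subsystems in dependency order; any
              * failure unwinds through the labels below in reverse order.
              */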
4503         err = init_inodecache();
4504         if (err)
4505                 goto fail;
4506         err = f2fs_create_node_manager_caches();
4507         if (err)
4508                 goto free_inodecache;
4509         err = f2fs_create_segment_manager_caches();
4510         if (err)
4511                 goto free_node_manager_caches;
4512         err = f2fs_create_checkpoint_caches();
4513         if (err)
4514                 goto free_segment_manager_caches;
4515         err = f2fs_create_recovery_cache();
4516         if (err)
4517                 goto free_checkpoint_caches;
4518         err = f2fs_create_extent_cache();
4519         if (err)
4520                 goto free_recovery_cache;
4521         err = f2fs_create_garbage_collection_cache();
4522         if (err)
4523                 goto free_extent_cache;
4524         err = f2fs_init_sysfs();
4525         if (err)
4526                 goto free_garbage_collection_cache;
4527         err = register_shrinker(&f2fs_shrinker_info);
4528         if (err)
4529                 goto free_sysfs;
4530         err = register_filesystem(&f2fs_fs_type);
4531         if (err)
4532                 goto free_shrinker;
4533         f2fs_create_root_stats();
4534         err = f2fs_init_post_read_processing();
4535         if (err)
4536                 goto free_root_stats;
4537         err = f2fs_init_iostat_processing();
4538         if (err)
4539                 goto free_post_read;
4540         err = f2fs_init_bio_entry_cache();
4541         if (err)
4542                 goto free_iostat;
4543         err = f2fs_init_bioset();
4544         if (err)
4545                 goto free_bio_entry_cache;
4546         err = f2fs_init_compress_mempool();
4547         if (err)
4548                 goto free_bioset;
4549         err = f2fs_init_compress_cache();
4550         if (err)
4551                 goto free_compress_mempool;
4552         err = f2fs_create_casefold_cache();
4553         if (err)
4554                 goto free_compress_cache;
4555         return 0;
4556 free_compress_cache:
4557         f2fs_destroy_compress_cache();
4558 free_compress_mempool:
4559         f2fs_destroy_compress_mempool();
4560 free_bioset:
4561         f2fs_destroy_bioset();
4562 free_bio_entry_cache:
4563         f2fs_destroy_bio_entry_cache();
4564 free_iostat:
4565         f2fs_destroy_iostat_processing();
4566 free_post_read:
4567         f2fs_destroy_post_read_processing();
4568 free_root_stats:
4569         f2fs_destroy_root_stats();
4570         unregister_filesystem(&f2fs_fs_type);
4571 free_shrinker:
4572         unregister_shrinker(&f2fs_shrinker_info);
4573 free_sysfs:
4574         f2fs_exit_sysfs();
4575 free_garbage_collection_cache:
4576         f2fs_destroy_garbage_collection_cache();
4577 free_extent_cache:
4578         f2fs_destroy_extent_cache();
4579 free_recovery_cache:
4580         f2fs_destroy_recovery_cache();
4581 free_checkpoint_caches:
4582         f2fs_destroy_checkpoint_caches();
4583 free_segment_manager_caches:
4584         f2fs_destroy_segment_manager_caches();
4585 free_node_manager_caches:
4586         f2fs_destroy_node_manager_caches();
4587 free_inodecache:
4588         destroy_inodecache();
4589 fail:
4590         return err;
4591 }
4592
4593 static void __exit exit_f2fs_fs(void)
4594 {
4595         f2fs_destroy_casefold_cache();
4596         f2fs_destroy_compress_cache();
4597         f2fs_destroy_compress_mempool();
4598         f2fs_destroy_bioset();
4599         f2fs_destroy_bio_entry_cache();
4600         f2fs_destroy_iostat_processing();
4601         f2fs_destroy_post_read_processing();
4602         f2fs_destroy_root_stats();
4603         unregister_filesystem(&f2fs_fs_type);
4604         unregister_shrinker(&f2fs_shrinker_info);
4605         f2fs_exit_sysfs();
4606         f2fs_destroy_garbage_collection_cache();
4607         f2fs_destroy_extent_cache();
4608         f2fs_destroy_recovery_cache();
4609         f2fs_destroy_checkpoint_caches();
4610         f2fs_destroy_segment_manager_caches();
4611         f2fs_destroy_node_manager_caches();
4612         destroy_inodecache();
4613 }
4614
4615 module_init(init_f2fs_fs)
4616 module_exit(exit_f2fs_fs)
4617
4618 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
4619 MODULE_DESCRIPTION("Flash Friendly File System");
4620 MODULE_LICENSE("GPL");
4621 MODULE_SOFTDEP("pre: crc32");
4622