/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_err, NULL},
};
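/*
 * Illustrative only: a typical mount command exercising a few of the
 * tokens above might be
 *   mount -t f2fs -o background_gc=on,discard,active_logs=6 /dev/sdb1 /mnt
 * (device and mount point here are hypothetical).
 */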
/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
};
struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	unsigned int offset;
};
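/*
 * Each attribute records which in-memory structure it belongs to
 * (struct_type) and the byte offset of its field (offset); the generic
 * show/store helpers below use that pair to locate the value.
 */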
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
	return NULL;
}
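/*
 * lifetime_write_kbytes reports the kilobytes written to the backing block
 * device over the filesystem's lifetime: the count carried over from
 * previous mounts (sbi->kbytes_written) plus the sectors written since
 * this mount (BD_PART_WRITTEN).
 */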
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)(sbi->kbytes_written +
			BD_PART_WRITTEN(sbi)));
}
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
	ptr = __struct_ptr(sbi, a->struct_type);

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
static ssize_t f2fs_attr_show(struct kobject *kobj,
			struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}
static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
			const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
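/*
 * The attributes declared below surface per-filesystem tunables under
 * /sys/fs/f2fs/<blockdev>/ (for example /sys/fs/f2fs/sda1/ipu_policy on a
 * hypothetical sda1); each R/W attribute maps directly onto a field of the
 * structure named in its F2FS_RW_ATTR() line.
 */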
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
	ATTR_LIST(lifetime_write_kbytes),
	NULL,
};
static const struct sysfs_ops f2fs_attr_ops = {
	.show = f2fs_attr_show,
	.store = f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs = f2fs_attrs,
	.sysfs_ops = &f2fs_attr_ops,
	.release = f2fs_sb_release,
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
	struct va_format vaf;

	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
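/*
 * parse_options() walks the comma-separated mount option string with
 * match_token() against f2fs_tokens and sets or clears the corresponding
 * bits in sbi->mount_opt; an unrecognized option fails the mount.
 */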
static int parse_options(struct super_block *sb, char *options)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, FORCE_FG_GC);
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			set_opt(sbi, NOHEAP);
#ifdef CONFIG_F2FS_FS_XATTR
			set_opt(sbi, XATTR_USER);
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
#else
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
			set_opt(sbi, POSIX_ACL);
			clear_opt(sbi, POSIX_ACL);
#else
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
			sbi->active_logs = arg;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			set_opt(sbi, NOBARRIER);
			set_opt(sbi, FASTBOOT);
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			set_opt(sbi, DATA_FLUSH);
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	if (test_opt(F2FS_SB(sb), INLINE_XATTR))
		set_inode_flag(fi, FI_INLINE_XATTR);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;
	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* keep fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode, true);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);

	return generic_drop_inode(inode);
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
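/*
 * Inodes are freed through an RCU callback so that lock-free lookups that
 * may still be dereferencing the inode can finish before the memory is
 * returned to the slab cache.
 */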
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void f2fs_put_super(struct super_block *sb)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	remove_proc_entry("segment_info", sbi->s_proc);
	remove_proc_entry(sb->s_id, f2fs_proc_root);
	kobject_del(&sbi->s_kobj);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
		write_checkpoint(sbi, &cpc);

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this here.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in the cp_error case, we can wait for any writeback page */
	if (get_pages(sbi, F2FS_WRITEBACK))
		f2fs_flush_merged_bios(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);
int f2fs_sync_fs(struct super_block *sb, int sync)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	trace_f2fs_sync_fs(sb, sync);

		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	f2fs_trace_ios(NULL, 1);

static int f2fs_freeze(struct super_block *sb)
	if (f2fs_readonly(sb))

	err = f2fs_sync_fs(sb, 1);

static int f2fs_unfreeze(struct super_block *sb)
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = buf->f_files - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}
static int segment_info_seq_show(struct seq_file *seq, void *offset)
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))

static int segment_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, segment_info_seq_show, PDE_DATA(inode));
}

static const struct file_operations f2fs_seq_segment_info_fops = {
	.owner = THIS_MODULE,
	.open = segment_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
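/*
 * Reading /proc/fs/f2fs/<dev>/segment_info dumps one "type|valid_blocks"
 * pair per segment, ten per row, with each row prefixed by its starting
 * segment number, e.g. (made-up values):
 *   0         0|512 0|0   2|13  ...
 */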
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, EXTENT_CACHE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif

#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	if (*flags & MS_RDONLY) {
		set_opt(sbi, FASTBOOT);
		set_sbi_flag(sbi, SBI_IS_DIRTY);

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);

	/*
	 * Both the previous and the new state of the filesystem are RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))

	/* disallow enabling/disabling extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");

	/*
	 * We stop the GC thread if the FS is mounted as RO
	 * or if background_gc=off is passed as a mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			need_restart_gc = true;
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);

	/*
	 * We stop the issue_flush thread if the FS is mounted as RO
	 * or if flush_merge is not passed as a mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);

	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {

	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
static struct super_operations f2fs_sops = {
	.alloc_inode = f2fs_alloc_inode,
	.drop_inode = f2fs_drop_inode,
	.destroy_inode = f2fs_destroy_inode,
	.write_inode = f2fs_write_inode,
	.dirty_inode = f2fs_dirty_inode,
	.show_options = f2fs_show_options,
	.evict_inode = f2fs_evict_inode,
	.put_super = f2fs_put_super,
	.sync_fs = f2fs_sync_fs,
	.freeze_fs = f2fs_freeze,
	.unfreeze_fs = f2fs_unfreeze,
	.statfs = f2fs_statfs,
	.remount_fs = f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}
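/*
 * The fscrypt context is persisted as an f2fs extended attribute in the
 * dedicated F2FS_XATTR_INDEX_ENCRYPTION namespace, so get_context and
 * set_context are thin wrappers around f2fs_getxattr()/f2fs_setxattr().
 */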
static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
		inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static struct fscrypt_operations f2fs_cryptops = {
	.get_context = f2fs_get_context,
	.set_context = f2fs_set_context,
	.is_encrypted = f2fs_encrypted_inode,
	.empty_dir = f2fs_empty_dir,
	.max_namelen = f2fs_max_namelen,
};
#else
static struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted = f2fs_encrypted_inode,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		return ERR_PTR(-ESTALE);
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
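/*
 * Rough worked example, assuming the usual 4KB geometry from f2fs_fs.h
 * (DEF_ADDRS_PER_INODE = 923, F2FS_INLINE_XATTR_ADDRS = 50,
 * ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018):
 *   873 + 2*1018 + 2*1018^2 + 1018^3 ~= 1.057 billion blocks,
 * which at 4KB per block is a maximum file size of roughly 3.9 TiB.
 */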
static inline bool sanity_check_area_boundary(struct super_block *sb,
					struct f2fs_super_block *raw_super)
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
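	/*
	 * The checks below pin down the fixed on-disk layout: the CP, SIT,
	 * NAT, SSA and MAIN areas must sit back to back, and together they
	 * must account for every segment recorded in the superblock.
	 */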
	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);

	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
			main_blkaddr,
			segment0_blkaddr + (segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
static int sanity_check_raw_super(struct super_block *sb,
			struct f2fs_super_block *raw_super)
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)\n",
			le32_to_cpu(raw_super->log_blocks_per_seg));

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sb, raw_super))
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
}
/*
 * Read the f2fs raw superblock.
 * Because we keep two copies of the superblock, read both of them
 * to get the first valid one. If either of them is broken, we pass
 * the recovery flag back to the caller.
 */
static int read_raw_super_block(struct super_block *sb,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
	struct buffer_head *bh;
	struct f2fs_super_block *super, *buf;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",

		buf = (struct f2fs_super_block *)
			(bh->b_data + F2FS_SUPER_OFFSET);

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sb, buf)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",

		memcpy(super, buf, sizeof(*super));
		*valid_super_block = block;

	/* failed to read either of the superblocks */

	/* No valid superblock */
static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
	struct buffer_head *bh;

	bh = sb_getblk(sbi->sb, block);

	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);

	/* it's a rare case, so we can afford FUA all the time */
	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	int err;

	/* write the back-up superblock first */
	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);

	/* if we are in the recovery path, skip writing the valid superblock */
	if (recover || err)
		return err;

	/* write the current valid superblock */
	return __f2fs_commit_super(sbi, sbi->valid_super_block);
}
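/*
 * f2fs_fill_super() below is the mount entry point: it reads and sanity
 * checks the raw superblock, sets up sbi and the VFS super_block fields,
 * loads the checkpoint, builds the segment/node managers and GC, reads the
 * meta/node/root inodes, optionally rolls forward fsync'd data, and finally
 * starts the background GC thread.
 */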
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

	valid_super_block = -1;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");

	err = read_raw_super_block(sb, &raw_super, &valid_super_block,

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {

	err = parse_options(sb, options);

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);

	err = get_valid_checkpoint(sbi);
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
	err = build_node_manager(sbi);
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if they exist */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		goto free_node_inode;

	sb->s_root = d_make_root(root);	/* allocate root dentry */
		goto free_root_inode;

	err = f2fs_build_stats(sbi);
		goto free_root_inode;

	sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
	proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
			&f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {

			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);

	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run the background GC thread. */
		err = start_gc_thread(sbi);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);

	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	remove_proc_entry("segment_info", sbi->s_proc);
	remove_proc_entry(sb->s_id, f2fs_proc_root);

	f2fs_destroy_stats(sbi);

	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);

	destroy_node_manager(sbi);

	destroy_segment_manager(sbi);

	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);

	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);

	/* give only one more chance */
	shrink_dcache_sb(sb);
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner = THIS_MODULE,
	.name = "f2fs",
	.mount = f2fs_mount,
	.kill_sb = kill_f2fs_super,
	.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
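/*
 * The "f2fs" filesystem-type alias above lets the kernel autoload this
 * module when userspace runs e.g. "mount -t f2fs ..." before it is loaded.
 */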
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
	f2fs_build_trace_ios();

	err = init_inodecache();
	err = create_node_manager_caches();
		goto free_inodecache;
	err = create_segment_manager_caches();
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
		goto free_segment_manager_caches;
	err = create_extent_cache();
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	err = register_filesystem(&f2fs_fs_type);
	err = f2fs_create_root_stats();
		goto free_filesystem;
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);

	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	kset_unregister(f2fs_kset);
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
	destroy_inodecache();
static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_shrinker(&f2fs_shrinker_info);
	unregister_filesystem(&f2fs_fs_type);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	kset_unregister(f2fs_kset);
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");