/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

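/*
 * Everything to the left of "CP" above is already covered by the last
 * checkpoint; roll forward only walks node blocks written after it.
 */
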
static struct kmem_cache *fsync_entry_slab;

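/*
 * Check that the blocks allocated since the last checkpoint still fit
 * within the user-visible block count, i.e. that there is room to
 * replay the fsynced data.
 */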
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

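/*
 * Pin the inode behind @ino with iget and track it on @head.  For an
 * inode that is re-created during recovery (quota_inode), also charge
 * it against the owner's inode quota.
 */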
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

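/*
 * Re-link @inode into its parent directory, which is found via i_pino
 * recorded in the fsynced inode block.  A stale entry carrying the
 * same name but a different ino is unlinked first.
 */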
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
	if (!(ri->i_inline & F2FS_INLINE_DOTS))
		clear_inode_flag(inode, FI_INLINE_DOTS);
}

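/*
 * Bring the in-memory inode in line with the fsynced on-disk image:
 * mode, size, timestamps, advise bits and inline flags.
 */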
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;

	recover_inline_flags(inode, raw);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
}

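/*
 * Pass 1: walk the warm node chain that starts right after the last
 * checkpoint and collect every inode that owns a node block carrying
 * the fsync mark.
 */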
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = sbi->user_block_count -
					valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_msg(sbi->sb, KERN_NOTICE,
				"%s: detect looped node chain, "
				"blkaddr:%u, next:%u",
				__func__, blkaddr, next_blkaddr_of_node(page));
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

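/*
 * @blkaddr is about to be reused for recovered data.  Find the node
 * page in the checkpointed tree that still points at it and invalidate
 * that stale index, so no block ends up referenced twice.
 */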
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; its
	 * reference count is kept.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

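/*
 * Replay one fsynced node block.  @page is the node image read from
 * the warm node chain; "src" is the block address currently recorded
 * in the checkpointed tree and "dest" the address recorded by fsync.
 */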
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}

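/*
 * Pass 2: walk the warm node chain again and replay each collected
 * inode: metadata, dentry, then data indices.
 */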
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

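/*
 * Entry point for roll forward recovery, called at mount time.
 * Returns 1 if check_only is set and fsynced data was found, 0 on
 * success or nothing to do, and a negative errno on failure.
 */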
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;

	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
skip:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list);

	if (!err && need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		err = write_checkpoint(sbi, &cpc);
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}