/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
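
/*
 * Background GC worker: sleep for an adaptive interval, then, if the
 * filesystem is idle and nobody else holds gc_mutex, pick a victim and
 * collect it via f2fs_gc().  The sleep interval shrinks while there is
 * plenty of invalid (garbage) data to reclaim and grows when there is not.
 */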
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle by checking the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle by checking the number of
		 *    requests in the bdev's request list.
		 *
		 * Note: we have to avoid triggering GCs too frequently,
		 * because segments may be invalidated soon afterwards by
		 * user updates or deletions, so it is better to wait for
		 * more dirty segments to accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balance f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
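
	/*
	 * These are only the initial defaults; on mainline f2fs the sleep
	 * times and idle policy are tunable at runtime through sysfs
	 * (gc_min_sleep_time, gc_max_sleep_time, gc_no_gc_sleep_time,
	 * gc_idle).
	 */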
	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
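
/*
 * Two victim selection policies are used: GC_GREEDY picks the section
 * with the fewest valid blocks (cheapest to migrate right now), while
 * GC_CB is the LFS-style cost-benefit policy that also weighs segment
 * age, preferring older, "colder" garbage.  Background GC defaults to
 * cost-benefit and foreground GC to greedy, unless gc_idle overrides it.
 */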
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections already
	 * chosen by background GC, since those sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);
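
	/*
	 * Classic LFS cost-benefit: benefit = age * (1 - u), cost = (1 + u),
	 * with the utilization u and the age both scaled to [0, 100] here.
	 * Subtracting from UINT_MAX inverts the score so that the caller,
	 * which minimizes cost, ends up picking the highest-benefit section.
	 */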
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}
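
	/*
	 * Scan the dirty segment bitmap circularly, starting just past the
	 * previous victim.  When the scan hits the end of the main area it
	 * wraps to segment 0 and continues up to where the last pass left
	 * off, so repeated calls cover the whole device instead of
	 * rescanning the same low-numbered segments.
	 */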
	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			break;
		}
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT.  If they match, the node block is still live and
 * is migrated with cold status; otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);
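
	/*
	 * Two passes over the summary entries: phase 0 only issues node
	 * page readahead, phase 1 re-checks validity and actually migrates
	 * the node blocks, so the real work runs against a warm page cache.
	 */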
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, nid);
			continue;
		}

		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 2)
		goto next_step;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block.  Passing an offset that points to another node block type,
 * such as an indirect or double indirect node block, is a caller bug.
 */
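/*
 * For example (a sketch, using the usual f2fs layout where an inode has
 * two direct, two indirect and one double indirect node): node_ofs 1 is
 * the first direct node, so bidx = 0 and its data starts right after the
 * inode's inline addresses, at ADDRS_PER_INODE(inode); node_ofs 2 adds
 * one more direct block's worth, ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode).
 */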
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}
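
/*
 * Migrate one block of an encrypted inode without decrypting it: the
 * ciphertext is read into the meta inode's address space, a new block
 * address is allocated in the cold data log, and the ciphertext is
 * written back there as-is, so GC never needs the file's encryption key.
 */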
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);
	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);
	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);
	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
	fio.op = REQ_OP_WRITE;
	fio.op_flags = WRITE_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}
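
/*
 * For unencrypted data, background GC only marks the page dirty and cold
 * so the regular writeback path relocates it into a cold segment later,
 * while foreground GC writes the page out synchronously, retrying on
 * transient -ENOMEM, because it must free the section right away.
 */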
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.op = REQ_OP_WRITE,
			.op_flags = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;
retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);
		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity.  If the block is valid, it is copied with
 * cold status and the parent node is updated accordingly.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);
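
	/*
	 * Four passes over the summary entries: phase 0 reads ahead the
	 * dnode pages, phase 1 reads ahead the owning inodes' node pages,
	 * phase 2 igets each inode and pre-reads its data page, and phase 3
	 * performs the actual migration.
	 */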
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino while checking its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, let's go to phase 3 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 4)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
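
/*
 * Collect one victim section: all summary blocks are referenced up front,
 * then each segment in the section is processed under a single block plug
 * so the migration IO can be merged.  Returns the number of segments that
 * ended up with zero valid blocks (FG_GC only).
 */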
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* read ahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		if (get_valid_blocks(sbi, segno, 1) == 0)
			continue;

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_bug_on(sbi, !PageUptodate(sum_page));
		f2fs_put_page(sum_page, 0);

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC) {
		while (start_segno < end_segno)
			if (get_valid_blocks(sbi, start_segno++, 1) == 0)
				seg_freed++;
	}

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}
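
/*
 * Entry point for both GC modes (called with gc_mutex held).  A normal
 * (sync == false) call keeps looping via gc_more, escalating from BG_GC
 * to FG_GC, until enough sections are free, and ends with a checkpoint
 * to turn prefree segments into free ones.  A sync call does a single
 * FG_GC pass and reports -EAGAIN if nothing could be freed.
 */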
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
	unsigned int segno = NULL_SEGNO;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
		gc_type = FG_GC;
		/*
		 * If there is no victim and no prefree segment but we still
		 * do not have enough free sections, flush the dent/node
		 * blocks and retry the garbage collection.
		 */
		if (__get_victim(sbi, &segno, gc_type) ||
						prefree_segments(sbi)) {
			write_checkpoint(sbi, &cpc);
			segno = NULL_SEGNO;
		} else if (has_not_enough_free_secs(sbi, 0)) {
			write_checkpoint(sbi, &cpc);
		}
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);

	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed))
			goto gc_more;

		if (gc_type == FG_GC)
			write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}