/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		if (!sb_start_write_trylock(sbi->sb))
			continue;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (gc_th->gc_urgent) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;
	gc_th->gc_urgent = 0;
	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
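/*
 * The DEF_GC_THREAD_* sleep defaults used above come from gc.h; in
 * mainline at the time of this code they were roughly 500 ms (urgent),
 * 30 s (min), 60 s (max) and 300 s (no_gc). gc_thread_func() starts
 * from min_sleep_time and moves between the min/max bounds depending
 * on whether the last scan found enough invalid blocks to be worth
 * collecting.
 */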
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (!gc_th)
		return gc_mode;

	if (gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	if (gc_th->gc_urgent)
		gc_mode = GC_GREEDY;
	return gc_mode;
}
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_thread && !sbi->gc_thread->gc_urgent) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
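/*
 * Note on the two policies above: SSR scans the dirty map of the
 * requested log type one segment at a time (ofs_unit == 1) and always
 * scores greedily, while LFS victims are evaluated over whole sections
 * (ofs_unit == segs_per_sec) with the cost mode chosen by
 * select_gc_type().
 */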
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
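/*
 * These are the initial "worst case" costs seeding the victim search:
 * a section holds at most blocks_per_seg * ofs_unit valid blocks, so
 * 2 * blocks_per_seg * ofs_unit is safely above any real greedy cost,
 * and UINT_MAX plays the same role for the cost-benefit mode, whose
 * costs are computed as UINT_MAX minus a bounded benefit.
 */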
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim sections
	 * already chosen by background GC before.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
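/*
 * Worked example for the cost-benefit formula above: with utilization
 * u == 20 (a section that is 20% valid) and age == 50, the benefit
 * term is 100 * (100 - 20) * 50 / (100 + 20) == 3333, so the returned
 * cost is UINT_MAX - 3333. A fuller (u == 80) or younger (small age)
 * section yields a smaller benefit term and therefore a higher cost,
 * making it less attractive, since the search keeps the minimum cost.
 */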
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}
static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
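/*
 * The search above resumes from last_victim[gc_mode] and, when it hits
 * the end of the main area, wraps around once to cover the segments
 * before the starting offset, so repeated calls cycle through all
 * dirty segments instead of rescanning the same region each time.
 */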
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}
static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}
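/*
 * Reference discipline for the gc_inode_list helpers above: callers
 * hand an inode reference to add_gc_inode(), which drops it right away
 * only when the inode is already cached; otherwise the reference is
 * kept in the list until put_gc_inode() iputs every cached inode at
 * the end of a GC round.
 */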
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares the node address found in the summary with the
 * one in the NAT. If the node is still valid, it is moved with cold
 * status; otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}
		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}
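/*
 * Note on the phased loop above: the segment is scanned three times
 * (phase 0..2). The first two passes only issue readahead for the NAT
 * blocks and the node pages themselves, so that the final pass, which
 * actually migrates the nodes, mostly hits the page cache instead of
 * blocking on synchronous reads.
 */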
/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass node offsets that refer to direct
 * node blocks only. Passing the offset of any other node block type,
 * such as an indirect or double indirect node block, is a caller's
 * mistake.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
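/*
 * Example for the index math above: node offsets 1 and 2 are the two
 * direct node blocks referenced by the inode itself, so bidx is simply
 * node_ofs - 1. Offset 3 is the first indirect node block; offset 4 is
 * the first direct node block below it, giving dec == 0 and
 * bidx == 4 - 2 == 2, i.e. the third direct node overall. "dec" counts
 * the indirect node blocks encountered so far, which hold no data
 * addresses themselves.
 */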
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}
/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static void move_data_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;
	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;
	if (f2fs_is_atomic_file(inode))
		goto out;
	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);
	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	err = f2fs_submit_page_write(&fio);
	if (err) {
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_page(dn.node_page, 1);
out:
	f2fs_put_page(page, 1);
}
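/*
 * Why META_MAPPING in the path above: for encrypted files GC cannot
 * decrypt the data, so the block is read into the meta inode's address
 * space and written back verbatim (as ciphertext) at the new address,
 * leaving the file's own page cache untouched.
 */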
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;
	if (f2fs_is_atomic_file(inode))
		goto out;
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}
/*
 * This function tries to get the parent node of the victim data block,
 * and checks the data block's validity. If the block is valid, it is
 * copied with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address is
 * different, the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}
		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer the move to phase 4 */
			if (f2fs_encrypted_file(inode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->dio_rwsem[WRITE])) {
				iput(inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_file(inode))
				move_data_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}
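/*
 * Note on the phases above: phases 0-2 only issue readahead (NAT
 * blocks, node pages, then the victims' inode pages), phase 3 loads
 * the inodes and pre-reads their data pages, and phase 4 performs the
 * actual move. Splitting the work this way batches the metadata reads
 * before any page is migrated.
 */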
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
							NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks with contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		/* drop the extra ref from find_get_page(); the page is
		 * still pinned by the get_sum_page() reference above */
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}
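/*
 * For FG_GC the merged write above is issued before returning because
 * the caller needs the victim's blocks to actually reach their new
 * location before the follow-up checkpoint can reuse the old segment;
 * BG_GC can leave the dirtied pages to the regular writeback path.
 */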
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below given
		 * threshold, we can make them free by checkpoint. Then, we
		 * secure free segments which don't need FG_GC any more.
		 */
		if (prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;

	sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
				BLKS_PER_SEC(sbi), (main_count - resv_count));
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
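/*
 * Illustration of the fggc_threshold above: if overprovisioned blocks
 * make up 10% of main_count and reserved blocks 5%, the threshold is
 * BLKS_PER_SEC * 0.90 / 0.95, i.e. a section qualifies as an FG_GC
 * candidate only while its valid-block count stays below roughly 95%
 * of a full section. The exact ratio depends on the mkfs-time
 * overprovisioning settings.
 */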