diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1596502..deca74c 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -529,31 +529,38 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
        else
                f2fs_build_free_nids(sbi, false, false);
 
-       if (!is_idle(sbi, REQ_TIME) &&
-               (!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
+       if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
+               excess_prefree_segs(sbi))
+               goto do_sync;
+
+       /* there is background inflight IO or foreground operation recently */
+       if (is_inflight_io(sbi, REQ_TIME) ||
+               (!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
                return;
 
+       /* exceed periodical checkpoint timeout threshold */
+       if (f2fs_time_over(sbi, CP_TIME))
+               goto do_sync;
+
        /* checkpoint is the only way to shrink partial cached entries */
-       if (!f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
-                       !f2fs_available_free_memory(sbi, INO_ENTRIES) ||
-                       excess_prefree_segs(sbi) ||
-                       excess_dirty_nats(sbi) ||
-                       excess_dirty_nodes(sbi) ||
-                       f2fs_time_over(sbi, CP_TIME)) {
-               if (test_opt(sbi, DATA_FLUSH) && from_bg) {
-                       struct blk_plug plug;
-
-                       mutex_lock(&sbi->flush_lock);
-
-                       blk_start_plug(&plug);
-                       f2fs_sync_dirty_inodes(sbi, FILE_INODE);
-                       blk_finish_plug(&plug);
+       if (f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
+               f2fs_available_free_memory(sbi, INO_ENTRIES))
+               return;
 
-                       mutex_unlock(&sbi->flush_lock);
-               }
-               f2fs_sync_fs(sbi->sb, true);
-               stat_inc_bg_cp_count(sbi->stat_info);
+do_sync:
+       if (test_opt(sbi, DATA_FLUSH) && from_bg) {
+               struct blk_plug plug;
+
+               mutex_lock(&sbi->flush_lock);
+
+               blk_start_plug(&plug);
+               f2fs_sync_dirty_inodes(sbi, FILE_INODE);
+               blk_finish_plug(&plug);
+
+               mutex_unlock(&sbi->flush_lock);
        }
+       f2fs_sync_fs(sbi->sb, true);
+       stat_inc_bg_cp_count(sbi->stat_info);
 }
 
 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
@@ -3254,7 +3261,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
                        else
                                return CURSEG_COLD_DATA;
                }
-               if (file_is_cold(inode) || f2fs_compressed_file(inode))
+               if (file_is_cold(inode) || f2fs_need_compress_data(inode))
                        return CURSEG_COLD_DATA;
                if (file_is_hot(inode) ||
                                is_inode_flag_set(inode, FI_HOT_DATA) ||
@@ -4544,7 +4551,7 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
                return;
 
        mutex_lock(&dirty_i->seglist_lock);
-       for (segno = 0; segno < MAIN_SECS(sbi); segno += blks_per_sec) {
+       for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
                valid_blocks = get_valid_blocks(sbi, segno, true);
                secno = GET_SEC_FROM_SEG(sbi, segno);