radix_tree_delete(&nm_i->free_nid_root, i->nid);
 }
 
-static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
+/* return if the nid is recognized as free */
+static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i;
 
        /* 0 nid should not be used */
        if (unlikely(nid == 0))
-               return 0;
+               return false;
 
        if (build) {
                /* do not add allocated nids */
                ne = __lookup_nat_cache(nm_i, nid);
                if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
                                nat_get_blkaddr(ne) != NULL_ADDR))
-                       return 0;
+                       return false;
        }
 
        i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
 
        if (radix_tree_preload(GFP_NOFS)) {
                kmem_cache_free(free_nid_slab, i);
-               return 0;
+               return true;
        }
 
        spin_lock(&nm_i->nid_list_lock);
        radix_tree_preload_end();
        if (err) {
                kmem_cache_free(free_nid_slab, i);
-               return 0;
+               return true;
        }
-       return 1;
+       return true;
 }
 
 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
                kmem_cache_free(free_nid_slab, i);
 }
 
+void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
+       unsigned int nid_ofs = nid - START_NID(nid);
+
+       if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+               return;
+
+       if (set)
+               set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+       else
+               clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+}
+
 static void scan_nat_page(struct f2fs_sb_info *sbi,
                        struct page *nat_page, nid_t start_nid)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct f2fs_nat_block *nat_blk = page_address(nat_page);
        block_t blk_addr;
+       unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
        int i;
 
+       set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+
        i = start_nid % NAT_ENTRY_PER_BLOCK;
 
        for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
+               bool freed = false;
 
                if (unlikely(start_nid >= nm_i->max_nid))
                        break;
                blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
                f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
                if (blk_addr == NULL_ADDR)
-                       add_free_nid(sbi, start_nid, true);
+                       freed = add_free_nid(sbi, start_nid, true);
+               update_free_nid_bitmap(sbi, start_nid, freed);
+       }
+}
+
+static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+       struct f2fs_journal *journal = curseg->journal;
+       unsigned int i, idx;
+       unsigned int target = FREE_NID_PAGES * NAT_ENTRY_PER_BLOCK;
+
+       down_read(&nm_i->nat_tree_lock);
+
+       for (i = 0; i < nm_i->nat_blocks; i++) {
+               if (!test_bit_le(i, nm_i->nat_block_bitmap))
+                       continue;
+               for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
+                       nid_t nid;
+
+                       if (!test_bit_le(idx, nm_i->free_nid_bitmap[i]))
+                               continue;
+
+                       nid = i * NAT_ENTRY_PER_BLOCK + idx;
+                       add_free_nid(sbi, nid, true);
+
+                       if (nm_i->nid_cnt[FREE_NID_LIST] >= target)
+                               goto out;
+               }
+       }
+out:
+       down_read(&curseg->journal_rwsem);
+       for (i = 0; i < nats_in_cursum(journal); i++) {
+               block_t addr;
+               nid_t nid;
+
+               addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+               nid = le32_to_cpu(nid_in_journal(journal, i));
+               if (addr == NULL_ADDR)
+                       add_free_nid(sbi, nid, true);
+               else
+                       remove_free_nid(sbi, nid);
        }
+       up_read(&curseg->journal_rwsem);
+       up_read(&nm_i->nat_tree_lock);
 }
 
 static int scan_nat_bits(struct f2fs_sb_info *sbi)
        if (!sync && !available_free_memory(sbi, FREE_NIDS))
                return;
 
-       /* try to find free nids with nat_bits */
-       if (!mount && !scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
-               return;
+       if (!mount) {
+               /* try to find free nids in free_nid_bitmap */
+               scan_free_nid_bits(sbi);
+
+               if (nm_i->nid_cnt[FREE_NID_LIST])
+                       return;
+
+               /* try to find free nids with nat_bits */
+               if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
+                       return;
+       }
 
        /* find next valid candidate */
        if (enabled_nat_bits(sbi, NULL)) {
                i->state = NID_ALLOC;
                __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
                nm_i->available_nids--;
+
+               update_free_nid_bitmap(sbi, *nid, false);
+
                spin_unlock(&nm_i->nid_list_lock);
                return true;
        }
 
        nm_i->available_nids++;
 
+       update_free_nid_bitmap(sbi, nid, true);
+
        spin_unlock(&nm_i->nid_list_lock);
 
        if (need_free)
                        add_free_nid(sbi, nid, false);
                        spin_lock(&NM_I(sbi)->nid_list_lock);
                        NM_I(sbi)->available_nids++;
+                       update_free_nid_bitmap(sbi, nid, true);
+                       spin_unlock(&NM_I(sbi)->nid_list_lock);
+               } else {
+                       spin_lock(&NM_I(sbi)->nid_list_lock);
+                       update_free_nid_bitmap(sbi, nid, false);
                        spin_unlock(&NM_I(sbi)->nid_list_lock);
                }
        }
        return 0;
 }
 
+int init_free_nid_cache(struct f2fs_sb_info *sbi)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+       nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks *
+                                       NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
+       if (!nm_i->free_nid_bitmap)
+               return -ENOMEM;
+
+       nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8,
+                                                               GFP_KERNEL);
+       if (!nm_i->nat_block_bitmap)
+               return -ENOMEM;
+       return 0;
+}
+
 int build_node_manager(struct f2fs_sb_info *sbi)
 {
        int err;
        if (err)
                return err;
 
+       err = init_free_nid_cache(sbi);
+       if (err)
+               return err;
+
        build_free_nids(sbi, true, true);
        return 0;
 }
        }
        up_write(&nm_i->nat_tree_lock);
 
+       kvfree(nm_i->nat_block_bitmap);
+       kvfree(nm_i->free_nid_bitmap);
+
        kfree(nm_i->nat_bitmap);
        kfree(nm_i->nat_bits);
 #ifdef CONFIG_F2FS_CHECK_FS