This patch uses the f2fs_bitmap_size macro to calculate the memory used
by the free nid bitmap, so the memory stat now accounts for the aligned
part as well.
Signed-off-by: Yunlei He <heyunlei@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
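
For reference, f2fs_bitmap_size() returns the byte size of an nr-bit
bitmap rounded up to whole unsigned longs, which is why the stat now
covers the aligned tail. A minimal sketch, assumed equivalent to (not
quoted verbatim from) the definition in fs/f2fs/f2fs.h:

	/* Assumed shape of the helper this patch switches to; see
	 * fs/f2fs/f2fs.h for the authoritative definition. */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, 8 * sizeof(unsigned long))
	#define f2fs_bitmap_size(nr)	(BITS_TO_LONGS(nr) * sizeof(unsigned long))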
        si->base_mem += sizeof(struct f2fs_nm_info);
        si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
        si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
-       si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+       si->base_mem += NM_I(sbi)->nat_blocks *
+                               f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
        si->base_mem += NM_I(sbi)->nat_blocks / 8;
        si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
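
To make the accounting concrete, a standalone sketch of the arithmetic
follows. The constants assume PAGE_SIZE == 4096, 64-bit longs, and a
packed 9-byte struct f2fs_nat_entry; none of them are taken from this
patch itself:

	#include <stdio.h>

	#define PAGE_SIZE		4096
	#define NAT_ENTRY_SIZE		9	/* packed struct f2fs_nat_entry */
	#define NAT_ENTRY_PER_BLOCK	(PAGE_SIZE / NAT_ENTRY_SIZE)	/* 455 */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		/* removed NAT_ENTRY_BITMAP_SIZE: plain byte rounding, which
		 * is all the stat used to count */
		unsigned int old_stat = DIV_ROUND_UP(NAT_ENTRY_PER_BLOCK, 8);	/* 57 */
		/* f2fs_bitmap_size(): bytes rounded up to whole longs, which
		 * is what gets allocated per NAT block */
		unsigned int real = DIV_ROUND_UP(NAT_ENTRY_PER_BLOCK,
					8 * sizeof(long)) * sizeof(long);	/* 64 */

		printf("per NAT block: stat was %u bytes, allocation is %u\n",
		       old_stat, real);
		return 0;
	}

With the patch, the allocation loop and the base_mem stat use the same
per-NAT-block figure, so the accounting includes the aligned tail that
the old NAT_ENTRY_BITMAP_SIZE missed.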
 
        for (i = 0; i < nm_i->nat_blocks; i++) {
                nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
-                               NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
+                       f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
                if (!nm_i->free_nid_bitmap[i])
                        return -ENOMEM;
        }
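
Aside: the failure check must test the element just allocated, not the
free_nid_bitmap array pointer itself, as fixed above. A hedged userspace
analogue of the same allocate-one-bitmap-per-NAT-block pattern, with
calloc standing in for f2fs_kvzalloc (all names here are illustrative,
not taken from f2fs):

	#include <stdlib.h>

	static unsigned char **alloc_free_nid_bitmaps(unsigned int nat_blocks,
						      size_t bitmap_bytes)
	{
		unsigned char **maps = calloc(nat_blocks, sizeof(*maps));
		unsigned int i;

		if (!maps)
			return NULL;
		for (i = 0; i < nat_blocks; i++) {
			maps[i] = calloc(1, bitmap_bytes);
			if (!maps[i]) {	/* check the element, not the array */
				while (i--)
					free(maps[i]);
				free(maps);
				return NULL;
			}
		}
		return maps;
	}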
 
  * For NAT entries
  */
 #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE  ((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED                          \
-       ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) /          \
-       BITS_PER_LONG * BITS_PER_LONG)
-
 
 struct f2fs_nat_entry {
        __u8 version;           /* latest version of cached nat entry */
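
For context, the hunk cuts off the rest of the on-disk entry that
NAT_ENTRY_PER_BLOCK divides the page by; as defined in
include/linux/f2fs_fs.h it is packed into 9 bytes:

	struct f2fs_nat_entry {
		__u8 version;		/* latest version of cached nat entry */
		__le32 ino;		/* inode number */
		__le32 block_addr;	/* block address */
	} __attribute__((packed));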