 #define F2FS_IOC_SET_PIN_FILE          _IOW(F2FS_IOCTL_MAGIC, 13, __u32)
 #define F2FS_IOC_GET_PIN_FILE          _IOR(F2FS_IOCTL_MAGIC, 14, __u32)
 #define F2FS_IOC_PRECACHE_EXTENTS      _IO(F2FS_IOCTL_MAGIC, 15)
+#define F2FS_IOC_RESIZE_FS             _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
 
 #define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
 #define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
        SBI_QUOTA_NEED_FLUSH,                   /* need to flush quota info in CP */
        SBI_QUOTA_SKIP_FLUSH,                   /* skip flushing quota in current CP */
        SBI_QUOTA_NEED_REPAIR,                  /* quota file may be corrupted */
+       SBI_IS_RESIZEFS,                        /* resizefs is in process */
 };
 
 enum {
        unsigned int segs_per_sec;              /* segments per section */
        unsigned int secs_per_zone;             /* sections per zone */
        unsigned int total_sections;            /* total section count */
+       struct mutex resize_mutex;              /* for resize exclusion */
        unsigned int total_node_count;          /* total node block count */
        unsigned int total_valid_node_count;    /* valid node block count */
        loff_t max_file_blocks;                 /* max block index of file */
 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
+void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+                                       unsigned int start, unsigned int end);
 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
                        unsigned int segno);
 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
+int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
 
 /*
  * recovery.c
 
        struct sit_info *sm = SIT_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, last_victim;
-       unsigned int last_segment = MAIN_SEGS(sbi);
+       unsigned int last_segment;
        unsigned int nsearched = 0;
 
        mutex_lock(&dirty_i->seglist_lock);
+       last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
 
        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);
                                sm->last_victim[p.gc_mode] = last_victim + 1;
                        else
                                sm->last_victim[p.gc_mode] = segno + 1;
-                       sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
+                       sm->last_victim[p.gc_mode] %=
+                               (MAIN_SECS(sbi) * sbi->segs_per_sec);
                        break;
                }
        }
                SIT_I(sbi)->last_victim[ALLOC_NEXT] =
                                GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
+
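+/*
+ * Make every segment in [start, end] free: move the current segments out of
+ * the range, then GC any remaining valid blocks off of it.
+ */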
+static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
+                                                       unsigned int end)
+{
+       int type;
+       unsigned int segno, next_inuse;
+       int err = 0;
+
+       /* Move out cursegs from the target range */
+       for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
+               allocate_segment_for_resize(sbi, type, start, end);
+
+       /* do GC to move out valid blocks in the range */
+       for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
+               struct gc_inode_list gc_list = {
+                       .ilist = LIST_HEAD_INIT(gc_list.ilist),
+                       .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+               };
+
+               mutex_lock(&sbi->gc_mutex);
+               do_garbage_collect(sbi, segno, &gc_list, FG_GC);
+               mutex_unlock(&sbi->gc_mutex);
+               put_gc_inode(&gc_list);
+
+               if (get_valid_blocks(sbi, segno, true))
+                       return -EAGAIN;
+       }
+
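+       /* checkpoint, so the segments freed above are persisted as free */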
+       err = f2fs_sync_fs(sbi->sb, 1);
+       if (err)
+               return err;
+
+       next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
+       if (next_inuse <= end) {
+               f2fs_msg(sbi->sb, KERN_ERR,
+                       "segno %u should be free but still inuse!", next_inuse);
+               f2fs_bug_on(sbi, 1);
+       }
+       return err;
+}
+
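+/* apply a delta of @secs sections (negative when shrinking) to the raw sb */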
+static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
+{
+       struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
+       int section_count = le32_to_cpu(raw_sb->section_count);
+       int segment_count = le32_to_cpu(raw_sb->segment_count);
+       int segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
+       long long block_count = le64_to_cpu(raw_sb->block_count);
+       int segs = secs * sbi->segs_per_sec;
+
+       raw_sb->section_count = cpu_to_le32(section_count + secs);
+       raw_sb->segment_count = cpu_to_le32(segment_count + segs);
+       raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
+       raw_sb->block_count = cpu_to_le64(block_count +
+                                       (long long)segs * sbi->blocks_per_seg);
+}
+
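+/* apply the same section delta to the in-memory and checkpoint counters */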
+static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
+{
+       int segs = secs * sbi->segs_per_sec;
+       long long user_block_count =
+                               le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
+
+       SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
+       MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
+       FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
+       FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
+       F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count +
+                                       (long long)segs * sbi->blocks_per_seg);
+}
+
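+/* shrink the fs to @block_count blocks; growing is rejected with -EINVAL */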
+int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
+{
+       __u64 old_block_count, shrunk_blocks;
+       unsigned int secs;
+       int gc_mode, gc_type;
+       int err = 0;
+       __u32 rem;
+
+       old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
+       if (block_count > old_block_count)
+               return -EINVAL;
+
+       /* new fs size should align to section size */
+       div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
+       if (rem)
+               return -EINVAL;
+
+       if (block_count == old_block_count)
+               return 0;
+
+       if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+               f2fs_msg(sbi->sb, KERN_ERR,
+                       "Should run fsck to repair first.");
+               return -EINVAL;
+       }
+
+       if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+               f2fs_msg(sbi->sb, KERN_ERR,
+                       "Checkpoint should be enabled.");
+               return -EINVAL;
+       }
+
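+       /* fence off all writes while the fs is being shrunk */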
+       freeze_bdev(sbi->sb->s_bdev);
+
+       shrunk_blocks = old_block_count - block_count;
+       secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
+       spin_lock(&sbi->stat_lock);
+       if (shrunk_blocks + valid_user_blocks(sbi) +
+               sbi->current_reserved_blocks + sbi->unusable_block_count +
+               F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
+               err = -ENOSPC;
+       else
+               sbi->user_block_count -= shrunk_blocks;
+       spin_unlock(&sbi->stat_lock);
+       if (err) {
+               thaw_bdev(sbi->sb->s_bdev, sbi->sb);
+               return err;
+       }
+
+       mutex_lock(&sbi->resize_mutex);
+       set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+
+       mutex_lock(&DIRTY_I(sbi)->seglist_lock);
+
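+       /* shrink the live section count first so new GC victims stay in range */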
+       MAIN_SECS(sbi) -= secs;
+
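+       /* reset any victim cursors that now point beyond the new end */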
+       for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
+               if (SIT_I(sbi)->last_victim[gc_mode] >=
+                                       MAIN_SECS(sbi) * sbi->segs_per_sec)
+                       SIT_I(sbi)->last_victim[gc_mode] = 0;
+
+       for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
+               if (sbi->next_victim_seg[gc_type] >=
+                                       MAIN_SECS(sbi) * sbi->segs_per_sec)
+                       sbi->next_victim_seg[gc_type] = NULL_SEGNO;
+
+       mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
+
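+       /* migrate all valid blocks out of the truncated tail and free it */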
+       err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec,
+                       MAIN_SEGS(sbi) - 1);
+       if (err)
+               goto out;
+
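+       /* commit the shrunk geometry to the super block first */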
+       update_sb_metadata(sbi, -secs);
+
+       err = f2fs_commit_super(sbi, false);
+       if (err) {
+               update_sb_metadata(sbi, secs);
+               goto out;
+       }
+
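+       /* the super block is durable; now shrink the in-memory state */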
+       update_fs_metadata(sbi, -secs);
+       clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+       /* mark the fs dirty so the sync below writes a real checkpoint */
+       set_sbi_flag(sbi, SBI_IS_DIRTY);
+       err = f2fs_sync_fs(sbi->sb, 1);
+       if (err) {
+               update_fs_metadata(sbi, secs);
+               update_sb_metadata(sbi, secs);
+               f2fs_commit_super(sbi, false);
+       }
+out:
+       if (err) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_ERR,
+                               "resize_fs failed, should run fsck to repair!");
+
+               MAIN_SECS(sbi) += secs;
+               spin_lock(&sbi->stat_lock);
+               sbi->user_block_count += shrunk_blocks;
+               spin_unlock(&sbi->stat_lock);
+       }
+       clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+       mutex_unlock(&sbi->resize_mutex);
+       thaw_bdev(sbi->sb->s_bdev, sbi->sb);
+       return err;
+}
 
        stat_inc_seg_type(sbi, curseg);
 }
 
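+/* move the current segment of @type out of [start, end] ahead of a resize */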
+void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+                                       unsigned int start, unsigned int end)
+{
+       struct curseg_info *curseg = CURSEG_I(sbi, type);
+       unsigned int segno;
+
+       down_read(&SM_I(sbi)->curseg_lock);
+       mutex_lock(&curseg->curseg_mutex);
+       down_write(&SIT_I(sbi)->sentry_lock);
+
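+       /* nothing to do unless this curseg sits inside the range being freed */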
+       segno = CURSEG_I(sbi, type)->segno;
+       if (segno < start || segno > end)
+               goto unlock;
+
+       if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
+               change_curseg(sbi, type);
+       else
+               new_curseg(sbi, type, true);
+
+       stat_inc_seg_type(sbi, curseg);
+
+       locate_dirty_segment(sbi, segno);
+unlock:
+       up_write(&SIT_I(sbi)->sentry_lock);
+
+       if (segno != curseg->segno)
+               f2fs_msg(sbi->sb, KERN_NOTICE,
+                       "For resize: curseg of type %d: %u ==> %u",
+                       type, segno, curseg->segno);
+
+       mutex_unlock(&curseg->curseg_mutex);
+       up_read(&SM_I(sbi)->curseg_lock);
+}
+
 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 {
        struct curseg_info *curseg;
        struct f2fs_journal *journal = curseg->journal;
        struct sit_entry_set *ses, *tmp;
        struct list_head *head = &SM_I(sbi)->sit_entry_set;
-       bool to_journal = true;
+       bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
        struct seg_entry *se;
 
        down_write(&sit_i->sentry_lock);
         * entries, remove all entries from journal and add and account
         * them in sit entry set.
         */
-       if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
+       if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
+                                                               !to_journal)
                remove_sits_in_journal(sbi);
 
        /*