int len;                /* # of consecutive blocks of the discard */
 };
 
+struct bio_entry {
+       struct list_head list;
+       struct bio *bio;
+       struct completion event;
+       int error;
+};
+
 /* for the list of fsync inodes, used only during recovery */
 struct fsync_inode_entry {
        struct list_head list;  /* list head */
 
        /* for small discard management */
        struct list_head discard_list;          /* 4KB discard list */
+       struct list_head wait_list;             /* list of issued discard bios */
        int nr_discards;                        /* # of discards in the list */
        int max_discards;                       /* max. discards to be issued */
 
 void invalidate_blocks(struct f2fs_sb_info *, block_t);
 bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
 void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
+void f2fs_wait_all_discard_bio(struct f2fs_sb_info *);
 void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
 void release_discard_addrs(struct f2fs_sb_info *);
 bool discard_next_dnode(struct f2fs_sb_info *, block_t);
 
 #define __reverse_ffz(x) __reverse_ffs(~(x))
 
 static struct kmem_cache *discard_entry_slab;
+static struct kmem_cache *bio_entry_slab;
 static struct kmem_cache *sit_entry_set_slab;
 static struct kmem_cache *inmem_entry_slab;
 
        mutex_unlock(&dirty_i->seglist_lock);
 }
 
+/* allocate a bio_entry for an issued discard bio and add it to the wait list */
+static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi,
+                                                       struct bio *bio)
+{
+       struct list_head *wait_list = &(SM_I(sbi)->wait_list);
+       struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
+
+       INIT_LIST_HEAD(&be->list);
+       be->bio = bio;
+       init_completion(&be->event);
+       list_add_tail(&be->list, wait_list);
+
+       return be;
+}
+
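+/* wait for all issued discard bios to complete, then release their bio_entries */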
+void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi)
+{
+       struct list_head *wait_list = &(SM_I(sbi)->wait_list);
+       struct bio_entry *be, *tmp;
+
+       list_for_each_entry_safe(be, tmp, wait_list, list) {
+               struct bio *bio = be->bio;
+               int err;
+
+               wait_for_completion_io(&be->event);
+               err = be->error;
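+               /* a device that does not support discard is not a real failure */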
+               if (err == -EOPNOTSUPP)
+                       err = 0;
+
+               if (err)
+                       f2fs_msg(sbi->sb, KERN_INFO,
+                               "Issue discard failed, ret: %d", err);
+
+               bio_put(bio);
+               list_del(&be->list);
+               kmem_cache_free(bio_entry_slab, be);
+       }
+}
+
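+/* bio end_io callback: record the completion status and wake up the waiter */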
+static void f2fs_submit_bio_wait_endio(struct bio *bio)
+{
+       struct bio_entry *be = (struct bio_entry *)bio->bi_private;
+
+       be->error = bio->bi_error;
+       complete(&be->event);
+}
+
+/* this function is copied from blkdev_issue_discard in block/blk-lib.c */
+static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
+               sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+       struct block_device *bdev = sbi->sb->s_bdev;
+       struct bio *bio = NULL;
+       int err;
+
+       err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
+                       &bio);
+       if (!err && bio) {
+               struct bio_entry *be = __add_bio_entry(sbi, bio);
+
+               bio->bi_private = be;
+               bio->bi_end_io = f2fs_submit_bio_wait_endio;
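+               /* submit asynchronously; completion is awaited in f2fs_wait_all_discard_bio() */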
+               bio->bi_opf |= REQ_SYNC;
+               submit_bio(bio);
+       }
+
+       return err;
+}
+
 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
                                block_t blkstart, block_t blklen)
 {
                        sbi->discard_blks--;
        }
        trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
-       return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
+       return __f2fs_issue_discard_async(sbi, start, len, GFP_NOFS, 0);
 }
 
 bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
        struct list_head *head = &(SM_I(sbi)->discard_list);
        struct discard_entry *entry, *this;
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+       struct blk_plug plug;
        unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
        unsigned int start = 0, end = -1;
        unsigned int secno, start_segno;
        bool force = (cpc->reason == CP_DISCARD);
 
+       /* plug the block queue to batch the discard bios issued below */
+       blk_start_plug(&plug);
+
        mutex_lock(&dirty_i->seglist_lock);
 
        while (1) {
                SM_I(sbi)->nr_discards -= entry->len;
                kmem_cache_free(discard_entry_slab, entry);
        }
+
+       blk_finish_plug(&plug);
 }
 
 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
        sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
 
        INIT_LIST_HEAD(&sm_info->discard_list);
+       INIT_LIST_HEAD(&sm_info->wait_list);
        sm_info->nr_discards = 0;
        sm_info->max_discards = 0;
 
        if (!discard_entry_slab)
                goto fail;
 
+       bio_entry_slab = f2fs_kmem_cache_create("bio_entry",
+                       sizeof(struct bio_entry));
+       if (!bio_entry_slab)
+               goto destory_discard_entry;
+
        sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
                        sizeof(struct sit_entry_set));
        if (!sit_entry_set_slab)
-               goto destory_discard_entry;
+               goto destroy_bio_entry;
 
        inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
                        sizeof(struct inmem_pages));
 
 destroy_sit_entry_set:
        kmem_cache_destroy(sit_entry_set_slab);
+destroy_bio_entry:
+       kmem_cache_destroy(bio_entry_slab);
 destory_discard_entry:
        kmem_cache_destroy(discard_entry_slab);
 fail:
 void destroy_segment_manager_caches(void)
 {
        kmem_cache_destroy(sit_entry_set_slab);
+       kmem_cache_destroy(bio_entry_slab);
        kmem_cache_destroy(discard_entry_slab);
        kmem_cache_destroy(inmem_entry_slab);
 }