                cond_resched();
                goto repeat;
        }
-       f2fs_wait_on_page_writeback(page, META);
+       f2fs_wait_on_page_writeback(page, META, true);
        SetPageUptodate(page);
        return page;
 }
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;
 
-       f2fs_wait_on_page_writeback(page, META);
+       f2fs_wait_on_page_writeback(page, META, true);
        write_meta_page(sbi, page);
        dec_page_count(sbi, F2FS_DIRTY_META);
        unlock_page(page);
 
        struct page *node_page = dn->node_page;
        unsigned int ofs_in_node = dn->ofs_in_node;
 
-       f2fs_wait_on_page_writeback(node_page, NODE);
+       f2fs_wait_on_page_writeback(node_page, NODE, true);
 
        rn = F2FS_NODE(node_page);
 
 
                        if (PageWriteback(page)) {
                                if (wbc->sync_mode != WB_SYNC_NONE)
-                                       f2fs_wait_on_page_writeback(page, DATA);
+                                       f2fs_wait_on_page_writeback(page,
+                                                               DATA, true);
                                else
                                        goto continue_unlock;
                        }
                }
        }
 
-       f2fs_wait_on_page_writeback(page, DATA);
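+       /* false: waiting for a stable page is sufficient here */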
+       f2fs_wait_on_page_writeback(page, DATA, false);
 
        /* wait for GCed encrypted page writeback */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 
 {
        enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, type);
+       f2fs_wait_on_page_writeback(page, type, true);
        de->ino = cpu_to_le32(inode->i_ino);
        set_de_type(de, inode->i_mode);
        f2fs_dentry_kunmap(dir, page);
 {
        struct f2fs_inode *ri;
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
        /* copy name info. to this inode page */
        ri = F2FS_INODE(ipage);
        ++level;
        goto start;
 add_dentry:
-       f2fs_wait_on_page_writeback(dentry_page, DATA);
+       f2fs_wait_on_page_writeback(dentry_page, DATA, true);
 
        if (inode) {
                down_write(&F2FS_I(inode)->i_sem);
                return f2fs_delete_inline_entry(dentry, page, dir, inode);
 
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
 
        dentry_blk = page_address(page);
        bit_pos = dentry - dentry_blk->dentry;
 
                                block_t, block_t, unsigned char, bool);
 void allocate_data_block(struct f2fs_sb_info *, struct page *,
                block_t, block_t *, struct f2fs_summary *, int);
-void f2fs_wait_on_page_writeback(struct page *, enum page_type);
+void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
 void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
 void write_data_summaries(struct f2fs_sb_info *, block_t);
 void write_node_summaries(struct f2fs_sb_info *, block_t);
 
        trace_f2fs_vm_page_mkwrite(page, DATA);
 mapped:
        /* fill the page */
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, false);
 
        /* wait for GCed encrypted page writeback */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        if (IS_ERR(page))
                return 0;
 truncate_out:
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user(page, offset, PAGE_CACHE_SIZE - offset);
        if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
                set_page_dirty(page);
        if (IS_ERR(page))
                return PTR_ERR(page);
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user(page, start, len);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
 
 
                /* set page dirty and write it */
                if (gc_type == FG_GC) {
-                       f2fs_wait_on_page_writeback(node_page, NODE);
+                       f2fs_wait_on_page_writeback(node_page, NODE, true);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
         * don't cache encrypted data into meta inode until previous dirty
         * data were writebacked to avoid racing between GC and flush.
         */
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
 
        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
                goto put_page_out;
 
        set_page_dirty(fio.encrypted_page);
-       f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
+       f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
        set_page_writeback(fio.encrypted_page);
 
        /* allocate block address */
-       f2fs_wait_on_page_writeback(dn.node_page, NODE);
+       f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
        allocate_data_block(fio.sbi, NULL, fio.blk_addr,
                                        &fio.blk_addr, &sum, CURSEG_COLD_DATA);
        fio.rw = WRITE_SYNC;
                        .encrypted_page = NULL,
                };
                set_page_dirty(page);
-               f2fs_wait_on_page_writeback(page, DATA);
+               f2fs_wait_on_page_writeback(page, DATA, true);
                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
 
 
        addr = inline_data_addr(ipage);
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
        memset(addr + from, 0, MAX_INLINE_DATA - from);
 
        return true;
        if (err)
                return err;
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
 
        if (PageUptodate(page))
                goto no_update;
        write_data_page(dn, &fio);
        set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        if (dirty)
                inode_dec_dirty_pages(dn->inode);
 
 
        f2fs_bug_on(F2FS_I_SB(inode), page->index);
 
-       f2fs_wait_on_page_writeback(dn.inode_page, NODE);
+       f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
        src_addr = kmap_atomic(page);
        dst_addr = inline_data_addr(dn.inode_page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
                ipage = get_node_page(sbi, inode->i_ino);
                f2fs_bug_on(sbi, IS_ERR(ipage));
 
-               f2fs_wait_on_page_writeback(ipage, NODE);
+               f2fs_wait_on_page_writeback(ipage, NODE, true);
 
                src_addr = inline_data_addr(npage);
                dst_addr = inline_data_addr(ipage);
        if (err)
                goto out;
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
 
        dentry_blk = kmap_atomic(page);
                }
        }
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
        name_hash = f2fs_dentry_hash(name);
        make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
        int i;
 
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, NODE);
+       f2fs_wait_on_page_writeback(page, NODE, true);
 
        inline_dentry = inline_data_addr(page);
        bit_pos = dentry - inline_dentry->dentry;
 
 
        while (start < end) {
                if (*start++) {
-                       f2fs_wait_on_page_writeback(ipage, NODE);
+                       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
                        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
                        set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
 {
        struct f2fs_inode *ri;
 
-       f2fs_wait_on_page_writeback(node_page, NODE);
+       f2fs_wait_on_page_writeback(node_page, NODE, true);
 
        ri = F2FS_INODE(node_page);
 
 
                                f2fs_put_page(page, 1);
                                goto restart;
                        }
-                       f2fs_wait_on_page_writeback(page, NODE);
+                       f2fs_wait_on_page_writeback(page, NODE, true);
                        ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
                        set_page_dirty(page);
                        unlock_page(page);
        new_ni.ino = dn->inode->i_ino;
        set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
-       f2fs_wait_on_page_writeback(page, NODE);
+       f2fs_wait_on_page_writeback(page, NODE, true);
        fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
        set_cold_node(dn->inode, page);
        SetPageUptodate(page);
                                continue;
 
                        if (ino && ino_of_node(page) == ino) {
-                               f2fs_wait_on_page_writeback(page, NODE);
+                               f2fs_wait_on_page_writeback(page, NODE, true);
                                if (TestClearPageError(page))
                                        ret = -EIO;
                        }
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;
 
-       f2fs_wait_on_page_writeback(page, NODE);
+       f2fs_wait_on_page_writeback(page, NODE, true);
 
        /* get old block addr of this node page */
        nid = nid_of_node(page);
        src_addr = inline_xattr_addr(page);
        inline_size = inline_xattr_size(inode);
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
        memcpy(dst_addr, src_addr, inline_size);
 update_inode:
        update_inode(inode, ipage);
 
 {
        struct f2fs_node *rn = F2FS_NODE(p);
 
-       f2fs_wait_on_page_writeback(p, NODE);
+       f2fs_wait_on_page_writeback(p, NODE, true);
 
        if (i)
                rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
 
        if (err)
                goto out;
 
-       f2fs_wait_on_page_writeback(dn.node_page, NODE);
+       f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
        get_node_info(sbi, dn.nid, &ni);
        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
 
                if (!abort) {
                        if (cur->page->mapping == inode->i_mapping) {
                                set_page_dirty(cur->page);
-                               f2fs_wait_on_page_writeback(cur->page, DATA);
+                               f2fs_wait_on_page_writeback(cur->page, DATA,
+                                                                       true);
                                if (clear_page_dirty_for_io(cur->page))
                                        inode_dec_dirty_pages(inode);
                                trace_f2fs_commit_inmem_page(cur->page, INMEM);
 }
 
 void f2fs_wait_on_page_writeback(struct page *page,
-                               enum page_type type)
+                               enum page_type type, bool ordered)
 {
        if (PageWriteback(page)) {
                struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
                if (is_merged_page(sbi, page, type))
                        f2fs_submit_merged_bio(sbi, type, WRITE);
-               wait_on_page_writeback(page);
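+               /*
+                * ordered == true: block until any in-flight writeback of
+                * this page completes; ordered == false: wait only if the
+                * backing device requires stable pages under writeback
+                * (wait_for_stable_page).
+                */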
+               if (ordered)
+                       wait_on_page_writeback(page);
+               else
+                       wait_for_stable_page(page);
        }
 }
 
 
        cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
        if (cpage) {
-               f2fs_wait_on_page_writeback(cpage, DATA);
+               f2fs_wait_on_page_writeback(cpage, DATA, true);
                f2fs_put_page(cpage, 1);
        }
 }
 
 
                if (ipage) {
                        inline_addr = inline_xattr_addr(ipage);
-                       f2fs_wait_on_page_writeback(ipage, NODE);
+                       f2fs_wait_on_page_writeback(ipage, NODE, true);
                } else {
                        page = get_node_page(sbi, inode->i_ino);
                        if (IS_ERR(page)) {
                                return PTR_ERR(page);
                        }
                        inline_addr = inline_xattr_addr(page);
-                       f2fs_wait_on_page_writeback(page, NODE);
+                       f2fs_wait_on_page_writeback(page, NODE, true);
                }
                memcpy(inline_addr, txattr_addr, inline_size);
                f2fs_put_page(page, 1);
                        return PTR_ERR(xpage);
                }
                f2fs_bug_on(sbi, new_nid);
-               f2fs_wait_on_page_writeback(xpage, NODE);
+               f2fs_wait_on_page_writeback(xpage, NODE, true);
        } else {
                struct dnode_of_data dn;
                set_new_dnode(&dn, inode, NULL, NULL, new_nid);