        .set_page_dirty = f2fs_set_meta_page_dirty,
        .invalidatepage = f2fs_invalidate_page,
        .releasepage    = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+       .migratepage    = f2fs_migrate_page,
+#endif
 };
 
 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 
        return generic_block_bmap(mapping, block, get_data_block_bmap);
 }
 
+#ifdef CONFIG_MIGRATION
+#include <linux/migrate.h>
+
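+/*
+ * Relocate a page of an f2fs mapping to newpage on behalf of memory
+ * compaction: move the page cache mapping, fix up f2fs-private state,
+ * then copy the page contents and flags over.
+ */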
+int f2fs_migrate_page(struct address_space *mapping,
+               struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+       int rc, extra_count;
+       struct f2fs_inode_info *fi = F2FS_I(mapping->host);
+       bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
+
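+       /*
+        * The migration core waits for or skips pages under writeback
+        * before calling ->migratepage(), so writeback here is a bug.
+        */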
+       BUG_ON(PageWriteback(page));
+
+       /* migrating an atomic written page is only safe with inmem_lock held */
+       if (atomic_written && !mutex_trylock(&fi->inmem_lock))
+               return -EAGAIN;
+
+       /*
+        * migrate_page_move_mapping() expects an extra page reference when
+        * PagePrivate is set, but F2FS sets PagePrivate without taking one
+        * so that dirty page counts stay correct when truncating pages. An
+        * atomic written page, by contrast, does carry one extra reference
+        * from its inmem_pages entry. Adjust 'extra_count' so the reference
+        * check still passes.
+        */
+       extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
+       rc = migrate_page_move_mapping(mapping, newpage,
+                               page, NULL, mode, extra_count);
+       if (rc != MIGRATEPAGE_SUCCESS) {
+               if (atomic_written)
+                       mutex_unlock(&fi->inmem_lock);
+               return rc;
+       }
+
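+       /*
+        * The mapping has moved; if the page was registered for atomic
+        * write, repoint its inmem_pages entry at newpage and transfer
+        * the reference that the entry holds on the page.
+        */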
+       if (atomic_written) {
+               struct inmem_pages *cur;
+               list_for_each_entry(cur, &fi->inmem_pages, list)
+                       if (cur->page == page) {
+                               cur->page = newpage;
+                               break;
+                       }
+               mutex_unlock(&fi->inmem_lock);
+               put_page(page);
+               get_page(newpage);
+       }
+
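+       /*
+        * Carry the private flag and data (e.g. the ATOMIC_WRITTEN_PAGE
+        * marker) over to the new page.
+        */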
+       if (PagePrivate(page))
+               SetPagePrivate(newpage);
+       set_page_private(newpage, page_private(page));
+
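+       /* copy page contents and the remaining page state to newpage */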
+       migrate_page_copy(newpage, page);
+
+       return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
 const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,
        .releasepage    = f2fs_release_page,
        .direct_IO      = f2fs_direct_IO,
        .bmap           = f2fs_bmap,
+#ifdef CONFIG_MIGRATION
+       .migratepage    = f2fs_migrate_page,
+#endif
 };
 
        .set_page_dirty = f2fs_set_node_page_dirty,
        .invalidatepage = f2fs_invalidate_page,
        .releasepage    = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+       .migratepage    = f2fs_migrate_page,
+#endif
 };
 
 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,