 /* Obtain the lock on the folio, remove all ptes. */
 static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
                               unsigned long private, struct folio *src,
-                              struct folio **dstp, int force,
-                              enum migrate_mode mode, enum migrate_reason reason,
-                              struct list_head *ret)
+                              struct folio **dstp, enum migrate_mode mode,
+                              enum migrate_reason reason, struct list_head *ret)
 {
        struct folio *dst;
        struct page *newpage;
        int rc = -EAGAIN;
 
        newpage = get_new_page(&src->page, private);
        if (!newpage)
                return -ENOMEM;
        dst = page_folio(newpage);
        *dstp = dst;
        dst->private = NULL;
 
        if (!folio_trylock(src)) {
-               if (!force || mode == MIGRATE_ASYNC)
+               if (mode == MIGRATE_ASYNC)
                        goto out;
 
                /* Only full synchronous migration may wait on writeback: */
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
                        break;
                default:
                        rc = -EBUSY;
                        goto out;
                }
-               if (!force)
-                       goto out;
                folio_wait_writeback(src);
        }
 
 #define NR_MAX_BATCHED_MIGRATION       512
 #endif
 #define NR_MAX_MIGRATE_PAGES_RETRY     10
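+/*
+ * The retry budget is split between the two passes: the initial async
+ * batch pass retries at most NR_MAX_MIGRATE_ASYNC_RETRY times, and the
+ * synchronous fallback gets the remainder, so a folio is never retried
+ * more than NR_MAX_MIGRATE_PAGES_RETRY times in total.
+ */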
+#define NR_MAX_MIGRATE_ASYNC_RETRY     3
+#define NR_MAX_MIGRATE_SYNC_RETRY                                      \
+       (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
 
 struct migrate_pages_stats {
        int nr_succeeded;       /* Normal and large folios migrated successfully, in
                                   units of base pages */
                        }
 
                        rc = migrate_folio_unmap(get_new_page, put_new_page, private,
-                                                folio, &dst, pass > 2, mode,
-                                                reason, ret_folios);
+                                                folio, &dst, mode, reason, ret_folios);
                        /*
                         * The rules are:
                         *      Success: folio will be freed
        return rc;
 }
 
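+/*
+ * Try to migrate the folios on "from" in batch with MIGRATE_ASYNC mode
+ * first, then retry the failures one by one in the caller-specified
+ * synchronous mode.  Returns the number of folios that could not be
+ * migrated, or a negative error code.
+ */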
+static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
+               free_page_t put_new_page, unsigned long private,
+               enum migrate_mode mode, int reason, struct list_head *ret_folios,
+               struct list_head *split_folios, struct migrate_pages_stats *stats)
+{
+       int rc, nr_failed = 0;
+       LIST_HEAD(folios);
+       struct migrate_pages_stats astats;
+
+       memset(&astats, 0, sizeof(astats));
+       /* Try to migrate the folios in batch with MIGRATE_ASYNC mode first */
+       rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
+                                reason, &folios, split_folios, &astats,
+                                NR_MAX_MIGRATE_ASYNC_RETRY);
+       stats->nr_succeeded += astats.nr_succeeded;
+       stats->nr_thp_succeeded += astats.nr_thp_succeeded;
+       stats->nr_thp_split += astats.nr_thp_split;
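+       /* A negative rc means a fatal error: fold in the failure stats too. */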
+       if (rc < 0) {
+               stats->nr_failed_pages += astats.nr_failed_pages;
+               stats->nr_thp_failed += astats.nr_thp_failed;
+               list_splice_tail(&folios, ret_folios);
+               return rc;
+       }
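+       /*
+        * THPs that were split by the async pass are counted as THP
+        * failures, even though their split pieces may still be migrated
+        * by the synchronous fallback below.
+        */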
+       stats->nr_thp_failed += astats.nr_thp_split;
+       nr_failed += astats.nr_thp_split;
+       /*
+        * Fall back to migrating all failed folios one by one synchronously.
+        * All failed folios except split THPs will be retried, so their
+        * failure isn't counted here.
+        */
+       list_splice_tail_init(&folios, from);
+       while (!list_empty(from)) {
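+               /*
+                * Move a single folio onto a private list so that
+                * migrate_pages_batch() migrates just that folio in the
+                * synchronous mode.
+                */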
+               list_move(from->next, &folios);
+               rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
+                                        private, mode, reason, ret_folios,
+                                        split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
+               list_splice_tail_init(&folios, ret_folios);
+               if (rc < 0)
+                       return rc;
+               nr_failed += rc;
+       }
+
+       return nr_failed;
+}
+
 /*
  * migrate_pages - migrate the folios specified in a list, to the free folios
  *                supplied as the target for the page migration
                enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
 {
        int rc, rc_gather;
-       int nr_pages, batch;
+       int nr_pages;
        struct folio *folio, *folio2;
        LIST_HEAD(folios);
        LIST_HEAD(ret_folios);
        if (rc_gather < 0)
                goto out;
 
-       if (mode == MIGRATE_ASYNC)
-               batch = NR_MAX_BATCHED_MIGRATION;
-       else
-               batch = 1;
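+       /* Migrate the list in chunks of at most NR_MAX_BATCHED_MIGRATION pages. */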
 again:
        nr_pages = 0;
        list_for_each_entry_safe(folio, folio2, from, lru) {
                }
 
                nr_pages += folio_nr_pages(folio);
-               if (nr_pages >= batch)
+               if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
                        break;
        }
-       if (nr_pages >= batch)
+       if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
                list_cut_before(&folios, from, &folio2->lru);
        else
                list_splice_init(from, &folios);
-       rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
-                                mode, reason, &ret_folios, &split_folios, &stats,
-                                NR_MAX_MIGRATE_PAGES_RETRY);
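+       /*
+        * Pure async requests are batched directly; synchronous requests go
+        * through migrate_pages_sync(), which tries an async batch pass
+        * before falling back to one-by-one synchronous migration.
+        */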
+       if (mode == MIGRATE_ASYNC)
+               rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
+                                        mode, reason, &ret_folios, &split_folios, &stats,
+                                        NR_MAX_MIGRATE_PAGES_RETRY);
+       else
+               rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
+                                       mode, reason, &ret_folios, &split_folios, &stats);
        list_splice_tail_init(&folios, &ret_folios);
        if (rc < 0) {
                rc_gather = rc;