  * Note that start_pfn and end_pfn are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-static int move_freepages(struct zone *zone,
-                         unsigned long start_pfn, unsigned long end_pfn,
-                         int migratetype, int *num_movable)
+static int move_freepages(struct zone *zone, unsigned long start_pfn,
+                         unsigned long end_pfn, int migratetype)
 {
        struct page *page;
        unsigned long pfn;
        for (pfn = start_pfn; pfn <= end_pfn;) {
                page = pfn_to_page(pfn);
                if (!PageBuddy(page)) {
-                       /*
-                        * We assume that pages that could be isolated for
-                        * migration are movable. But we don't actually try
-                        * isolating, as that would be expensive.
-                        */
-                       if (num_movable &&
-                                       (PageLRU(page) || __PageMovable(page)))
-                               (*num_movable)++;
                        pfn++;
                        continue;
                }
        return pages_moved;
 }
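
The buddy branch of the loop is elided in the hunk above; the walk advances
one pfn at a time past allocated pages and jumps 1 << order past each free
chunk it moves. A rough userspace model of just that stride logic follows
(fake_page, model_move_freepages and the numbers are invented for
illustration; none of this is kernel API):

/* Illustrative userspace model of the move_freepages() walk: free buddy
 * chunks are moved as a whole and skipped in order-sized strides; all
 * other pages are stepped over one pfn at a time. Not kernel code. */
#include <stdio.h>

#define RANGE 16

struct fake_page {
        int is_buddy;   /* head of a free buddy chunk? */
        int order;      /* chunk order, valid only if is_buddy */
};

static int model_move_freepages(struct fake_page *pages,
                                unsigned long start_pfn,
                                unsigned long end_pfn)
{
        int pages_moved = 0;
        unsigned long pfn;

        for (pfn = start_pfn; pfn <= end_pfn;) {
                struct fake_page *page = &pages[pfn];

                if (!page->is_buddy) {
                        pfn++;          /* allocated page: step over it */
                        continue;
                }
                /* move the whole free chunk, then jump past it */
                pages_moved += 1 << page->order;
                pfn += 1 << page->order;
        }
        return pages_moved;
}

int main(void)
{
        struct fake_page pages[RANGE] = {
                [0] = { 1, 2 },   /* order-2 free chunk: pfns 0-3 */
                [8] = { 1, 3 },   /* order-3 free chunk: pfns 8-15 */
        };

        printf("moved %d pages\n", model_move_freepages(pages, 0, RANGE - 1));
        return 0;
}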
 
-int move_freepages_block(struct zone *zone, struct page *page,
-                               int migratetype, int *num_movable)
+static bool prep_move_freepages_block(struct zone *zone, struct page *page,
+                                     unsigned long *start_pfn,
+                                     unsigned long *end_pfn,
+                                     int *num_free, int *num_movable)
 {
-       unsigned long start_pfn, end_pfn, pfn;
-
-       if (num_movable)
-               *num_movable = 0;
+       unsigned long pfn, start, end;
 
        pfn = page_to_pfn(page);
-       start_pfn = pageblock_start_pfn(pfn);
-       end_pfn = pageblock_end_pfn(pfn) - 1;
+       start = pageblock_start_pfn(pfn);
+       end = pageblock_end_pfn(pfn) - 1;
 
        /*
         * The caller only has the lock for @zone, don't touch ranges
         * that straddle into other zones. While we could move part of a
         * range that's inside the zone, this call is usually accompanied
         * by other operations such as migratetype updates which also
         * should be locked.
         */
-       if (!zone_spans_pfn(zone, start_pfn))
-               return 0;
-       if (!zone_spans_pfn(zone, end_pfn))
-               return 0;
+       if (!zone_spans_pfn(zone, start))
+               return false;
+       if (!zone_spans_pfn(zone, end))
+               return false;
+
+       *start_pfn = start;
+       *end_pfn = end;
 
-       return move_freepages(zone, start_pfn, end_pfn, migratetype,
-                                                               num_movable);
+       if (num_free) {
+               *num_free = 0;
+               *num_movable = 0;
+               for (pfn = start; pfn <= end;) {
+                       page = pfn_to_page(pfn);
+                       if (PageBuddy(page)) {
+                               int nr = 1 << buddy_order(page);
+
+                               *num_free += nr;
+                               pfn += nr;
+                               continue;
+                       }
+                       /*
+                        * We assume that pages that could be isolated for
+                        * migration are movable. But we don't actually try
+                        * isolating, as that would be expensive.
+                        */
+                       if (PageLRU(page) || __PageMovable(page))
+                               (*num_movable)++;
+                       pfn++;
+               }
+       }
+
+       return true;
+}
+
+int move_freepages_block(struct zone *zone, struct page *page,
+                        int migratetype)
+{
+       unsigned long start_pfn, end_pfn;
+
+       if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
+                                      NULL, NULL))
+               return -1;
+
+       return move_freepages(zone, start_pfn, end_pfn, migratetype);
 }
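
With the num_movable out-parameter gone, the return value now carries the
failure mode: -1 means the block straddles a zone boundary and was left
untouched, so the caller must not commit a migratetype change; 0 or more is
the number of pages actually moved. A minimal userspace sketch of that
contract (ZONE_START, ZONE_END and the model function are made-up stand-ins
for the zone_spans_pfn() checks above):

/* Userspace model of the new move_freepages_block() contract. All names
 * are illustrative, not kernel API. */
#include <stdio.h>

#define ZONE_START      0UL
#define ZONE_END        1024UL
#define BLOCK_PAGES     512UL

static int model_move_freepages_block(unsigned long block_start)
{
        unsigned long block_end = block_start + BLOCK_PAGES - 1;

        /* mirror the zone_spans_pfn() checks: refuse straddling blocks */
        if (block_start < ZONE_START || block_end > ZONE_END)
                return -1;

        return 0;       /* pretend the block had no free pages to move */
}

int main(void)
{
        int ret = model_move_freepages_block(768);  /* ends at pfn 1279 */

        if (ret == -1)
                printf("block untouched: don't set_pageblock_migratetype()\n");
        else
                printf("moved %d pages: safe to commit the new type\n", ret);
        return 0;
}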
 
 static void change_pageblock_range(struct page *pageblock_page,
 }
 
 /*
- * This function implements actual steal behaviour. If order is large enough,
- * we can steal whole pageblock. If not, we first move freepages in this
- * pageblock to our migratetype and determine how many already-allocated pages
- * are there in the pageblock with a compatible migratetype. If at least half
- * of pages are free or compatible, we can change migratetype of the pageblock
- * itself, so pages freed in the future will be put on the correct free list.
+ * This function implements actual steal behaviour. If order is large enough, we
+ * can claim the whole pageblock for the requested migratetype. If not, we check
+ * the pageblock for constituent pages; if at least half of the pages are free
+ * or compatible, we can still claim the whole block, so pages freed in the
+ * future will be put on the correct free list. Otherwise, we take only the
+ * order we need from the fallback block and leave its migratetype alone.
  */
-static void steal_suitable_fallback(struct zone *zone, struct page *page,
-               unsigned int alloc_flags, int start_type, bool whole_block)
+static struct page *
+steal_suitable_fallback(struct zone *zone, struct page *page,
+                       int current_order, int order, int start_type,
+                       unsigned int alloc_flags, bool whole_block)
 {
-       unsigned int current_order = buddy_order(page);
        int free_pages, movable_pages, alike_pages;
-       int old_block_type;
+       unsigned long start_pfn, end_pfn;
+       int block_type;
 
-       old_block_type = get_pageblock_migratetype(page);
+       block_type = get_pageblock_migratetype(page);
 
        /*
         * This can happen due to races and we want to prevent broken
         * highatomic accounting.
         */
-       if (is_migrate_highatomic(old_block_type))
+       if (is_migrate_highatomic(block_type))
                goto single_page;
 
        /* Take ownership for orders >= pageblock_order */
        if (current_order >= pageblock_order) {
+               del_page_from_free_list(page, zone, current_order);
                change_pageblock_range(page, current_order, start_type);
-               goto single_page;
+               expand(zone, page, order, current_order, start_type);
+               return page;
        }
 
        /*
        if (!whole_block)
                goto single_page;
 
-       free_pages = move_freepages_block(zone, page, start_type,
-                                               &movable_pages);
        /* moving whole block can fail due to zone boundary conditions */
-       if (!free_pages)
+       if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
+                                      &free_pages, &movable_pages))
                goto single_page;
 
        /*
                 * vice versa, be conservative since we can't distinguish the
                 * exact migratetype of non-movable pages.
                 */
-               if (old_block_type == MIGRATE_MOVABLE)
+               if (block_type == MIGRATE_MOVABLE)
                        alike_pages = pageblock_nr_pages
                                                - (free_pages + movable_pages);
                else
         * compatible migratability as our allocation, claim the whole block.
         */
        if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
-                       page_group_by_mobility_disabled)
+                       page_group_by_mobility_disabled) {
+               move_freepages(zone, start_pfn, end_pfn, start_type);
                set_pageblock_migratetype(page, start_type);
-
-       return;
+               return __rmqueue_smallest(zone, order, start_type);
+       }
 
 single_page:
-       move_to_free_list(page, zone, current_order, start_type);
+       del_page_from_free_list(page, zone, current_order);
+       expand(zone, page, order, current_order, block_type);
+       return page;
 }
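
The claim test above compares free plus compatible pages against half a
pageblock. A worked example, assuming pageblock_order = 9, i.e. 512 pages
per block as is typical on x86-64 with 4K pages (the free_pages and
alike_pages values are invented):

#include <stdio.h>

int main(void)
{
        int pageblock_order = 9;                        /* assumed value */
        int pageblock_nr_pages = 1 << pageblock_order;  /* 512 */
        int threshold = 1 << (pageblock_order - 1);     /* 256, half a block */

        int free_pages = 180;   /* free pages found in the fallback block */
        int alike_pages = 100;  /* allocated pages of compatible mobility */

        if (free_pages + alike_pages >= threshold)
                printf("%d + %d >= %d: claim the whole %d-page block\n",
                       free_pages, alike_pages, threshold,
                       pageblock_nr_pages);
        else
                printf("below %d: steal a single page instead\n", threshold);
        return 0;
}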
 
 /*
        mt = get_pageblock_migratetype(page);
        /* Only reserve normal pageblocks (i.e., they can merge with others) */
        if (migratetype_is_mergeable(mt)) {
-               zone->nr_reserved_highatomic += pageblock_nr_pages;
-               set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
-               move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
+               if (move_freepages_block(zone, page, MIGRATE_HIGHATOMIC) != -1) {
+                       set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
+                       zone->nr_reserved_highatomic += pageblock_nr_pages;
+               }
        }
 
 out_unlock:
        struct zone *zone;
        struct page *page;
        int order;
-       bool ret;
+       int ret;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
                                                                ac->nodemask) {
                         * of pageblocks that cannot be completely freed
                         * may increase.
                         */
+                       ret = move_freepages_block(zone, page, ac->migratetype);
+                       /*
+                        * Reserving this block already succeeded, so this should
+                        * not fail on zone boundaries.
+                        */
+                       WARN_ON_ONCE(ret == -1);
                        set_pageblock_migratetype(page, ac->migratetype);
-                       ret = move_freepages_block(zone, page, ac->migratetype,
-                                                                       NULL);
-                       if (ret) {
+                       if (ret > 0) {
                                spin_unlock_irqrestore(&zone->lock, flags);
                                return ret;
                        }
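
The unreserve path now has three outcomes to tell apart: -1 (zone boundary,
which should be impossible for a block that was successfully reserved
earlier, hence the WARN), 0 (block converted but no free pages moved, keep
scanning), and > 0 (pages moved, done). A small model of that handling
(model_unreserve is hypothetical; the assert merely stands in for
WARN_ON_ONCE, which would not abort):

#include <assert.h>
#include <stdio.h>

static void model_unreserve(int ret)
{
        /* stands in for WARN_ON_ONCE(ret == -1) in the kernel code */
        assert(ret != -1);

        if (ret > 0)
                printf("moved %d pages, stop scanning\n", ret);
        else
                printf("nothing moved, try the next reserved block\n");
}

int main(void)
{
        model_unreserve(0);
        model_unreserve(7);
        return 0;
}
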
  * deviation from the rest of this file, to make the for loop
  * condition simpler.
  */
-static __always_inline bool
+static __always_inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
                                                unsigned int alloc_flags)
 {
                goto do_steal;
        }
 
-       return false;
+       return NULL;
 
 find_smallest:
        for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
 do_steal:
        page = get_page_from_free_area(area, fallback_mt);
 
-       steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
-                                                               can_steal);
+       /* take off list, maybe claim block, expand remainder */
+       page = steal_suitable_fallback(zone, page, current_order, order,
+                                      start_migratetype, alloc_flags, can_steal);
 
        trace_mm_page_alloc_extfrag(page, order, current_order,
                start_migratetype, fallback_mt);
 
-       return true;
-
+       return page;
 }
 
 /*
                                return page;
                }
        }
-retry:
+
        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
                if (alloc_flags & ALLOC_CMA)
                        page = __rmqueue_cma_fallback(zone, order);
 
-               if (!page && __rmqueue_fallback(zone, order, migratetype,
-                                                               alloc_flags))
-                       goto retry;
+               if (!page)
+                       page = __rmqueue_fallback(zone, order, migratetype,
+                                                 alloc_flags);
        }
        return page;
 }
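
__rmqueue() no longer loops back to the freelists after a successful steal;
the stolen page comes straight out of __rmqueue_fallback(), saving a second
freelist walk. A simplified userspace sketch of the new control flow
(struct page and the model functions here are illustrative only):

#include <stddef.h>
#include <stdio.h>

struct page { int id; };

static struct page stolen = { .id = 42 };

static struct page *model_rmqueue_smallest(void)
{
        return NULL;    /* pretend the preferred freelists are empty */
}

static struct page *model_rmqueue_fallback(void)
{
        /* steal_suitable_fallback() now hands the page straight back,
         * so no retry of the freelists is needed */
        return &stolen;
}

int main(void)
{
        struct page *page = model_rmqueue_smallest();

        if (!page)
                page = model_rmqueue_fallback();        /* no retry loop */

        printf("got page %d without retrying\n", page->id);
        return 0;
}
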
                         * Only change normal pageblocks (i.e., they can merge
                         * with others)
                         */
-                       if (migratetype_is_mergeable(mt)) {
-                               set_pageblock_migratetype(page,
-                                                         MIGRATE_MOVABLE);
-                               move_freepages_block(zone, page,
-                                                    MIGRATE_MOVABLE, NULL);
-                       }
+                       if (migratetype_is_mergeable(mt) &&
+                           move_freepages_block(zone, page,
+                                                MIGRATE_MOVABLE) != -1)
+                               set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                }
        }