        EXCLUSIVE,      /* Hold ref to page and take the bit when woken, like
                         * __folio_lock() waiting on then setting PG_locked.
                         */
        SHARED,         /* Hold ref to page and check the bit when woken, like
-                        * wait_on_page_writeback() waiting on PG_writeback.
+                        * folio_wait_writeback() waiting on PG_writeback.
                         */
        DROP,           /* Drop ref to page before wait, no check when woken,
-                        * like put_and_wait_on_page_locked() on PG_locked.
+                        * like folio_put_wait_locked() on PG_locked.
                         */
 };
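
An illustrative aside, not part of this patch: a sketch of the SHARED
pattern named above.  The caller keeps its reference across the sleep,
so the folio is still safe to use after waking.  The function name and
caller here are hypothetical.

    /* SHARED: hold the ref; the folio stays usable after waking. */
    static void example_wait_for_writeback(struct folio *folio)
    {
            folio_wait_writeback(folio);    /* sleeps until PG_writeback clears */
            /* We kept our reference, so the folio is still valid here. */
            folio_put(folio);
    }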
 
 EXPORT_SYMBOL(folio_wait_bit_killable);
 
 /**
- * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
- * @page: The page to wait for.
+ * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
+ * @folio: The folio to wait for.
  * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
  *
- * The caller should hold a reference on @page.  They expect the page to
+ * The caller should hold a reference on @folio.  They expect the folio to
  * become unlocked relatively soon, but do not wish to hold up migration
- * (for example) by holding the reference while waiting for the page to
+ * (for example) by holding the reference while waiting for the folio to
  * come unlocked.  After this function returns, the caller should not
- * dereference @page.
+ * dereference @folio.
  *
- * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal.
+ * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
  */
-int put_and_wait_on_page_locked(struct page *page, int state)
+int folio_put_wait_locked(struct folio *folio, int state)
 {
-       return folio_wait_bit_common(page_folio(page), PG_locked, state,
-                       DROP);
+       return folio_wait_bit_common(folio, PG_locked, state, DROP);
 }
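
By contrast, a hedged sketch of a hypothetical DROP-style caller: the
reference is consumed by folio_put_wait_locked(), so the folio must not
be dereferenced once the call returns.

    /* DROP: the ref is handed over; do not touch the folio afterwards. */
    static int example_wait_for_unlock(struct folio *folio)
    {
            int err = folio_put_wait_locked(folio, TASK_KILLABLE);

            /* The folio may already be freed; use only the return value. */
            return err;     /* 0, or -EINTR on a fatal signal */
    }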
 
                        goto unlock_mapping;
                if (!(iocb->ki_flags & IOCB_WAITQ)) {
                        filemap_invalidate_unlock_shared(mapping);
-                       put_and_wait_on_page_locked(&folio->page, TASK_KILLABLE);
+                       /*
+                        * This is where we usually end up waiting for a
+                        * previously submitted readahead to finish.
+                        */
+                       folio_put_wait_locked(folio, TASK_KILLABLE);
                        return AOP_TRUNCATED_PAGE;
                }
                error = __folio_lock_async(folio, iocb->ki_waitq);
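
For context, a simplified and assumed sketch of how AOP_TRUNCATED_PAGE
is consumed by the caller: because folio_put_wait_locked() gave up the
reference while sleeping, the caller must repeat the pagecache lookup
rather than reuse the stale folio pointer.  This is illustrative only,
not the exact filemap_get_pages() code; error handling is elided.

    /* Hypothetical caller shape (simplified): */
    retry:
            folio = filemap_get_folio(mapping, index);      /* fresh lookup */
            err = filemap_update_page(iocb, mapping, iter, folio);
            if (err == AOP_TRUNCATED_PAGE)
                    goto retry;     /* our ref was dropped while we slept */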
 
 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                spinlock_t *ptl)
 {
        pte_t pte;
        swp_entry_t entry;
-       struct page *page;
+       struct folio *folio;
 
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;
 
-       page = pfn_swap_entry_to_page(entry);
-       page = compound_head(page);
+       folio = page_folio(pfn_swap_entry_to_page(entry));
 
        /*
         * Once page cache replacement by page migration has started, page_count
-        * is zero; but we must not call put_and_wait_on_page_locked() without
-        * a ref. Use get_page_unless_zero(), and just fault again if it fails.
+        * is zero; but we must not call folio_put_wait_locked() without
+        * a ref. Use folio_try_get(), and just fault again if it fails.
         */
-       if (!get_page_unless_zero(page))
+       if (!folio_try_get(folio))
                goto out;
        pte_unmap_unlock(ptep, ptl);
-       put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+       folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
        return;
 out:
        pte_unmap_unlock(ptep, ptl);
 }

 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct folio *folio;
 
        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
-       page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
-       if (!get_page_unless_zero(page))
+       folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
+       if (!folio_try_get(folio))
                goto unlock;
        spin_unlock(ptl);
-       put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+       folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
        return;
 unlock:
        spin_unlock(ptl);
 }