for (addr = start; addr < end; addr += PAGE_SIZE) {
                pte_t pte;
                swp_entry_t entry;
-               struct page *page;
+               struct folio *folio;
 
                if (!ptep++) {
                        ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ ... @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
                pte_unmap_unlock(ptep, ptl);
                ptep = NULL;
 
-               page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
+               folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
                                             vma, addr, &splug);
-               if (page)
-                       put_page(page);
+               if (folio)
+                       folio_put(folio);
        }
 
        if (ptep)
@@ ... @@ static void shmem_swapin_range(struct vm_area_struct *vma,
 {
        XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
        pgoff_t end_index = linear_page_index(vma, end) - 1;
-       struct page *page;
+       struct folio *folio;
        struct swap_iocb *splug = NULL;
 
        rcu_read_lock();
-       xas_for_each(&xas, page, end_index) {
+       xas_for_each(&xas, folio, end_index) {
                unsigned long addr;
                swp_entry_t entry;
 
-               if (!xa_is_value(page))
+               if (!xa_is_value(folio))
                        continue;
-               entry = radix_to_swp_entry(page);
+               entry = radix_to_swp_entry(folio);
                /* There might be swapin error entries in shmem mapping. */
                if (non_swap_entry(entry))
                        continue;
@@ ... @@ static void shmem_swapin_range(struct vm_area_struct *vma,
                xas_pause(&xas);
                rcu_read_unlock();
 
-               page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
+               folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
                                             vma, addr, &splug);
-               if (page)
-                       put_page(page);
+               if (folio)
+                       folio_put(folio);
 
                rcu_read_lock();
        }
 
--- a/mm/swap.h
+++ b/mm/swap.h
@@ ... @@
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
                pgoff_t index);
 
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-                                  struct vm_area_struct *vma,
-                                  unsigned long addr,
-                                  struct swap_iocb **plug);
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+               struct vm_area_struct *vma, unsigned long addr,
+               struct swap_iocb **plug);
 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
                struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
                bool skip_if_exists);
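
For illustration only, a minimal sketch of the caller pattern under the folio-returning declaration above; prefetch_one_swap_entry() is a hypothetical helper, not part of this patch:

static void prefetch_one_swap_entry(swp_entry_t entry,
                                    struct vm_area_struct *vma,
                                    unsigned long addr,
                                    struct swap_iocb **splug)
{
        struct folio *folio;

        /* read_swap_cache_async() now hands back the folio (or NULL). */
        folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
                                      vma, addr, splug);
        if (folio)
                folio_put(folio);       /* folio_put() replaces put_page() */
}

Since the caller already holds the folio, folio_put() drops the reference without the compound_head() lookup that put_page() performs on a bare struct page.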
 
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ ... @@
  * __read_swap_cache_async() call them and swap_read_folio() holds the
  * swap cache folio lock.
  */
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-                                  struct vm_area_struct *vma,
-                                  unsigned long addr, struct swap_iocb **plug)
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+               struct vm_area_struct *vma, unsigned long addr,
+               struct swap_iocb **plug)
 {
        bool page_allocated;
        struct mempolicy *mpol;
@@ ... @@
 
        if (page_allocated)
                swap_read_folio(folio, false, plug);
-       return folio_file_page(folio, swp_offset(entry));
+       return folio;
 }
 
 static unsigned int __swapin_nr_pages(unsigned long prev_offset,
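
A hedged aside, not part of this patch: with the subpage lookup gone from the return path, a caller that still needed the exact page backing a swap entry could derive it from the returned folio using the same folio_file_page()/swp_offset() pair the old return statement used. A hypothetical sketch:

static struct page *swap_entry_to_page(swp_entry_t entry, gfp_t gfp_mask,
                                       struct vm_area_struct *vma,
                                       unsigned long addr,
                                       struct swap_iocb **plug)
{
        struct folio *folio;

        folio = read_swap_cache_async(entry, gfp_mask, vma, addr, plug);
        if (!folio)
                return NULL;
        /*
         * The caller keeps the reference taken above; the old code did
         * this subpage lookup inside read_swap_cache_async() itself.
         */
        return folio_file_page(folio, swp_offset(entry));
}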