                                folio_add_lru(folio);
 
-                               /* To provide entry to swap_readpage() */
+                               /* To provide entry to swap_read_folio() */
                                folio->swap = entry;
-                               swap_readpage(page, true, NULL);
+                               swap_read_folio(folio, true, NULL);
                                folio->private = NULL;
                        }
                } else {
 
        mempool_free(sio, sio_pool);
 }
 
-static void swap_readpage_fs(struct folio *folio, struct swap_iocb **plug)
+static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
 {
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        struct swap_iocb *sio = NULL;
                *plug = sio;
 }
 
-static void swap_readpage_bdev_sync(struct folio *folio,
+static void swap_read_folio_bdev_sync(struct folio *folio,
                struct swap_info_struct *sis)
 {
        struct bio_vec bv;
        put_task_struct(current);
 }
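
The sync helper's body is elided here apart from the on-stack bio_vec and the task-reference drop. As a rough, hedged sketch (not necessarily the exact body; it assumes page_io.c's includes and a swap_folio_sector() helper, the folio analogue of swap_page_sector()), the classic on-stack-bio synchronous pattern looks like:

static void example_sync_folio_read(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;		/* lives on the stack for the whole I/O */

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	submit_bio_wait(&bio);	/* blocks until the read completes */
}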
 
-static void swap_readpage_bdev_async(struct folio *folio,
+static void swap_read_folio_bdev_async(struct folio *folio,
                struct swap_info_struct *sis)
 {
        struct bio *bio;
        submit_bio(bio);
 }
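
Only the final submit_bio() of the async variant is visible. A hedged sketch of the usual shape, under the same assumptions as above plus end_swap_bio_read() as page_io.c's read-completion handler:

static void example_async_folio_read(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;	/* completion runs asynchronously */
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	submit_bio(bio);
}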
 
-void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
+void swap_read_folio(struct folio *folio, bool synchronous,
+               struct swap_iocb **plug)
 {
-       struct folio *folio = page_folio(page);
-       struct swap_info_struct *sis = page_swap_info(page);
+       struct swap_info_struct *sis = swp_swap_info(folio->swap);
        bool workingset = folio_test_workingset(folio);
        unsigned long pflags;
        bool in_thrashing;
                folio_mark_uptodate(folio);
                folio_unlock(folio);
        } else if (data_race(sis->flags & SWP_FS_OPS)) {
-               swap_readpage_fs(folio, plug);
+               swap_read_folio_fs(folio, plug);
        } else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
-               swap_readpage_bdev_sync(folio, sis);
+               swap_read_folio_bdev_sync(folio, sis);
        } else {
-               swap_readpage_bdev_async(folio, sis);
+               swap_read_folio_bdev_async(folio, sis);
        }
 
        if (workingset) {
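
With the page_folio() conversion hoisted out of swap_read_folio() and into its callers (note the deleted local above), a caller still holding a struct page converts once up front. A minimal hypothetical sketch:

static void example_sync_swapin(struct page *page)
{
	struct folio *folio = page_folio(page);	/* no reference change */

	/* true == synchronous: don't return until the folio is read */
	swap_read_folio(folio, true, NULL);
}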
 
 /* linux/mm/page_io.c */
 int sio_pool_init(void);
 struct swap_iocb;
-void swap_readpage(struct page *page, bool do_poll, struct swap_iocb **plug);
+void swap_read_folio(struct folio *folio, bool do_poll,
+               struct swap_iocb **plug);
 void __swap_read_unplug(struct swap_iocb *plug);
 static inline void swap_read_unplug(struct swap_iocb *plug)
 {
 }
 #else /* CONFIG_SWAP */
 struct swap_iocb;
-static inline void swap_readpage(struct page *page, bool do_poll,
+static inline void swap_read_folio(struct folio *folio, bool do_poll,
                struct swap_iocb **plug)
 {
 }
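
The empty !CONFIG_SWAP stub keeps call sites free of #ifdefs; a hypothetical caller such as the one below compiles either way, and the call folds away entirely when swap is disabled:

static void example_maybe_swapin(struct folio *folio)
{
	/* Inline no-op when !CONFIG_SWAP; real I/O otherwise. */
	swap_read_folio(folio, false, NULL);
}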
 
  * the swap entry is no longer in use.
  *
  * get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_readpage() holds the
+ * __read_swap_cache_async() calls them and swap_read_folio() holds the
  * swap cache folio lock.
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        mpol_cond_put(mpol);
 
        if (page_allocated)
-               swap_readpage(&folio->page, false, plug);
+               swap_read_folio(folio, false, plug);
        return folio_file_page(folio, swp_offset(entry));
 }
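
A hypothetical caller in the MADV_WILLNEED swap-in style, leaning on the guarantee spelled out in the comment above that no get/put_swap_device() pair is needed (the four-argument signature is assumed from this tree's swap_state.c):

static struct page *example_willneed_swapin(swp_entry_t entry,
		struct vm_fault *vmf, struct swap_iocb **splug)
{
	/* __read_swap_cache_async() takes the device references for us */
	return read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, vmf, splug);
}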
 
                if (!folio)
                        continue;
                if (page_allocated) {
-                       swap_readpage(&folio->page, false, &splug);
+                       swap_read_folio(folio, false, &splug);
                        if (offset != entry_offset) {
                                folio_set_readahead(folio);
                                count_vm_event(SWAP_RA);
        folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                        &page_allocated, false);
        if (unlikely(page_allocated))
-               swap_readpage(&folio->page, false, NULL);
+               swap_read_folio(folio, false, NULL);
        zswap_folio_swapin(folio);
        return folio_file_page(folio, swp_offset(entry));
 }
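
Both readahead loops follow the same plug lifecycle: reads are queued against a single struct swap_iocb plug (inside a block-layer plug) and submitted as one batch. A sketch using a hypothetical helper:

static void example_batched_swapin(struct folio **folios, unsigned int nr)
{
	struct swap_iocb *splug = NULL;
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		swap_read_folio(folios[i], false, &splug);	/* plugged/queued, not waited on */
	blk_finish_plug(&plug);

	swap_read_unplug(splug);	/* submit any SWP_FS_OPS reads still queued */
}
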
                if (!folio)
                        continue;
                if (page_allocated) {
-                       swap_readpage(&folio->page, false, &splug);
+                       swap_read_folio(folio, false, &splug);
                        if (i != ra_info.offset) {
                                folio_set_readahead(folio);
                                count_vm_event(SWAP_RA);
        folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
                                        &page_allocated, false);
        if (unlikely(page_allocated))
-               swap_readpage(&folio->page, false, NULL);
+               swap_read_folio(folio, false, NULL);
        zswap_folio_swapin(folio);
        return folio_file_page(folio, swp_offset(entry));
 }
 
 /*
  * A `swap extent' is a simple thing which maps a contiguous range of pages
  * onto a contiguous range of disk blocks.  A rbtree of swap extents is
- * built at swapon time and is then used at swap_writepage/swap_readpage
+ * built at swapon time and is then used at swap_writepage/swap_read_folio
  * time for locating where on disk a page belongs.
  *
  * If the swapfile is an S_ISBLK block device, a single extent is installed.
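
Once the rbtree walk has found the covering extent, the page-to-sector mapping is plain arithmetic. A conceptual sketch, assuming struct swap_extent's start_page/start_block/nr_pages fields from include/linux/swap.h:

static sector_t example_extent_sector(struct swap_extent *se, pgoff_t offset)
{
	/* @offset lies in [se->start_page, se->start_page + se->nr_pages) */
	sector_t blk = se->start_block + (offset - se->start_page);

	return blk << (PAGE_SHIFT - 9);	/* page-sized blocks -> 512-byte sectors */
}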