unsigned long           mmap_base;
        unsigned long           mmap_size;
 
-       struct page             **ring_pages;
+       struct folio            **ring_folios;
        long                    nr_pages;
 
        struct rcu_work         free_rwork;     /* see free_ioctx() */
                spinlock_t      completion_lock;
        } ____cacheline_aligned_in_smp;
 
-       struct page             *internal_pages[AIO_RING_PAGES];
+       struct folio            *internal_folios[AIO_RING_PAGES];
        struct file             *aio_ring_file;
 
        unsigned                id;
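The structural change is confined to these two fields: the ring's backing store is now tracked as folios rather than pages. Since each ring allocation here is order-0 (one page per folio), the two forms interconvert losslessly; roughly, using the real page_folio() helper and the folio->page member:

	struct folio *folio = page_folio(page);	/* page -> containing folio */
	struct page *head = &folio->page;	/* folio -> its first page */

Storing folios directly lets every later access drop the conversion step.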
        put_aio_ring_file(ctx);
 
        for (i = 0; i < ctx->nr_pages; i++) {
-               struct folio *folio = page_folio(ctx->ring_pages[i]);
+               struct folio *folio = ctx->ring_folios[i];
 
                if (!folio)
                        continue;
 
                pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
                         folio_ref_count(folio));
-               ctx->ring_pages[i] = NULL;
+               ctx->ring_folios[i] = NULL;
                folio_put(folio);
        }
 
-       if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
-               kfree(ctx->ring_pages);
-               ctx->ring_pages = NULL;
+       if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
+               kfree(ctx->ring_folios);
+               ctx->ring_folios = NULL;
        }
 }
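One detail worth calling out in the teardown: only rings larger than AIO_RING_PAGES (8 in fs/aio.c) allocate a separate pointer array; smaller rings point ring_folios at the embedded internal_folios array, so kfree() must be guarded by the pointer comparison above. A minimal sketch of the same idiom in isolation:

	struct ring {
		struct folio	**folios;		/* inline_folios or kcalloc()ed */
		struct folio	*inline_folios[8];	/* covers the common case */
	};

	static void ring_free_array(struct ring *r)
	{
		/* kfree() is only legal on the heap-allocated path */
		if (r->folios && r->folios != r->inline_folios)
			kfree(r->folios);
		r->folios = NULL;
	}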
 
        idx = src->index;
        if (idx < (pgoff_t)ctx->nr_pages) {
                /* Make sure the old folio hasn't already been changed */
-               if (ctx->ring_pages[idx] != &src->page)
+               if (ctx->ring_folios[idx] != src)
                        rc = -EAGAIN;
        } else
                rc = -EINVAL;
         */
        spin_lock_irqsave(&ctx->completion_lock, flags);
        folio_migrate_copy(dst, src);
-       BUG_ON(ctx->ring_pages[idx] != &src->page);
-       ctx->ring_pages[idx] = &dst->page;
+       BUG_ON(ctx->ring_folios[idx] != src);
+       ctx->ring_folios[idx] = dst;
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        /* The old folio is no longer accessible. */
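The migration path checks the slot twice: once under ctx->ring_lock (the -EAGAIN case above), and again under ctx->completion_lock, where both the copy and the pointer swap happen, because aio_complete() writes events while holding only completion_lock. In outline (locking abbreviated; see aio_migrate_folio()):

	/*
	 * ring_lock:        ring_folios[idx] == src?  else -EAGAIN
	 * completion_lock:  folio_migrate_copy(dst, src);
	 *                   ring_folios[idx] = dst;
	 *
	 * Completions serialize on completion_lock, so no event can land
	 * in the old folio once the swap is published.
	 */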
        nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
                        / sizeof(struct io_event);
 
-       ctx->ring_pages = ctx->internal_pages;
+       ctx->ring_folios = ctx->internal_folios;
        if (nr_pages > AIO_RING_PAGES) {
-               ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
-                                         GFP_KERNEL);
-               if (!ctx->ring_pages) {
+               ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
+                                          GFP_KERNEL);
+               if (!ctx->ring_folios) {
                        put_aio_ring_file(ctx);
                        return -ENOMEM;
                }
                         folio_ref_count(folio));
                folio_end_read(folio, true);
 
-               ctx->ring_pages[i] = &folio->page;
+               ctx->ring_folios[i] = folio;
        }
        ctx->nr_pages = i;
 
        ctx->user_id = ctx->mmap_base;
        ctx->nr_events = nr_events; /* trusted copy */
 
-       ring = page_address(ctx->ring_pages[0]);
+       ring = folio_address(ctx->ring_folios[0]);
        ring->nr = nr_events;   /* user copy */
        ring->id = ~0U;
        ring->head = ring->tail = 0;
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
-       flush_dcache_page(ctx->ring_pages[0]);
+       flush_dcache_folio(ctx->ring_folios[0]);
 
        return 0;
 }
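The nr_events arithmetic accounts for the struct aio_ring header occupying the start of the first folio. A worked example, assuming 4 KiB pages (the fixed header of struct aio_ring and struct io_event are both 32 bytes):

	/*
	 * nr_pages == 1:  (4096 * 1 - 32) / 32 == 127 events
	 * nr_pages == 2:  (4096 * 2 - 32) / 32 == 255 events
	 *
	 * i.e. the header costs exactly one event slot in folio 0.
	 */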
 
                                        /* While kioctx setup is in progress,
                                         * we are protected from page migration
-                                        * changes ring_pages by ->ring_lock.
+                                        * changing ring_folios by ->ring_lock.
                                         */
-                                       ring = page_address(ctx->ring_pages[0]);
+                                       ring = folio_address(ctx->ring_folios[0]);
                                        ring->id = ctx->id;
                                        return 0;
                                }
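The unlocked folio_address() above is safe only because ioctx_alloc() holds ctx->ring_lock across table insertion, the same lock aio_migrate_folio() takes before touching ring_folios[]. Abbreviated from the caller in fs/aio.c:

	mutex_lock(&ctx->ring_lock);	/* held across all of kioctx setup */
	err = ioctx_add_table(ctx, mm);
	/* ... error handling ... */
	mutex_unlock(&ctx->ring_lock);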
                 * against ctx->completed_events below will make sure we do the
                 * safe/right thing.
                 */
-               ring = page_address(ctx->ring_pages[0]);
+               ring = folio_address(ctx->ring_folios[0]);
                head = ring->head;
 
                refill_reqs_available(ctx, head, ctx->tail);
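Because the ring header lives in user-mappable memory, the head value read here is untrusted; refill_reqs_available() clamps it before doing any arithmetic. Its first step, as in fs/aio.c:

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;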
        if (++tail >= ctx->nr_events)
                tail = 0;
 
-       ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+       ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
        event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
        *event = iocb->ki_res;
 
-       flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+       flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
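On the index math: pos was computed as tail + AIO_EVENTS_OFFSET before the increment above, AIO_EVENTS_PER_PAGE is PAGE_SIZE / sizeof(struct io_event), and AIO_EVENTS_OFFSET skips the slot consumed by the header. With 4 KiB pages:

	/*
	 * AIO_EVENTS_PER_PAGE == 128, AIO_EVENTS_OFFSET == 1, so e.g.
	 * tail == 130 gives pos == 131: the event lands in
	 * ring_folios[131 / 128] == ring_folios[1], at slot 131 % 128 == 3.
	 */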
 
        pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
                 (void __user *)(unsigned long)iocb->ki_res.obj,
 
        ctx->tail = tail;
 
-       ring = page_address(ctx->ring_pages[0]);
+       ring = folio_address(ctx->ring_folios[0]);
        head = ring->head;
        ring->tail = tail;
-       flush_dcache_page(ctx->ring_pages[0]);
+       flush_dcache_folio(ctx->ring_folios[0]);
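Not visible in this excerpt: just before ctx->tail is updated, aio_complete() issues smp_wmb() so the event contents are globally visible before the new tail is published; aio_read_events_ring() pairs with it via smp_rmb() after reading the tail. Schematically:

	/*
	 * Producer (aio_complete)        Consumer (aio_read_events_ring)
	 *   write *event                   tail = ring->tail;
	 *   smp_wmb();                     smp_rmb();
	 *   ring->tail = tail;             copy events up to tail
	 */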
 
        ctx->completed_events++;
        if (ctx->completed_events > 1)
        sched_annotate_sleep();
        mutex_lock(&ctx->ring_lock);
 
-       /* Access to ->ring_pages here is protected by ctx->ring_lock. */
-       ring = page_address(ctx->ring_pages[0]);
+       /* Access to ->ring_folios here is protected by ctx->ring_lock. */
+       ring = folio_address(ctx->ring_folios[0]);
        head = ring->head;
        tail = ring->tail;
 
        while (ret < nr) {
                long avail;
                struct io_event *ev;
-               struct page *page;
+               struct folio *folio;
 
                avail = (head <= tail ?  tail : ctx->nr_events) - head;
                if (head == tail)
                        break;
 
                pos = head + AIO_EVENTS_OFFSET;
-               page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
+               folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
                pos %= AIO_EVENTS_PER_PAGE;
 
                avail = min(avail, nr - ret);
                avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
 
-               ev = page_address(page);
+               ev = folio_address(folio);
                copy_ret = copy_to_user(event + ret, ev + pos,
                                        sizeof(*ev) * avail);
 
                head %= ctx->nr_events;
        }
 
-       ring = page_address(ctx->ring_pages[0]);
+       ring = folio_address(ctx->ring_folios[0]);
        ring->head = head;
-       flush_dcache_page(ctx->ring_pages[0]);
+       flush_dcache_folio(ctx->ring_folios[0]);
 
        pr_debug("%li  h%u t%u\n", ret, head, tail);
 out:
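The head publish above is the consumer half of the protocol: events are copied out first, then ring->head is advanced and the header folio flushed for virtually-indexed D-caches. Because user_id is the mmap base (see aio_setup_ring() above), userspace can in principle reap events the same way without entering the kernel, as libaio's fast path does. A rough sketch, assuming 4 KiB pages and omitting the validity and barrier checks a real implementation needs (field names as in the kernel's struct aio_ring):

	struct aio_ring *ring = (struct aio_ring *)ctx_id; /* == mmap_base */
	unsigned head = ring->head;

	while (head != ring->tail) {
		struct io_event ev = ring->io_events[head];
		/* ... consume ev ... */
		head = (head + 1) % ring->nr;
		ring->head = head;	/* publish consumption */
	}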