kmem_cache_free(nfs_rdata_cachep, rhdr);
 }
 
-static
-int nfs_return_empty_page(struct page *page)
+/* Zero @folio in full, mark it uptodate and unlock it.  Always returns 0. */
+static int nfs_return_empty_folio(struct folio *folio)
 {
-       zero_user(page, 0, PAGE_SIZE);
-       SetPageUptodate(page);
-       unlock_page(page);
+       folio_zero_segment(folio, 0, folio_size(folio));
+       folio_mark_uptodate(folio);
+       folio_unlock(folio);
        return 0;
 }
 
 static void nfs_readpage_release(struct nfs_page *req, int error)
 {
        struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
-       struct page *page = req->wb_page;
+       struct folio *folio = nfs_page_to_folio(req);
 
        dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
                (long long)req_offset(req));
 
+       /* Record fatal server errors on the folio; -ETIMEDOUT is excluded. */
        if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
-               SetPageError(page);
+               folio_set_error(folio);
        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
-               if (PageUptodate(page))
-                       nfs_fscache_write_page(inode, page);
-               unlock_page(page);
+               /*
+                * PG_UNLOCKPAGE group-sync path: write the folio to fscache
+                * (only if it ended up uptodate) and release the folio lock.
+                */
+               if (folio_test_uptodate(folio))
+                       nfs_fscache_write_page(inode, &folio->page);
+               folio_unlock(folio);
        }
        nfs_release_request(req);
 }
+/* Propagate uptodate state to the folio via the PG_UPTODATE group-sync bit. */
 static void nfs_page_group_set_uptodate(struct nfs_page *req)
 {
        if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
-               SetPageUptodate(req->wb_page);
+               folio_mark_uptodate(nfs_page_to_folio(req));
 }
 
 static void nfs_read_completion(struct nfs_pgio_header *hdr)
                goto out;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
-               struct page *page = req->wb_page;
+               struct folio *folio = nfs_page_to_folio(req);
                unsigned long start = req->wb_pgbase;
                unsigned long end = req->wb_pgbase + req->wb_bytes;
 
                        if (bytes > hdr->good_bytes) {
                                /* nothing in this request was good, so zero
                                 * the full extent of the request */
-                               zero_user_segment(page, start, end);
+                               folio_zero_segment(folio, start, end);
 
                        } else if (hdr->good_bytes - bytes < req->wb_bytes) {
                                /* part of this request has good bytes, but
                                 * not all. zero the bad bytes */
                                start += hdr->good_bytes - bytes;
                                WARN_ON(start < req->wb_pgbase);
-                               zero_user_segment(page, start, end);
+                               folio_zero_segment(folio, start, end);
                        }
                }
                error = 0;
                nfs_readpage_retry(task, hdr);
 }
 
-static int
-readpage_async_filler(struct nfs_readdesc *desc, struct page *page)
+/*
+ * Queue an asynchronous read for @folio, unless it can be completed
+ * immediately (no backing data, or a hit in the local fscache).
+ * Returns 0 on success or immediate completion, negative errno otherwise.
+ */
+static int readpage_async_filler(struct nfs_readdesc *desc, struct folio *folio)
 {
-       struct inode *inode = page_file_mapping(page)->host;
-       unsigned int rsize = NFS_SERVER(inode)->rsize;
+       struct inode *inode = folio_file_mapping(folio)->host;
+       struct nfs_server *server = NFS_SERVER(inode);
+       size_t fsize = folio_size(folio);
+       unsigned int rsize = server->rsize;
        struct nfs_page *new;
        unsigned int len, aligned_len;
        int error;
 
-       len = nfs_page_length(page);
+       len = nfs_folio_length(folio);
+       /* No file data backs this folio (e.g. beyond EOF): return it zeroed. */
        if (len == 0)
-               return nfs_return_empty_page(page);
+               return nfs_return_empty_folio(folio);
 
-       aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);
+       /* Round the read up to an rsize boundary, capped at the folio size. */
+       aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);
 
-       if (!IS_SYNC(page->mapping->host)) {
-               error = nfs_fscache_read_page(page->mapping->host, page);
+       if (!IS_SYNC(inode)) {
+               error = nfs_fscache_read_page(inode, &folio->page);
+               /* error == 0: fscache satisfied the read, nothing more to do. */
                if (error == 0)
                        goto out_unlock;
        }
 
-       new = nfs_create_request(desc->ctx, page, 0, aligned_len);
+       new = nfs_page_create_from_folio(desc->ctx, folio, 0, aligned_len);
        if (IS_ERR(new))
                goto out_error;
 
-       if (len < PAGE_SIZE)
-               zero_user_segment(page, len, PAGE_SIZE);
+       /* Pre-zero the tail of the folio that the read will not cover. */
+       if (len < fsize)
+               folio_zero_segment(folio, len, fsize);
        if (!nfs_pageio_add_request(&desc->pgio, new)) {
                nfs_list_remove_request(new);
                error = desc->pgio.pg_error;
 out_error:
        error = PTR_ERR(new);
 out_unlock:
-       unlock_page(page);
+       folio_unlock(folio);
 out:
        return error;
 }
  */
 int nfs_read_folio(struct file *file, struct folio *folio)
 {
-       struct page *page = &folio->page;
        struct nfs_readdesc desc;
-       struct inode *inode = page_file_mapping(page)->host;
+       struct inode *inode = file_inode(file);
        int ret;
 
-       trace_nfs_aop_readpage(inode, page);
+       trace_nfs_aop_readpage(inode, folio);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
 
        /*
         * Try to flush any pending writes to the file..
         *
-        * NOTE! Because we own the page lock, there cannot
+        * NOTE! Because we own the folio lock, there cannot
         * be any new pending writes generated at this point
-        * for this page (other pages can be written to).
+        * for this folio (other folios can be written to).
         */
-       ret = nfs_wb_page(inode, page);
+       ret = nfs_wb_folio(inode, folio);
        if (ret)
                goto out_unlock;
-       if (PageUptodate(page))
+       /* Writeback may have left the folio uptodate; no read needed then. */
+       if (folio_test_uptodate(folio))
                goto out_unlock;
 
        ret = -ESTALE;
        nfs_pageio_init_read(&desc.pgio, inode, false,
                             &nfs_async_read_completion_ops);
 
-       ret = readpage_async_filler(&desc, page);
+       ret = readpage_async_filler(&desc, folio);
        if (ret)
                goto out;
 
        nfs_pageio_complete_read(&desc.pgio);
        ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
        if (!ret) {
-               ret = wait_on_page_locked_killable(page);
-               if (!PageUptodate(page) && !ret)
+               /* Read completion unlocks the folio; wait for it (killable). */
+               ret = folio_wait_locked_killable(folio);
+               /* Still not uptodate: fetch and clear the open context error. */
+               if (!folio_test_uptodate(folio) && !ret)
                        ret = xchg(&desc.ctx->error, 0);
        }
 out:
        put_nfs_open_context(desc.ctx);
-       trace_nfs_aop_readpage_done(inode, page, ret);
+       trace_nfs_aop_readpage_done(inode, folio, ret);
        return ret;
 out_unlock:
-       unlock_page(page);
-       trace_nfs_aop_readpage_done(inode, page, ret);
+       folio_unlock(folio);
+       trace_nfs_aop_readpage_done(inode, folio, ret);
        return ret;
 }
 
        struct file *file = ractl->file;
        struct nfs_readdesc desc;
        struct inode *inode = ractl->mapping->host;
-       struct page *page;
+       struct folio *folio;
        int ret;
 
        trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
        nfs_pageio_init_read(&desc.pgio, inode, false,
                             &nfs_async_read_completion_ops);
 
-       while ((page = readahead_page(ractl)) != NULL) {
-               ret = readpage_async_filler(&desc, page);
-               put_page(page);
+       while ((folio = readahead_folio(ractl)) != NULL) {
+               ret = readpage_async_filler(&desc, folio);
                if (ret)
                        break;
        }