        memcg_check_events(ug->memcg, ug->nid);
        local_irq_restore(flags);
 
-       /* drop reference from uncharge_page */
+       /* drop reference from uncharge_folio */
        css_put(&ug->memcg->css);
 }
 
-static void uncharge_page(struct page *page, struct uncharge_gather *ug)
+static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
 {
-       struct folio *folio = page_folio(page);
-       unsigned long nr_pages;
+       long nr_pages;
        struct mem_cgroup *memcg;
        struct obj_cgroup *objcg;
-       bool use_objcg = PageMemcgKmem(page);
+       bool use_objcg = folio_memcg_kmem(folio);
 
-       VM_BUG_ON_PAGE(PageLRU(page), page);
+       VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
        /*
         * Nobody should be changing or seriously looking at
-        * page memcg or objcg at this point, we have fully
-        * exclusive access to the page.
+        * folio memcg or objcg at this point, we have fully
+        * exclusive access to the folio.
         */
        if (use_objcg) {
                objcg = __folio_objcg(folio);
@@ ... @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                        uncharge_gather_clear(ug);
                }
                ug->memcg = memcg;
-               ug->nid = page_to_nid(page);
+               ug->nid = folio_nid(folio);
 
                /* pairs with css_put in uncharge_batch */
                css_get(&memcg->css);
        }
 
-       nr_pages = compound_nr(page);
+       nr_pages = folio_nr_pages(folio);
 
        if (use_objcg) {
                ug->nr_memory += nr_pages;
                ug->nr_kmem += nr_pages;
 
-               page->memcg_data = 0;
+               folio->memcg_data = 0;
                obj_cgroup_put(objcg);
        } else {
                /* LRU pages aren't accounted at the root level */
@@ ... @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                        ug->nr_memory += nr_pages;
                ug->pgpgout++;
 
-               page->memcg_data = 0;
+               folio->memcg_data = 0;
        }
 
        css_put(&memcg->css);
@@ ... @@ void __mem_cgroup_uncharge(struct page *page)
                return;
 
        uncharge_gather_clear(&ug);
-       uncharge_page(page, &ug);
+       uncharge_folio(page_folio(page), &ug);
        uncharge_batch(&ug);
 }
 
@@ ... @@ void __mem_cgroup_uncharge(struct page *page)
 void __mem_cgroup_uncharge_list(struct list_head *page_list)
 {
        struct uncharge_gather ug;
-       struct page *page;
+       struct folio *folio;
 
        uncharge_gather_clear(&ug);
-       list_for_each_entry(page, page_list, lru)
-               uncharge_page(page, &ug);
+       list_for_each_entry(folio, page_list, lru)
+               uncharge_folio(folio, &ug);
        if (ug.memcg)
                uncharge_batch(&ug);
 }