mm/memcg: Convert mem_cgroup_uncharge() to take a folio
author Matthew Wilcox (Oracle) <willy@infradead.org>
Sun, 2 May 2021 00:42:23 +0000 (20:42 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 27 Sep 2021 13:27:31 +0000 (09:27 -0400)
Convert all the callers to call page_folio().  Most of them were already
using a head page, but a few of them I can't prove were, so this may
actually fix a bug.
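
The conversion is not purely mechanical for callers that might hold a
tail page of a compound page.  A minimal sketch of the hazard, assuming
the pre-folio page_memcg() read page->memcg_data directly without
resolving compound_head() first (which is what "may actually fix a
bug" refers to above):

	/*
	 * Old interface: if @page was a tail page, page_memcg(page)
	 * saw no memcg_data, so __mem_cgroup_uncharge() returned
	 * early and the charge held by the head page leaked.
	 */
	mem_cgroup_uncharge(page);

	/*
	 * New interface: page_folio() always resolves to the folio
	 * (i.e. the head page), so the charge is found and released
	 * even if the caller passed in a tail page.
	 */
	mem_cgroup_uncharge(page_folio(page));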

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/memcontrol.h
mm/filemap.c
mm/khugepaged.c
mm/memcontrol.c
mm/memory-failure.c
mm/memremap.c
mm/page_alloc.c
mm/swap.c

index 19a51729e00cb59304f9b1368225a53507786cff..b4bc052db32b6a007d321740fc55806d0673137e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -722,12 +722,19 @@ int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void __mem_cgroup_uncharge(struct page *page);
-static inline void mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio);
+
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
        if (mem_cgroup_disabled())
                return;
-       __mem_cgroup_uncharge(page);
+       __mem_cgroup_uncharge(folio);
 }
 
 void __mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -1229,7 +1236,7 @@ static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
 {
 }
 
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
 }
 
index 816af226f49d134f89fab808b8b76ed38f7303c2..44fcd9d1dd653abc8f9666f63a26d20fa304a4a4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -940,7 +940,7 @@ unlock:
        if (xas_error(&xas)) {
                error = xas_error(&xas);
                if (charged)
-                       mem_cgroup_uncharge(page);
+                       mem_cgroup_uncharge(page_folio(page));
                goto error;
        }
 
index 8480a3b05bccba209d7dfce2e0dba6fabb8b5943..6d56e7abd2b8d5989e27f1fce0bbd92367821aeb 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1211,7 +1211,7 @@ out_up_write:
        mmap_write_unlock(mm);
 out_nolock:
        if (!IS_ERR_OR_NULL(*hpage))
-               mem_cgroup_uncharge(*hpage);
+               mem_cgroup_uncharge(page_folio(*hpage));
        trace_mm_collapse_huge_page(mm, isolated, result);
        return;
 }
@@ -1975,7 +1975,7 @@ xa_unlocked:
 out:
        VM_BUG_ON(!list_empty(&pagelist));
        if (!IS_ERR_OR_NULL(*hpage))
-               mem_cgroup_uncharge(*hpage);
+               mem_cgroup_uncharge(page_folio(*hpage));
        /* TODO: tracepoints */
 }
 
index 64eac157db79f05604aa28b5df3bb65a1fd099df..6321ed6d6e5a5cc55ec5c4ce7be3492562c431ee 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6858,22 +6858,16 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
        css_put(&memcg->css);
 }
 
-/**
- * __mem_cgroup_uncharge - uncharge a page
- * @page: page to uncharge
- *
- * Uncharge a page previously charged with __mem_cgroup_charge().
- */
-void __mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio)
 {
        struct uncharge_gather ug;
 
-       /* Don't touch page->lru of any random page, pre-check: */
-       if (!page_memcg(page))
+       /* Don't touch folio->lru of any random page, pre-check: */
+       if (!folio_memcg(folio))
                return;
 
        uncharge_gather_clear(&ug);
-       uncharge_folio(page_folio(page), &ug);
+       uncharge_folio(folio, &ug);
        uncharge_batch(&ug);
 }
 
index 3e6449f2102a77fce4ac5416f960c801e616a887..fffe4afaff437fb91cba7012bb365f9996f29a59 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -762,7 +762,7 @@ static int delete_from_lru_cache(struct page *p)
                 * Poisoned page might never drop its ref count to 0 so we have
                 * to uncharge it manually from its memcg.
                 */
-               mem_cgroup_uncharge(p);
+               mem_cgroup_uncharge(page_folio(p));
 
                /*
                 * drop the page count elevated by isolate_lru_page()
index ed593bf87109a86403abf011f9652c4dda02a626..5a66a71ab5911a4cb4102bba6e8cead63eb2327b 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -505,7 +505,7 @@ void free_devmap_managed_page(struct page *page)
 
        __ClearPageWaiters(page);
 
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
 
        /*
         * When a device_private page is freed, the page->mapping field
index b37435c274cf1cb1fabd26465ca7899bb610d91f..869d0b06e1ef4c5138f75f36fb73a4979da63f53 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -724,7 +724,7 @@ static inline void free_the_page(struct page *page, unsigned int order)
 
 void free_compound_page(struct page *page)
 {
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
        free_the_page(page, compound_order(page));
 }
 
index 0edbcb9c8876c1b1da5581b80aa27609d38dad4e..5679ce5bc362ba3d11b94abf80239ecbcdc6a905 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -94,7 +94,7 @@ static void __page_cache_release(struct page *page)
 static void __put_single_page(struct page *page)
 {
        __page_cache_release(page);
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
        free_unref_page(page, 0);
 }