memcg: convert mem_cgroup_swapin_charge_page() to mem_cgroup_swapin_charge_folio()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Fri, 2 Sep 2022 19:46:12 +0000 (20:46 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 3 Oct 2022 21:02:47 +0000 (14:02 -0700)
All callers now have a folio, so pass it in here and remove an unnecessary
call to page_folio().
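
For illustration, the caller-side shape of the change, condensed from the
hunks below (all identifiers appear in the diff; "err", "mm" and "gfp" stand
in for whatever the caller has at hand):

	/* Before: a caller holding a folio had to convert back to a page. */
	err = mem_cgroup_swapin_charge_page(&folio->page, mm, gfp, entry);

	/* After: the folio is passed directly; no conversion either way. */
	err = mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry);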

Link: https://lkml.kernel.org/r/20220902194653.1739778-17-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/memory.c
mm/swap_state.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60545e4a1c0344fc61e871c49becb2634ac5c344..ca0df42662ad171c54768b95ad67c27e4d9c82e0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -688,7 +688,7 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
        return __mem_cgroup_charge(folio, mm, gfp);
 }
 
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
@@ -1254,7 +1254,7 @@ static inline int mem_cgroup_charge(struct folio *folio,
        return 0;
 }
 
-static inline int mem_cgroup_swapin_charge_page(struct page *page,
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
                        struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
 {
        return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e804056422db0d2a72d741969bf3abcd1c7678cb..621b4472c4094688b36c9f371f4c6bd6d2cfc613 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6844,21 +6844,20 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
 }
 
 /**
- * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
- * @page: page to charge
+ * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
+ * @folio: folio to charge.
  * @mm: mm context of the victim
  * @gfp: reclaim mode
- * @entry: swap entry for which the page is allocated
+ * @entry: swap entry for which the folio is allocated
  *
- * This function charges a page allocated for swapin. Please call this before
- * adding the page to the swapcache.
+ * This function charges a folio allocated for swapin. Please call this before
+ * adding the folio to the swapcache.
  *
  * Returns 0 on success. Otherwise, an error code is returned.
  */
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry)
 {
-       struct folio *folio = page_folio(page);
        struct mem_cgroup *memcg;
        unsigned short id;
        int ret;
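
The kernel-doc above prescribes an ordering: charge the folio first, insert
it into the swapcache only on success.  A condensed sketch of that sequence,
assembled from the __read_swap_cache_async() hunk further down (error
handling abbreviated):

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	/* Charge the freshly allocated folio before any swapcache insertion. */
	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;	/* an uncharged folio never reaches the swapcache */

	/* ...only after a successful charge is the folio added to the swapcache... */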
diff --git a/mm/memory.c b/mm/memory.c
index 1e114438f6064221875b54b70348c70406718893..b36b177e0ea91ee3cf262bd6cd953d53ad20d898 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3783,7 +3783,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                __folio_set_locked(folio);
                                __folio_set_swapbacked(folio);
 
-                               if (mem_cgroup_swapin_charge_page(page,
+                               if (mem_cgroup_swapin_charge_folio(folio,
                                                        vma->vm_mm, GFP_KERNEL,
                                                        entry)) {
                                        ret = VM_FAULT_OOM;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ea354efd37356402476777ac474cbe7999807e1d..a7e0438902dd11bc10f4f6c067e4ccbafa226b7f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -480,7 +480,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __folio_set_locked(folio);
        __folio_set_swapbacked(folio);
 
-       if (mem_cgroup_swapin_charge_page(&folio->page, NULL, gfp_mask, entry))
+       if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
                goto fail_unlock;
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
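
A closing note on the conversion idiom this patch eliminates: page_folio()
maps a page to its containing folio, and &folio->page is the reverse trip to
the folio's first page.  The old signature forced one such round trip at
every boundary, as a sketch (both lines are removed by this patch):

	/* Inside the old mem_cgroup_swapin_charge_page(): */
	struct folio *folio = page_folio(page);

	/* At the old __read_swap_cache_async() call site: */
	mem_cgroup_swapin_charge_page(&folio->page, NULL, gfp_mask, entry);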