khugepaged: convert alloc_charge_hpage to alloc_charge_folio
author    Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 3 Apr 2024 17:18:31 +0000 (18:18 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:33 +0000 (20:56 -0700)
Both callers want to deal with a folio, so return a folio from this
function.

Link: https://lkml.kernel.org/r/20240403171838.1445826-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/khugepaged.c

index ad16dd8b26a8aef3d7ef681af92e78a98f3b38da..2f1dacd65d12b51ab9267e29c21a4c97f00a948e 100644
@@ -1045,7 +1045,7 @@ out:
        return result;
 }
 
-static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
                              struct collapse_control *cc)
 {
        gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
@@ -1055,7 +1055,7 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 
        folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
        if (!folio) {
-               *hpage = NULL;
+               *foliop = NULL;
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                return SCAN_ALLOC_HUGE_PAGE_FAIL;
        }
@@ -1063,13 +1063,13 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
        count_vm_event(THP_COLLAPSE_ALLOC);
        if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
                folio_put(folio);
-               *hpage = NULL;
+               *foliop = NULL;
                return SCAN_CGROUP_CHARGE_FAIL;
        }
 
        count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
 
-       *hpage = folio_page(folio, 0);
+       *foliop = folio;
        return SCAN_SUCCEED;
 }
 
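For reference, this is how the function reads once the patch is applied, assembled from the hunks above. The two declaration lines elided by the diff context (the target-node lookup and the folio variable) are filled in from the surrounding kernel source and should be read as reconstruction, not as part of this patch:

	static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
				      struct collapse_control *cc)
	{
		gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
			     GFP_TRANSHUGE);
		int node = hpage_collapse_find_target_node(cc);	/* reconstructed from context */
		struct folio *folio;				/* reconstructed from context */

		/* Allocate a PMD-sized folio on the chosen node. */
		folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
		if (!folio) {
			*foliop = NULL;
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			return SCAN_ALLOC_HUGE_PAGE_FAIL;
		}

		count_vm_event(THP_COLLAPSE_ALLOC);
		/* Charge the folio to the mm's memcg; on failure, drop our reference. */
		if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
			folio_put(folio);
			*foliop = NULL;
			return SCAN_CGROUP_CHARGE_FAIL;
		}

		count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);

		/* Hand the folio itself back to the caller. */
		*foliop = folio;
		return SCAN_SUCCEED;
	}
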
@@ -1098,7 +1098,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
         */
        mmap_read_unlock(mm);
 
-       result = alloc_charge_hpage(&hpage, mm, cc);
+       result = alloc_charge_folio(&folio, mm, cc);
+       hpage = &folio->page;
        if (result != SCAN_SUCCEED)
                goto out_nolock;
 
@@ -1204,7 +1205,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        if (unlikely(result != SCAN_SUCCEED))
                goto out_up_write;
 
-       folio = page_folio(hpage);
        /*
         * The smp_wmb() inside __folio_mark_uptodate() ensures the
         * copy_huge_page writes become visible before the set_pmd_at()
@@ -1789,7 +1789,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
        struct page *hpage;
        struct page *page;
        struct page *tmp;
-       struct folio *folio;
+       struct folio *folio, *new_folio;
        pgoff_t index = 0, end = start + HPAGE_PMD_NR;
        LIST_HEAD(pagelist);
        XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1800,7 +1800,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
        VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
        VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
-       result = alloc_charge_hpage(&hpage, mm, cc);
+       result = alloc_charge_folio(&new_folio, mm, cc);
+       hpage = &new_folio->page;
        if (result != SCAN_SUCCEED)
                goto out;
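
Note the transitional pattern in both callers: alloc_charge_folio() now returns a folio, but collapse_huge_page() and collapse_file() still derive a struct page from it (hpage = &folio->page and hpage = &new_folio->page) because the rest of each function has not yet been converted; presumably later patches in this series drop the hpage variable entirely. A minimal sketch of the call-site shape after this patch, using the names from the hunks above:

	struct folio *folio;
	struct page *hpage;
	int result;

	result = alloc_charge_folio(&folio, mm, cc);
	/* Transitional shim: the remainder of the function still operates
	 * on struct page, so keep a pointer to the folio's head page. */
	hpage = &folio->page;
	if (result != SCAN_SUCCEED)
		goto out_nolock;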