mm: memory: make numa_migrate_prep() take a folio
Author:     Kefeng Wang <wangkefeng.wang@huawei.com>
AuthorDate: Thu, 21 Sep 2023 07:44:15 +0000 (15:44 +0800)
Committer:  Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 16 Oct 2023 22:44:37 +0000 (15:44 -0700)
In preparation for large folio NUMA balancing, make numa_migrate_prep()
take a folio instead of a page; no functional change intended.

Link: https://lkml.kernel.org/r/20230921074417.24004-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
mm/internal.h
mm/memory.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5baf9b6dc52249e5f935f6cfffbe0041b17d6b76..aa0224556132611bed08aa322d31cc38ca1cbf4f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1556,7 +1556,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
         */
        if (node_is_toptier(nid))
                last_cpupid = page_cpupid_last(&folio->page);
-       target_nid = numa_migrate_prep(&folio->page, vma, haddr, nid, &flags);
+       target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
        if (target_nid == NUMA_NO_NODE) {
                folio_put(folio);
                goto out_map;
diff --git a/mm/internal.h b/mm/internal.h
index 9e62c8b952b8a09f24e42cbfd8a172b3eaebd65f..5a5c923725d314694cc196cb886390b766cda031 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -983,7 +983,7 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
 
 void __vunmap_range_noflush(unsigned long start, unsigned long end);
 
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
                      unsigned long addr, int page_nid, int *flags);
 
 void free_zone_device_page(struct page *page);
diff --git a/mm/memory.c b/mm/memory.c
index 865741d9b6b90c30339a2c955d95840b6cb95e34..20b290c9dc87bd53459ef1ea8226afdaf335afd1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4724,10 +4724,10 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
        return ret;
 }
 
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
                      unsigned long addr, int page_nid, int *flags)
 {
-       get_page(page);
+       folio_get(folio);
 
        /* Record the current PID accessing VMA */
        vma_set_access_pid_bit(vma);
@@ -4738,7 +4738,7 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
                *flags |= TNF_FAULT_LOCAL;
        }
 
-       return mpol_misplaced(page, vma, addr);
+       return mpol_misplaced(&folio->page, vma, addr);
 }
 
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
@@ -4812,8 +4812,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
                last_cpupid = (-1 & LAST_CPUPID_MASK);
        else
                last_cpupid = page_cpupid_last(&folio->page);
-       target_nid = numa_migrate_prep(&folio->page, vma, vmf->address, nid,
-                                      &flags);
+       target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags);
        if (target_nid == NUMA_NO_NODE) {
                folio_put(folio);
                goto out_map;
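
For illustration, here is a minimal sketch (not part of the patch) of the
call pattern the converted helper expects, assuming the caller has already
resolved the faulting page to a folio, as the do_numa_page() hunk above
does; folio, vma, vmf, nid, flags, and the out_map label all belong to
that surrounding caller. The reference-counting contract follows from the
hunks: numa_migrate_prep() pins the folio with folio_get(), so the caller
drops that reference with folio_put() when no migration target is chosen.

	int target_nid;

	/*
	 * numa_migrate_prep() takes a folio reference and asks the memory
	 * policy layer (still page-based at this point, hence the
	 * &folio->page passed to mpol_misplaced() internally) for a
	 * preferred node.
	 */
	target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags);
	if (target_nid == NUMA_NO_NODE) {
		/*
		 * No better placement: drop the reference taken by
		 * numa_migrate_prep() and map the folio where it is.
		 */
		folio_put(folio);
		goto out_map;
	}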