From: Kefeng Wang
Date: Thu, 21 Sep 2023 07:44:15 +0000 (+0800)
Subject: mm: memory: make numa_migrate_prep() to take a folio
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=cda6d93672ac5dd8af778a3f3e6082e12233b65b;p=linux.git

mm: memory: make numa_migrate_prep() to take a folio

In preparation for large folio numa balancing, make numa_migrate_prep()
take a folio; no functional change intended.

Link: https://lkml.kernel.org/r/20230921074417.24004-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang
Cc: David Hildenbrand
Cc: "Huang, Ying"
Cc: Hugh Dickins
Cc: Matthew Wilcox (Oracle)
Cc: Mike Kravetz
Cc: Zi Yan
Signed-off-by: Andrew Morton
---

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5baf9b6dc5224..aa02245561326 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1556,7 +1556,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	 */
 	if (node_is_toptier(nid))
 		last_cpupid = page_cpupid_last(&folio->page);
-	target_nid = numa_migrate_prep(&folio->page, vma, haddr, nid, &flags);
+	target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
 	if (target_nid == NUMA_NO_NODE) {
 		folio_put(folio);
 		goto out_map;
diff --git a/mm/internal.h b/mm/internal.h
index 9e62c8b952b8a..5a5c923725d31 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -983,7 +983,7 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
 
 void __vunmap_range_noflush(unsigned long start, unsigned long end);
 
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
 		      unsigned long addr, int page_nid, int *flags);
 
 void free_zone_device_page(struct page *page);
diff --git a/mm/memory.c b/mm/memory.c
index 865741d9b6b90..20b290c9dc87b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4724,10 +4724,10 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
 	return ret;
 }
 
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
 		      unsigned long addr, int page_nid, int *flags)
 {
-	get_page(page);
+	folio_get(folio);
 
 	/* Record the current PID acceesing VMA */
 	vma_set_access_pid_bit(vma);
@@ -4738,7 +4738,7 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
 		*flags |= TNF_FAULT_LOCAL;
 	}
 
-	return mpol_misplaced(page, vma, addr);
+	return mpol_misplaced(&folio->page, vma, addr);
 }
 
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
@@ -4812,8 +4812,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 		last_cpupid = (-1 & LAST_CPUPID_MASK);
 	else
 		last_cpupid = page_cpupid_last(&folio->page);
-	target_nid = numa_migrate_prep(&folio->page, vma, vmf->address, nid,
-			&flags);
+	target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags);
 	if (target_nid == NUMA_NO_NODE) {
 		folio_put(folio);
 		goto out_map;
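
For readers less familiar with this kind of folio conversion, a minimal stand-alone
sketch of the same pattern follows.  It is illustration only: the struct definitions
and the *_stub/*_sketch names are hypothetical stand-ins, not the kernel's types or
functions.  It simply mirrors the shape of the change above: the helper takes the
folio, callers stop passing &folio->page, and not-yet-converted callees (here,
mpol_misplaced()) still receive the head page.

	/* Illustration only, hypothetical stand-in types -- not kernel code. */
	#include <stdio.h>

	struct page  { int nid; };            /* stand-in for the kernel's struct page */
	struct folio { struct page page; };   /* head page embedded first, as in the kernel */

	/* stand-in for an interface that is not converted yet and still wants a page */
	static int mpol_misplaced_stub(struct page *page)
	{
		return page->nid;
	}

	/* after the change: the helper takes the folio directly */
	static int numa_migrate_prep_sketch(struct folio *folio)
	{
		/* reference and bookkeeping now operate on the folio (folio_get() etc.) */
		/* unconverted callees are still handed the head page */
		return mpol_misplaced_stub(&folio->page);
	}

	int main(void)
	{
		struct folio f = { .page = { .nid = 1 } };

		/* callers now pass the folio, not &folio->page */
		printf("target nid hint: %d\n", numa_migrate_prep_sketch(&f));
		return 0;
	}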