mm: memory: use a folio in do_numa_page()
author     Kefeng Wang <wangkefeng.wang@huawei.com>
           Thu, 21 Sep 2023 07:44:14 +0000 (15:44 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 16 Oct 2023 22:44:37 +0000 (15:44 -0700)
NUMA balancing only tries to migrate non-compound pages in
do_numa_page(), so use a folio there to save several compound_head()
calls.  Note that we use folio_estimated_sharers(): checking the folio
sharers this way is sufficient because only normal (order-0) pages are
handled here; once large folio NUMA balancing is supported, a precise
folio sharers check will be needed.  No functional change intended.
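
For reference, a minimal sketch of the helper relied on above (roughly
its definition in include/linux/mm.h as of this series; shown only to
illustrate why the estimate is exact for order-0 folios):

	/* Estimate the number of sharers by looking at the first page only. */
	static inline int folio_estimated_sharers(struct folio *folio)
	{
		return page_mapcount(folio_page(folio, 0));
	}

For an order-0 folio the first page is the only page, so this returns
the same value page_mapcount(page) did before this patch; the estimate
only becomes imprecise for large folios, which do_numa_page() still
skips via the folio_test_large() check.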

Link: https://lkml.kernel.org/r/20230921074417.24004-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory.c b/mm/memory.c
index 311e862c6404aa58498af8e42209b4c2c3b1f91d..865741d9b6b90c30339a2c955d95840b6cb95e34 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4744,8 +4744,8 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
-       struct page *page = NULL;
-       int page_nid = NUMA_NO_NODE;
+       struct folio *folio = NULL;
+       int nid = NUMA_NO_NODE;
        bool writable = false;
        int last_cpupid;
        int target_nid;
@@ -4776,12 +4776,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
            can_change_pte_writable(vma, vmf->address, pte))
                writable = true;
 
-       page = vm_normal_page(vma, vmf->address, pte);
-       if (!page || is_zone_device_page(page))
+       folio = vm_normal_folio(vma, vmf->address, pte);
+       if (!folio || folio_is_zone_device(folio))
                goto out_map;
 
        /* TODO: handle PTE-mapped THP */
-       if (PageCompound(page))
+       if (folio_test_large(folio))
                goto out_map;
 
        /*
@@ -4796,34 +4796,34 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
                flags |= TNF_NO_GROUP;
 
        /*
-        * Flag if the page is shared between multiple address spaces. This
+        * Flag if the folio is shared between multiple address spaces. This
         * is later used when determining whether to group tasks together
         */
-       if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
+       if (folio_estimated_sharers(folio) > 1 && (vma->vm_flags & VM_SHARED))
                flags |= TNF_SHARED;
 
-       page_nid = page_to_nid(page);
+       nid = folio_nid(folio);
        /*
         * For memory tiering mode, cpupid of slow memory page is used
         * to record page access time.  So use default value.
         */
        if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
-           !node_is_toptier(page_nid))
+           !node_is_toptier(nid))
                last_cpupid = (-1 & LAST_CPUPID_MASK);
        else
-               last_cpupid = page_cpupid_last(page);
-       target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
-                       &flags);
+               last_cpupid = page_cpupid_last(&folio->page);
+       target_nid = numa_migrate_prep(&folio->page, vma, vmf->address, nid,
+                                      &flags);
        if (target_nid == NUMA_NO_NODE) {
-               put_page(page);
+               folio_put(folio);
                goto out_map;
        }
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        writable = false;
 
        /* Migrate to the requested node */
-       if (migrate_misplaced_folio(page_folio(page), vma, target_nid)) {
-               page_nid = target_nid;
+       if (migrate_misplaced_folio(folio, vma, target_nid)) {
+               nid = target_nid;
                flags |= TNF_MIGRATED;
        } else {
                flags |= TNF_MIGRATE_FAIL;
@@ -4839,8 +4839,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
        }
 
 out:
-       if (page_nid != NUMA_NO_NODE)
-               task_numa_fault(last_cpupid, page_nid, 1, flags);
+       if (nid != NUMA_NO_NODE)
+               task_numa_fault(last_cpupid, nid, 1, flags);
        return 0;
 out_map:
        /*