mm/gup: refactor record_subpages() to find 1st small page
author     Peter Xu <peterx@redhat.com>
           Wed, 27 Mar 2024 15:23:26 +0000 (11:23 -0400)
committer  Andrew Morton <akpm@linux-foundation.org>
           Fri, 26 Apr 2024 03:56:22 +0000 (20:56 -0700)
All the fast-gup functions operate on a tail page, so every caller has
to do the same page mask calculation to find that page before feeding
it into record_subpages().

Merge that logic into record_subpages(), so that it does the nth_page()
calculation itself.
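
For illustration, with hypothetical numbers (assuming PAGE_SHIFT == 12
and a naturally aligned PMD-mapped huge page; nothing below is part of
the patch itself), the mask arithmetic that each caller used to
open-code now happens once inside the helper:

	/*
	 * Sketch: sz == PMD_SIZE == 0x200000, addr == 0x1234000, so
	 *
	 *   (addr & (sz - 1)) >> PAGE_SHIFT
	 *     == (0x1234000 & 0x1fffff) >> 12
	 *     == 0x34000 >> 12
	 *     == 52
	 *
	 * i.e. record_subpages() starts from page index 52 within the
	 * huge page -- the same page the old caller-side
	 * nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT)
	 * used to compute, since ~PMD_MASK == PMD_SIZE - 1.
	 */
	page = pmd_page(orig);
	refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);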

Link: https://lkml.kernel.org/r/20240327152332.950956-8-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Cc: Aneesh Kumar K.V (IBM) <aneesh.kumar@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/gup.c

index 1cc3240e490012d299face8f37f8c0e988bcc7e6..e83e262ea8e95cb520b091463803c62ea1c366cd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2789,13 +2789,16 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
 }
 #endif
 
-static int record_subpages(struct page *page, unsigned long addr,
-                          unsigned long end, struct page **pages)
+static int record_subpages(struct page *page, unsigned long sz,
+                          unsigned long addr, unsigned long end,
+                          struct page **pages)
 {
+       struct page *start_page;
        int nr;
 
+       start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
        for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
-               pages[nr] = nth_page(page, nr);
+               pages[nr] = nth_page(start_page, nr);
 
        return nr;
 }
@@ -2830,8 +2833,8 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 
-       page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
-       refs = record_subpages(page, addr, end, pages + *nr);
+       page = pte_page(pte);
+       refs = record_subpages(page, sz, addr, end, pages + *nr);
 
        folio = try_grab_folio(page, refs, flags);
        if (!folio)
@@ -2904,8 +2907,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                                             pages, nr);
        }
 
-       page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
-       refs = record_subpages(page, addr, end, pages + *nr);
+       page = pmd_page(orig);
+       refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);
 
        folio = try_grab_folio(page, refs, flags);
        if (!folio)
@@ -2948,8 +2951,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                                             pages, nr);
        }
 
-       page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
-       refs = record_subpages(page, addr, end, pages + *nr);
+       page = pud_page(orig);
+       refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);
 
        folio = try_grab_folio(page, refs, flags);
        if (!folio)
@@ -2988,8 +2991,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 
        BUILD_BUG_ON(pgd_devmap(orig));
 
-       page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
-       refs = record_subpages(page, addr, end, pages + *nr);
+       page = pgd_page(orig);
+       refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
 
        folio = try_grab_folio(page, refs, flags);
        if (!folio)