mm: rename vma_pgoff_address back to vma_address
author Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 28 Mar 2024 22:58:29 +0000 (22:58 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:31 +0000 (20:56 -0700)
With all callers converted, we can use the nicer, shorter name.  Take this
opportunity to reorder the arguments into the logical order (larger object
first).

Link: https://lkml.kernel.org/r/20240328225831.1765286-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
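
For illustration, a caller-side sketch of the rename and argument reorder
(a hypothetical call site modelled on the rmap_walk_anon() hunk below, not
an excerpt from the patch):

	/* Before: page offset and count came first, the VMA last. */
	addr = vma_pgoff_address(pgoff, folio_nr_pages(folio), vma);

	/* After: the VMA (the larger object) leads, under the shorter name. */
	addr = vma_address(vma, pgoff, folio_nr_pages(folio));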
mm/internal.h
mm/memory-failure.c
mm/page_vma_mapped.c
mm/rmap.c

index f4ef48d57b1c4a730e74cb7cf2047fb2e37c6710..d567381b12cc80cbbbf90f1de6c8dff6e1f9265b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -805,17 +805,16 @@ void mlock_drain_remote(int cpu);
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /**
- * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * vma_address - Find the virtual address a page range is mapped at
+ * @vma: The vma which maps this object.
  * @pgoff: The page offset within its object.
  * @nr_pages: The number of pages to consider.
- * @vma: The vma which maps this object.
  *
  * If any page in this range is mapped by this VMA, return the first address
  * where any of these pages appear.  Otherwise, return -EFAULT.
  */
-static inline unsigned long
-vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
-                 struct vm_area_struct *vma)
+static inline unsigned long vma_address(struct vm_area_struct *vma,
+               pgoff_t pgoff, unsigned long nr_pages)
 {
        unsigned long address;
 
index 9e50586f2e37e0e0d393a5d3c256c66141f5d445..0d863e9216affd054e53dcd061828f133cabf338 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -455,7 +455,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
        tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
        if (is_zone_device_page(p)) {
                if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
-                       tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
+                       tk->addr = vma_address(vma, fsdax_pgoff, 1);
                tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
        } else
                tk->size_shift = page_shift(compound_head(p));
index ac48d6284badcedd68d421fdd43f7962dc5543ef..53b8868ede61c609510c538c5d7b70a68c9a68da 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -334,7 +334,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
                .flags = PVMW_SYNC,
        };
 
-       pvmw.address = vma_pgoff_address(pgoff, 1, vma);
+       pvmw.address = vma_address(vma, pgoff, 1);
        if (pvmw.address == -EFAULT)
                return 0;
        if (!page_vma_mapped_walk(&pvmw))
index 4b08b1a06688877a3c71e9bfb1b005896132635e..56b313aa2ebfb252796e8303fff6f77cd9a4d2cf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -794,7 +794,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 
        /* The !page__anon_vma above handles KSM folios */
        pgoff = folio->index + folio_page_idx(folio, page);
-       return vma_pgoff_address(pgoff, 1, vma);
+       return vma_address(vma, pgoff, 1);
 }
 
 /*
@@ -1132,7 +1132,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
        if (invalid_mkclean_vma(vma, NULL))
                return 0;
 
-       pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
+       pvmw.address = vma_address(vma, pgoff, nr_pages);
        VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
 
        return page_vma_mkclean_one(&pvmw);
@@ -2592,8 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                        pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
-               unsigned long address = vma_pgoff_address(pgoff_start,
-                               folio_nr_pages(folio), vma);
+               unsigned long address = vma_address(vma, pgoff_start,
+                               folio_nr_pages(folio));
 
                VM_BUG_ON_VMA(address == -EFAULT, vma);
                cond_resched();
@@ -2654,8 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
 lookup:
        vma_interval_tree_foreach(vma, &mapping->i_mmap,
                        pgoff_start, pgoff_end) {
-               unsigned long address = vma_pgoff_address(pgoff_start,
-                              folio_nr_pages(folio), vma);
+               unsigned long address = vma_address(vma, pgoff_start,
+                              folio_nr_pages(folio));
 
                VM_BUG_ON_VMA(address == -EFAULT, vma);
                cond_resched();