mm: simplify follow_pte{,pmd}
author    Christoph Hellwig <hch@lst.de>
          Wed, 16 Dec 2020 04:47:23 +0000 (20:47 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 16 Dec 2020 06:46:19 +0000 (22:46 -0800)
Merge __follow_pte_pmd, follow_pte_pmd and follow_pte into a single
follow_pte function and just pass two additional NULL arguments for the
two previous follow_pte callers.
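
For illustration, not part of the patch: a minimal sketch of the new
calling convention, modeled on the follow_pfn() hunk below. A PTE-only
caller passes NULL for the mmu_notifier range and the pmd output
pointer, which gives the behaviour of the old follow_pte() wrapper.
The helper name lookup_pfn_sketch is hypothetical.

	/*
	 * Hypothetical PTE-only caller of the merged follow_pte():
	 * NULL @range and NULL @pmdpp recover the old follow_pte()
	 * semantics.  On success the PTE is mapped and its page table
	 * lock is held; release both with pte_unmap_unlock().
	 */
	static int lookup_pfn_sketch(struct mm_struct *mm,
				     unsigned long address,
				     unsigned long *pfn)
	{
		spinlock_t *ptl;
		pte_t *ptep;
		int ret;

		ret = follow_pte(mm, address, NULL, &ptep, NULL, &ptl);
		if (ret)
			return ret;
		*pfn = pte_pfn(*ptep);
		pte_unmap_unlock(ptep, ptl);
		return 0;
	}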

[sfr@canb.auug.org.au: merge fix for "s390/pci: remove races against pte updates"]
Link: https://lkml.kernel.org/r/20201111221254.7f6a3658@canb.auug.org.au
Link: https://lkml.kernel.org/r/20201029101432.47011-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/s390/pci/pci_mmio.c
fs/dax.c
include/linux/mm.h
mm/memory.c

diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index de3bdbed88811d7c5e60eb450d7ca5ac5afe0a9f..18f2d10c31764afb802f0ed51102a791f9be227e 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -170,7 +170,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
        if (!(vma->vm_flags & VM_WRITE))
                goto out_unlock_mmap;
 
-       ret = follow_pte_pmd(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
+       ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
        if (ret)
                goto out_unlock_mmap;
 
@@ -311,7 +311,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
        if (!(vma->vm_flags & VM_WRITE))
                goto out_unlock_mmap;
 
-       ret = follow_pte_pmd(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
+       ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
        if (ret)
                goto out_unlock_mmap;
 
diff --git a/fs/dax.c b/fs/dax.c
index 5b47834f2e1bb55cbea08131e725438b517a28c8..26d5dcd2d69e5c0afd2c93d7042b54c3324b7342 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -810,12 +810,11 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
                address = pgoff_address(index, vma);
 
                /*
-                * Note because we provide range to follow_pte_pmd it will
-                * call mmu_notifier_invalidate_range_start() on our behalf
-                * before taking any lock.
+                * Note because we provide range to follow_pte it will call
+                * mmu_notifier_invalidate_range_start() on our behalf before
+                * taking any lock.
                 */
-               if (follow_pte_pmd(vma->vm_mm, address, &range,
-                                  &ptep, &pmdp, &ptl))
+               if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
                        continue;
 
                /*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index abc7b31542985c6d0f1adad9f6f5d8bfbbbb416b..855161080f18c874df14c1ca57e97cff2d3c6ee6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1641,9 +1641,9 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                unsigned long end, unsigned long floor, unsigned long ceiling);
 int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-                  struct mmu_notifier_range *range,
-                  pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
+int follow_pte(struct mm_struct *mm, unsigned long address,
+               struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+               spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
diff --git a/mm/memory.c b/mm/memory.c
index 126e140baf73d28b8ae3af5bcef729b1d66224db..7d608765932b99e95aa5ba49e88328412084b1c4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4707,9 +4707,9 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-                           struct mmu_notifier_range *range,
-                           pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
+int follow_pte(struct mm_struct *mm, unsigned long address,
+              struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+              spinlock_t **ptlp)
 {
        pgd_t *pgd;
        p4d_t *p4d;
@@ -4774,31 +4774,6 @@ out:
        return -EINVAL;
 }
 
-static inline int follow_pte(struct mm_struct *mm, unsigned long address,
-                            pte_t **ptepp, spinlock_t **ptlp)
-{
-       int res;
-
-       /* (void) is needed to make gcc happy */
-       (void) __cond_lock(*ptlp,
-                          !(res = __follow_pte_pmd(mm, address, NULL,
-                                                   ptepp, NULL, ptlp)));
-       return res;
-}
-
-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-                  struct mmu_notifier_range *range,
-                  pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
-{
-       int res;
-
-       /* (void) is needed to make gcc happy */
-       (void) __cond_lock(*ptlp,
-                          !(res = __follow_pte_pmd(mm, address, range,
-                                                   ptepp, pmdpp, ptlp)));
-       return res;
-}
-
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
@@ -4819,7 +4794,7 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                return ret;
 
-       ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+       ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl);
        if (ret)
                return ret;
        *pfn = pte_pfn(*ptep);
@@ -4840,7 +4815,7 @@ int follow_phys(struct vm_area_struct *vma,
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                goto out;
 
-       if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+       if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl))
                goto out;
        pte = *ptep;