mm: convert lock_page_or_retry() to folio_lock_or_retry()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 2 Sep 2022 19:46:53 +0000 (20:46 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 3 Oct 2022 21:02:55 +0000 (14:02 -0700)
Remove a call to compound_head() in each of the two callers.
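
page_folio() is the folio equivalent of compound_head(), so hoisting it
into each caller means the folio is computed once and every subsequent
folio operation skips the head-page lookup.  A minimal before/after
sketch of the caller-side pattern (illustrative only, not part of the
diff below):

	/*
	 * Before: lock_page_or_retry() resolved the folio internally,
	 * and unlock_page() repeated the compound_head() lookup.
	 */
	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
		return VM_FAULT_RETRY;
	...
	unlock_page(page);

	/* After: resolve the folio once, then operate on it directly. */
	struct folio *folio = page_folio(vmf->page);

	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
		return VM_FAULT_RETRY;
	...
	folio_unlock(folio);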

Link: https://lkml.kernel.org/r/20220902194653.1739778-58-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/pagemap.h
mm/memory.c

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 09de43e36a64bb9b12936e33d213ae040b7178eb..32846b6306dbd29b4cd0202c02cdbb5cec2bd4a7 100644
@@ -989,19 +989,16 @@ static inline int lock_page_killable(struct page *page)
 }
 
 /*
- * lock_page_or_retry - Lock the page, unless this would block and the
+ * folio_lock_or_retry - Lock the folio, unless this would block and the
  * caller indicated that it can handle a retry.
  *
  * Return value and mmap_lock implications depend on flags; see
  * __folio_lock_or_retry().
  */
-static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
-                                    unsigned int flags)
+static inline bool folio_lock_or_retry(struct folio *folio,
+               struct mm_struct *mm, unsigned int flags)
 {
-       struct folio *folio;
        might_sleep();
-
-       folio = page_folio(page);
        return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 6e568f190e7a894aae6fe15b5d851ffa1754e164..d671ad367d6777692ad8995bbe19eb28d556631e 100644
@@ -3618,11 +3618,11 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 {
-       struct page *page = vmf->page;
+       struct folio *folio = page_folio(vmf->page);
        struct vm_area_struct *vma = vmf->vma;
        struct mmu_notifier_range range;
 
-       if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+       if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
                return VM_FAULT_RETRY;
        mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
                                vma->vm_mm, vmf->address & PAGE_MASK,
@@ -3632,10 +3632,10 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                                &vmf->ptl);
        if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
-               restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
+               restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
 
        pte_unmap_unlock(vmf->pte, vmf->ptl);
-       unlock_page(page);
+       folio_unlock(folio);
 
        mmu_notifier_invalidate_range_end(&range);
        return 0;
@@ -3835,7 +3835,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                goto out_release;
        }
 
-       locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
+       locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
 
        if (!locked) {
                ret |= VM_FAULT_RETRY;
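
The do_swap_page() hunk is truncated above; for reference, the
caller-side contract looks roughly like the sketch below (based on the
behaviour documented for __folio_lock_or_retry() in this series, not
part of this patch).  A false return means the fault must be retried,
and unless FAULT_FLAG_RETRY_NOWAIT was set, mmap_lock has already been
dropped on the caller's behalf:

	locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
	if (!locked) {
		/*
		 * __folio_lock_or_retry() may already have released
		 * mmap_lock (it keeps it only for FAULT_FLAG_RETRY_NOWAIT),
		 * so bail out and let the fault be retried.
		 */
		ret |= VM_FAULT_RETRY;
		goto out_release;
	}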