mm: convert __do_fault() to use a folio
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Wed, 8 Nov 2023 18:28:05 +0000 (18:28 +0000)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 11 Dec 2023 00:51:38 +0000 (16:51 -0800)
Convert vmf->page to a folio as soon as we're going to use it.  This fixes
a bug if the fault handler returns a tail page with hardware poison; tail
pages have an invalid page->index, so we would fail to unmap the page from
the page tables.  We actually have to unmap the entire folio (or
mapping_evict_folio() will fail), so use unmap_mapping_folio() instead.
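
As a hypothetical sketch (not from this patch) of the failure: only the
head page of a compound page has a meaningful ->index, so passing a tail
page's ->index to unmap_mapping_pages() unmapped the wrong range, while
unmap_mapping_folio() starts from the folio and computes the range itself:

	struct folio *folio = page_folio(page);	/* tail -> head page */

	/* Old: ->index is not maintained for tail pages. */
	unmap_mapping_pages(page_mapping(page), page->index, 1, false);

	/* New: drop every page table entry covering the folio. */
	unmap_mapping_folio(folio);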

This also saves various calls to compound_head() hidden in lock_page(),
put_page(), etc.
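
The page wrappers re-derive the head page internally; a simplified sketch
of the pattern being avoided (the real definitions in the mm headers do
more, e.g. might_sleep() and trylock fast paths):

	static inline void lock_page(struct page *page)
	{
		folio_lock(page_folio(page));	/* hidden compound_head() */
	}

	static inline void put_page(struct page *page)
	{
		folio_put(page_folio(page));	/* and another one here */
	}

Calling page_folio() once in __do_fault() and reusing the local folio
pointer avoids repeating that lookup on every operation.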

Link: https://lkml.kernel.org/r/20231108182809.602073-3-willy@infradead.org
Fixes: 793917d997df ("mm/readahead: Add large folio readahead")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory.c b/mm/memory.c
index a7025ed5c65b3d332d52588326de6cf452dc8788..e27e2e5beb3f5175abe4ff47e34ceec9db6ebab0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4245,6 +4245,7 @@ oom:
 static vm_fault_t __do_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
+       struct folio *folio;
        vm_fault_t ret;
 
        /*
@@ -4273,27 +4274,26 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
                            VM_FAULT_DONE_COW)))
                return ret;
 
+       folio = page_folio(vmf->page);
        if (unlikely(PageHWPoison(vmf->page))) {
-               struct page *page = vmf->page;
                vm_fault_t poisonret = VM_FAULT_HWPOISON;
                if (ret & VM_FAULT_LOCKED) {
-                       if (page_mapped(page))
-                               unmap_mapping_pages(page_mapping(page),
-                                                   page->index, 1, false);
-                       /* Retry if a clean page was removed from the cache. */
-                       if (invalidate_inode_page(page))
+                       if (page_mapped(vmf->page))
+                               unmap_mapping_folio(folio);
+                       /* Retry if a clean folio was removed from the cache. */
+                       if (mapping_evict_folio(folio->mapping, folio))
                                poisonret = VM_FAULT_NOPAGE;
-                       unlock_page(page);
+                       folio_unlock(folio);
                }
-               put_page(page);
+               folio_put(folio);
                vmf->page = NULL;
                return poisonret;
        }
 
        if (unlikely(!(ret & VM_FAULT_LOCKED)))
-               lock_page(vmf->page);
+               folio_lock(folio);
        else
-               VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
+               VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
 
        return ret;
 }