dax: use huge_zero_folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 26 Mar 2024 20:28:27 +0000 (20:28 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:20 +0000 (20:56 -0700)
Convert from huge_zero_page to huge_zero_folio.

Link: https://lkml.kernel.org/r/20240326202833.523759-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/dax.c
include/trace/events/fs_dax.h

index 423fc1607dfae5bfda9acb7042acb408e187823c..becb4a6920c6aa1fc0aadb909abefbd8dddd866f 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1207,17 +1207,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = mapping->host;
        pgtable_t pgtable = NULL;
-       struct page *zero_page;
+       struct folio *zero_folio;
        spinlock_t *ptl;
        pmd_t pmd_entry;
        pfn_t pfn;
 
-       zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+       zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
 
-       if (unlikely(!zero_page))
+       if (unlikely(!zero_folio))
                goto fallback;
 
-       pfn = page_to_pfn_t(zero_page);
+       pfn = page_to_pfn_t(&zero_folio->page);
        *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
                                  DAX_PMD | DAX_ZERO_PAGE);
 
@@ -1237,17 +1237,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                mm_inc_nr_ptes(vma->vm_mm);
        }
-       pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+       pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
        spin_unlock(ptl);
-       trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+       trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
        return VM_FAULT_NOPAGE;
 
 fallback:
        if (pgtable)
                pte_free(vma->vm_mm, pgtable);
-       trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+       trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
        return VM_FAULT_FALLBACK;
 }
 #else
index 97b09fcf7e52876979b879269f8b39fdd3a73ad1..86fe6aecff1e794a3bdbdba9196e5380945f30ce 100644 (file)
@@ -62,14 +62,14 @@ DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
 
 DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
        TP_PROTO(struct inode *inode, struct vm_fault *vmf,
-               struct page *zero_page,
+               struct folio *zero_folio,
                void *radix_entry),
-       TP_ARGS(inode, vmf, zero_page, radix_entry),
+       TP_ARGS(inode, vmf, zero_folio, radix_entry),
        TP_STRUCT__entry(
                __field(unsigned long, ino)
                __field(unsigned long, vm_flags)
                __field(unsigned long, address)
-               __field(struct page *, zero_page)
+               __field(struct folio *, zero_folio)
                __field(void *, radix_entry)
                __field(dev_t, dev)
        ),
@@ -78,17 +78,17 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
                __entry->ino = inode->i_ino;
                __entry->vm_flags = vmf->vma->vm_flags;
                __entry->address = vmf->address;
-               __entry->zero_page = zero_page;
+               __entry->zero_folio = zero_folio;
                __entry->radix_entry = radix_entry;
        ),
-       TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p "
+       TP_printk("dev %d:%d ino %#lx %s address %#lx zero_folio %p "
                        "radix_entry %#lx",
                MAJOR(__entry->dev),
                MINOR(__entry->dev),
                __entry->ino,
                __entry->vm_flags & VM_SHARED ? "shared" : "private",
                __entry->address,
-               __entry->zero_page,
+               __entry->zero_folio,
                (unsigned long)__entry->radix_entry
        )
 )
@@ -96,8 +96,8 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
 #define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
 DEFINE_EVENT(dax_pmd_load_hole_class, name, \
        TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
-               struct page *zero_page, void *radix_entry), \
-       TP_ARGS(inode, vmf, zero_page, radix_entry))
+               struct folio *zero_folio, void *radix_entry), \
+       TP_ARGS(inode, vmf, zero_folio, radix_entry))
 
 DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
 DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);