userfaultfd: convert mfill_atomic_hugetlb() to use a folio
author: ZhangPeng <zhangpeng362@huawei.com>
Mon, 10 Apr 2023 13:39:30 +0000 (21:39 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 18 Apr 2023 23:29:55 +0000 (16:29 -0700)
Convert hugetlb_mfill_atomic_pte() to take in a folio pointer instead of
a page pointer.

Convert mfill_atomic_hugetlb() to use a folio.

Link: https://lkml.kernel.org/r/20230410133932.32288-5-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/userfaultfd.c

index 2a758bcd67199714cc2e1f97694cccff5afce9c6..28703fe22386424a8aa9b6171125bbbfb73af8e2 100644 (file)
@@ -163,7 +163,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             uffd_flags_t flags,
-                            struct page **pagep);
+                            struct folio **foliop);
 #endif /* CONFIG_USERFAULTFD */
 bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
@@ -397,7 +397,7 @@ static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                                           unsigned long dst_addr,
                                           unsigned long src_addr,
                                           uffd_flags_t flags,
-                                          struct page **pagep)
+                                          struct folio **foliop)
 {
        BUG();
        return 0;
index aade1b5134741371b72d4ef6c690064459baa76e..c88f856ec2e2a42650b4282afa1a64dfa0db9694 100644 (file)
@@ -6178,7 +6178,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             uffd_flags_t flags,
-                            struct page **pagep)
+                            struct folio **foliop)
 {
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
@@ -6201,8 +6201,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                if (IS_ERR(folio))
                        goto out;
                folio_in_pagecache = true;
-       } else if (!*pagep) {
-               /* If a page already exists, then it's UFFDIO_COPY for
+       } else if (!*foliop) {
+               /* If a folio already exists, then it's UFFDIO_COPY for
                 * a non-missing case. Return -EEXIST.
                 */
                if (vm_shared &&
@@ -6237,33 +6237,33 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                                ret = -ENOMEM;
                                goto out;
                        }
-                       *pagep = &folio->page;
-                       /* Set the outparam pagep and return to the caller to
+                       *foliop = folio;
+                       /* Set the outparam foliop and return to the caller to
                         * copy the contents outside the lock. Don't free the
-                        * page.
+                        * folio.
                         */
                        goto out;
                }
        } else {
                if (vm_shared &&
                    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
-                       put_page(*pagep);
+                       folio_put(*foliop);
                        ret = -EEXIST;
-                       *pagep = NULL;
+                       *foliop = NULL;
                        goto out;
                }
 
                folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
                if (IS_ERR(folio)) {
-                       put_page(*pagep);
+                       folio_put(*foliop);
                        ret = -ENOMEM;
-                       *pagep = NULL;
+                       *foliop = NULL;
                        goto out;
                }
-               copy_user_huge_page(&folio->page, *pagep, dst_addr, dst_vma,
+               copy_user_huge_page(&folio->page, &(*foliop)->page, dst_addr, dst_vma,
                                    pages_per_huge_page(h));
-               put_page(*pagep);
-               *pagep = NULL;
+               folio_put(*foliop);
+               *foliop = NULL;
        }
 
        /*
index 1e7dba6c4c5fe9ef4377af503a181441b3ca6eae..2f263afb823d4a1115f88dfadffd44db549cb559 100644 (file)
@@ -321,7 +321,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
        pte_t *dst_pte;
        unsigned long src_addr, dst_addr;
        long copied;
-       struct page *page;
+       struct folio *folio;
        unsigned long vma_hpagesize;
        pgoff_t idx;
        u32 hash;
@@ -341,7 +341,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
        src_addr = src_start;
        dst_addr = dst_start;
        copied = 0;
-       page = NULL;
+       folio = NULL;
        vma_hpagesize = vma_kernel_pagesize(dst_vma);
 
        /*
@@ -410,7 +410,7 @@ retry:
                }
 
                err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
-                                              src_addr, flags, &page);
+                                              src_addr, flags, &folio);
 
                hugetlb_vma_unlock_read(dst_vma);
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
@@ -419,9 +419,9 @@ retry:
 
                if (unlikely(err == -ENOENT)) {
                        mmap_read_unlock(dst_mm);
-                       BUG_ON(!page);
+                       BUG_ON(!folio);
 
-                       err = copy_folio_from_user(page_folio(page),
+                       err = copy_folio_from_user(folio,
                                                   (const void __user *)src_addr, true);
                        if (unlikely(err)) {
                                err = -EFAULT;
@@ -432,7 +432,7 @@ retry:
                        dst_vma = NULL;
                        goto retry;
                } else
-                       BUG_ON(page);
+                       BUG_ON(folio);
 
                if (!err) {
                        dst_addr += vma_hpagesize;
@@ -449,8 +449,8 @@ retry:
 out_unlock:
        mmap_read_unlock(dst_mm);
 out:
-       if (page)
-               put_page(page);
+       if (folio)
+               folio_put(folio);
        BUG_ON(copied < 0);
        BUG_ON(err > 0);
        BUG_ON(!copied && !err);