userfaultfd: convert mfill_atomic_pte_copy() to use a folio
author: ZhangPeng <zhangpeng362@huawei.com>
Mon, 10 Apr 2023 13:39:27 +0000 (21:39 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 18 Apr 2023 23:29:54 +0000 (16:29 -0700)
Patch series "userfaultfd: convert userfaultfd functions to use folios",
v6.

This patch series converts several userfaultfd functions to use folios.

This patch (of 6):

Call vma_alloc_folio() directly instead of alloc_page_vma() and convert
page_kaddr to kaddr in mfill_atomic_pte_copy().  Removes several calls to
compound_head().

Link: https://lkml.kernel.org/r/20230410133932.32288-1-zhangpeng362@huawei.com
Link: https://lkml.kernel.org/r/20230410133932.32288-2-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/userfaultfd.c

index 7f1b5f8b712cefdfae3c69c0206e69cc07cf06df..313bc683c2b66187623d06fd0f0a28d292039010 100644 (file)
@@ -135,17 +135,18 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
                                 uffd_flags_t flags,
                                 struct page **pagep)
 {
-       void *page_kaddr;
+       void *kaddr;
        int ret;
-       struct page *page;
+       struct folio *folio;
 
        if (!*pagep) {
                ret = -ENOMEM;
-               page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
-               if (!page)
+               folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
+                                       dst_addr, false);
+               if (!folio)
                        goto out;
 
-               page_kaddr = kmap_local_page(page);
+               kaddr = kmap_local_folio(folio, 0);
                /*
                 * The read mmap_lock is held here.  Despite the
                 * mmap_lock being read recursive a deadlock is still
@@ -162,45 +163,44 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
                 * and retry the copy outside the mmap_lock.
                 */
                pagefault_disable();
-               ret = copy_from_user(page_kaddr,
-                                    (const void __user *) src_addr,
+               ret = copy_from_user(kaddr, (const void __user *) src_addr,
                                     PAGE_SIZE);
                pagefault_enable();
-               kunmap_local(page_kaddr);
+               kunmap_local(kaddr);
 
                /* fallback to copy_from_user outside mmap_lock */
                if (unlikely(ret)) {
                        ret = -ENOENT;
-                       *pagep = page;
+                       *pagep = &folio->page;
                        /* don't free the page */
                        goto out;
                }
 
-               flush_dcache_page(page);
+               flush_dcache_folio(folio);
        } else {
-               page = *pagep;
+               folio = page_folio(*pagep);
                *pagep = NULL;
        }
 
        /*
-        * The memory barrier inside __SetPageUptodate makes sure that
+        * The memory barrier inside __folio_mark_uptodate makes sure that
         * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
-       __SetPageUptodate(page);
+       __folio_mark_uptodate(folio);
 
        ret = -ENOMEM;
-       if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
+       if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
                goto out_release;
 
        ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-                                      page, true, flags);
+                                      &folio->page, true, flags);
        if (ret)
                goto out_release;
 out:
        return ret;
 out_release:
-       put_page(page);
+       folio_put(folio);
        goto out;
 }