*/
 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
                       unsigned long address, pte_t *ptep, unsigned int flags,
-                      struct folio *pagecache_folio, spinlock_t *ptl)
+                      struct folio *pagecache_folio, spinlock_t *ptl,
+                      struct vm_fault *vmf)
 {
        const bool unshare = flags & FAULT_FLAG_UNSHARE;
        pte_t pte = huge_ptep_get(ptep);
         * When the original hugepage is a shared one, it does not have
         * an anon_vma prepared.
         */
-       if (unlikely(anon_vma_prepare(vma))) {
-               ret = VM_FAULT_OOM;
+       ret = vmf_anon_prepare(vmf);
+       if (unlikely(ret))
                goto out_release_all;
-       }
 
        if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
                ret = VM_FAULT_HWPOISON_LARGE;
                        new_pagecache_folio = true;
                } else {
                        folio_lock(folio);
-                       if (unlikely(anon_vma_prepare(vma))) {
-                               ret = VM_FAULT_OOM;
+
+                       ret = vmf_anon_prepare(vmf);
+                       if (unlikely(ret))
                                goto backout_unlocked;
-                       }
                        anon_rmap = 1;
                }
        } else {
        hugetlb_count_add(pages_per_huge_page(h), mm);
        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
-               ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
+               ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl, vmf);
        }
 
        spin_unlock(ptl);
        if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
                if (!huge_pte_write(entry)) {
                        ret = hugetlb_wp(mm, vma, address, ptep, flags,
-                                        pagecache_folio, ptl);
+                                        pagecache_folio, ptl, &vmf);
                        goto out_put_page;
                } else if (likely(flags & FAULT_FLAG_WRITE)) {
                        entry = huge_pte_mkdirty(entry);
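
Note: the hunks above switch hugetlb's copy-on-write and no-page paths from
anon_vma_prepare(), whose failure was hard-coded to VM_FAULT_OOM, to
vmf_anon_prepare(), whose vm_fault_t return value is propagated as-is. Below
is a minimal caller-side sketch of the contract these hunks rely on (0 on
success, a vm_fault_t code such as VM_FAULT_OOM or VM_FAULT_RETRY on
failure). It is an illustration only, not kernel code, and the helper name
prepare_anon_sketch() is made up for the example.

	static vm_fault_t prepare_anon_sketch(struct vm_fault *vmf)
	{
		vm_fault_t ret;

		/*
		 * Old pattern: anon_vma_prepare(vma) returned a plain int and
		 * every failure was reported to the caller as VM_FAULT_OOM.
		 *
		 * New pattern: propagate whatever vmf_anon_prepare() reports,
		 * which may also be VM_FAULT_RETRY when the anon_vma cannot
		 * be set up under the current lock state.
		 */
		ret = vmf_anon_prepare(vmf);
		if (unlikely(ret))
			return ret;

		return 0;
	}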