 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                 unsigned long len, unsigned long pgoff, unsigned long flags);
+unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags,
+               vm_flags_t vm_flags);
 
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
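The header grows a second prototype rather than changing the existing one: thp_get_unmapped_area() keeps the ->get_unmapped_area-compatible signature, while the new _vmflags variant also takes the VMA flags the mapping will eventually carry, so the free-address search can account for them. A minimal caller sketch (the function name and the VM_READ | VM_WRITE value are illustrative, not part of this patch):

/*
 * Illustrative only: a caller that already knows the eventual VMA
 * flags passes them down instead of going through the legacy entry.
 */
static unsigned long pick_thp_aligned_addr(struct file *filp,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE;	/* example value */

	return thp_get_unmapped_area_vmflags(filp, 0, len, pgoff, flags,
					     vm_flags);
}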
 
 #define thp_get_unmapped_area  NULL
 
+static inline unsigned long
+thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+                             unsigned long len, unsigned long pgoff,
+                             unsigned long flags, vm_flags_t vm_flags)
+{
+       return 0;
+}
+
 static inline bool
 can_split_folio(struct folio *folio, int *pextra_pins)
 {
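When CONFIG_TRANSPARENT_HUGEPAGE is off, thp_get_unmapped_area stays a NULL define, but the new variant needs a real (empty) definition returning 0: the IS_ENABLED() branch added to mm/mmap.c below is parsed and type-checked in both configurations, so the symbol must exist even though the call is dead code there. A generic sketch of that pattern (feature_op() and generic_op() are made-up names):

	/*
	 * Made-up names: both arms are compiled regardless of CONFIG_FOO,
	 * so feature_op() needs at least a stub in the !CONFIG_FOO build,
	 * even though the dead branch is eliminated at compile time.
	 */
	if (IS_ENABLED(CONFIG_FOO))
		addr = feature_op(file, addr, len);
	else
		addr = generic_op(file, addr, len);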
 
 
 static unsigned long __thp_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len,
-               loff_t off, unsigned long flags, unsigned long size)
+               loff_t off, unsigned long flags, unsigned long size,
+               vm_flags_t vm_flags)
 {
        loff_t off_end = off + len;
        loff_t off_align = round_up(off, size);
        if (len_pad < len || (off + len_pad) < off)
                return 0;
 
-       ret = mm_get_unmapped_area(current->mm, filp, addr, len_pad,
-                                  off >> PAGE_SHIFT, flags);
+       ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
+                                          off >> PAGE_SHIFT, flags, vm_flags);
 
        /*
         * The failure might be due to length padding. The caller will retry
        return ret;
 }
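The padding rationale: requesting len + size bytes guarantees the returned window contains an address congruent to off modulo size, so the size-aligned file offset off_align can land on a PMD boundary; the check kept above rejects arithmetic overflow of len_pad and of off + len_pad. A standalone restatement of the alignment step (names are illustrative; size must be a power of two, as PMD_SIZE is):

/*
 * Illustrative restatement: the first address at or above 'base' that
 * is congruent to 'off' modulo 'size', so size-aligned file offsets
 * map at size-aligned addresses. Valid because the window is 'size'
 * bytes longer than needed and 'size' is a power of two.
 */
static unsigned long first_congruent_addr(unsigned long base,
					  unsigned long long off,
					  unsigned long size)
{
	return base + ((off - base) & (size - 1));
}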
 
-unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags,
+               vm_flags_t vm_flags)
 {
        unsigned long ret;
        loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 
-       ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
+       ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
        if (ret)
                return ret;
 
-       return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
+       return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
+                                           vm_flags);
+}
+
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
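The old entry point becomes a thin wrapper passing vm_flags = 0: ->get_unmapped_area callers such as file systems have no VMA flags in hand at that point, and EXPORT_SYMBOL_GPL stays on the wrapper so modular users are unaffected. Typical wiring, sketched below (the ops table name is illustrative; this mirrors how file systems such as xfs hook it up):

/*
 * Illustrative ops table: file systems opt into THP-aligned mappings
 * by pointing ->get_unmapped_area at the legacy wrapper.
 */
static const struct file_operations example_file_ops = {
	.mmap			= generic_file_mmap,
	.get_unmapped_area	= thp_get_unmapped_area,
};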
 
 
                 * so use shmem's get_unmapped_area in case it can be huge.
                 */
                get_area = shmem_get_unmapped_area;
-       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-               /* Ensures that larger anonymous mappings are THP aligned. */
-               get_area = thp_get_unmapped_area;
        }
 
        /* Always treat pgoff as zero for anonymous memory. */
        if (!file)
                pgoff = 0;
 
-       if (get_area)
+       if (get_area) {
                addr = get_area(file, addr, len, pgoff, flags);
-       else
+       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+               /* Ensures that larger anonymous mappings are THP aligned. */
+               addr = thp_get_unmapped_area_vmflags(file, addr, len,
+                                                    pgoff, flags, vm_flags);
+       } else {
                addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
                                                    pgoff, flags, vm_flags);
+       }
        if (IS_ERR_VALUE(addr))
                return addr;
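After the rework, __get_unmapped_area tries, in order: the file's own ->get_unmapped_area (e.g. shmem's, which can also go huge), then the vm_flags-aware THP alignment for everything else when THP is built in, then the plain per-mm search; IS_ERR_VALUE() still catches errors encoded in the returned address. From userspace nothing changes; a request like the following is what flows through the new THP branch (standard mmap flags only, nothing added by this patch):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/*
	 * Large anonymous mapping with a NULL hint: the kernel picks
	 * the address, now with the mapping's vm_flags available to
	 * the THP alignment path.
	 */
	void *p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped at %p\n", p);
	return munmap(p, 4UL << 20);
}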