* Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
+       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (REGION_NUMBER(addr) != RGN_HPAGE)
                return -EINVAL;

        return 0;
 }
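
The new first test is the heart of this patch: ~HPAGE_MASK shifted right by PAGE_SHIFT is exactly the set of page-index bits that fall inside one huge page, so the expression is nonzero precisely when the file offset is not hugepage-aligned. A standalone check of that arithmetic, under assumed constants (2MB huge pages over 4KB base pages; not the kernel's headers):

#include <assert.h>

#define PAGE_SHIFT      12
#define HPAGE_SHIFT     21
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define HPAGE_SIZE      (1UL << HPAGE_SHIFT)
#define HPAGE_MASK      (~(HPAGE_SIZE - 1))

int main(void)
{
        /* ~HPAGE_MASK keeps byte offsets within one huge page; shifting
         * by PAGE_SHIFT turns that into a mask over page indices. */
        unsigned long in_hpage_bits = ~HPAGE_MASK >> PAGE_SHIFT;

        assert(in_hpage_bits == HPAGE_SIZE / PAGE_SIZE - 1); /* 0x1ff here */
        assert((512UL & in_hpage_bits) == 0); /* pgoff 512 = 2MB: aligned */
        assert((1UL & in_hpage_bits) != 0);   /* pgoff 1 = 4KB: rejected */
        return 0;
}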
 
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
        int err = 0;
 
-       if ( (addr+len) < addr )
+       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+               return -EINVAL;
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
                return -EINVAL;
 
        if (addr < 0x100000000UL)
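
This variant (evidently the one managing low hugepage areas below 4GB) gains the same three checks; the old (addr+len) wraparound test goes away, with its kept return -EINVAL now serving the addr alignment check. The user-visible effect is uniform across architectures: an mmap of a hugetlbfs file at a non-hugepage-aligned offset fails up front. A minimal userspace sketch, assuming a hugetlbfs mount at /mnt/huge and 2MB huge pages (both assumptions, not part of the patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Offset 4096 is one small page into the file, so pgoff = 1 is
         * not a multiple of HPAGE_SIZE / PAGE_SIZE: now refused early. */
        void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 4096);
        if (p == MAP_FAILED)
                perror("mmap");         /* expected: Invalid argument */
        else
                munmap(p, 2 * 1024 * 1024);
        close(fd);
        return 0;
}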
 
        loff_t len, vma_len;
        int ret;
 
-       if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
-               return -EINVAL;
-
-       if (vma->vm_start & ~HPAGE_MASK)
-               return -EINVAL;
-
-       if (vma->vm_end & ~HPAGE_MASK)
-               return -EINVAL;
-
-       if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
-               return -EINVAL;
+       /*
+        * vma alignment has already been checked by prepare_hugepage_range.
+        * If you add any error returns here, do so after setting VM_HUGETLB,
+        * so is_vm_hugetlb_page tests below unmap_region go the right way
+        * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+        */
+       vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+       vma->vm_ops = &hugetlb_vm_ops;
 
        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
        mutex_lock(&inode->i_mutex);
        file_accessed(file);
-       vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
-       vma->vm_ops = &hugetlb_vm_ops;
 
        ret = -ENOMEM;
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
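
The mmap path drops its now-redundant alignment tests and, per the new comment, raises VM_HUGETLB before anything can fail: if f_op->mmap() errors out, do_mmap_pgoff unwinds through unmap_region, which uses is_vm_hugetlb_page to choose the page-table teardown. A standalone mock of that decision (simplified stand-ins, not the kernel's code):

#include <stdio.h>

#define VM_HUGETLB      0x00400000UL   /* flag value as in kernels of this era */

struct vma {
        unsigned long vm_flags;
};

/* Stand-ins for the two teardown paths unmap_region can take. */
static void free_pgd_range(void)          { puts("normal teardown"); }
static void hugetlb_free_pgd_range(void)  { puts("hugetlb teardown"); }

static void unmap_region(struct vma *vma)
{
        if (vma->vm_flags & VM_HUGETLB)  /* is_vm_hugetlb_page */
                hugetlb_free_pgd_range();
        else
                free_pgd_range();
}

int main(void)
{
        struct vma vma = { 0 };

        /* Flag first, as the patch orders it: a subsequent unwind now
         * takes the hugetlb path, which matters where hugepage page
         * tables differ from normal ones (powerpc, ia64). */
        vma.vm_flags |= VM_HUGETLB;
        unmap_region(&vma);
        return 0;
}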
 
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
+                                               pgoff_t pgoff)
 {
+       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len);
+int prepare_hugepage_range(unsigned long addr, unsigned long len,
+                                               pgoff_t pgoff);
 #endif
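
The generic inline covers architectures for which alignment is the only requirement; the #else branch (its guard macro is outside this hunk) expects such architectures to supply their own out-of-line definition, as the region-managing variants above do. Copied out with assumed constants, the fallback behaves like this (hypothetical values, not kernel source):

#include <stdio.h>
#include <errno.h>

typedef unsigned long pgoff_t;

#define PAGE_SHIFT      12
#define HPAGE_SHIFT     21
#define HPAGE_MASK      (~((1UL << HPAGE_SHIFT) - 1))

static inline int prepare_hugepage_range(unsigned long addr,
                                unsigned long len, pgoff_t pgoff)
{
        if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
                return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

int main(void)
{
        /* aligned addr, len and pgoff: accepted */
        printf("%d\n", prepare_hugepage_range(0x40000000UL, 0x400000UL, 512));
        /* pgoff one small page into the file: -EINVAL */
        printf("%d\n", prepare_hugepage_range(0x40000000UL, 0x400000UL, 1));
        return 0;
}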
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
 #define hugetlb_report_meminfo(buf)            0
 #define hugetlb_report_node_meminfo(n, buf)    0
 #define follow_huge_pmd(mm, addr, pmd, write)  NULL
-#define prepare_hugepage_range(addr, len)      (-EINVAL)
+#define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
 #define pmd_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
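
With CONFIG_HUGETLB_PAGE disabled, the stub must absorb the new third argument so the updated call sites still compile, while refusing every request. A trivial demonstration of the macro's behavior:

#include <stdio.h>
#include <errno.h>

#define prepare_hugepage_range(addr,len,pgoff)  (-EINVAL)  /* as in the hunk */

int main(void)
{
        /* Arguments are never evaluated; the result is always -EINVAL. */
        printf("%d\n", prepare_hugepage_range(0UL, 0UL, 0UL));  /* -22 */
        return 0;
}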
 
                 * Check if the given range is hugepage aligned, and
                 * can be made suitable for hugepages.
                 */
-               ret = prepare_hugepage_range(addr, len);
+               ret = prepare_hugepage_range(addr, len, pgoff);
        } else {
                /*
                 * Ensure that a normal request is not falling in a