 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
-#define tlb_flush(tlb)                                                 \
-{                                                                      \
-       if (!tlb->fullmm && !tlb->need_flush_all)                       \
-               flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
-       else                                                            \
-               flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);   \
-}
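+/*
+ * tlb_flush() is called from the mmu_gather helpers in asm-generic/tlb.h,
+ * so it must be declared before that header is included; the definition
+ * follows below, once struct mmu_gather and tlb_get_unmap_shift() are
+ * available.
+ */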
+static inline void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
 
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+       unsigned long start = 0UL, end = TLB_FLUSH_ALL;
+       unsigned int stride_shift = tlb_get_unmap_shift(tlb);
+
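+       /*
+        * Default to flushing the whole mm; only a gather that is neither a
+        * full-mm teardown nor marked need_flush_all is narrowed to the
+        * tracked [start, end) range, stepped at the smallest page size
+        * that was unmapped (tlb_get_unmap_shift() above).
+        */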
+       if (!tlb->fullmm && !tlb->need_flush_all) {
+               start = tlb->start;
+               end = tlb->end;
+       }
+
+       flush_tlb_mm_range(tlb->mm, start, end, stride_shift);
+}
+
 /*
  * While x86 architecture in general requires an IPI to perform TLB
  * shootdown, enablement code for several hypervisors overrides
 
        unsigned long           start;
        unsigned long           end;
        u64                     new_tlb_gen;
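+       /* Addresses in [start, end) are flushed in 1UL << stride_shift byte steps. */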
+       unsigned int            stride_shift;
 };
 
 #define local_flush_tlb() __flush_tlb()
 
 #define flush_tlb_mm(mm)       flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
 
-#define flush_tlb_range(vma, start, end)       \
-               flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
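+/*
+ * A hugetlb VMA is invalidated at its huge page granularity; all other
+ * VMAs use the base PAGE_SHIFT stride.
+ */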
+#define flush_tlb_range(vma, start, end)                               \
+       flush_tlb_mm_range((vma)->vm_mm, start, end,                    \
+                          ((vma)->vm_flags & VM_HUGETLB)               \
+                               ? huge_page_shift(hstate_vma(vma))      \
+                               : PAGE_SHIFT)
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-                               unsigned long end, unsigned long vmflag);
+                               unsigned long end, unsigned int stride_shift);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 {
-       flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+       flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
 
        map_ldt_struct_to_user(mm);
 
        va = (unsigned long)ldt_slot_va(slot);
-       flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
+       flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT);
 
        ldt->slot = slot;
        return 0;
 
        pte_unmap_unlock(pte, ptl);
 out:
        up_write(&mm->mmap_sem);
-       flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
+       flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT);
 }
 
 
 
            f->new_tlb_gen == local_tlb_gen + 1 &&
            f->new_tlb_gen == mm_tlb_gen) {
                /* Partial flush */
-               unsigned long addr;
-               unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
+               unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
+               unsigned long addr = f->start;
 
-               addr = f->start;
                while (addr < f->end) {
                        __flush_tlb_one_user(addr);
-                       addr += PAGE_SIZE;
+                       addr += 1UL << f->stride_shift;
                }
                if (local)
-                       count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
-               trace_tlb_flush(reason, nr_pages);
+                       count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
+               trace_tlb_flush(reason, nr_invalidate);
        } else {
                /* Full flush. */
                local_flush_tlb();
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-                               unsigned long end, unsigned long vmflag)
+                               unsigned long end, unsigned int stride_shift)
 {
        int cpu;
 
        struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
                .mm = mm,
+               .stride_shift = stride_shift,
        };
 
        cpu = get_cpu();
 
        /* Should we flush just the requested range? */
        if ((end != TLB_FLUSH_ALL) &&
-           !(vmflag & VM_HUGETLB) &&
-           ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
+           ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
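+               /*
+                * The ceiling is compared against stride-sized invalidations,
+                * so a huge-page range is no longer demoted to a full flush
+                * just because it spans many base pages.
+                */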
                info.start = start;
                info.end = end;
        } else {
 
  */
 
 #include <linux/pagemap.h>
+#include <linux/hugetlb.h>
 #include <asm/tlb.h>
 #include <asm-generic/pgtable.h>