 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
+#include <linux/gfp.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 {
        struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
        u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+       bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
        unsigned cpu = smp_processor_id();
        u64 next_tlb_gen;
        bool need_flush;
+       u16 new_asid;
                           next->context.ctx_id);
 
                /*
-                * We don't currently support having a real mm loaded without
-                * our cpu set in mm_cpumask().  We have all the bookkeeping
-                * in place to figure out whether we would need to flush
-                * if our cpu were cleared in mm_cpumask(), but we don't
-                * currently use it.
+                * Even in lazy TLB mode, the CPU should stay set in the
+                * mm_cpumask. The TLB shootdown code can figure out
+                * from cpu_tlbstate.is_lazy whether or not to send an IPI.
                 */
                if (WARN_ON_ONCE(real_prev != &init_mm &&
                                 !cpumask_test_cpu(cpu, mm_cpumask(next))))
                        cpumask_set_cpu(cpu, mm_cpumask(next));
 
-               return;
+               /*
+                * If the CPU is not in lazy TLB mode, we are just switching
+                * from one thread in a process to another thread in the same
+                * process. No TLB flush required.
+                */
+               if (!was_lazy)
+                       return;
+
+               /*
+                * Read the tlb_gen to check whether a flush is needed.
+                * If the TLB is up to date, just use it.
+                * The barrier synchronizes with the tlb_gen increment
+                * (inc_mm_tlb_gen()) in the TLB shootdown code.
+                */
+               smp_mb();
+               next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+               if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
+                               next_tlb_gen)
+                       return;
+
+               /*
+                * TLB contents went out of date while we were in lazy
+                * mode. Fall through to the TLB switching code below.
+                */
+               new_asid = prev_asid;
+               need_flush = true;
        } else {
                u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
 
                 * paging-structure cache to avoid speculatively reading
                 * garbage into our TLB.  Since switching to init_mm is barely
                 * slower than a minimal flush, just switch to init_mm.
+                *
+                * This should be rare, with native_flush_tlb_others()
+                * skipping IPIs to CPUs in lazy TLB mode.
                 */
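+               /*
+                * switch_mm_irqs_off() ignores its 'prev' argument and
+                * reads the outgoing mm from cpu_tlbstate.loaded_mm, so
+                * passing NULL here is fine.
+                */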
                switch_mm_irqs_off(NULL, &init_mm, NULL);
                return;
 void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info)
 {
+       cpumask_var_t lazymask;
+       unsigned int cpu;
+
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
                 * that UV should be updated so that smp_call_function_many(),
                 * etc, are optimal on UV.
                 */
-               unsigned int cpu;
-
                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, info);
                if (cpumask)
                                               (void *)info, 1);
                return;
        }
-       smp_call_function_many(cpumask, flush_tlb_func_remote,
+
+       /*
+        * Use a temporary cpumask to skip sending IPIs to CPUs in lazy
+        * TLB mode, while keeping them set in mm_cpumask(mm).
+        * If the allocation fails, simply IPI every CPU in mm_cpumask.
+        */
+       if (!alloc_cpumask_var(&lazymask, GFP_ATOMIC)) {
+               smp_call_function_many(cpumask, flush_tlb_func_remote,
+                              (void *)info, 1);
+               return;
+       }
+
+       cpumask_copy(lazymask, cpumask);
+
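+       /*
+        * Clearing bits while iterating is safe here: only the bit
+        * just visited is cleared, and for_each_cpu() only scans
+        * forward through the mask.
+        */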
+       for_each_cpu(cpu, lazymask) {
+               if (per_cpu(cpu_tlbstate.is_lazy, cpu))
+                       cpumask_clear_cpu(cpu, lazymask);
+       }
+
+       smp_call_function_many(lazymask, flush_tlb_func_remote,
                               (void *)info, 1);
+
+       free_cpumask_var(lazymask);
 }
 
 /*