void error_interrupt(void);
 void reschedule_interrupt(void);
 void call_function_interrupt(void);
-void invalidate_interrupt(void);
+void invalidate_interrupt0(void);
+void invalidate_interrupt1(void);
+void invalidate_interrupt2(void);
+void invalidate_interrupt3(void);
+void invalidate_interrupt4(void);
+void invalidate_interrupt5(void);
+void invalidate_interrupt6(void);
+void invalidate_interrupt7(void);
 void thermal_interrupt(void);
 void i8254_timer_resume(void);
 
         */
        set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
-       /* IPI for invalidation */
-       set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+       /* IPIs for invalidation */
+       set_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+       set_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+       set_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+       set_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+       set_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+       set_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+       set_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+       set_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
 
        /* IPI for generic function call */
        set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
 #include <asm/proto.h>
 #include <asm/apicdef.h>
 
+#define __cpuinit __init
+
 /*
  *     Smarter SMP flushing macros. 
  *             c/o Linus Torvalds.
  *     writing to user space from interrupts. (It's not allowed anyway).
  *
  *     Optimizations Manfred Spraul <manfred@colorfullife.com>
+ *
+ *     More scalable flush, from Andi Kleen
+ *
+ *     To avoid global state, use 8 different call vectors.
+ *     Each CPU uses a specific vector to trigger flushes on other
+ *     CPUs. Depending on the received vector, the target CPUs look into
+ *     the right per cpu variable for the flush data.
+ *
+ *     With more than 8 CPUs the senders are hashed onto the 8 available
+ *     vectors. The limited global vector space forces us to do this
+ *     right now. In the future, when interrupts are split into per CPU
+ *     domains, this could be fixed, at the cost of triggering multiple
+ *     IPIs in some cases.
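+ *
+ *     For example, with 8 vectors CPU 1 and CPU 9 both end up sending
+ *     on INVALIDATE_TLB_VECTOR_START + 1 and therefore share the same
+ *     flush_state slot, which is why each slot carries its own lock.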
  */
 
-static cpumask_t flush_cpumask;
-static struct mm_struct * flush_mm;
-static unsigned long flush_va;
-static DEFINE_SPINLOCK(tlbstate_lock);
+union smp_flush_state {
+       struct {
+               cpumask_t flush_cpumask;
+               struct mm_struct *flush_mm;
+               unsigned long flush_va;
 #define FLUSH_ALL      -1ULL
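+               /* flush_va == FLUSH_ALL requests a full TLB flush. */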
+               spinlock_t tlbstate_lock;
+       };
+       char pad[SMP_CACHE_BYTES];
+} ____cacheline_aligned;
+
+/*
+ * State is put into the per CPU data section, but padded
+ * to a full cache line because other CPUs can access it and we don't
+ * want false sharing in the per cpu data segment.
+ */
+static DEFINE_PER_CPU(union smp_flush_state, flush_state);
 
 /*
  * We cannot call mmdrop() because we are in interrupt context, 
  * instead update mm->cpu_vm_mask.
  */
-static inline void leave_mm (unsigned long cpu)
+static inline void leave_mm(int cpu)
 {
        if (read_pda(mmu_state) == TLBSTATE_OK)
                BUG();
  *
  * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
  * 2) Leave the mm if we are in the lazy tlb mode.
+ *
+ * Interrupts are disabled.
  */
 
-asmlinkage void smp_invalidate_interrupt (void)
+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-       unsigned long cpu;
+       int cpu;
+       int sender;
+       union smp_flush_state *f;
 
-       cpu = get_cpu();
+       cpu = smp_processor_id();
+       /*
+        * orig_rax contains the interrupt vector - 256.
+        * Use that to determine where the sender put the data.
+        */
+       sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
+       f = &per_cpu(flush_state, sender);
 
-       if (!cpu_isset(cpu, flush_cpumask))
+       if (!cpu_isset(cpu, f->flush_cpumask))
                goto out;
                /* 
                 * This was a BUG() but until someone can quote me the
                 * BUG();
                 */
                 
-       if (flush_mm == read_pda(active_mm)) {
+       if (f->flush_mm == read_pda(active_mm)) {
                if (read_pda(mmu_state) == TLBSTATE_OK) {
-                       if (flush_va == FLUSH_ALL)
+                       if (f->flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
-                               __flush_tlb_one(flush_va);
+                               __flush_tlb_one(f->flush_va);
                } else
                        leave_mm(cpu);
        }
 out:
        ack_APIC_irq();
-       cpu_clear(cpu, flush_cpumask);
-       put_cpu_no_resched();
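+       /* The sender in flush_tlb_others() spins until this bit clears. */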
+       cpu_clear(cpu, f->flush_cpumask);
 }
 
 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                                unsigned long va)
 {
-       cpumask_t tmp;
-       /*
-        * A couple of (to be removed) sanity checks:
-        *
-        * - we do not send IPIs to not-yet booted CPUs.
-        * - current CPU must not be in mask
-        * - mask must exist :)
-        */
-       BUG_ON(cpus_empty(cpumask));
-       cpus_and(tmp, cpumask, cpu_online_map);
-       BUG_ON(!cpus_equal(tmp, cpumask));
-       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-       if (!mm)
-               BUG();
+       int sender;
+       union smp_flush_state *f;
 
-       /*
-        * I'm not happy about this global shared spinlock in the
-        * MM hot path, but we'll see how contended it is.
-        * Temporarily this turns IRQs off, so that lockups are
-        * detected by the NMI watchdog.
-        */
-       spin_lock(&tlbstate_lock);
-       
-       flush_mm = mm;
-       flush_va = va;
-       cpus_or(flush_cpumask, cpumask, flush_cpumask);
+       /* Caller has disabled preemption */
+       sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
+       f = &per_cpu(flush_state, sender);
+
+       /*
+        * Could avoid this lock when
+        * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+        * probably not worth checking this for a cache-hot lock.
+        */
+       spin_lock(&f->tlbstate_lock);
+
+       f->flush_mm = mm;
+       f->flush_va = va;
+       cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
 
        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
-       send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+       send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
 
-       while (!cpus_empty(flush_cpumask))
-               mb();   /* nothing. lockup detection does not belong here */;
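+       /* Wait until every target has cleared its bit in flush_cpumask. */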
+       while (!cpus_empty(f->flush_cpumask))
+               cpu_relax();
 
-       flush_mm = NULL;
-       flush_va = 0;
-       spin_unlock(&tlbstate_lock);
+       f->flush_mm = NULL;
+       f->flush_va = 0;
+       spin_unlock(&f->tlbstate_lock);
 }
+
+int __cpuinit init_smp_flush(void)
+{
+       int i;
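+
+       /* Initialize the lock in each possible CPU's flush_state slot. */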
+       for_each_cpu_mask(i, cpu_possible_map) {
+               spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
+       }
+       return 0;
+}
+
+core_initcall(init_smp_flush);
        
 void flush_tlb_current_task(void)
 {