extern int ftrace_dump_on_oops;
 
+/* Static initializer for task_struct.trace_recursion (empty when !CONFIG_TRACING) */
+#define INIT_TRACE_RECURSION           .trace_recursion = 0,
+
 #endif /* CONFIG_TRACING */
 
+#ifndef INIT_TRACE_RECURSION
+#define INIT_TRACE_RECURSION
+#endif
 
 #ifdef CONFIG_HW_BRANCH_TRACER
 
 
 #ifdef CONFIG_TRACING
        /* state flags for use by tracers */
        unsigned long trace;
-#endif
+       /* bitmask of trace recursion */
+       unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 
        return event;
 }
 
+/* Context level (process/softirq/hardirq/NMI), used as a recursion bit index */
+static int trace_irq_level(void)
+{
+       /* Shift the masked preempt_count() sections down to small integers */
+       return (hardirq_count() >> HARDIRQ_SHIFT) +
+               (softirq_count() >> SOFTIRQ_SHIFT) +
+               !!in_nmi();
+}
+
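+/*
+ * Take the recursion bit for the current context level.  If the bit is
+ * already set, the ring buffer is being re-entered from the same
+ * context: permanently disable tracing and report the failure.
+ */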
+static int trace_recursive_lock(void)
+{
+       int level;
+
+       level = trace_irq_level();
+
+       if (unlikely(current->trace_recursion & (1 << level))) {
+               /* Disable all tracing before we do anything else */
+               tracing_off_permanent();
+               WARN_ON_ONCE(1);
+               return -1;
+       }
+
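+       /* Claim this level; released again by trace_recursive_unlock() */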
+       current->trace_recursion |= 1 << level;
+
+       return 0;
+}
+
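+/*
+ * Release the recursion bit taken by trace_recursive_lock() for the
+ * current context level.
+ */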
+static void trace_recursive_unlock(void)
+{
+       int level;
+
+       level = trace_irq_level();
+
+       WARN_ON_ONCE(!(current->trace_recursion & (1 << level)));
+
+       current->trace_recursion &= ~(1 << level);
+}
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
        /* If we are tracing schedule, we don't want to recurse */
        resched = ftrace_preempt_disable();
 
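+       /* Bail out if this context is already inside the ring buffer */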
+       if (trace_recursive_lock())
+               goto out_nocheck;
+
        cpu = raw_smp_processor_id();
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
        return event;
 
  out:
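+       /* The reservation failed; drop the recursion bit taken above */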
+       trace_recursive_unlock();
+
+ out_nocheck:
        ftrace_preempt_enable(resched);
        return NULL;
 }
 
        rb_commit(cpu_buffer, event);
 
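+       /* Pairs with the trace_recursive_lock() taken when the event was reserved */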
+       trace_recursive_unlock();
+
        /*
         * Only the last preempt count needs to restore preemption.
         */