tracing: Reuse logic from perf's get_recursion_context()
Author: Steven Rostedt (VMware) <rostedt@goodmis.org>
Fri, 15 Oct 2021 17:42:40 +0000 (13:42 -0400)
Committer: Steven Rostedt (VMware) <rostedt@goodmis.org>
Wed, 20 Oct 2021 00:33:20 +0000 (20:33 -0400)
Instead of having branches that add noise to the branch prediction, use
the addition logic to set the bit for the level of interrupt context that
the state is currently in. This copies the logic from perf's
get_recursion_context() function.

Link: https://lore.kernel.org/all/20211015161702.GF174703@worktop.programming.kicks-ass.net/
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
include/linux/trace_recursion.h
kernel/trace/ring_buffer.c

index a9f9c5714e6505882b19000f6d44f1e705e3daef..f6da7a03bff045ca3d92f86e8ab960f048aa60c2 100644 (file)
@@ -137,12 +137,13 @@ enum {
 static __always_inline int trace_get_context_bit(void)
 {
        unsigned long pc = preempt_count();
+       unsigned char bit = 0;
 
-       if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-               return TRACE_CTX_NORMAL;
-       else
-               return pc & NMI_MASK ? TRACE_CTX_NMI :
-                       pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
+       bit += !!(pc & (NMI_MASK));
+       bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+       bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+       return TRACE_CTX_NORMAL - bit;
 }
 
 #ifdef CONFIG_FTRACE_RECORD_RECURSION
index c5a3fbf19617eded741d93bb815da66e44c65156..15d4380006e3ae9ca9d351940f1f5ac486284ad5 100644 (file)
@@ -3168,13 +3168,13 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
        unsigned int val = cpu_buffer->current_context;
        unsigned long pc = preempt_count();
-       int bit;
+       int bit = 0;
 
-       if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-               bit = RB_CTX_NORMAL;
-       else
-               bit = pc & NMI_MASK ? RB_CTX_NMI :
-                       pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+       bit += !!(pc & (NMI_MASK));
+       bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+       bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+       bit = RB_CTX_NORMAL - bit;
 
        if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
                /*