From: Steven Rostedt (VMware)
Date: Fri, 6 Nov 2020 02:32:38 +0000 (-0500)
Subject: ftrace: Optimize testing what context current is in
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=da5afbeb1724609996ca7bb4fbce2cd104c95914;p=linux.git

ftrace: Optimize testing what context current is in

The preempt_count() is not a simple location in memory; it could be part
of per_cpu code or more. Each access to preempt_count(), or one of its
accessor functions (like in_interrupt()), takes several cycles. Reading
preempt_count() once, and then testing the returned value to determine
the context, is slightly faster than using in_nmi() and in_interrupt().

Link: https://lkml.kernel.org/r/20201028115612.780796355@goodmis.org
Link: https://lkml.kernel.org/r/20201106023546.558881845@goodmis.org

Signed-off-by: Steven Rostedt (VMware)
---

diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index f2a949dbfec70..ac3d73484cb26 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -117,22 +117,29 @@ enum {
 
 #define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
 
+/*
+ * Used for setting context
+ *  NMI     = 0
+ *  IRQ     = 1
+ *  SOFTIRQ = 2
+ *  NORMAL  = 3
+ */
+enum {
+	TRACE_CTX_NMI,
+	TRACE_CTX_IRQ,
+	TRACE_CTX_SOFTIRQ,
+	TRACE_CTX_NORMAL,
+};
+
 static __always_inline int trace_get_context_bit(void)
 {
-	int bit;
-
-	if (in_interrupt()) {
-		if (in_nmi())
-			bit = 0;
-
-		else if (in_irq())
-			bit = 1;
-		else
-			bit = 2;
-	} else
-		bit = 3;
+	unsigned long pc = preempt_count();
 
-	return bit;
+	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+		return TRACE_CTX_NORMAL;
+	else
+		return pc & NMI_MASK ? TRACE_CTX_NMI :
+			pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
 }
 
 static __always_inline int trace_test_and_set_recursion(int start, int max)
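
As a rough illustration of the single-read pattern above, here is a minimal
userspace sketch of the same decoding logic. The mask values are illustrative
stand-ins for the kernel's NMI_MASK, HARDIRQ_MASK and SOFTIRQ_OFFSET (the real
layout lives in include/linux/preempt.h and varies with the configuration),
and the context_bit()/main() harness is hypothetical, not part of the patch:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's preempt_count() bit layout;
 * the real masks are config-dependent (see include/linux/preempt.h).
 */
#define SOFTIRQ_OFFSET	0x00000100UL
#define HARDIRQ_MASK	0x000f0000UL
#define NMI_MASK	0x00f00000UL

enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

/* Same shape as the new trace_get_context_bit(): take the count as a
 * single value, test the common (no interrupt bits set) case first,
 * then rank NMI over hard IRQ over softirq.
 */
static int context_bit(unsigned long pc)
{
	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
		return CTX_NORMAL;
	return pc & NMI_MASK ? CTX_NMI :
	       pc & HARDIRQ_MASK ? CTX_IRQ : CTX_SOFTIRQ;
}

int main(void)
{
	printf("%d\n", context_bit(0));              /* 3: normal  */
	printf("%d\n", context_bit(SOFTIRQ_OFFSET)); /* 2: softirq */
	printf("%d\n", context_bit(0x00010000UL));   /* 1: irq     */
	printf("%d\n", context_bit(0x00100000UL));   /* 0: nmi     */
	return 0;
}

The point of the rewrite is that preempt_count() is read exactly once: the
common normal-context case is decided with a single combined-mask test, and
the remaining tests reuse the already-loaded value instead of re-reading the
counter through in_nmi()/in_irq().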