From: Thomas Gleixner
Date: Mon, 24 Feb 2020 14:01:37 +0000 (+0100)
Subject: bpf/trace: Remove redundant preempt_disable from trace_call_bpf()
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=b0a81b94cc50a112601721fcc2f91fab78d7b9f3;p=linux.git

bpf/trace: Remove redundant preempt_disable from trace_call_bpf()

Similar to __bpf_trace_run(), the preempt_disable() here is redundant:
trace_call_bpf() is invoked from a trace point via __DO_TRACE(), which
already disables preemption _before_ invoking any of the functions
attached to the trace point.

Remove it and add a cant_sleep() check instead.

Signed-off-by: Thomas Gleixner
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20200224145643.059995527@linutronix.de
---

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 15fafaed027cb..07764c761073d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -83,7 +83,7 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 	if (in_nmi()) /* not supported yet */
 		return 1;
 
-	preempt_disable();
+	cant_sleep();
 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 		/*
 		 * since some bpf program is already running on this cpu,
@@ -115,7 +115,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 
 out:
 	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
 
 	return ret;
 }
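
For reference, a condensed sketch of why the pair was redundant. This is
abridged from include/linux/tracepoint.h and include/linux/kernel.h as
they looked around v5.6; it is illustrative, not the verbatim kernel
source (the rcuidle/SRCU handling in __DO_TRACE() is omitted here).

/* Abridged __DO_TRACE(): every function attached to a tracepoint --
 * which is how trace_call_bpf() is reached -- is called inside a
 * preempt_disable_notrace()/preempt_enable_notrace() section, so the
 * callee disabling preemption again only adds overhead.
 */
#define __DO_TRACE(tp, proto, args, cond, rcuidle)			\
	do {								\
		struct tracepoint_func *it_func_ptr;			\
		void *it_func;						\
		void *__data;						\
									\
		if (!(cond))						\
			return;						\
									\
		preempt_disable_notrace();				\
		it_func_ptr = rcu_dereference_raw((tp)->funcs);		\
		if (it_func_ptr) {					\
			do {						\
				it_func = (it_func_ptr)->func;		\
				__data = (it_func_ptr)->data;		\
				((void(*)(proto))(it_func))(args);	\
			} while ((++it_func_ptr)->func);		\
		}							\
		preempt_enable_notrace();				\
	} while (0)

/* cant_sleep() replaces the redundant disable/enable pair with an
 * assertion: with CONFIG_DEBUG_ATOMIC_SLEEP it calls __cant_sleep(),
 * which WARNs if the caller is in a preemptible (might-sleep) context;
 * without that config it compiles away to nothing. The invariant is
 * thus documented and verified rather than re-established.
 */
# define cant_sleep() \
	do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)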