locking/mutex: Make contention tracepoints more consistent wrt adaptive spinning
author Peter Zijlstra <peterz@infradead.org>
Wed, 30 Mar 2022 11:06:54 +0000 (13:06 +0200)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 5 Apr 2022 08:24:36 +0000 (10:24 +0200)
Have the trace_contention_*() tracepoints consistently include
adaptive spinning. In order to differentiate between the spinning and
non-spinning states, add LCB_F_MUTEX and combine it with LCB_F_SPIN.

The consequence is that a mutex contention can now trigger multiple
_begin() tracepoints before triggering an _end().

Additionally, this fixes one path where mutex would trigger _end()
without ever seeing a _begin().
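
For illustration, a single contended acquisition that spins, blocks, is
woken as the first waiter, spins again and finally wins the lock would
now emit roughly the following sequence (a sketch of the ordering only;
the error paths report the error code in the _end() event instead of 0):

  trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN); /* initial optimistic spin */
  trace_contention_begin(lock, LCB_F_MUTEX);              /* spin failed; queue as waiter and block */
  trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN); /* woken as first waiter; spin again */
  trace_contention_begin(lock, LCB_F_MUTEX);              /* that spin failed too; block again */
  trace_contention_end(lock, 0);                          /* lock acquired */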

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
include/trace/events/lock.h
kernel/locking/mutex.c

index b9b6e3edd518390e74c66ce355b729e3d4abf096..9ebd081e057e8769a9779e3b00ee69d45fecf57d 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -14,6 +14,7 @@
 #define LCB_F_WRITE    (1U << 2)
 #define LCB_F_RT       (1U << 3)
 #define LCB_F_PERCPU   (1U << 4)
+#define LCB_F_MUTEX    (1U << 5)
 
 
 #ifdef CONFIG_LOCKDEP
@@ -113,7 +114,8 @@ TRACE_EVENT(contention_begin,
                                { LCB_F_READ,           "READ" },
                                { LCB_F_WRITE,          "WRITE" },
                                { LCB_F_RT,             "RT" },
-                               { LCB_F_PERCPU,         "PERCPU" }
+                               { LCB_F_PERCPU,         "PERCPU" },
+                               { LCB_F_MUTEX,          "MUTEX" }
                          ))
 );
 
index c88deda77cf2f64fd3c2ac478e942e642aa7f6a8..d973fe6041bf6791c1f8f10c607548060c0fa32a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -602,12 +602,14 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
+       trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
        if (__mutex_trylock(lock) ||
            mutex_optimistic_spin(lock, ww_ctx, NULL)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
                if (ww_ctx)
                        ww_mutex_set_context_fastpath(ww, ww_ctx);
+               trace_contention_end(lock, 0);
                preempt_enable();
                return 0;
        }
@@ -644,7 +646,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
        }
 
        set_current_state(state);
-       trace_contention_begin(lock, 0);
+       trace_contention_begin(lock, LCB_F_MUTEX);
        for (;;) {
                bool first;
 
@@ -684,10 +686,16 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                 * state back to RUNNING and fall through the next schedule(),
                 * or we must see its unlock and acquire.
                 */
-               if (__mutex_trylock_or_handoff(lock, first) ||
-                   (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
+               if (__mutex_trylock_or_handoff(lock, first))
                        break;
 
+               if (first) {
+                       trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
+                       if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
+                               break;
+                       trace_contention_begin(lock, LCB_F_MUTEX);
+               }
+
                raw_spin_lock(&lock->wait_lock);
        }
        raw_spin_lock(&lock->wait_lock);
@@ -723,8 +731,8 @@ skip_wait:
 err:
        __set_current_state(TASK_RUNNING);
        __mutex_remove_waiter(lock, &waiter);
-       trace_contention_end(lock, ret);
 err_early_kill:
+       trace_contention_end(lock, ret);
        raw_spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, ip);
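
Because a single contention can now produce several _begin() events
before its one _end(), a consumer has to restart its phase accounting on
every _begin(). A minimal user-space sketch of that bookkeeping
(hypothetical helper names; the LCB_F_* values mirror
include/trace/events/lock.h):

  #include <stdbool.h>
  #include <stdint.h>

  #define LCB_F_SPIN   (1U << 0)   /* mirrors include/trace/events/lock.h */
  #define LCB_F_MUTEX  (1U << 5)

  /* Per task/lock bookkeeping; names are illustrative only. */
  struct waiter_state {
          uint64_t     phase_start_ns;  /* timestamp of the latest _begin() */
          unsigned int phase_flags;     /* flags from that _begin() */
          uint64_t     spin_ns;         /* time spent optimistically spinning */
          uint64_t     sleep_ns;        /* time spent blocked on the wait list */
          bool         active;
  };

  static void account_phase(struct waiter_state *w, uint64_t now_ns)
  {
          uint64_t delta = now_ns - w->phase_start_ns;

          if (w->phase_flags & LCB_F_SPIN)
                  w->spin_ns += delta;
          else
                  w->sleep_ns += delta;
  }

  /* Feed every lock:contention_begin event for this task/lock here. */
  static void on_contention_begin(struct waiter_state *w, uint64_t ts_ns,
                                  unsigned int flags)
  {
          if (w->active)
                  account_phase(w, ts_ns);  /* close the previous phase */
          w->phase_start_ns = ts_ns;
          w->phase_flags = flags;
          w->active = true;
  }

  /* ... and the matching lock:contention_end event here. */
  static void on_contention_end(struct waiter_state *w, uint64_t ts_ns)
  {
          if (w->active)
                  account_phase(w, ts_ns);
          w->active = false;
  }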