lib/stackdepot: fix and clean up atomic annotations
Author:     Andrey Konovalov <andreyknvl@google.com>
AuthorDate: Mon, 20 Nov 2023 17:47:05 +0000 (18:47 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 11 Dec 2023 00:51:44 +0000 (16:51 -0800)
Drop smp_load_acquire from next_pool_required in depot_init_pool, as both
depot_init_pool and all smp_store_release's to this variable are executed
under the stack depot lock.

Also simplify and clean up comments accompanying the use of atomic
accesses in the stack depot code.
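
A minimal sketch of the pattern this change relies on (illustrative names,
not the exact stack depot code): when every write to a flag happens under a
lock, a reader that also holds that lock can use a plain load, while
smp_store_release() is kept only to pair with the lock-free readers:

  static DEFINE_RAW_SPINLOCK(pool_lock);	/* the stack depot lock */
  static int next_pool_required = 1;

  /* Slow path: called only with pool_lock held. */
  static void init_pool_sketch(void)
  {
  	/*
  	 * Plain read is sufficient: all writes to next_pool_required
  	 * also happen under pool_lock, so the lock orders this read
  	 * against them and no acquire semantics are needed.
  	 */
  	if (!next_pool_required)
  		return;

  	/* ... initialize the next pool ... */

  	/* Pairs with smp_load_acquire() in the fast path below. */
  	smp_store_release(&next_pool_required, 0);
  }

  /* Fast path: runs without pool_lock, as in stack_depot_save(). */
  static bool need_next_pool_sketch(void)
  {
  	return smp_load_acquire(&next_pool_required);
  }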

Link: https://lkml.kernel.org/r/c118ef044d8db80248d9e1f14592c72e8429e9d9.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 682497dbe081320977f3cf50ae837676da029353..cfa3c6c7cc2e623761480bd9f5ad9b28c63c8214 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -231,10 +231,10 @@ static void depot_init_pool(void **prealloc)
        /*
         * If the next pool is already initialized or the maximum number of
         * pools is reached, do not use the preallocated memory.
-        * smp_load_acquire() here pairs with smp_store_release() below and
-        * in depot_alloc_stack().
+        * Access next_pool_required non-atomically, as there are no concurrent
+        * write accesses to this variable.
         */
-       if (!smp_load_acquire(&next_pool_required))
+       if (!next_pool_required)
                return;
 
        /* Check if the current pool is not yet allocated. */
@@ -255,8 +255,8 @@ static void depot_init_pool(void **prealloc)
                 * At this point, either the next pool is initialized or the
                 * maximum number of pools is reached. In either case, take
                 * note that initializing another pool is not required.
-                * This smp_store_release pairs with smp_load_acquire() above
-                * and in stack_depot_save().
+                * smp_store_release() pairs with smp_load_acquire() in
+                * stack_depot_save().
                 */
                smp_store_release(&next_pool_required, 0);
        }
@@ -279,7 +279,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 
                /*
                 * Move on to the next pool.
-                * WRITE_ONCE pairs with potential concurrent read in
+                * WRITE_ONCE() pairs with potential concurrent read in
                 * stack_depot_fetch().
                 */
                WRITE_ONCE(pool_index, pool_index + 1);
@@ -287,8 +287,8 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
                /*
                 * If the maximum number of pools is not reached, take note
                 * that the next pool needs to be initialized.
-                * smp_store_release() here pairs with smp_load_acquire() in
-                * stack_depot_save() and depot_init_pool().
+                * smp_store_release() pairs with smp_load_acquire() in
+                * stack_depot_save().
                 */
                if (pool_index + 1 < DEPOT_MAX_POOLS)
                        smp_store_release(&next_pool_required, 1);
@@ -329,7 +329,7 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 {
        union handle_parts parts = { .handle = handle };
        /*
-        * READ_ONCE pairs with potential concurrent write in
+        * READ_ONCE() pairs with potential concurrent write in
         * depot_alloc_stack().
         */
        int pool_index_cached = READ_ONCE(pool_index);
@@ -419,8 +419,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 
        /*
         * Fast path: look the stack trace up without locking.
-        * The smp_load_acquire() here pairs with smp_store_release() to
-        * |bucket| below.
+        * smp_load_acquire() pairs with smp_store_release() to |bucket| below.
         */
        found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
        if (found)
@@ -430,8 +429,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
         * Check if another stack pool needs to be initialized. If so, allocate
         * the memory now - we won't be able to do that under the lock.
         *
-        * The smp_load_acquire() here pairs with smp_store_release() to
-        * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+        * smp_load_acquire() pairs with smp_store_release() in
+        * depot_alloc_stack() and depot_init_pool().
         */
        if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
                /*
@@ -457,8 +456,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
                if (new) {
                        new->next = *bucket;
                        /*
-                        * This smp_store_release() pairs with
-                        * smp_load_acquire() from |bucket| above.
+                        * smp_store_release() pairs with smp_load_acquire()
+                        * from |bucket| above.
                         */
                        smp_store_release(bucket, new);
                        found = new;
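
As a companion to the comments above, a condensed sketch of the
WRITE_ONCE()/READ_ONCE() pairing on pool_index (abridged; the pool array
name follows the surrounding code, and locking and error handling are
omitted):

  /* Writer, in depot_alloc_stack(), under the stack depot lock. */
  stack_pools[pool_index + 1] = *prealloc;
  /*
   * WRITE_ONCE() marks the write as racing with the lock-free
   * READ_ONCE() below and prevents store tearing on pool_index.
   */
  WRITE_ONCE(pool_index, pool_index + 1);

  /* Reader, in depot_fetch_stack(), with no lock held. */
  union handle_parts parts = { .handle = handle };
  int pool_index_cached = READ_ONCE(pool_index);
  void *pool;

  if (parts.pool_index > pool_index_cached)
  	return NULL;	/* handle points past the published pools */
  pool = stack_pools[parts.pool_index];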