lib/stackdepot: add depot_fetch_stack helper
author	Andrey Konovalov <andreyknvl@google.com>
Mon, 20 Nov 2023 17:47:03 +0000 (18:47 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
Mon, 11 Dec 2023 00:51:44 +0000 (16:51 -0800)
Add a helper function, depot_fetch_stack(), that fetches the pointer to a
stack record for a given handle.

With this change, all static depot_* functions now operate on stack pools
and the exported stack_depot_* functions operate on the hash table.
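
For reference, a depot handle packs a pool index, a record offset within
that pool, and a few extra bits; depot_fetch_stack() decodes these fields
and indexes stack_pools[]. Below is a minimal user-space sketch of that
decoding with illustrative (assumed) field widths and alignment; the
kernel derives the real values from DEPOT_POOL_ORDER, PAGE_SHIFT, and
STACK_DEPOT_EXTRA_BITS:

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Assumed value for illustration; in the kernel this is the log2
   * alignment of stack records within a pool. */
  #define DEPOT_STACK_ALIGN 4

  /* Illustrative layout of union handle_parts; the actual bit widths
   * are computed from the kernel configuration. */
  union handle_parts {
          uint32_t handle;
          struct {
                  uint32_t pool_index : 17; /* index into stack_pools[] */
                  uint32_t offset     : 10; /* offset, in aligned units */
                  uint32_t extra      : 5;  /* caller-provided extra bits */
          };
  };

  int main(void)
  {
          union handle_parts parts = { .handle = 0x00212345 };

          printf("pool %u, byte offset %zu, extra 0x%x\n",
                 (unsigned)parts.pool_index,
                 (size_t)parts.offset << DEPOT_STACK_ALIGN,
                 (unsigned)parts.extra);
          return 0;
  }

Since the handle is fully self-describing at the pool layer,
depot_fetch_stack() only needs the stack_pools[] array and a bounds check
against the current pool_index, so it fits naturally among the static
depot_* functions.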

Link: https://lkml.kernel.org/r/170d8c202f29dc8e3d5491ee074d1e9e029a46db.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lib/stackdepot.c

index 46a422d31c1f4ac32848b739cf2e1014c2a78b2f..e41713983cac8bcca5a401634dcedba244d14cc0 100644 (file)
@@ -310,6 +310,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
        stack->handle.extra = 0;
        memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
        pool_offset += required_size;
+
        /*
         * Let KMSAN know the stored stack record is initialized. This shall
         * prevent false positive reports if instrumented code accesses it.
@@ -319,6 +320,32 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
        return stack;
 }
 
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+{
+       union handle_parts parts = { .handle = handle };
+       /*
+        * READ_ONCE pairs with potential concurrent write in
+        * depot_alloc_stack().
+        */
+       int pool_index_cached = READ_ONCE(pool_index);
+       void *pool;
+       size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+       struct stack_record *stack;
+
+       if (parts.pool_index > pool_index_cached) {
+               WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+                    parts.pool_index, pool_index_cached, handle);
+               return NULL;
+       }
+
+       pool = stack_pools[parts.pool_index];
+       if (!pool)
+               return NULL;
+
+       stack = pool + offset;
+       return stack;
+}
+
 /* Calculates the hash for a stack. */
 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
 {
@@ -462,14 +489,6 @@ EXPORT_SYMBOL_GPL(stack_depot_save);
 unsigned int stack_depot_fetch(depot_stack_handle_t handle,
                               unsigned long **entries)
 {
-       union handle_parts parts = { .handle = handle };
-       /*
-        * READ_ONCE pairs with potential concurrent write in
-        * depot_alloc_stack.
-        */
-       int pool_index_cached = READ_ONCE(pool_index);
-       void *pool;
-       size_t offset = parts.offset << DEPOT_STACK_ALIGN;
        struct stack_record *stack;
 
        *entries = NULL;
@@ -482,15 +501,9 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
        if (!handle || stack_depot_disabled)
                return 0;
 
-       if (parts.pool_index > pool_index_cached) {
-               WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-                       parts.pool_index, pool_index_cached, handle);
-               return 0;
-       }
-       pool = stack_pools[parts.pool_index];
-       if (!pool)
-               return 0;
-       stack = pool + offset;
+       stack = depot_fetch_stack(handle);
+       if (!stack)
+               return 0;
 
        *entries = stack->entries;
        return stack->size;