perf threads: Switch from rbtree to hashmap
author    Ian Rogers <irogers@google.com>
          Fri, 1 Mar 2024 05:36:44 +0000 (21:36 -0800)
committer Namhyung Kim <namhyung@kernel.org>
          Mon, 4 Mar 2024 06:52:04 +0000 (22:52 -0800)
The rbtree keeps entries sorted, but the ordering is never used. Switch
to a hashmap for O(1) rather than O(log n) find/insert/remove
complexity.
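
Not part of this patch, for illustration only: a minimal standalone
sketch of the per-shard access pattern the patch moves to, written
against the same hashmap API it uses (hashmap__init/add/find/delete
from tools/perf/util/hashmap.h, assumed to be on the include path).
main(), the sample tid and the string value are illustrative;
key_hash()/key_equal() mirror the helpers the patch adds.

  #include <stdio.h>
  #include "hashmap.h"  /* tools/perf/util/hashmap.h */

  /* Identity hash, as in the patch; the hashmap shuffles the key bits
   * internally when picking a bucket. */
  static size_t key_hash(long key, void *ctx)
  {
          (void)ctx;
          return key;
  }

  static bool key_equal(long key1, long key2, void *ctx)
  {
          (void)ctx;
          return key1 == key2;
  }

  int main(void)
  {
          struct hashmap map;
          const char *val;

          hashmap__init(&map, key_hash, key_equal, /*ctx=*/NULL);

          /* Expected O(1) insert; a non-zero return means the add
           * failed, e.g. the key already exists. */
          if (hashmap__add(&map, /*tid=*/1234L, "thread-1234"))
                  return 1;

          /* Expected O(1) find, replacing an O(log n) rbtree descent. */
          if (hashmap__find(&map, 1234L, &val))
                  printf("tid 1234 -> %s\n", val);

          hashmap__delete(&map, 1234L, /*old_key=*/NULL, /*old_value=*/NULL);
          hashmap__clear(&map);
          return 0;
  }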

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240301053646.1449657-7-irogers@google.com
tools/perf/util/threads.c
tools/perf/util/threads.h

diff --git a/tools/perf/util/threads.c b/tools/perf/util/threads.c
index db52d233c2de1fd860af5b664282bde8a628e96f..ff2b169e0085ed773125263a67782291d0f0f8fb 100644
--- a/tools/perf/util/threads.c
+++ b/tools/perf/util/threads.c
@@ -3,25 +3,30 @@
 #include "machine.h"
 #include "thread.h"
 
-struct thread_rb_node {
-       struct rb_node rb_node;
-       struct thread *thread;
-};
-
 static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
 {
        /* Cast it to handle tid == -1 */
        return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
 }
 
+static size_t key_hash(long key, void *ctx __maybe_unused)
+{
+       /* The table lookup removes low bit entropy, but this is just ignored here. */
+       return key;
+}
+
+static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+       return key1 == key2;
+}
+
 void threads__init(struct threads *threads)
 {
        for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
                struct threads_table_entry *table = &threads->table[i];
 
-               table->entries = RB_ROOT_CACHED;
+               hashmap__init(&table->shard, key_hash, key_equal, NULL);
                init_rwsem(&table->lock);
-               table->nr = 0;
                table->last_match = NULL;
        }
 }
@@ -32,6 +37,7 @@ void threads__exit(struct threads *threads)
        for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
                struct threads_table_entry *table = &threads->table[i];
 
+               hashmap__clear(&table->shard);
                exit_rwsem(&table->lock);
        }
 }
@@ -44,7 +50,7 @@ size_t threads__nr(struct threads *threads)
                struct threads_table_entry *table = &threads->table[i];
 
                down_read(&table->lock);
-               nr += table->nr;
+               nr += hashmap__size(&table->shard);
                up_read(&table->lock);
        }
        return nr;
@@ -86,28 +92,13 @@ static void threads_table_entry__set_last_match(struct threads_table_entry *tabl
 struct thread *threads__find(struct threads *threads, pid_t tid)
 {
        struct threads_table_entry *table  = threads__table(threads, tid);
-       struct rb_node **p;
-       struct thread *res = NULL;
+       struct thread *res;
 
        down_read(&table->lock);
        res = __threads_table_entry__get_last_match(table, tid);
-       if (res)
-               return res;
-
-       p = &table->entries.rb_root.rb_node;
-       while (*p != NULL) {
-               struct rb_node *parent = *p;
-               struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-               if (thread__tid(th) == tid) {
-                       res = thread__get(th);
-                       break;
-               }
-
-               if (tid < thread__tid(th))
-                       p = &(*p)->rb_left;
-               else
-                       p = &(*p)->rb_right;
+       if (!res) {
+               if (hashmap__find(&table->shard, tid, &res))
+                       res = thread__get(res);
        }
        up_read(&table->lock);
        if (res)
@@ -118,49 +109,25 @@ struct thread *threads__find(struct threads *threads, pid_t tid)
 struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
 {
        struct threads_table_entry *table  = threads__table(threads, tid);
-       struct rb_node **p;
-       struct rb_node *parent = NULL;
        struct thread *res = NULL;
-       struct thread_rb_node *nd;
-       bool leftmost = true;
 
        *created = false;
        down_write(&table->lock);
-       p = &table->entries.rb_root.rb_node;
-       while (*p != NULL) {
-               struct thread *th;
-
-               parent = *p;
-               th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-               if (thread__tid(th) == tid) {
-                       __threads_table_entry__set_last_match(table, th);
-                       res = thread__get(th);
-                       goto out_unlock;
-               }
-
-               if (tid < thread__tid(th))
-                       p = &(*p)->rb_left;
-               else {
-                       leftmost = false;
-                       p = &(*p)->rb_right;
-               }
-       }
-       nd = malloc(sizeof(*nd));
-       if (nd == NULL)
-               goto out_unlock;
        res = thread__new(pid, tid);
-       if (!res)
-               free(nd);
-       else {
-               *created = true;
-               nd->thread = thread__get(res);
-               rb_link_node(&nd->rb_node, parent, p);
-               rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
-               ++table->nr;
-               __threads_table_entry__set_last_match(table, res);
+       if (res) {
+               if (hashmap__add(&table->shard, tid, res)) {
+                       /* Add failed. Assume a race so find other entry. */
+                       thread__put(res);
+                       res = NULL;
+                       if (hashmap__find(&table->shard, tid, &res))
+                               res = thread__get(res);
+               } else {
+                       res = thread__get(res);
+                       *created = true;
+               }
+               if (res)
+                       __threads_table_entry__set_last_match(table, res);
        }
-out_unlock:
        up_write(&table->lock);
        return res;
 }
@@ -169,57 +136,32 @@ void threads__remove_all_threads(struct threads *threads)
 {
        for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
                struct threads_table_entry *table = &threads->table[i];
-               struct rb_node *nd;
+               struct hashmap_entry *cur, *tmp;
+               size_t bkt;
 
                down_write(&table->lock);
                __threads_table_entry__set_last_match(table, NULL);
-               nd = rb_first_cached(&table->entries);
-               while (nd) {
-                       struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-
-                       nd = rb_next(nd);
-                       thread__put(trb->thread);
-                       rb_erase_cached(&trb->rb_node, &table->entries);
-                       RB_CLEAR_NODE(&trb->rb_node);
-                       --table->nr;
+               hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) {
+                       struct thread *old_value;
 
-                       free(trb);
+                       hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
+                       thread__put(old_value);
                }
-               assert(table->nr == 0);
                up_write(&table->lock);
        }
 }
 
 void threads__remove(struct threads *threads, struct thread *thread)
 {
-       struct rb_node **p;
        struct threads_table_entry *table  = threads__table(threads, thread__tid(thread));
-       pid_t tid = thread__tid(thread);
+       struct thread *old_value;
 
        down_write(&table->lock);
        if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
                __threads_table_entry__set_last_match(table, NULL);
 
-       p = &table->entries.rb_root.rb_node;
-       while (*p != NULL) {
-               struct rb_node *parent = *p;
-               struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
-               struct thread *th = nd->thread;
-
-               if (RC_CHK_EQUAL(th, thread)) {
-                       thread__put(nd->thread);
-                       rb_erase_cached(&nd->rb_node, &table->entries);
-                       RB_CLEAR_NODE(&nd->rb_node);
-                       --table->nr;
-                       free(nd);
-                       break;
-               }
-
-               if (tid < thread__tid(th))
-                       p = &(*p)->rb_left;
-               else
-                       p = &(*p)->rb_right;
-       }
+       hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
+       thread__put(old_value);
        up_write(&table->lock);
 }
 
@@ -229,12 +171,12 @@ int threads__for_each_thread(struct threads *threads,
 {
        for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
                struct threads_table_entry *table = &threads->table[i];
-               struct rb_node *nd;
+               struct hashmap_entry *cur;
+               size_t bkt;
 
                down_read(&table->lock);
-               for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
-                       struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-                       int rc = fn(trb->thread, data);
+               hashmap__for_each_entry((&table->shard), cur, bkt) {
+                       int rc = fn((struct thread *)cur->pvalue, data);
 
                        if (rc != 0) {
                                up_read(&table->lock);
diff --git a/tools/perf/util/threads.h b/tools/perf/util/threads.h
index ed67de62757821f7f1752959af05e715386cce45..d03bd91a77692994b7efee3d31d98781956f8359 100644
--- a/tools/perf/util/threads.h
+++ b/tools/perf/util/threads.h
@@ -2,7 +2,7 @@
 #ifndef __PERF_THREADS_H
 #define __PERF_THREADS_H
 
-#include <linux/rbtree.h>
+#include "hashmap.h"
 #include "rwsem.h"
 
 struct thread;
@@ -11,9 +11,9 @@ struct thread;
 #define THREADS__TABLE_SIZE    (1 << THREADS__TABLE_BITS)
 
 struct threads_table_entry {
-       struct rb_root_cached  entries;
+       /* Key is tid, value is struct thread. */
+       struct hashmap         shard;
        struct rw_semaphore    lock;
-       unsigned int           nr;
        struct thread          *last_match;
 };