bcachefs: Improve sysfs internal/btree_cache
author    Kent Overstreet <kent.overstreet@linux.dev>
          Sun, 5 May 2024 13:47:53 +0000 (09:47 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
          Wed, 8 May 2024 21:29:24 +0000 (17:29 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_types.h

diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index debdd7dc04778e9d6530ea5146df1d6dd0677533..2c226e0cc26302192e8ae7d66950d2cd86337400 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -162,6 +162,9 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 
        /* Cause future lookups for this node to fail: */
        b->hash_val = 0;
+
+       if (b->c.btree_id < BTREE_ID_NR)
+               --bc->used_by_btree[b->c.btree_id];
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -169,8 +172,11 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
        BUG_ON(b->hash_val);
        b->hash_val = btree_ptr_hash_val(&b->key);
 
-       return rhashtable_lookup_insert_fast(&bc->table, &b->hash,
-                                            bch_btree_cache_params);
+       int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
+                                               bch_btree_cache_params);
+       if (!ret && b->c.btree_id < BTREE_ID_NR)
+               bc->used_by_btree[b->c.btree_id]++;
+       return ret;
 }
 
 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
@@ -1269,9 +1275,26 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
               stats.failed);
 }
 
+static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
+                                const char *label, unsigned nr)
+{
+       prt_printf(out, "%s\t", label);
+       prt_human_readable_u64(out, nr * c->opts.btree_node_size);
+       prt_printf(out, " (%u)\n", nr);
+}
+
 void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
 {
-       prt_printf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
-       prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
-       prt_printf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
+       const struct btree_cache *bc = &c->btree_cache;
+
+       if (!out->nr_tabstops)
+               printbuf_tabstop_push(out, 24);
+
+       prt_btree_cache_line(out, c, "total:",          bc->used);
+       prt_btree_cache_line(out, c, "nr dirty:",       atomic_read(&bc->dirty));
+       prt_printf(out, "cannibalize lock:\t%p\n",      bc->alloc_lock);
+       prt_newline(out);
+
+       for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
+               prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
 }
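
The reworked bch2_btree_cache_to_text() above backs the internal/btree_cache sysfs file named in the commit title. As a rough userspace sketch, not part of the patch, the new breakdown can be dumped as follows; the path under /sys/fs/bcachefs/ is inferred from the title, <uuid> stands in for the filesystem's UUID, and the sample output in the comment uses made-up counts and sizes.

/*
 * Hypothetical userspace reader for the file this patch extends.
 * Each line is "label <human-readable size> (node count)"; with
 * invented values the breakdown would look roughly like:
 *
 *   total:                  2.5 MiB (10)
 *   nr dirty:               768 KiB (3)
 *
 *   extents:                1.2 MiB (5)
 *   inodes:                 512 KiB (2)
 */
#include <stdio.h>

int main(void)
{
	/* Path is an assumption; substitute the filesystem's UUID. */
	FILE *f = fopen("/sys/fs/bcachefs/<uuid>/internal/btree_cache", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}

The 24-column tabstop pushed in bch2_btree_cache_to_text() is what keeps the size column aligned when the file is read this way.
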
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 4ff5213219a50e73c1dd7c05a96b05a40ef28568..76364bd4347e36c8cab0a8cda45200e9cce5c6fd 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -166,6 +166,8 @@ struct btree_cache {
        atomic_t                dirty;
        struct shrinker         *shrink;
 
+       unsigned                used_by_btree[BTREE_ID_NR];
+
        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
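
Since the new used_by_btree[] array counts nodes rather than bytes, an in-kernel consumer that wants per-btree memory usage has to scale by the node size, exactly as prt_btree_cache_line() does in the sysfs path. A minimal sketch under that assumption; bch2_btree_cache_bytes_used_by() is an invented name and not part of the patch:

static inline u64 bch2_btree_cache_bytes_used_by(const struct bch_fs *c,
						 enum btree_id id)
{
	/*
	 * Counters are only maintained for ids below BTREE_ID_NR,
	 * matching the guards added to the hash insert/remove paths.
	 */
	return id < BTREE_ID_NR
		? (u64) c->btree_cache.used_by_btree[id] * c->opts.btree_node_size
		: 0;
}

Out-of-range btree IDs report zero here for the same reason the hash insert/remove hooks skip them.
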