bcachefs: Add a kmem_cache for btree_key_cache objects
author	Kent Overstreet <kent.overstreet@gmail.com>
	Wed, 18 Nov 2020 19:09:33 +0000 (14:09 -0500)
committer	Kent Overstreet <kent.overstreet@linux.dev>
	Sun, 22 Oct 2023 21:08:47 +0000 (17:08 -0400)
We allocate a lot of these, and we're seeing sporadic OOMs - this will
help with tracking those down.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
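
For reference, the point of the dedicated cache is that bkey_cached objects move out of the generic kzalloc()/kfree() pools and get their own entry in /proc/slabinfo (and in slabtop), so memory pinned by the key cache can be attributed when chasing those OOMs. Slab merging may still fold a small cache like this into a same-sized general cache unless merging is disabled (e.g. booting with slab_nomerge). Below is a minimal, self-contained sketch of the same kmem_cache lifecycle pattern; example_obj, example_cache and the module boilerplate are made up for illustration and are not bcachefs code:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>

/* Stand-in for the real bkey_cached struct; fields are illustrative only. */
struct example_obj {
	struct list_head	list;
	u64			seq;
};

/* One cache for all example_obj allocations, created at module init. */
static struct kmem_cache *example_cache;

static int __init example_init(void)
{
	struct example_obj *obj;

	/*
	 * KMEM_CACHE() registers a slab cache named after the struct
	 * ("example_obj"), which is what makes it visible in /proc/slabinfo.
	 */
	example_cache = KMEM_CACHE(example_obj, 0);
	if (!example_cache)
		return -ENOMEM;

	/*
	 * __GFP_ZERO keeps the kzalloc()-style zeroing; the bcachefs code
	 * uses GFP_NOFS|__GFP_ZERO because it allocates on filesystem paths.
	 */
	obj = kmem_cache_alloc(example_cache, GFP_KERNEL|__GFP_ZERO);
	if (obj) {
		INIT_LIST_HEAD(&obj->list);
		kmem_cache_free(example_cache, obj);
	}

	return 0;
}

static void __exit example_exit(void)
{
	kmem_cache_destroy(example_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
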
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_key_cache.h
fs/bcachefs/super.c

diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 71d5bfa4caab0b209be32d913e5242c5c06c9f49..441cdc88b94089750b18ff058aff2af7386c37f8 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -12,6 +12,8 @@
 
 #include <linux/sched/mm.h>
 
+static struct kmem_cache *bch2_key_cache;
+
 static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
                                       const void *obj)
 {
@@ -104,7 +106,7 @@ bkey_cached_alloc(struct btree_key_cache *c)
                        return ck;
                }
 
-       ck = kzalloc(sizeof(*ck), GFP_NOFS);
+       ck = kmem_cache_alloc(bch2_key_cache, GFP_NOFS|__GFP_ZERO);
        if (!ck)
                return NULL;
 
@@ -516,7 +518,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
                if (poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                                ck->btree_trans_barrier_seq)) {
                        list_del(&ck->list);
-                       kfree(ck);
+                       kmem_cache_free(bch2_key_cache, ck);
                        freed++;
                }
 
@@ -571,15 +573,18 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
                bch2_journal_preres_put(&c->journal, &ck->res);
 
                kfree(ck->k);
-               kfree(ck);
+               list_del(&ck->list);
+               kmem_cache_free(bch2_key_cache, ck);
                bc->nr_keys--;
        }
 
        BUG_ON(bc->nr_dirty && !bch2_journal_error(&c->journal));
        BUG_ON(bc->nr_keys);
 
-       list_for_each_entry_safe(ck, n, &bc->freed, list)
-               kfree(ck);
+       list_for_each_entry_safe(ck, n, &bc->freed, list) {
+               list_del(&ck->list);
+               kmem_cache_free(bch2_key_cache, ck);
+       }
        mutex_unlock(&bc->lock);
 
        rhashtable_destroy(&bc->table);
@@ -627,3 +632,18 @@ void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *
        }
        mutex_unlock(&c->lock);
 }
+
+void bch2_btree_key_cache_exit(void)
+{
+       if (bch2_key_cache)
+               kmem_cache_destroy(bch2_key_cache);
+}
+
+int __init bch2_btree_key_cache_init(void)
+{
+       bch2_key_cache = KMEM_CACHE(bkey_cached, 0);
+       if (!bch2_key_cache)
+               return -ENOMEM;
+
+       return 0;
+}
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
index d448264abcc89db5382ff6fe99f539c28d6a4326..e64a8e9c726ff2ec4e9aae6e69ce03a112a50b88 100644
--- a/fs/bcachefs/btree_key_cache.h
+++ b/fs/bcachefs/btree_key_cache.h
@@ -25,4 +25,7 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *);
 
 void bch2_btree_key_cache_to_text(struct printbuf *, struct btree_key_cache *);
 
+void bch2_btree_key_cache_exit(void);
+int __init bch2_btree_key_cache_init(void);
+
 #endif /* _BCACHEFS_BTREE_KEY_CACHE_H */
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 61b7e750037c756cb983d04ae0329bbaece020a8..12ce4a6277466a71a32f818c572346a15845ed51 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -2020,6 +2020,7 @@ static void bcachefs_exit(void)
        bch2_debug_exit();
        bch2_vfs_exit();
        bch2_chardev_exit();
+       bch2_btree_key_cache_exit();
        if (bcachefs_kset)
                kset_unregister(bcachefs_kset);
 }
@@ -2029,6 +2030,7 @@ static int __init bcachefs_init(void)
        bch2_bkey_pack_test();
 
        if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
+           bch2_btree_key_cache_init() ||
            bch2_chardev_init() ||
            bch2_vfs_init() ||
            bch2_debug_init())