bcachefs: More bset.c microoptimization
author Kent Overstreet <kent.overstreet@gmail.com>
Wed, 23 Oct 2019 23:50:01 +0000 (19:50 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:30 +0000 (17:08 -0400)
Fix a few paper cuts that've shown up during profiling.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bkey.c
fs/bcachefs/bset.c
fs/bcachefs/btree_iter.c

diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index 8b3c9ae8d2662a851dd74e4e03e0351fca059eb9..dd551cc3a162ad86ac4ee5b0a71acdd9c664883a 100644
@@ -329,7 +329,7 @@ bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
 void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
                 const struct bkey_packed *src)
 {
-       dst->k = bkey_unpack_key(b, src);
+       __bkey_unpack_key(b, &dst->k, src);
 
        memcpy_u64s(&dst->v,
                    bkeyp_val(&b->format, src),
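
The change above unpacks straight into dst->k instead of assigning the result of a by-value struct return. A minimal standalone sketch of why that can matter (hypothetical types and names, not the bcachefs API): a struct returned by value may be built in a temporary and then copied into its destination, particularly when the destination is reached through a pointer, whereas an out-parameter lets the callee write the result in place.

/*
 * Sketch only: 'struct big_key' and both helpers are made up for
 * illustration; they are not the bcachefs functions touched above.
 */
#include <string.h>

struct big_key { unsigned long long f[6]; };

/* by-value: the result may be materialized in a temporary, then copied */
static struct big_key unpack_by_value(const unsigned long long *src)
{
	struct big_key k;
	memcpy(k.f, src, sizeof(k.f));
	return k;
}

/* in-place: the callee writes straight into the caller's storage */
static void unpack_in_place(struct big_key *dst, const unsigned long long *src)
{
	memcpy(dst->f, src, sizeof(dst->f));
}

int main(void)
{
	unsigned long long raw[6] = { 1, 2, 3, 4, 5, 6 };
	struct big_key a, b;

	a = unpack_by_value(raw);	/* extra copy is possible here */
	unpack_in_place(&b, raw);	/* result lands directly in b */
	return a.f[0] == b.f[0] ? 0 : 1;
}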
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 1dd2bcc69c351637ee27c2660be395f3ee38d673..6b3b7bd4002b14dc4c0bf083504dbe541f070fa9 100644
@@ -1548,11 +1548,13 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
  *    So we've got to search for start_of_range, then after the lookup iterate
  *    past any extents that compare equal to the position we searched for.
  */
+__flatten
 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
                               struct btree *b, struct bpos *search)
 {
        struct bset_tree *t;
        struct bkey_packed p, *packed_search = NULL;
+       struct btree_node_iter_set *pos = iter->data;
 
        EBUG_ON(bkey_cmp(*search, b->data->min_key) < 0);
        bset_aux_tree_verify(b);
@@ -1571,11 +1573,17 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
                return;
        }
 
-       for_each_bset(b, t)
-               __bch2_btree_node_iter_push(iter, b,
-                                          bch2_bset_search(b, t, search,
-                                                           packed_search, &p),
-                                          btree_bkey_last(b, t));
+       for_each_bset(b, t) {
+               struct bkey_packed *k = bch2_bset_search(b, t, search,
+                                                        packed_search, &p);
+               struct bkey_packed *end = btree_bkey_last(b, t);
+
+               if (k != end)
+                       *pos++ = (struct btree_node_iter_set) {
+                               __btree_node_key_to_offset(b, k),
+                               __btree_node_key_to_offset(b, end)
+                       };
+       }
 
        bch2_btree_node_iter_sort(iter, b);
 }
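
The rewritten loop above open-codes the iterator push: it keeps a running write cursor (pos) into iter->data and stores a set only when the searched range is non-empty, so empty bsets never touch the iterator. A small standalone illustration of that write-cursor pattern (hypothetical types and values; not the real btree_node_iter structures):

/*
 * Sketch only: 'struct iter_set' and the offsets below are invented to
 * show the pattern of appending through a cursor and skipping empty
 * ranges, as the loop above does with iter->data.
 */
#include <stdio.h>

struct iter_set { unsigned start, end; };

#define MAX_SETS 3

int main(void)
{
	/* pretend per-set search results: [start, end) offsets */
	unsigned starts[MAX_SETS] = { 4, 9,  9 };
	unsigned ends[MAX_SETS]   = { 8, 9, 12 };
	struct iter_set data[MAX_SETS];
	struct iter_set *pos = data;	/* write cursor, like 'pos' above */
	struct iter_set *s;
	int i;

	for (i = 0; i < MAX_SETS; i++)
		if (starts[i] != ends[i])	/* empty set: emit nothing */
			*pos++ = (struct iter_set) { starts[i], ends[i] };

	for (s = data; s < pos; s++)
		printf("set: %u..%u\n", s->start, s->end);
	return 0;
}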
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 8aaaa6615efffa6a8b5935893d48bd5b55ff96fd..25ad6b69b6bdbcae7ea115ae9e2baa74b3b8ce68 100644
@@ -939,7 +939,7 @@ static void btree_iter_prefetch(struct btree_iter *iter)
                btree_node_unlock(iter, iter->level);
 }
 
-static inline int btree_iter_down(struct btree_iter *iter)
+static __always_inline int btree_iter_down(struct btree_iter *iter)
 {
        struct bch_fs *c = iter->trans->c;
        struct btree_iter_level *l = &iter->l[iter->level];
@@ -948,7 +948,7 @@ static inline int btree_iter_down(struct btree_iter *iter)
        enum six_lock_type lock_type = __btree_lock_want(iter, level);
        BKEY_PADDED(k) tmp;
 
-       BUG_ON(!btree_node_locked(iter, iter->level));
+       EBUG_ON(!btree_node_locked(iter, iter->level));
 
        bch2_bkey_unpack(l->b, &tmp.k,
                         bch2_btree_node_iter_peek(&l->iter, l->b));
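
The BUG_ON -> EBUG_ON change, together with __always_inline on btree_iter_down, trims the btree descent hot path: EBUG_ON-style assertions are typically compiled in only for debug builds. A simplified sketch of that kind of debug-gated assertion macro (MY_DEBUG and MY_EBUG_ON are made-up names, not the bcachefs definitions):

/*
 * Sketch only: a debug-gated assertion in the spirit of the EBUG_ON
 * change above, using invented macro names.
 */
#include <assert.h>
#include <stdlib.h>

#ifdef MY_DEBUG
#define MY_EBUG_ON(cond)	assert(!(cond))	/* checked in debug builds */
#else
#define MY_EBUG_ON(cond)	do {} while (0)	/* no cost otherwise */
#endif

int main(void)
{
	int node_locked = 1;

	MY_EBUG_ON(!node_locked);	/* hot-path invariant, free when !MY_DEBUG */
	return node_locked ? EXIT_SUCCESS : EXIT_FAILURE;
}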