bcachefs: btree_bkey_cached_common
Author:     Kent Overstreet <kent.overstreet@gmail.com>
AuthorDate: Sat, 6 Jun 2020 16:28:01 +0000 (12:28 -0400)
Commit:     Kent Overstreet <kent.overstreet@linux.dev>
CommitDate: Sun, 22 Oct 2023 21:08:21 +0000 (17:08 -0400)
This is prep work for the btree key cache: btree iterators will point to
either a struct btree or a new struct bkey_cached. Pull the fields common
to both (the six lock, level and btree id) out into a new struct
btree_bkey_cached_common, embedded as the first member of struct btree.
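
As a sketch of the pattern this sets up (struct bkey_cached is not added
by this commit, so its layout and the helper below are assumptions, for
illustration only):

  struct btree_bkey_cached_common {
          struct six_lock         lock;
          u8                      level;
          u8                      btree_id;
  };

  struct btree {
          struct btree_bkey_cached_common c;      /* first member */
          /* btree-node-only fields follow */
  };

  /* Hypothetical future type, showing the shared header: */
  struct bkey_cached {
          struct btree_bkey_cached_common c;
          /* cached-key-only fields would follow */
  };

  /* Code that needs only the lock/level/btree id can then take the
   * common header, regardless of which type it is embedded in: */
  static void common_lock_intent(struct btree_bkey_cached_common *b)
  {
          six_lock_intent(&b->lock, NULL, NULL);
  }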

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
16 files changed:
fs/bcachefs/alloc_background.c
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_cache.h
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_gc.h
fs/bcachefs/btree_io.c
fs/bcachefs/btree_io.h
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_update_interior.h
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/debug.c
fs/bcachefs/trace.h

diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index a6d3417ac26259b046995acb8b2bfb7fdefd7c18..5988971521eb7410b84cda04e63eb51bb82a2397 100644
@@ -1529,7 +1529,7 @@ again:
                                rcu_read_unlock();
                                btree_node_lock_type(c, b, SIX_LOCK_read);
                                bch2_btree_node_write(c, b, SIX_LOCK_read);
-                               six_unlock_read(&b->lock);
+                               six_unlock_read(&b->c.lock);
                                goto again;
                        } else {
                                nodes_unwritten = true;
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index bb88ce1415c829d619ea1d859f52015598864990..2e932ee7ad0c0f356810363e296b7632788c09bb 100644
@@ -27,7 +27,7 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
        for (i = 0; i < BTREE_ID_NR; i++)
                if (c->btree_roots[i].b)
                        reserve += min_t(unsigned, 1,
-                                        c->btree_roots[i].b->level) * 8;
+                                        c->btree_roots[i].b->c.level) * 8;
 
        c->btree_cache.reserve = reserve;
 }
@@ -98,8 +98,8 @@ static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
                return NULL;
 
        bkey_btree_ptr_init(&b->key);
-       six_lock_init(&b->lock);
-       lockdep_set_novalidate_class(&b->lock);
+       six_lock_init(&b->c.lock);
+       lockdep_set_novalidate_class(&b->c.lock);
        INIT_LIST_HEAD(&b->list);
        INIT_LIST_HEAD(&b->write_blocked);
 
@@ -128,8 +128,8 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
 {
        int ret;
 
-       b->level        = level;
-       b->btree_id     = id;
+       b->c.level      = level;
+       b->c.btree_id   = id;
 
        mutex_lock(&bc->lock);
        ret = __bch2_btree_node_hash_insert(bc, b);
@@ -159,10 +159,10 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 
        lockdep_assert_held(&bc->lock);
 
-       if (!six_trylock_intent(&b->lock))
+       if (!six_trylock_intent(&b->c.lock))
                return -ENOMEM;
 
-       if (!six_trylock_write(&b->lock))
+       if (!six_trylock_write(&b->c.lock))
                goto out_unlock_intent;
 
        if (btree_node_noevict(b))
@@ -203,9 +203,9 @@ out:
                trace_btree_node_reap(c, b);
        return ret;
 out_unlock:
-       six_unlock_write(&b->lock);
+       six_unlock_write(&b->c.lock);
 out_unlock_intent:
-       six_unlock_intent(&b->lock);
+       six_unlock_intent(&b->c.lock);
        ret = -ENOMEM;
        goto out;
 }
@@ -263,8 +263,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
                if (++i > 3 &&
                    !btree_node_reclaim(c, b)) {
                        btree_node_data_free(c, b);
-                       six_unlock_write(&b->lock);
-                       six_unlock_intent(&b->lock);
+                       six_unlock_write(&b->c.lock);
+                       six_unlock_intent(&b->c.lock);
                        freed++;
                }
        }
@@ -290,8 +290,8 @@ restart:
                        mutex_unlock(&bc->lock);
 
                        bch2_btree_node_hash_remove(bc, b);
-                       six_unlock_write(&b->lock);
-                       six_unlock_intent(&b->lock);
+                       six_unlock_write(&b->c.lock);
+                       six_unlock_intent(&b->c.lock);
 
                        if (freed >= nr)
                                goto out;
@@ -530,8 +530,8 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
                        if (b->data)
                                goto out_unlock;
 
-                       six_unlock_write(&b->lock);
-                       six_unlock_intent(&b->lock);
+                       six_unlock_write(&b->c.lock);
+                       six_unlock_intent(&b->c.lock);
                        goto err;
                }
 
@@ -539,8 +539,8 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
        if (!b)
                goto err;
 
-       BUG_ON(!six_trylock_intent(&b->lock));
-       BUG_ON(!six_trylock_write(&b->lock));
+       BUG_ON(!six_trylock_intent(&b->c.lock));
+       BUG_ON(!six_trylock_write(&b->c.lock));
 out_unlock:
        BUG_ON(btree_node_hashed(b));
        BUG_ON(btree_node_write_in_flight(b));
@@ -611,8 +611,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
                list_add(&b->list, &bc->freeable);
                mutex_unlock(&bc->lock);
 
-               six_unlock_write(&b->lock);
-               six_unlock_intent(&b->lock);
+               six_unlock_write(&b->c.lock);
+               six_unlock_intent(&b->c.lock);
                return NULL;
        }
 
@@ -630,15 +630,15 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 
        bch2_btree_node_read(c, b, sync);
 
-       six_unlock_write(&b->lock);
+       six_unlock_write(&b->c.lock);
 
        if (!sync) {
-               six_unlock_intent(&b->lock);
+               six_unlock_intent(&b->c.lock);
                return NULL;
        }
 
        if (lock_type == SIX_LOCK_read)
-               six_lock_downgrade(&b->lock);
+               six_lock_downgrade(&b->c.lock);
 
        return b;
 }
@@ -727,9 +727,9 @@ retry:
                        return ERR_PTR(-EINTR);
 
                if (unlikely(PTR_HASH(&b->key) != PTR_HASH(k) ||
-                            b->level != level ||
+                            b->c.level != level ||
                             race_fault())) {
-                       six_unlock_type(&b->lock, lock_type);
+                       six_unlock_type(&b->c.lock, lock_type);
                        if (bch2_btree_node_relock(iter, level + 1))
                                goto retry;
 
@@ -758,11 +758,11 @@ retry:
                set_btree_node_accessed(b);
 
        if (unlikely(btree_node_read_error(b))) {
-               six_unlock_type(&b->lock, lock_type);
+               six_unlock_type(&b->c.lock, lock_type);
                return ERR_PTR(-EIO);
        }
 
-       EBUG_ON(b->btree_id != iter->btree_id ||
+       EBUG_ON(b->c.btree_id != iter->btree_id ||
                BTREE_NODE_LEVEL(b->data) != level ||
                bkey_cmp(b->data->max_key, k->k.p));
 
@@ -780,7 +780,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
        struct bkey_packed *k;
        BKEY_PADDED(k) tmp;
        struct btree *ret = NULL;
-       unsigned level = b->level;
+       unsigned level = b->c.level;
 
        parent = btree_iter_node(iter, level + 1);
        if (!parent)
@@ -789,7 +789,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
        if (!bch2_btree_node_relock(iter, level + 1))
                goto out_upgrade;
 
-       node_iter = iter->l[parent->level].iter;
+       node_iter = iter->l[parent->c.level].iter;
 
        k = bch2_btree_node_iter_peek_all(&node_iter, parent);
        BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
@@ -836,7 +836,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
                        btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
 
                        if (!IS_ERR(ret)) {
-                               six_unlock_intent(&ret->lock);
+                               six_unlock_intent(&ret->c.lock);
                                ret = ERR_PTR(-EINTR);
                        }
                }
@@ -859,7 +859,7 @@ out:
                if (sib != btree_prev_sib)
                        swap(n1, n2);
 
-               BUG_ON(bkey_cmp(btree_type_successor(n1->btree_id,
+               BUG_ON(bkey_cmp(btree_type_successor(n1->c.btree_id,
                                                     n1->key.k.p),
                                n2->data->min_key));
        }
@@ -904,7 +904,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
        pr_buf(out,
               "l %u %llu:%llu - %llu:%llu:\n"
               "    ptrs: ",
-              b->level,
+              b->c.level,
               b->data->min_key.inode,
               b->data->min_key.offset,
               b->data->max_key.inode,
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 7bd2bc84160d056aef6c4a575311f1541915774d..e0f233583796d45d85648ac2c47d9a4fa9fb4d6e 100644
@@ -83,7 +83,7 @@ static inline unsigned btree_blocks(struct bch_fs *c)
        (BTREE_FOREGROUND_MERGE_THRESHOLD(c) +                  \
         (BTREE_FOREGROUND_MERGE_THRESHOLD(c) << 2))
 
-#define btree_node_root(_c, _b)        ((_c)->btree_roots[(_b)->btree_id].b)
+#define btree_node_root(_c, _b)        ((_c)->btree_roots[(_b)->c.btree_id].b)
 
 void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
                             struct btree *);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 5ad933ba049b12795a229a851cb3f2f91682e451..3dc073e5e5b68e250730740018862a25326a605f 100644
@@ -71,10 +71,10 @@ static void btree_node_range_checks_init(struct range_checks *r, unsigned depth)
 static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
                                    struct range_checks *r)
 {
-       struct range_level *l = &r->l[b->level];
+       struct range_level *l = &r->l[b->c.level];
 
        struct bpos expected_min = bkey_cmp(l->min, l->max)
-               ? btree_type_successor(b->btree_id, l->max)
+               ? btree_type_successor(b->c.btree_id, l->max)
                : l->max;
 
        bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
@@ -86,8 +86,8 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
 
        l->max = b->data->max_key;
 
-       if (b->level > r->depth) {
-               l = &r->l[b->level - 1];
+       if (b->c.level > r->depth) {
+               l = &r->l[b->c.level - 1];
 
                bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
                        "btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
@@ -105,7 +105,7 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
 
                if (bkey_cmp(b->data->max_key, POS_MAX))
                        l->min = l->max =
-                               btree_type_successor(b->btree_id,
+                               btree_type_successor(b->c.btree_id,
                                                     b->data->max_key);
        }
 }
@@ -261,7 +261,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
        if (!btree_node_fake(b))
                ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
                                       &max_stale, initial);
-       gc_pos_set(c, gc_pos_btree_root(b->btree_id));
+       gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
        mutex_unlock(&c->btree_root_lock);
 
        return ret;
@@ -932,9 +932,9 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
 
                        set_btree_bset_end(n1, n1->set);
 
-                       six_unlock_write(&n2->lock);
+                       six_unlock_write(&n2->c.lock);
                        bch2_btree_node_free_never_inserted(c, n2);
-                       six_unlock_intent(&n2->lock);
+                       six_unlock_intent(&n2->c.lock);
 
                        memmove(new_nodes + i - 1,
                                new_nodes + i,
@@ -970,7 +970,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
                btree_node_reset_sib_u64s(n);
 
                bch2_btree_build_aux_trees(n);
-               six_unlock_write(&n->lock);
+               six_unlock_write(&n->c.lock);
 
                bch2_btree_node_write(c, n, SIX_LOCK_intent);
        }
@@ -1013,7 +1013,7 @@ next:
 
        BUG_ON(!bch2_keylist_empty(&keylist));
 
-       BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
+       BUG_ON(iter->l[old_nodes[0]->c.level].b != old_nodes[0]);
 
        bch2_btree_iter_node_replace(iter, new_nodes[0]);
 
@@ -1035,7 +1035,7 @@ next:
                } else {
                        old_nodes[i] = NULL;
                        if (new_nodes[i])
-                               six_unlock_intent(&new_nodes[i]->lock);
+                               six_unlock_intent(&new_nodes[i]->c.lock);
                }
        }
 
@@ -1078,11 +1078,11 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
 
                for (i = 1; i < GC_MERGE_NODES; i++) {
                        if (!merge[i] ||
-                           !six_relock_intent(&merge[i]->lock, lock_seq[i]))
+                           !six_relock_intent(&merge[i]->c.lock, lock_seq[i]))
                                break;
 
-                       if (merge[i]->level != merge[0]->level) {
-                               six_unlock_intent(&merge[i]->lock);
+                       if (merge[i]->c.level != merge[0]->c.level) {
+                               six_unlock_intent(&merge[i]->c.lock);
                                break;
                        }
                }
@@ -1091,11 +1091,11 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
                bch2_coalesce_nodes(c, iter, merge);
 
                for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
-                       lock_seq[i] = merge[i]->lock.state.seq;
-                       six_unlock_intent(&merge[i]->lock);
+                       lock_seq[i] = merge[i]->c.lock.state.seq;
+                       six_unlock_intent(&merge[i]->c.lock);
                }
 
-               lock_seq[0] = merge[0]->lock.state.seq;
+               lock_seq[0] = merge[0]->c.lock.state.seq;
 
                if (kthread && kthread_should_stop()) {
                        bch2_trans_exit(&trans);
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
index bd5f2752954fede861e8cb5170d9909cc8e41da0..3966d5e54cfdd40ee799efed31d5c4ed054151a1 100644
@@ -81,7 +81,7 @@ static inline struct gc_pos gc_pos_btree(enum btree_id id,
  */
 static inline struct gc_pos gc_pos_btree_node(struct btree *b)
 {
-       return gc_pos_btree(b->btree_id, b->key.k.p, b->level);
+       return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
 }
 
 /*
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index fa261a175f5ee11fa29b64151b80f7034b29554a..baffb58fd10be0ca53183a5519aaba041569b86f 100644
@@ -473,8 +473,8 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
        struct btree_node_entry *bne;
        bool did_sort;
 
-       EBUG_ON(!(b->lock.state.seq & 1));
-       EBUG_ON(iter && iter->l[b->level].b != b);
+       EBUG_ON(!(b->c.lock.state.seq & 1));
+       EBUG_ON(iter && iter->l[b->c.level].b != b);
 
        did_sort = btree_node_compact(c, b, iter);
 
@@ -524,8 +524,8 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
               "at btree %u level %u/%u\n"
               "pos %llu:%llu node offset %u",
               write ? "before write " : "",
-              b->btree_id, b->level,
-              c->btree_roots[b->btree_id].level,
+              b->c.btree_id, b->c.level,
+              c->btree_roots[b->c.btree_id].level,
               b->key.k.p.inode, b->key.k.p.offset,
               b->written);
        if (i)
@@ -610,11 +610,11 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
 
        if (i == &b->data->keys) {
                /* These indicate that we read the wrong btree node: */
-               btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
+               btree_err_on(BTREE_NODE_ID(b->data) != b->c.btree_id,
                             BTREE_ERR_MUST_RETRY, c, b, i,
                             "incorrect btree id");
 
-               btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
+               btree_err_on(BTREE_NODE_LEVEL(b->data) != b->c.level,
                             BTREE_ERR_MUST_RETRY, c, b, i,
                             "incorrect level");
 
@@ -1105,8 +1105,8 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 
        bch2_btree_set_root_for_read(c, b);
 err:
-       six_unlock_write(&b->lock);
-       six_unlock_intent(&b->lock);
+       six_unlock_write(&b->c.lock);
+       six_unlock_intent(&b->c.lock);
 
        return ret;
 }
@@ -1153,15 +1153,15 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
 
        bch2_trans_init(&trans, c);
 
-       iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p,
-                                       BTREE_MAX_DEPTH, b->level, 0);
+       iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
+                                       BTREE_MAX_DEPTH, b->c.level, 0);
 retry:
        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                goto err;
 
        /* has node been freed? */
-       if (iter->l[b->level].b != b) {
+       if (iter->l[b->c.level].b != b) {
                /* node has been freed: */
                BUG_ON(!btree_node_dying(b));
                goto out;
@@ -1359,9 +1359,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
         * doing btree writes:
         */
        if (lock_type_held == SIX_LOCK_intent &&
-           six_trylock_write(&b->lock)) {
+           six_trylock_write(&b->c.lock)) {
                __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
-               six_unlock_write(&b->lock);
+               six_unlock_write(&b->c.lock);
        } else {
                __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
        }
@@ -1606,18 +1606,18 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
        BUG_ON(lock_type_held == SIX_LOCK_write);
 
        if (lock_type_held == SIX_LOCK_intent ||
-           six_lock_tryupgrade(&b->lock)) {
+           six_lock_tryupgrade(&b->c.lock)) {
                __bch2_btree_node_write(c, b, SIX_LOCK_intent);
 
                /* don't cycle lock unnecessarily: */
                if (btree_node_just_written(b) &&
-                   six_trylock_write(&b->lock)) {
+                   six_trylock_write(&b->c.lock)) {
                        bch2_btree_post_write_cleanup(c, b);
-                       six_unlock_write(&b->lock);
+                       six_unlock_write(&b->c.lock);
                }
 
                if (lock_type_held == SIX_LOCK_read)
-                       six_lock_downgrade(&b->lock);
+                       six_lock_downgrade(&b->c.lock);
        } else {
                __bch2_btree_node_write(c, b, SIX_LOCK_read);
        }
@@ -1688,7 +1688,7 @@ ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
                       b,
                       (flags & (1 << BTREE_NODE_dirty)) != 0,
                       (flags & (1 << BTREE_NODE_need_write)) != 0,
-                      b->level,
+                      b->c.level,
                       b->written,
                       !list_empty_careful(&b->write_blocked),
                       b->will_make_reachable != 0,
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index c817aeed878adf0c005732b40eeaaa10d5aabe20..3fb0aa20b340f627050c6527ebdb07b75ac5fbcc 100644
@@ -111,7 +111,7 @@ static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b)
                        break;
                }
 
-               six_unlock_read(&b->lock);
+               six_unlock_read(&b->c.lock);
                btree_node_wait_on_io(b);
                btree_node_lock_type(c, b, SIX_LOCK_read);
        }
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index eeb9a59283a7afdca48e266d84888369554ee3a1..3fdf5ab255785ce1cc3727bfbdb9dfa795e50304 100644
@@ -54,7 +54,7 @@ static inline int btree_iter_pos_cmp(struct btree_iter *iter,
                                     const struct btree *b,
                                     const struct bkey_packed *k)
 {
-       return __btree_iter_pos_cmp(iter, b, k, b->level != 0);
+       return __btree_iter_pos_cmp(iter, b, k, b->c.level != 0);
 }
 
 /* Btree node locking: */
@@ -67,13 +67,13 @@ void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
 {
        struct btree_iter *linked;
 
-       EBUG_ON(iter->l[b->level].b != b);
-       EBUG_ON(iter->l[b->level].lock_seq + 1 != b->lock.state.seq);
+       EBUG_ON(iter->l[b->c.level].b != b);
+       EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
 
        trans_for_each_iter_with_node(iter->trans, b, linked)
-               linked->l[b->level].lock_seq += 2;
+               linked->l[b->c.level].lock_seq += 2;
 
-       six_unlock_write(&b->lock);
+       six_unlock_write(&b->c.lock);
 }
 
 void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
@@ -81,11 +81,11 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
        struct btree_iter *linked;
        unsigned readers = 0;
 
-       EBUG_ON(btree_node_read_locked(iter, b->level));
+       EBUG_ON(btree_node_read_locked(iter, b->c.level));
 
        trans_for_each_iter(iter->trans, linked)
-               if (linked->l[b->level].b == b &&
-                   btree_node_read_locked(linked, b->level))
+               if (linked->l[b->c.level].b == b &&
+                   btree_node_read_locked(linked, b->c.level))
                        readers++;
 
        /*
@@ -95,10 +95,10 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
         * locked:
         */
        atomic64_sub(__SIX_VAL(read_lock, readers),
-                    &b->lock.state.counter);
+                    &b->c.lock.state.counter);
        btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
        atomic64_add(__SIX_VAL(read_lock, readers),
-                    &b->lock.state.counter);
+                    &b->c.lock.state.counter);
 }
 
 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
@@ -112,8 +112,8 @@ bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
        if (race_fault())
                return false;
 
-       if (!six_relock_type(&b->lock, want, iter->l[level].lock_seq) &&
-           !(iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1 &&
+       if (!six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) &&
+           !(iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1 &&
              btree_node_lock_increment(iter, b, level, want)))
                return false;
 
@@ -137,11 +137,11 @@ static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
                return false;
 
        if (btree_node_locked(iter, level)
-           ? six_lock_tryupgrade(&b->lock)
-           : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq))
+           ? six_lock_tryupgrade(&b->c.lock)
+           : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
                goto success;
 
-       if (iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1 &&
+       if (iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1 &&
            btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(iter, level);
                goto success;
@@ -378,7 +378,7 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
                                btree_node_unlock(linked, l);
                        } else {
                                if (btree_node_intent_locked(linked, l)) {
-                                       six_lock_downgrade(&linked->l[l].b->lock);
+                                       six_lock_downgrade(&linked->l[l].b->c.lock);
                                        linked->nodes_intent_locked ^= 1 << l;
                                }
                                break;
@@ -427,7 +427,7 @@ void bch2_btree_trans_unlock(struct btree_trans *trans)
 static void __bch2_btree_iter_verify(struct btree_iter *iter,
                                     struct btree *b)
 {
-       struct btree_iter_level *l = &iter->l[b->level];
+       struct btree_iter_level *l = &iter->l[b->c.level];
        struct btree_node_iter tmp = l->iter;
        struct bkey_packed *k;
 
@@ -446,7 +446,7 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
         * For extents, the iterator may have skipped past deleted keys (but not
         * whiteouts)
         */
-       k = b->level || iter->flags & BTREE_ITER_IS_EXTENTS
+       k = b->c.level || iter->flags & BTREE_ITER_IS_EXTENTS
                ? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_discard)
                : bch2_btree_node_iter_prev_all(&tmp, b);
        if (k && btree_iter_pos_cmp(iter, b, k) > 0) {
@@ -519,7 +519,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 
                bch2_btree_node_iter_push(node_iter, b, where, end);
 
-               if (!b->level &&
+               if (!b->c.level &&
                    node_iter == &iter->l[0].iter)
                        bkey_disassemble(b,
                                bch2_btree_node_iter_peek_all(node_iter, b),
@@ -548,7 +548,7 @@ found:
        btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 
        bch2_btree_node_iter_sort(node_iter, b);
-       if (!b->level && node_iter == &iter->l[0].iter) {
+       if (!b->c.level && node_iter == &iter->l[0].iter) {
                /*
                 * not legal to call bkey_debugcheck() here, because we're
                 * called midway through the update path after update has been
@@ -590,7 +590,7 @@ iter_current_key_not_modified:
         * always point to the key for the child node the btree iterator points
         * to.
         */
-       if (b->level && new_u64s &&
+       if (b->c.level && new_u64s &&
            btree_iter_pos_cmp(iter, b, where) > 0) {
                struct bset_tree *t, *where_set = bch2_bkey_to_bset_inlined(b, where);
                struct bkey_packed *k;
@@ -633,13 +633,13 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
        struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
        struct btree_iter *linked;
 
-       if (node_iter != &iter->l[b->level].iter)
+       if (node_iter != &iter->l[b->c.level].iter)
                __bch2_btree_node_iter_fix(iter, b, node_iter, t,
                                          where, clobber_u64s, new_u64s);
 
        trans_for_each_iter_with_node(iter->trans, b, linked)
                __bch2_btree_node_iter_fix(linked, b,
-                                         &linked->l[b->level].iter, t,
+                                         &linked->l[b->c.level].iter, t,
                                          where, clobber_u64s, new_u64s);
 }
 
@@ -715,7 +715,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
        if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
                return;
 
-       plevel = b->level + 1;
+       plevel = b->c.level + 1;
        if (!btree_iter_node(iter, plevel))
                return;
 
@@ -738,7 +738,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
        }
 
        if (!parent_locked)
-               btree_node_unlock(iter, b->level + 1);
+               btree_node_unlock(iter, b->c.level + 1);
 }
 
 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
@@ -751,7 +751,7 @@ static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
                                          struct btree *b)
 {
-       return iter->btree_id == b->btree_id &&
+       return iter->btree_id == b->c.btree_id &&
                bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
                !btree_iter_pos_after_node(iter, b);
 }
@@ -779,11 +779,11 @@ static inline void btree_iter_node_set(struct btree_iter *iter,
        btree_iter_verify_new_node(iter, b);
 
        EBUG_ON(!btree_iter_pos_in_node(iter, b));
-       EBUG_ON(b->lock.state.seq & 1);
+       EBUG_ON(b->c.lock.state.seq & 1);
 
-       iter->l[b->level].lock_seq = b->lock.state.seq;
-       iter->l[b->level].b = b;
-       __btree_iter_init(iter, b->level);
+       iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
+       iter->l[b->c.level].b = b;
+       __btree_iter_init(iter, b->c.level);
 }
 
 /*
@@ -802,24 +802,24 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
                         * the old node we're replacing has already been
                         * unlocked and the pointer invalidated
                         */
-                       BUG_ON(btree_node_locked(linked, b->level));
+                       BUG_ON(btree_node_locked(linked, b->c.level));
 
-                       t = btree_lock_want(linked, b->level);
+                       t = btree_lock_want(linked, b->c.level);
                        if (t != BTREE_NODE_UNLOCKED) {
-                               six_lock_increment(&b->lock, (enum six_lock_type) t);
-                               mark_btree_node_locked(linked, b->level, (enum six_lock_type) t);
+                               six_lock_increment(&b->c.lock, (enum six_lock_type) t);
+                               mark_btree_node_locked(linked, b->c.level, (enum six_lock_type) t);
                        }
 
                        btree_iter_node_set(linked, b);
                }
 
-       six_unlock_intent(&b->lock);
+       six_unlock_intent(&b->c.lock);
 }
 
 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
 {
        struct btree_iter *linked;
-       unsigned level = b->level;
+       unsigned level = b->c.level;
 
        trans_for_each_iter(iter->trans, linked)
                if (linked->l[level].b == b) {
@@ -837,7 +837,7 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
        struct btree_iter *linked;
 
        trans_for_each_iter_with_node(iter->trans, b, linked)
-               __btree_iter_init(linked, b->level);
+               __btree_iter_init(linked, b->c.level);
 }
 
 static inline int btree_iter_lock_root(struct btree_iter *iter,
@@ -852,7 +852,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 
        while (1) {
                b = READ_ONCE(c->btree_roots[iter->btree_id].b);
-               iter->level = READ_ONCE(b->level);
+               iter->level = READ_ONCE(b->c.level);
 
                if (unlikely(iter->level < depth_want)) {
                        /*
@@ -872,7 +872,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
                        return -EINTR;
 
                if (likely(b == c->btree_roots[iter->btree_id].b &&
-                          b->level == iter->level &&
+                          b->c.level == iter->level &&
                           !race_fault())) {
                        for (i = 0; i < iter->level; i++)
                                iter->l[i].b = BTREE_ITER_NOT_END;
@@ -884,7 +884,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 
                }
 
-               six_unlock_type(&b->lock, lock_type);
+               six_unlock_type(&b->c.lock, lock_type);
        }
 }
 
@@ -1842,7 +1842,7 @@ struct btree_iter *bch2_trans_copy_iter(struct btree_trans *trans,
 
        for (i = 0; i < BTREE_MAX_DEPTH; i++)
                if (btree_node_locked(iter, i))
-                       six_lock_increment(&iter->l[i].b->lock,
+                       six_lock_increment(&iter->l[i].b->c.lock,
                                           __btree_lock_want(iter, i));
 
        return &trans->iters[idx];
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index dc15d1b831a8c75b840ce8c1ad757f141a336cd0..171e729ed3ea4aa50d6033d70a0e54dc5bad561d 100644
@@ -17,10 +17,23 @@ static inline struct btree *btree_iter_node(struct btree_iter *iter,
        return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
 }
 
+static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
+                                       const struct btree *b, unsigned level)
+{
+       /*
+        * We don't compare the low bits of the lock sequence numbers because
+        * @iter might have taken a write lock on @b, and we don't want to skip
+        * the linked iterator if the sequence numbers were equal before taking
+        * that write lock. The lock sequence number is incremented by taking
+        * and releasing write locks and is even when unlocked:
+        */
+       return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
+}
+
 static inline struct btree *btree_node_parent(struct btree_iter *iter,
                                              struct btree *b)
 {
-       return btree_iter_node(iter, b->level + 1);
+       return btree_iter_node(iter, b->c.level + 1);
 }
 
 static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
@@ -55,16 +68,8 @@ __trans_next_iter(struct btree_trans *trans, unsigned idx)
 static inline bool __iter_has_node(const struct btree_iter *iter,
                                   const struct btree *b)
 {
-       /*
-        * We don't compare the low bits of the lock sequence numbers because
-        * @iter might have taken a write lock on @b, and we don't want to skip
-        * the linked iterator if the sequence numbers were equal before taking
-        * that write lock. The lock sequence number is incremented by taking
-        * and releasing write locks and is even when unlocked:
-        */
-
-       return iter->l[b->level].b == b &&
-               iter->l[b->level].lock_seq >> 1 == b->lock.state.seq >> 1;
+       return iter->l[b->c.level].b == b &&
+               btree_node_lock_seq_matches(iter, b, b->c.level);
 }
 
 static inline struct btree_iter *
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 37e09474fde460105226927cdaaf2b4beb35d93a..e75e56c34f5f75e9ec23f11ba3c86869f8e77a49 100644
@@ -101,7 +101,7 @@ static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
        EBUG_ON(level >= BTREE_MAX_DEPTH);
 
        if (lock_type != BTREE_NODE_UNLOCKED)
-               six_unlock_type(&iter->l[level].b->lock, lock_type);
+               six_unlock_type(&iter->l[level].b->c.lock, lock_type);
        mark_btree_node_unlocked(iter, level);
 }
 
@@ -142,14 +142,14 @@ static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
 {
        u64 start_time = local_clock();
 
-       six_lock_type(&b->lock, type, NULL, NULL);
+       six_lock_type(&b->c.lock, type, NULL, NULL);
        bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
 }
 
 static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
                                        enum six_lock_type type)
 {
-       if (!six_trylock_type(&b->lock, type))
+       if (!six_trylock_type(&b->c.lock, type))
                __btree_node_lock_type(c, b, type);
 }
 
@@ -167,7 +167,7 @@ static inline bool btree_node_lock_increment(struct btree_iter *iter,
                if (linked != iter &&
                    linked->l[level].b == b &&
                    btree_node_locked_type(linked, level) >= want) {
-                       six_lock_increment(&b->lock, want);
+                       six_lock_increment(&b->c.lock, want);
                        return true;
                }
 
@@ -185,7 +185,7 @@ static inline bool btree_node_lock(struct btree *b, struct bpos pos,
 {
        EBUG_ON(level >= BTREE_MAX_DEPTH);
 
-       return likely(six_trylock_type(&b->lock, type)) ||
+       return likely(six_trylock_type(&b->c.lock, type)) ||
                btree_node_lock_increment(iter, b, level, type) ||
                __bch2_btree_node_lock(b, pos, level, iter,
                                       type, may_drop_locks);
@@ -210,10 +210,10 @@ void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
 
 static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 {
-       EBUG_ON(iter->l[b->level].b != b);
-       EBUG_ON(iter->l[b->level].lock_seq != b->lock.state.seq);
+       EBUG_ON(iter->l[b->c.level].b != b);
+       EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
 
-       if (!six_trylock_write(&b->lock))
+       if (!six_trylock_write(&b->c.lock))
                __bch2_btree_node_lock_write(b, iter);
 }
 
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index dd4fa2f595ec96098a65cc8a63553ca61a6b35b3..7bd3adcd4b52fcdaea4232fc1cbabe728d82f1dd 100644
@@ -60,19 +60,22 @@ struct btree_alloc {
        BKEY_PADDED(k);
 };
 
+struct btree_bkey_cached_common {
+       struct six_lock         lock;
+       u8                      level;
+       u8                      btree_id;
+};
+
 struct btree {
-       /* Hottest entries first */
+       struct btree_bkey_cached_common c;
+
        struct rhash_head       hash;
 
        /* Key/pointer for this btree node */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
 
-       struct six_lock         lock;
-
        unsigned long           flags;
        u16                     written;
-       u8                      level;
-       u8                      btree_id;
        u8                      nsets;
        u8                      nr_key_bits;
 
@@ -451,7 +454,7 @@ static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_
 /* Type of keys @b contains: */
 static inline enum btree_node_type btree_node_type(struct btree *b)
 {
-       return __btree_node_type(b->level, b->btree_id);
+       return __btree_node_type(b->c.level, b->c.btree_id);
 }
 
 static inline bool btree_node_type_is_extents(enum btree_node_type type)
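
An aside on the embedding above: given a pointer to the common header,
the containing node can be recovered with the kernel's container_of();
the helper below is hypothetical, for illustration only, and is not part
of this diff.

  /* Only valid when @c is known to be embedded in a struct btree: */
  static inline struct btree *node_from_common(struct btree_bkey_cached_common *c)
  {
          return container_of(c, struct btree, c);
  }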
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index cc0cd465b86385fa55341da397c6eb5f24fc0ea1..73675af8743a7a425b8299d7e5db17f5ae16ebed 100644
@@ -33,7 +33,7 @@ static void btree_node_interior_verify(struct btree *b)
        struct btree_node_iter iter;
        struct bkey_packed *k;
 
-       BUG_ON(!b->level);
+       BUG_ON(!b->c.level);
 
        bch2_btree_node_iter_init(&iter, b, &b->key.k.p);
 #if 1
@@ -229,7 +229,7 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
 
        btree_node_lock_type(c, b, SIX_LOCK_write);
        __btree_node_free(c, b);
-       six_unlock_write(&b->lock);
+       six_unlock_write(&b->c.lock);
 
        bch2_open_buckets_put(c, &ob);
 }
@@ -240,7 +240,7 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
        struct btree_iter *linked;
 
        trans_for_each_iter(iter->trans, linked)
-               BUG_ON(linked->l[b->level].b == b);
+               BUG_ON(linked->l[b->c.level].b == b);
 
        /*
         * Is this a node that isn't reachable on disk yet?
@@ -253,10 +253,10 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
         */
        btree_update_drop_new_node(c, b);
 
-       six_lock_write(&b->lock, NULL, NULL);
+       six_lock_write(&b->c.lock, NULL, NULL);
        __btree_node_free(c, b);
-       six_unlock_write(&b->lock);
-       six_unlock_intent(&b->lock);
+       six_unlock_write(&b->c.lock);
+       six_unlock_intent(&b->c.lock);
 }
 
 static void bch2_btree_node_free_ondisk(struct bch_fs *c,
@@ -387,7 +387,7 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
 {
        struct btree *n;
 
-       n = bch2_btree_node_alloc(as, b->level);
+       n = bch2_btree_node_alloc(as, b->c.level);
 
        n->data->min_key        = b->data->min_key;
        n->data->max_key        = b->data->max_key;
@@ -431,7 +431,7 @@ static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
        btree_node_set_format(b, b->data->format);
        bch2_btree_build_aux_trees(b);
 
-       six_unlock_write(&b->lock);
+       six_unlock_write(&b->c.lock);
 
        return b;
 }
@@ -445,7 +445,7 @@ static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reser
        while (reserve->nr) {
                struct btree *b = reserve->b[--reserve->nr];
 
-               six_unlock_write(&b->lock);
+               six_unlock_write(&b->c.lock);
 
                if (c->btree_reserve_cache_nr <
                    ARRAY_SIZE(c->btree_reserve_cache)) {
@@ -461,9 +461,9 @@ static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reser
 
                btree_node_lock_type(c, b, SIX_LOCK_write);
                __btree_node_free(c, b);
-               six_unlock_write(&b->lock);
+               six_unlock_write(&b->c.lock);
 
-               six_unlock_intent(&b->lock);
+               six_unlock_intent(&b->c.lock);
        }
 
        mutex_unlock(&c->btree_reserve_cache_lock);
@@ -586,7 +586,7 @@ static void btree_update_nodes_reachable(struct closure *cl)
                 */
                btree_node_lock_type(c, b, SIX_LOCK_read);
                bch2_btree_node_write_cond(c, b, btree_node_need_write(b));
-               six_unlock_read(&b->lock);
+               six_unlock_read(&b->c.lock);
                mutex_lock(&c->btree_interior_update_lock);
        }
 
@@ -641,10 +641,10 @@ retry:
                /* The usual case: */
                b = READ_ONCE(as->b);
 
-               if (!six_trylock_read(&b->lock)) {
+               if (!six_trylock_read(&b->c.lock)) {
                        mutex_unlock(&c->btree_interior_update_lock);
                        btree_node_lock_type(c, b, SIX_LOCK_read);
-                       six_unlock_read(&b->lock);
+                       six_unlock_read(&b->c.lock);
                        goto retry;
                }
 
@@ -665,7 +665,7 @@ retry:
                 * write it now if it needs to be written:
                 */
                bch2_btree_node_write_cond(c, b, true);
-               six_unlock_read(&b->lock);
+               six_unlock_read(&b->c.lock);
                break;
 
        case BTREE_INTERIOR_UPDATING_AS:
@@ -688,15 +688,15 @@ retry:
                /* b is the new btree root: */
                b = READ_ONCE(as->b);
 
-               if (!six_trylock_read(&b->lock)) {
+               if (!six_trylock_read(&b->c.lock)) {
                        mutex_unlock(&c->btree_interior_update_lock);
                        btree_node_lock_type(c, b, SIX_LOCK_read);
-                       six_unlock_read(&b->lock);
+                       six_unlock_read(&b->c.lock);
                        goto retry;
                }
 
-               BUG_ON(c->btree_roots[b->btree_id].as != as);
-               c->btree_roots[b->btree_id].as = NULL;
+               BUG_ON(c->btree_roots[b->c.btree_id].as != as);
+               c->btree_roots[b->c.btree_id].as = NULL;
 
                bch2_btree_set_root_ondisk(c, b, WRITE);
 
@@ -707,7 +707,7 @@ retry:
                 * have the pointer to the new root, and before the allocator
                 * can reuse the old nodes it'll have to do a journal commit:
                 */
-               six_unlock_read(&b->lock);
+               six_unlock_read(&b->c.lock);
                mutex_unlock(&c->btree_interior_update_lock);
 
                /*
@@ -908,8 +908,8 @@ static void btree_interior_update_add_node_reference(struct btree_update *as,
        d = &as->pending[as->nr_pending++];
        d->index_update_done    = false;
        d->seq                  = b->data->keys.seq;
-       d->btree_id             = b->btree_id;
-       d->level                = b->level;
+       d->btree_id             = b->c.btree_id;
+       d->level                = b->c.level;
        bkey_copy(&d->key, &b->key);
 
        mutex_unlock(&c->btree_interior_update_lock);
@@ -1053,7 +1053,7 @@ static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
 
        mutex_lock(&c->btree_root_lock);
        BUG_ON(btree_node_root(c, b) &&
-              (b->level < btree_node_root(c, b)->level ||
+              (b->c.level < btree_node_root(c, b)->c.level ||
                !btree_node_dying(btree_node_root(c, b))));
 
        btree_node_root(c, b) = b;
@@ -1076,7 +1076,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 
        bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
                      true, 0, &fs_usage->u, 0, 0);
-       if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
+       if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
                bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
                                     true, 0, NULL, 0,
                                     BCH_BUCKET_MARK_GC);
@@ -1094,13 +1094,13 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 
 static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b, int rw)
 {
-       struct btree_root *r = &c->btree_roots[b->btree_id];
+       struct btree_root *r = &c->btree_roots[b->c.btree_id];
 
        mutex_lock(&c->btree_root_lock);
 
        BUG_ON(b != r->b);
        bkey_copy(&r->key, &b->key);
-       r->level = b->level;
+       r->level = b->c.level;
        r->alive = true;
        if (rw == WRITE)
                c->btree_roots_dirty = true;
@@ -1214,7 +1214,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
        struct bset *set1, *set2;
        struct bkey_packed *k, *prev = NULL;
 
-       n2 = bch2_btree_node_alloc(as, n1->level);
+       n2 = bch2_btree_node_alloc(as, n1->c.level);
 
        n2->data->max_key       = n1->data->max_key;
        n2->data->format        = n1->format;
@@ -1251,7 +1251,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
        n1->key.k.p = bkey_unpack_pos(n1, prev);
        n1->data->max_key = n1->key.k.p;
        n2->data->min_key =
-               btree_type_successor(n1->btree_id, n1->key.k.p);
+               btree_type_successor(n1->c.btree_id, n1->key.k.p);
 
        set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
        set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
@@ -1282,7 +1282,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
        bch2_verify_btree_nr_keys(n1);
        bch2_verify_btree_nr_keys(n2);
 
-       if (n1->level) {
+       if (n1->c.level) {
                btree_node_interior_verify(n1);
                btree_node_interior_verify(n2);
        }
@@ -1359,7 +1359,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
        u64 start_time = local_clock();
 
        BUG_ON(!parent && (b != btree_node_root(c, b)));
-       BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
+       BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
 
        bch2_btree_interior_update_will_free_node(as, b);
 
@@ -1375,8 +1375,8 @@ static void btree_split(struct btree_update *as, struct btree *b,
 
                bch2_btree_build_aux_trees(n2);
                bch2_btree_build_aux_trees(n1);
-               six_unlock_write(&n2->lock);
-               six_unlock_write(&n1->lock);
+               six_unlock_write(&n2->c.lock);
+               six_unlock_write(&n1->c.lock);
 
                bch2_btree_node_write(c, n2, SIX_LOCK_intent);
 
@@ -1390,7 +1390,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
 
                if (!parent) {
                        /* Depth increases, make a new root */
-                       n3 = __btree_root_alloc(as, b->level + 1);
+                       n3 = __btree_root_alloc(as, b->c.level + 1);
 
                        n3->sib_u64s[0] = U16_MAX;
                        n3->sib_u64s[1] = U16_MAX;
@@ -1403,7 +1403,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
                trace_btree_compact(c, b);
 
                bch2_btree_build_aux_trees(n1);
-               six_unlock_write(&n1->lock);
+               six_unlock_write(&n1->c.lock);
 
                bch2_keylist_add(&as->parent_keys, &n1->key);
        }
@@ -1430,7 +1430,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
 
        /* Successful split, update the iterator to point to the new nodes: */
 
-       six_lock_increment(&b->lock, SIX_LOCK_intent);
+       six_lock_increment(&b->c.lock, SIX_LOCK_intent);
        bch2_btree_iter_node_drop(iter, b);
        if (n3)
                bch2_btree_iter_node_replace(iter, n3);
@@ -1456,7 +1456,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
        struct bkey_packed *k;
 
        /* Don't screw up @iter's position: */
-       node_iter = iter->l[b->level].iter;
+       node_iter = iter->l[b->c.level].iter;
 
        /*
         * btree_split(), btree_gc_coalesce() will insert keys before
@@ -1477,7 +1477,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
        btree_update_updated_node(as, b);
 
        trans_for_each_iter_with_node(iter->trans, b, linked)
-               bch2_btree_node_iter_peek(&linked->l[b->level].iter, b);
+               bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
 
        bch2_btree_iter_verify(iter, b);
 }
@@ -1503,8 +1503,8 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
        int old_live_u64s = b->nr.live_u64s;
        int live_u64s_added, u64s_added;
 
-       BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
-       BUG_ON(!b->level);
+       BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
+       BUG_ON(!b->c.level);
        BUG_ON(!as || as->b);
        bch2_verify_keylist_sorted(keys);
 
@@ -1541,7 +1541,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
         * the btree iterator yet, so the merge path's unlock/wait/relock dance
         * won't work:
         */
-       bch2_foreground_maybe_merge(c, iter, b->level,
+       bch2_foreground_maybe_merge(c, iter, b->c.level,
                                    flags|BTREE_INSERT_NOUNLOCK);
        return;
 split:
@@ -1686,7 +1686,7 @@ retry:
        b->sib_u64s[sib] = sib_u64s;
 
        if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
-               six_unlock_intent(&m->lock);
+               six_unlock_intent(&m->c.lock);
                goto out;
        }
 
@@ -1716,7 +1716,7 @@ retry:
        bch2_btree_interior_update_will_free_node(as, b);
        bch2_btree_interior_update_will_free_node(as, m);
 
-       n = bch2_btree_node_alloc(as, b->level);
+       n = bch2_btree_node_alloc(as, b->c.level);
 
        n->data->min_key        = prev->data->min_key;
        n->data->max_key        = next->data->max_key;
@@ -1729,7 +1729,7 @@ retry:
        bch2_btree_sort_into(c, n, next);
 
        bch2_btree_build_aux_trees(n);
-       six_unlock_write(&n->lock);
+       six_unlock_write(&n->c.lock);
 
        bkey_init(&delete.k);
        delete.k.p = prev->key.k.p;
@@ -1742,7 +1742,7 @@ retry:
 
        bch2_open_buckets_put(c, &n->ob);
 
-       six_lock_increment(&b->lock, SIX_LOCK_intent);
+       six_lock_increment(&b->c.lock, SIX_LOCK_intent);
        bch2_btree_iter_node_drop(iter, b);
        bch2_btree_iter_node_drop(iter, m);
 
@@ -1773,7 +1773,7 @@ out:
        return;
 
 err_cycle_gc_lock:
-       six_unlock_intent(&m->lock);
+       six_unlock_intent(&m->c.lock);
 
        if (flags & BTREE_INSERT_NOUNLOCK)
                goto out;
@@ -1786,7 +1786,7 @@ err_cycle_gc_lock:
        goto err;
 
 err_unlock:
-       six_unlock_intent(&m->lock);
+       six_unlock_intent(&m->c.lock);
        if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
                up_read(&c->gc_lock);
 err:
@@ -1828,7 +1828,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
        n = bch2_btree_node_alloc_replacement(as, b);
 
        bch2_btree_build_aux_trees(n);
-       six_unlock_write(&n->lock);
+       six_unlock_write(&n->c.lock);
 
        trace_btree_gc_rewrite_node(c, b);
 
@@ -1843,7 +1843,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
 
        bch2_open_buckets_put(c, &n->ob);
 
-       six_lock_increment(&b->lock, SIX_LOCK_intent);
+       six_lock_increment(&b->c.lock, SIX_LOCK_intent);
        bch2_btree_iter_node_drop(iter, b);
        bch2_btree_iter_node_replace(iter, n);
        bch2_btree_node_free_inmem(c, b, iter);
@@ -1963,7 +1963,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
                if (new_hash) {
                        bkey_copy(&new_hash->key, &new_key->k_i);
                        ret = bch2_btree_node_hash_insert(&c->btree_cache,
-                                       new_hash, b->level, b->btree_id);
+                                       new_hash, b->c.level, b->c.btree_id);
                        BUG_ON(ret);
                }
 
@@ -1996,7 +1996,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 
                bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
                              true, 0, &fs_usage->u, 0, 0);
-               if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
+               if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
                        bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
                                             true, 0, NULL, 0,
                                             BCH_BUCKET_MARK_GC);
@@ -2110,8 +2110,8 @@ err:
                list_move(&new_hash->list, &c->btree_cache.freeable);
                mutex_unlock(&c->btree_cache.lock);
 
-               six_unlock_write(&new_hash->lock);
-               six_unlock_intent(&new_hash->lock);
+               six_unlock_write(&new_hash->c.lock);
+               six_unlock_intent(&new_hash->c.lock);
        }
        up_read(&c->gc_lock);
        closure_sync(&cl);
@@ -2151,8 +2151,8 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
        bch2_btree_cache_cannibalize_unlock(c);
 
        set_btree_node_fake(b);
-       b->level        = 0;
-       b->btree_id     = id;
+       b->c.level      = 0;
+       b->c.btree_id   = id;
 
        bkey_btree_ptr_init(&b->key);
        b->key.k.p = POS_MAX;
@@ -2166,13 +2166,14 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
        b->data->format = bch2_btree_calc_format(b);
        btree_node_set_format(b, b->data->format);
 
-       ret = bch2_btree_node_hash_insert(&c->btree_cache, b, b->level, b->btree_id);
+       ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
+                                         b->c.level, b->c.btree_id);
        BUG_ON(ret);
 
        __bch2_btree_set_root_inmem(c, b);
 
-       six_unlock_write(&b->lock);
-       six_unlock_intent(&b->lock);
+       six_unlock_write(&b->c.lock);
+       six_unlock_intent(&b->c.lock);
 }
 
 ssize_t bch2_btree_updates_print(struct bch_fs *c, char *buf)
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index e5156e9081106e97e57833e6c30240849123c1e2..f9e092bf69aa5e1c1407c39ba4ff3804064f98b6 100644
@@ -190,7 +190,7 @@ void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
 static inline unsigned btree_update_reserve_required(struct bch_fs *c,
                                                     struct btree *b)
 {
-       unsigned depth = btree_node_root(c, b)->level + 1;
+       unsigned depth = btree_node_root(c, b)->c.level + 1;
 
        /*
         * Number of nodes we might have to allocate in a worst case btree
@@ -198,9 +198,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c,
         * a new root, unless we're already at max depth:
         */
        if (depth < BTREE_MAX_DEPTH)
-               return (depth - b->level) * 2 + 1;
+               return (depth - b->c.level) * 2 + 1;
        else
-               return (depth - b->level) * 2 - 1;
+               return (depth - b->c.level) * 2 - 1;
 }
 
 static inline void btree_node_reset_sib_u64s(struct btree *b)
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 3425ad6f68b2321b5f5f6af8789c624b1f322f9f..5e13ad34ec42e9135150cc568c94411229fe77bc 100644
@@ -155,7 +155,7 @@ static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
        btree_node_lock_type(c, b, SIX_LOCK_read);
        bch2_btree_node_write_cond(c, b,
                (btree_current_write(b) == w && w->journal.seq == seq));
-       six_unlock_read(&b->lock);
+       six_unlock_read(&b->c.lock);
 }
 
 static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
@@ -198,7 +198,7 @@ void bch2_btree_journal_key(struct btree_trans *trans,
        struct btree *b = iter->l[0].b;
        struct btree_write *w = btree_current_write(b);
 
-       EBUG_ON(iter->level || b->level);
+       EBUG_ON(iter->level || b->c.level);
        EBUG_ON(trans->journal_res.ref !=
                !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
 
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index bb69a2acd8dd13b7ead441d253be65099c5204c4..a11d7923ea5a8b127b32cbd331943d5c5c514d70 100644
@@ -52,8 +52,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 
        bkey_copy(&v->key, &b->key);
        v->written      = 0;
-       v->level        = b->level;
-       v->btree_id     = b->btree_id;
+       v->c.level      = b->c.level;
+       v->c.btree_id   = b->c.btree_id;
        bch2_btree_keys_init(v, &c->expensive_debug_checks);
 
        if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 2864a72938ce7eaffe30f945bd47e76858adf312..22a378d5f64f6f0c42911667e8912206a904db76 100644
@@ -144,8 +144,8 @@ DECLARE_EVENT_CLASS(btree_node,
 
        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
-               __entry->level          = b->level;
-               __entry->id             = b->btree_id;
+               __entry->level          = b->c.level;
+               __entry->id             = b->c.btree_id;
                __entry->inode          = b->key.k.p.inode;
                __entry->offset         = b->key.k.p.offset;
        ),
@@ -262,7 +262,7 @@ TRACE_EVENT(btree_insert_key,
        ),
 
        TP_fast_assign(
-               __entry->id             = b->btree_id;
+               __entry->id             = b->c.btree_id;
                __entry->inode          = k->k.p.inode;
                __entry->offset         = k->k.p.offset;
                __entry->size           = k->k.size;