bcachefs: bch2_trans_unlock() must always be followed by relock() or begin()
authorKent Overstreet <kent.overstreet@linux.dev>
Wed, 10 Apr 2024 03:23:08 +0000 (23:23 -0400)
committerKent Overstreet <kent.overstreet@linux.dev>
Wed, 8 May 2024 21:29:19 +0000 (17:29 -0400)
We're about to add new asserts for btree_trans locking consistency, and
part of that requires that we aren't using the btree_trans while it's
unlocked.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/alloc_background.c
fs/bcachefs/alloc_foreground.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/data_update.c
fs/bcachefs/fs.c
fs/bcachefs/io_write.c
fs/bcachefs/movinggc.c
fs/bcachefs/rebalance.c
fs/bcachefs/recovery.c

index 2f91ff67453f27e74345f7b3dde01bb871740f5d..f07373b781745663934702fb5b2947ccf9e57491 100644 (file)
@@ -2172,6 +2172,9 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
        u64 now;
        int ret = 0;
 
+       if (bch2_trans_relock(trans))
+               bch2_trans_begin(trans);
+
        a = bch2_trans_start_alloc_update(trans, &iter,  POS(dev, bucket_nr));
        ret = PTR_ERR_OR_ZERO(a);
        if (ret)
index df4439a38df5a6f918b8d5d0e66743b4feff2244..fb8825c4e7ad82a75cfb6b12af990975d2294cdc 100644 (file)
@@ -1342,6 +1342,10 @@ retry:
 
        *wp_ret = wp = writepoint_find(trans, write_point.v);
 
+       ret = bch2_trans_relock(trans);
+       if (ret)
+               goto err;
+
        /* metadata may not allocate on cache devices: */
        if (wp->data_type != BCH_DATA_user)
                have_cache = true;
index 3bef5b1af16d0d5b170864564b87e8805d0ed8da..0d44d613e4ac16353e9a585d96f05c66c17cbe43 100644 (file)
@@ -729,6 +729,8 @@ transaction_restart:                                                        \
 #define for_each_btree_key_upto(_trans, _iter, _btree_id,              \
                                _start, _end, _flags, _k, _do)          \
 ({                                                                     \
+       bch2_trans_begin(trans);                                        \
+                                                                       \
        struct btree_iter _iter;                                        \
        bch2_trans_iter_init((_trans), &(_iter), (_btree_id),           \
                             (_start), (_flags));                       \
index 4aa62a74a59bf8cf57ccbeb46625ff8ab17d4948..35f80c97b9737631848002dad42b25b1969db8ac 100644 (file)
@@ -737,9 +737,6 @@ err:
         */
        b = READ_ONCE(as->b);
        if (b) {
-               btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
-                                               as->btree_id, b->c.level, b->key.k.p);
-               struct btree_path *path = trans->paths + path_idx;
                /*
                 * @b is the node we did the final insert into:
                 *
@@ -763,6 +760,10 @@ err:
                 * have here:
                 */
                bch2_trans_unlock(trans);
+               bch2_trans_begin(trans);
+               btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
+                                               as->btree_id, b->c.level, b->key.k.p);
+               struct btree_path *path = trans->paths + path_idx;
                btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
                mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
                path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
index cae4e4eb5329abf37cb72bc224339aee5275f8c2..e8c7c5cec03fedc5bd2e55f1b208dd77eab2e6ee 100644 (file)
@@ -386,6 +386,8 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
        while (bio_sectors(bio)) {
                unsigned sectors = bio_sectors(bio);
 
+               bch2_trans_begin(trans);
+
                bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
                                     BTREE_ITER_slots);
                ret = lockrestart_do(trans, ({
index 81cfc74828fd72b2d37ca4819e8249a399634bef..841bb92e53dfd2574243131d481ee5345c8e430b 100644 (file)
@@ -1036,6 +1036,10 @@ retry:
 
                bch2_btree_iter_set_pos(&iter,
                        POS(iter.pos.inode, iter.pos.offset + sectors));
+
+               ret = bch2_trans_relock(trans);
+               if (ret)
+                       break;
        }
        start = iter.pos.offset;
        bch2_trans_iter_exit(trans, &iter);
index 217cb98ed707c3e90b31b7eee9934f94ad419b0d..73e25250de7540407dd27f29f3a6db2cddbd2db8 100644 (file)
@@ -1248,6 +1248,10 @@ retry:
 
                buckets.nr = 0;
 
+               ret = bch2_trans_relock(trans);
+               if (ret)
+                       break;
+
                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
index d0afd8bc0193bdb800c6932100f7e3de6d669cc2..10bfb31c151b208844ff2acc3801bff1d72795c7 100644 (file)
@@ -158,6 +158,8 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
        if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
                return ret;
 
+       bch2_trans_begin(trans);
+
        ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
                                  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
                                  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
index c640c0f13976c60f8c160291c567924e795959fe..cf81e5128c3abb02b0b0ec49deb9d2d8d31dd3e3 100644 (file)
@@ -323,6 +323,8 @@ static int do_rebalance(struct moving_context *ctxt)
        struct bkey_s_c k;
        int ret = 0;
 
+       bch2_trans_begin(trans);
+
        bch2_move_stats_init(&r->work_stats, "rebalance_work");
        bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
 
index cc603276f917d205dd4b704614751acb9f77220d..27ee27b285bdeda91b5702d3b1616f6420a79a4f 100644 (file)
@@ -202,7 +202,7 @@ int bch2_journal_replay(struct bch_fs *c)
        struct journal *j = &c->journal;
        u64 start_seq   = c->journal_replay_seq_start;
        u64 end_seq     = c->journal_replay_seq_start;
-       struct btree_trans *trans = bch2_trans_get(c);
+       struct btree_trans *trans = NULL;
        bool immediate_flush = false;
        int ret = 0;
 
@@ -216,6 +216,7 @@ int bch2_journal_replay(struct bch_fs *c)
        BUG_ON(!atomic_read(&keys->ref));
 
        move_gap(keys, keys->nr);
+       trans = bch2_trans_get(c);
 
        /*
         * First, attempt to replay keys in sorted order. This is more