bcachefs: Track held write locks
author     Kent Overstreet <kent.overstreet@linux.dev>
           Tue, 23 Aug 2022 01:05:31 +0000 (21:05 -0400)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Sun, 22 Oct 2023 21:09:39 +0000 (17:09 -0400)
The upcoming lock cycle detection code will need to know precisely which
locks every btree_trans is holding, including write locks - this patch
updates btree_node_locked_type to include write locks.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
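
For context: a btree_path packs each level's lock state into two bits of nodes_locked, storing btree_node_locked_type + 1 so that zero means unlocked (see the btree_locking.h hunk below). A minimal sketch of the decoding, assuming the usual SIX_LOCK_read/intent/write values of 0/1/2 (demo_locked_type is a hypothetical name, not part of the patch):

	/*
	 * Two bits per level; this patch gives the encoding a fourth state:
	 *
	 *   00 -> BTREE_NODE_UNLOCKED      (-1)
	 *   01 -> BTREE_NODE_READ_LOCKED   (SIX_LOCK_read   = 0)
	 *   10 -> BTREE_NODE_INTENT_LOCKED (SIX_LOCK_intent = 1)
	 *   11 -> BTREE_NODE_WRITE_LOCKED  (SIX_LOCK_write  = 2)
	 */
	static inline int demo_locked_type(unsigned nodes_locked, unsigned level)
	{
		return -1 + ((nodes_locked >> (level << 1)) & 3);
	}
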
fs/bcachefs/btree_locking.c
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_update_leaf.c

diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 84d1e37a074151d750897d222d18ebb4c5dd92ee..be288fb96ea2a9516c8215ead5e7f157578dd159 100644
@@ -246,6 +246,8 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
                return bch2_btree_node_relock(trans, path, level);
        case BTREE_NODE_INTENT_LOCKED:
                break;
+       case BTREE_NODE_WRITE_LOCKED:
+               BUG();
        }
 
        if (btree_node_intent_locked(path, level))
@@ -448,9 +450,17 @@ void bch2_btree_path_verify_locks(struct btree_path *path)
                return;
        }
 
-       for (l = 0; btree_path_node(path, l); l++)
-               BUG_ON(btree_lock_want(path, l) !=
-                      btree_node_locked_type(path, l));
+       for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+               int want = btree_lock_want(path, l);
+               int have = btree_node_locked_type(path, l);
+
+               BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);
+
+               BUG_ON(is_btree_node(path, l) &&
+                      (want == BTREE_NODE_UNLOCKED ||
+                       have != BTREE_NODE_WRITE_LOCKED) &&
+                      want != have);
+       }
 }
 
 void bch2_trans_verify_locks(struct btree_trans *trans)
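
The reworked loop in bch2_btree_path_verify_locks above tolerates exactly one kind of want/have mismatch: a held write lock on a level where some lock was wanted. btree_lock_want() only ever returns unlocked, read, or intent, so a transiently held write lock (taken on top of an intent lock) can never literally match it. Restated as a predicate (demo_level_ok is an illustrative name, not part of the patch):

	static bool demo_level_ok(int want, int have, bool is_node)
	{
		if (!is_node)
			return have == BTREE_NODE_UNLOCKED;
		if (want == have)
			return true;
		/* the only tolerated mismatch: a transient write lock on
		   a level where at least a read/intent lock was wanted */
		return want != BTREE_NODE_UNLOCKED &&
		       have == BTREE_NODE_WRITE_LOCKED;
	}
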
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 3a9a4a0d61c430d30d9eebffa1d0b325cb3f2224..a221c4fd1bf9527347e930ce27ffa807ad2d6972 100644
@@ -32,6 +32,7 @@ enum btree_node_locked_type {
        BTREE_NODE_UNLOCKED             = -1,
        BTREE_NODE_READ_LOCKED          = SIX_LOCK_read,
        BTREE_NODE_INTENT_LOCKED        = SIX_LOCK_intent,
+       BTREE_NODE_WRITE_LOCKED         = SIX_LOCK_write,
 };
 
 static inline int btree_node_locked_type(struct btree_path *path,
@@ -40,16 +41,19 @@ static inline int btree_node_locked_type(struct btree_path *path,
        return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
 }
 
-static inline bool btree_node_intent_locked(struct btree_path *path,
-                                           unsigned level)
+static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
+{
+       return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
+}
+
+static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
 {
-       return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
+       return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
 }
 
-static inline bool btree_node_read_locked(struct btree_path *path,
-                                         unsigned level)
+static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
 {
-       return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
+       return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
 }
 
 static inline bool btree_node_locked(struct btree_path *path, unsigned level)
@@ -72,6 +76,7 @@ static inline void mark_btree_node_locked_noreset(struct btree_path *path,
 static inline void mark_btree_node_unlocked(struct btree_path *path,
                                            unsigned level)
 {
+       EBUG_ON(btree_node_write_locked(path, level));
        mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
 }
 
@@ -179,6 +184,9 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
 
        EBUG_ON(path->l[b->c.level].b != b);
        EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+       EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
+
+       mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
 
        trans_for_each_path_with_node(trans, b, linked)
                linked->l[b->c.level].lock_seq += 2;
@@ -288,6 +296,8 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
 
        if (unlikely(!six_trylock_write(&b->c.lock)))
                __bch2_btree_node_lock_write(trans, b);
+
+       mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
 }
 
 /* relock: */
@@ -311,8 +321,8 @@ static inline bool bch2_btree_node_relock(struct btree_trans *trans,
                                          struct btree_path *path, unsigned level)
 {
        EBUG_ON(btree_node_locked(path, level) &&
-               btree_node_locked_type(path, level) !=
-               __btree_lock_want(path, level));
+               !btree_node_write_locked(path, level) &&
+               btree_node_locked_type(path, level) != __btree_lock_want(path, level));
 
        return likely(btree_node_locked(path, level)) ||
                __bch2_btree_node_relock(trans, path, level);
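
Taken together, the bch2_btree_node_lock_write and bch2_btree_node_unlock_write_inlined hunks above make the path's bookkeeping cover a write lock for exactly the span it is held: lock_write records SIX_LOCK_write, unlock_write downgrades the recorded state back to SIX_LOCK_intent. The relock hunk directly above relaxes its assertion for the same reason: a node found write-locked is already held more strongly than __btree_lock_want() could ask for. A rough usage sketch, assuming the (trans, path, b) signatures suggested by the hunks (illustrative only, error handling elided):

	static void demo_locked_update(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree *b)
	{
		bch2_btree_node_lock_write(trans, path, b);
		/* the path now reports BTREE_NODE_WRITE_LOCKED at
		   b->c.level, so cycle detection can see this lock */

		/* ... modify b ... */

		bch2_btree_node_unlock_write(trans, path, b);
		/* recorded state is back to BTREE_NODE_INTENT_LOCKED */
	}
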
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 291c1a3ff8c9fa34dda072fbc1efffed566239d4..6ae4755cfd24dccf3363f8b6cd1106cdf89914b1 100644
@@ -828,6 +828,8 @@ static inline int trans_lock_write(struct btree_trans *trans)
                        BUG_ON(ret);
                }
 
+               mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);
+
                bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
        }