bcachefs: Mark write locks before taking lock
author: Kent Overstreet <kent.overstreet@linux.dev>
Fri, 26 Aug 2022 18:55:00 +0000 (14:55 -0400)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:40 +0000 (17:09 -0400)
six locks are unfair: while a thread is blocked trying to take a write
lock, new read locks will fail. The new deadlock cycle detector makes
use of our existing lock tracing, so we need to tell it we're holding a
write lock before we take the lock for it to work correctly.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_update_leaf.c

index 1e4c81d8084b398b36fa4ddc4fc49c2c0b8289d1..ab3161c1b1f4743a119257f17b336db24177f9eb 100644 (file)
@@ -271,10 +271,15 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
        EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
        EBUG_ON(!btree_node_intent_locked(path, b->c.level));
 
+       /*
+        * six locks are unfair, and read locks block while a thread wants a
+        * write lock: thus, we need to tell the cycle detector we have a write
+        * lock _before_ taking the lock:
+        */
+       mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
+
        if (unlikely(!six_trylock_write(&b->c.lock)))
                __bch2_btree_node_lock_write(trans, b);
-
-       mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
 }
 
 /* relock: */
index 732d09d4504124589d159c1674743d0d034ad9b7..a8306b16956d697c412b89118725ebd8bb415035 100644 (file)
@@ -817,6 +817,13 @@ static inline int trans_lock_write(struct btree_trans *trans)
                if (same_leaf_as_prev(trans, i))
                        continue;
 
+               /*
+                * six locks are unfair, and read locks block while a thread
+                * wants a write lock: thus, we need to tell the cycle detector
+                * we have a write lock _before_ taking the lock:
+                */
+               mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);
+
                if (!six_trylock_write(&insert_l(i)->b->c.lock)) {
                        if (have_conflicting_read_lock(trans, i->path))
                                goto fail;
@@ -828,13 +835,13 @@ static inline int trans_lock_write(struct btree_trans *trans)
                        BUG_ON(ret);
                }
 
-               mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);
-
                bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
        }
 
        return 0;
 fail:
+       mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_intent);
+
        while (--i >= trans->updates) {
                if (same_leaf_as_prev(trans, i))
                        continue;