From dcf141b9e13d261629806aa37e0fa7769d38b789 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Wed, 28 Oct 2020 14:18:18 -0400
Subject: [PATCH] bcachefs: Fix spurious transaction restarts

The check for whether locking a btree node would deadlock was wrong - we
have to check that interior nodes are locked before descendants, but
this check was wrong when considering cached vs. non-cached iterators.

Signed-off-by: Kent Overstreet
Signed-off-by: Kent Overstreet
---
 fs/bcachefs/btree_iter.c  | 1 +
 fs/bcachefs/btree_types.h | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index a76e13000d11a..d310b2389e384 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -244,6 +244,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 		 * we're about to lock, it must have the ancestors locked too:
 		 */
 		if (linked->btree_id == iter->btree_id &&
+		    btree_iter_is_cached(linked) == btree_iter_is_cached(iter) &&
 		    level > __fls(linked->nodes_locked)) {
 			if (!(trans->nounlock)) {
 				linked->locks_want =
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index f02518f9d9ec4..d4f0db1fe457b 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -283,6 +283,11 @@ btree_iter_type(const struct btree_iter *iter)
 	return iter->flags & BTREE_ITER_TYPE;
 }
 
+static inline bool btree_iter_is_cached(const struct btree_iter *iter)
+{
+	return btree_iter_type(iter) == BTREE_ITER_CACHED;
+}
+
 static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
 {
 	return iter->l + iter->level;
-- 
2.30.2