#include <linux/prefetch.h>
-static inline void btree_trans_sort_paths(struct btree_trans *);
-
static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
struct btree_path *);
if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
(btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, want))) {
- mark_btree_node_locked(path, level, want);
+ mark_btree_node_locked(trans, path, level, want);
return true;
} else {
return false;
return false;
success:
- mark_btree_node_intent_locked(path, level);
+ mark_btree_node_intent_locked(trans, path, level);
return true;
}
t = btree_lock_want(path, b->c.level);
if (t != BTREE_NODE_UNLOCKED) {
six_lock_increment(&b->c.lock, (enum six_lock_type) t);
- mark_btree_node_locked(path, b->c.level, (enum six_lock_type) t);
+ mark_btree_node_locked(trans, path, b->c.level, (enum six_lock_type) t);
}
btree_path_level_init(trans, path, b);
for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
path->l[i].b = NULL;
- mark_btree_node_locked(path, path->level, lock_type);
+ mark_btree_node_locked(trans, path, path->level, lock_type);
btree_path_level_init(trans, path, b);
return 0;
}
if (unlikely(ret))
goto err;
- mark_btree_node_locked(path, level, lock_type);
+ mark_btree_node_locked(trans, path, level, lock_type);
btree_path_level_init(trans, path, b);
if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
path = trans->paths + trans->sorted[i];
EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans->traverse_all_idx = path->idx;
+#endif
ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
if (ret)
out:
bch2_btree_cache_cannibalize_unlock(c);
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans->traverse_all_idx = U8_MAX;
+#endif
trans->in_traverse_all = false;
trace_trans_traverse_all(trans->ip, trace_ip);
BUG_ON(trans->paths[idx].sorted_idx != i);
}
}
-#else
-static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
-#endif
static void btree_trans_verify_sorted(struct btree_trans *trans)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
struct btree_path *path, *prev = NULL;
unsigned i;
BUG_ON(prev && btree_path_cmp(prev, path) > 0);
prev = path;
}
-#endif
}
+#else
+static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
+static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
+#endif
-static noinline void __btree_trans_sort_paths(struct btree_trans *trans)
+void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
{
int i, l = 0, r = trans->nr_sorted, inc = 1;
bool swapped;
+ btree_trans_verify_sorted_refs(trans);
+
+ if (trans->paths_sorted)
+ goto out;
+
/*
* Cocktail shaker sort: this is efficient because iterators will be
 * mostly sorted.
} while (swapped);
trans->paths_sorted = true;
-
+out:
btree_trans_verify_sorted(trans);
}
-static inline void btree_trans_sort_paths(struct btree_trans *trans)
-{
- btree_trans_verify_sorted_refs(trans);
-
- if (trans->paths_sorted) {
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- btree_trans_verify_sorted(trans);
- return;
- }
- __btree_trans_sort_paths(trans);
-}
-
static inline void btree_path_list_remove(struct btree_trans *trans,
struct btree_path *path)
{
{
unsigned i;
- path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
+ path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
path->nodes_intent_locked &= ~(1 << level);
}
-static inline void mark_btree_node_locked(struct btree_path *path,
+static inline void mark_btree_node_locked(struct btree_trans *trans,
+ struct btree_path *path,
unsigned level,
enum six_lock_type type)
{
path->nodes_locked |= 1 << level;
path->nodes_intent_locked |= type << level;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ path->ip_locked = _RET_IP_;
+ btree_trans_sort_paths(trans);
+ BUG_ON(trans->in_traverse_all &&
+ trans->traverse_all_idx != U8_MAX &&
+ path->sorted_idx > trans->paths[trans->traverse_all_idx].sorted_idx);
+#endif
}
-static inline void mark_btree_node_intent_locked(struct btree_path *path,
+static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
+ struct btree_path *path,
unsigned level)
{
- mark_btree_node_locked(path, level, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, path, level, SIX_LOCK_intent);
}
static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
while (path->nodes_locked)
btree_node_unlock(path, __ffs(path->nodes_locked));
+#ifdef CONFIG_BCACHEFS_DEBUG
+ path->ip_locked = 0;
+#endif
}
static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)