if (cmp < 0)
continue;
- if (!(path->nodes_locked & 1) ||
+ if (!btree_node_locked(path, 0) ||
!path->should_be_locked)
continue;
for (l = 0; l < BTREE_MAX_DEPTH; l++) {
if (btree_node_locked(path, l) &&
!IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
- prt_printf(out, " %s l=%u ",
- btree_node_intent_locked(path, l) ? "i" : "r", l);
+ prt_printf(out, " %c l=%u ",
+ lock_types[btree_node_locked_type(path, l)], l);
bch2_btree_path_node_to_text(out, b, path->cached);
prt_printf(out, "\n");
}
six_lock_readers_add(&b->c.lock, readers);
}
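/*
 * Not shown in this diff: the tri-state locked type and the lock_types[]
 * table it indexes. A minimal sketch consistent with the usage above (the
 * BTREE_NODE_* names are taken from later hunks; the exact values are an
 * assumption, chosen so the BUILD_BUG_ONs further down hold):
 */
enum btree_node_locked_type {
        BTREE_NODE_UNLOCKED             = -1,
        BTREE_NODE_READ_LOCKED          = SIX_LOCK_read,
        BTREE_NODE_INTENT_LOCKED        = SIX_LOCK_intent,
};

/* Only indexed while a lock is held, i.e. with values 0 ('r') or 1 ('i'): */
static const char lock_types[] = { 'r', 'i' };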
+static inline bool path_has_read_locks(struct btree_path *path)
+{
+ unsigned l;
+
+ for (l = 0; l < BTREE_MAX_DEPTH; l++)
+ if (btree_node_read_locked(path, l))
+ return true;
+ return false;
+}
+
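/*
 * path_has_read_locks() relies on btree_node_read_locked(), also not part of
 * this diff; a plausible sketch in terms of the locked-type helper:
 */
static inline bool btree_node_read_locked(struct btree_path *path, unsigned level)
{
        return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
}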
/* Slowpath: */
int __bch2_btree_node_lock(struct btree_trans *trans,
struct btree_path *path,
* already have read locked - deadlock:
*/
if (type == SIX_LOCK_intent &&
- linked->nodes_locked != linked->nodes_intent_locked) {
+ path_has_read_locks(linked)) {
reason = 1;
goto deadlock;
}
* another path has possible descendants locked of the node
* we're about to lock, it must have the ancestors locked too:
*/
- if (level > __fls(linked->nodes_locked)) {
+ if (level > btree_path_highest_level_locked(linked)) {
reason = 5;
goto deadlock;
}
trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
return false;
success:
- mark_btree_node_intent_locked(trans, path, level);
+ mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
return true;
}
path->locks_want = new_locks_want;
while (path->nodes_locked &&
- (l = __fls(path->nodes_locked)) >= path->locks_want) {
+ (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
if (l > path->level) {
btree_node_unlock(trans, path, l);
} else {
if (btree_node_intent_locked(path, l)) {
six_lock_downgrade(&path->l[l].b->c.lock);
- path->nodes_intent_locked ^= 1 << l;
+ mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
}
break;
}
static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
- return path->nodes_locked & (1 << level);
+ return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}
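/*
 * btree_node_locked() is now expressed via btree_node_locked_type(), whose
 * definition is not part of this diff. A minimal sketch over the existing
 * two bitmasks, assuming the enum values sketched earlier:
 */
static inline enum btree_node_locked_type
btree_node_locked_type(struct btree_path *path, unsigned level)
{
        if (!(path->nodes_locked & (1U << level)))
                return BTREE_NODE_UNLOCKED;

        return path->nodes_intent_locked & (1U << level)
                ? BTREE_NODE_INTENT_LOCKED
                : BTREE_NODE_READ_LOCKED;
}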
-static inline void mark_btree_node_unlocked(struct btree_path *path,
- unsigned level)
-{
- path->nodes_locked &= ~(1 << level);
- path->nodes_intent_locked &= ~(1 << level);
-}
-
-static inline void mark_btree_node_locked_noreset(struct btree_trans *trans,
- struct btree_path *path,
+static inline void mark_btree_node_locked_noreset(struct btree_path *path,
unsigned level,
- enum six_lock_type type)
+ enum btree_node_locked_type type)
{
/* relying on this to avoid a branch */
BUILD_BUG_ON(SIX_LOCK_read != 0);
BUILD_BUG_ON(SIX_LOCK_intent != 1);
- path->nodes_locked |= 1 << level;
- path->nodes_intent_locked |= type << level;
+ path->nodes_locked &= ~(1 << level);
+ path->nodes_intent_locked &= ~(1 << level);
+
+ if (type != BTREE_NODE_UNLOCKED) {
+ path->nodes_locked |= 1 << level;
+ path->nodes_intent_locked |= type << level;
+ }
+}
+
+static inline void mark_btree_node_unlocked(struct btree_path *path,
+ unsigned level)
+{
+ mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}
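/*
 * Worked example with hypothetical state: given nodes_locked = 0b0101 and
 * nodes_intent_locked = 0b0001 (levels 0 and 2 locked, level 0 intent),
 * mark_btree_node_locked_noreset(path, 2, SIX_LOCK_intent) clears and then
 * re-sets bit 2 in both masks, giving nodes_locked = 0b0101,
 * nodes_intent_locked = 0b0101; a subsequent mark_btree_node_unlocked(path, 0)
 * leaves nodes_locked = 0b0100, nodes_intent_locked = 0b0100.
 */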
static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum six_lock_type type)
{
- mark_btree_node_locked_noreset(trans, path, level, type);
+ mark_btree_node_locked_noreset(path, level, type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[level].lock_taken_time = ktime_get_ns();
#endif
}
-static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
- struct btree_path *path,
- unsigned level)
-{
- mark_btree_node_locked_noreset(trans, path, level, SIX_LOCK_intent);
-}
-
static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
mark_btree_node_unlocked(path, level);
}
+static inline int btree_path_lowest_level_locked(struct btree_path *path)
+{
+ return __ffs(path->nodes_locked);
+}
+
+static inline int btree_path_highest_level_locked(struct btree_path *path)
+{
+ return __fls(path->nodes_locked);
+}
+
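/*
 * Note: __ffs()/__fls() are undefined for a zero argument, so both helpers
 * assume at least one level is locked; callers guard with a
 * path->nodes_locked check first, as in __bch2_btree_path_unlock() below.
 */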
static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
struct btree_path *path)
{
btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);
while (path->nodes_locked)
- btree_node_unlock(trans, path, __ffs(path->nodes_locked));
+ btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}
/*
static inline void upgrade_readers(struct btree_trans *trans, struct btree_path *path)
{
struct btree *b = path_l(path)->b;
 	do {
-		if (path->nodes_locked &&
-		    path->nodes_locked != path->nodes_intent_locked)
-			path_upgrade_readers(trans, path);
+		if (path_has_read_locks(path))
+			path_upgrade_readers(trans, path);
} while ((path = prev_btree_path(trans, path)) &&
path_l(path)->b == b);
}
? trans->paths + trans->sorted[i + 1]
: NULL;
- if (path->nodes_locked) {
- if (path->nodes_intent_locked)
- nr_intent++;
- else
- nr_read++;
+	switch (btree_node_locked_type(path, path->level)) {
+	case BTREE_NODE_UNLOCKED:
+		break;
+ case BTREE_NODE_READ_LOCKED:
+ nr_read++;
+ break;
+ case BTREE_NODE_INTENT_LOCKED:
+ nr_intent++;
+ break;
}
if (!next || path_l(path)->b != path_l(next)->b) {
//if (path == pos)
// break;
- if (path->nodes_locked != path->nodes_intent_locked &&
+ if (btree_node_read_locked(path, path->level) &&
!bch2_btree_path_upgrade(trans, path, path->level + 1))
return true;
}