unsigned flags)
{
struct btree_iter *iter, *best = NULL;
+ struct bpos real_pos, pos_min = POS_MIN;
+
+ if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
+ btree_node_type_is_extents(btree_id) &&
+ !(flags & BTREE_ITER_NOT_EXTENTS) &&
+ !(flags & BTREE_ITER_ALL_SNAPSHOTS))
+ flags |= BTREE_ITER_IS_EXTENTS;
if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
!btree_type_has_snapshots(btree_id))
flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
pos.snapshot = btree_type_has_snapshots(btree_id)
? U32_MAX : 0;
+ real_pos = pos;
+
+ if ((flags & BTREE_ITER_IS_EXTENTS) &&
+ bkey_cmp(pos, POS_MAX))
+ real_pos = bpos_nosnap_successor(pos);
+
trans_for_each_iter(trans, iter) {
if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
continue;

if (iter->btree_id != btree_id)
continue;
if (best) {
- int cmp = bkey_cmp(bpos_diff(best->real_pos, pos),
- bpos_diff(iter->real_pos, pos));
+ int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
+ bpos_diff(iter->real_pos, real_pos));

if (cmp < 0 ||
((cmp == 0 && btree_iter_keep(trans, iter))))
continue;
}

best = iter;
}
+ trace_trans_get_iter(_RET_IP_, trans->ip,
+ btree_id,
+ &real_pos, locks_want,
+ best ? &best->real_pos : &pos_min,
+ best ? best->locks_want : 0,
+ best ? best->uptodate : BTREE_ITER_NEED_TRAVERSE);
+
if (!best) {
iter = btree_trans_iter_alloc(trans);
bch2_btree_iter_init(trans, iter, btree_id);
} else if (btree_iter_keep(trans, best)) {
iter = btree_trans_iter_alloc(trans);
btree_iter_copy(iter, best);
} else {
iter = best;
}

trans->iters_live |= 1ULL << iter->idx;
trans->iters_touched |= 1ULL << iter->idx;
- if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
- btree_node_type_is_extents(btree_id) &&
- !(flags & BTREE_ITER_NOT_EXTENTS) &&
- !(flags & BTREE_ITER_ALL_SNAPSHOTS))
- flags |= BTREE_ITER_IS_EXTENTS;
-
iter->flags = flags;
iter->snapshot = pos.snapshot;
iter->min_depth = depth;
bch2_btree_iter_set_pos(iter, pos);
- btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
+ btree_iter_set_search_pos(iter, real_pos);
return iter;
}
__entry->buckets_moved, __entry->buckets_not_moved)
);
+TRACE_EVENT(trans_get_iter,
+ TP_PROTO(unsigned long caller, unsigned long ip,
+ enum btree_id btree_id,
+ struct bpos *pos_want,
+ unsigned locks_want,
+ struct bpos *pos_found,
+ unsigned locks_found,
+ unsigned uptodate),
+ TP_ARGS(caller, ip, btree_id,
+ pos_want, locks_want,
+ pos_found, locks_found,
+ uptodate),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller )
+ __field(unsigned long, ip )
+ __field(u8, btree_id )
+ __field(u8, uptodate )
+ __field(u8, locks_want )
+ __field(u8, locks_found )
+ __field(u64, pos_want_inode )
+ __field(u64, pos_want_offset )
+ __field(u32, pos_want_snapshot )
+ __field(u64, pos_found_inode )
+ __field(u64, pos_found_offset )
+ __field(u32, pos_found_snapshot )
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->ip = ip;
+ __entry->btree_id = btree_id;
+ __entry->uptodate = uptodate;
+ __entry->locks_want = locks_want;
+ __entry->locks_found = locks_found;
+ __entry->pos_want_inode = pos_want->inode;
+ __entry->pos_want_offset = pos_want->offset;
+ __entry->pos_want_snapshot = pos_want->snapshot;
+ __entry->pos_found_inode = pos_found->inode;
+ __entry->pos_found_offset = pos_found->offset;
+ __entry->pos_found_snapshot = pos_found->snapshot;
+ ),
+
+ TP_printk("%ps %pS btree %u uptodate %u want %llu:%llu:%u locks %u found %llu:%llu:%u locks %u",
+ (void *) __entry->caller,
+ (void *) __entry->ip,
+ __entry->btree_id,
+ __entry->uptodate,
+ __entry->pos_want_inode,
+ __entry->pos_want_offset,
+ __entry->pos_want_snapshot,
+ __entry->locks_want,
+ __entry->pos_found_inode,
+ __entry->pos_found_offset,
+ __entry->pos_found_snapshot,
+ __entry->locks_found)
+);
+
TRACE_EVENT(transaction_restart_ip,
TP_PROTO(unsigned long caller, unsigned long ip),
TP_ARGS(caller, ip),