struct btree_iter *iter;
struct bkey_s_c k;
struct bkey_s_extent e;
- struct bch_extent_ptr *ptr;
struct bkey_on_stack sk;
int ret = 0, dev, idx;
while ((k = bch2_btree_iter_peek(iter)).k &&
!(ret = bkey_err(k)) &&
bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
+ struct bch_extent_ptr *ptr, *ec_ptr = NULL;
+
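+ /*
+  * Remember the pointer on the stripe's device: it stays live and
+  * gets the stripe pointer added below, while every other replica
+  * is marked cached.
+  */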
if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
bch2_btree_iter_next(iter);
continue;
}
bkey_reassemble(sk.k, k);
e = bkey_i_to_s_extent(sk.k);
- extent_for_each_ptr(e, ptr)
- if (ptr->dev != dev)
+ extent_for_each_ptr(e, ptr) {
+ if (ptr->dev == dev)
+ ec_ptr = ptr;
+ else
ptr->cached = true;
+ }
- ptr = (void *) bch2_extent_has_device(e.c, dev);
- BUG_ON(!ptr);
-
- extent_stripe_ptr_add(e, s, ptr, idx);
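+ /* tag the replica left live above with a pointer to stripe s */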
+ extent_stripe_ptr_add(e, s, ec_ptr, idx);
bch2_trans_update(&trans, iter, sk.k);
void bch2_bkey_mark_replicas_cached(struct bch_fs *, struct bkey_s,
unsigned, unsigned);
-const struct bch_extent_ptr *
-bch2_extent_has_device(struct bkey_s_c_extent, unsigned);
-
unsigned bch2_extent_is_compressed(struct bkey_s_c);
bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
w = j->buf + !state.idx;
ret = state.prev_buf_unwritten &&
- bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), dev_idx);
+ bch2_bkey_has_device(bkey_i_to_s_c(&w->key), dev_idx);
spin_unlock(&j->lock);
return ret;
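/*
 * For each rw device that flushes the journal but has no pointer in
 * this write's key, take an io ref and grab its journal bio:
 */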
for_each_rw_member(ca, c, i)
if (journal_flushes_device(ca) &&
- !bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), i)) {
+ !bch2_bkey_has_device(bkey_i_to_s_c(&w->key), i)) {
percpu_ref_get(&ca->io_ref);
bio = ca->journal.bio;
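/*
 * For reference: a minimal sketch of the bkey-generic helper used
 * above.  Working on bkey_s_c means it handles any key type carrying
 * pointers, so callers no longer need the bkey_i_to_s_c_extent()
 * cast.  This sketch assumes bch2_bkey_ptrs_c() and
 * bkey_for_each_ptr() from extents.h; the in-tree
 * bch2_bkey_has_device() may differ in detail.
 */
static inline const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}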