POS(ca->dev_idx, ca->mi.first_bucket));
while (iter.pos.offset < ca->mi.nbuckets) {
- bch2_trans_cond_resched(&trans);
-
ret = bch2_alloc_write_key(&trans, &iter, flags);
if (ret) {
percpu_ref_put(&ca->ref);
[...]
			BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
}
-
- bch2_trans_cond_resched(&trans);
}
bch2_trans_iter_exit(&trans, &iter);
[...]
static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
+/*
+ * Unlocks before scheduling; returns -EINTR if the transaction
+ * couldn't be relocked afterwards.
+ * Note: does not revalidate the iterator.
+ */
+static inline int bch2_trans_cond_resched(struct btree_trans *trans)
+{
+ if (need_resched() || race_fault()) {
+ bch2_trans_unlock(trans);
+ schedule();
+ return bch2_trans_relock(trans) ? 0 : -EINTR;
+ } else {
+ return 0;
+ }
+}
+
static inline int __btree_path_cmp(const struct btree_path *l,
enum btree_id r_btree_id,
bool r_cached,
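
The helper body is unchanged by the move; it leaves the header so it becomes private to btree_iter.c (presumably because its only remaining caller is the transaction reset/restart path there). For reference, the calling pattern the remaining hunks delete looked roughly like the sketch below; this is not part of the patch, and frob_one_key() is a hypothetical stand-in for the loop body:

    static int example_walk(struct btree_trans *trans,
                            struct btree_iter *iter, unsigned nr)
    {
            int ret = 0;

            while (nr--) {
                    ret = frob_one_key(trans, iter);        /* hypothetical */
                    if (ret)
                            break;

                    /* may drop locks and schedule(); -EINTR if relock fails */
                    ret = bch2_trans_cond_resched(trans);
                    if (ret)
                            break;

                    bch2_btree_iter_advance(iter);
            }

            return ret;
    }

On -EINTR the caller is expected to restart the transaction and revalidate its iterators, matching the note in the comment above.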
[...]
	bch2_btree_iter_set_pos(iter, pos);
}
-/*
- * Unlocks before scheduling
- * Note: does not revalidate iterator
- */
-static inline int bch2_trans_cond_resched(struct btree_trans *trans)
-{
- if (need_resched() || race_fault()) {
- bch2_trans_unlock(trans);
- schedule();
- return bch2_trans_relock(trans) ? 0 : -EINTR;
- } else {
- return 0;
- }
-}
-
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
unsigned, struct bpos, unsigned);
[...]
			BTREE_INSERT_NOFAIL);
if (ret)
break;
-
- bch2_trans_cond_resched(trans);
}
if (ret == -EINTR) {
[...]
			d.k->p.snapshot);
break;
}
-
- bch2_trans_cond_resched(&trans);
}
bch2_trans_iter_exit(&trans, &iter);
[...]
			&stats->sectors_seen);
next_nondata:
bch2_btree_iter_advance(&iter);
- bch2_trans_cond_resched(&trans);
}
out:
[...]
		ret = bch2_btree_node_rewrite(&trans, &iter,
b->data->keys.seq, 0) ?: ret;
next:
- bch2_trans_cond_resched(&trans);
bch2_btree_iter_next_node(&iter);
}
if (ret == -EINTR)