LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags[0], 28, 30)
-#define BCH_TIER_MAX 4U
-
#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
struct btree_path *path, unsigned flags)
{
- int ret;
-
if (path->uptodate < BTREE_ITER_NEED_RELOCK)
return 0;
- ret = bch2_trans_cond_resched(trans) ?:
+ return bch2_trans_cond_resched(trans) ?:
btree_path_traverse_one(trans, path, flags, _RET_IP_);
- if (unlikely(ret) && hweight64(trans->paths_allocated) == 1) {
- ret = __btree_path_traverse_all(trans, ret, _RET_IP_);
- BUG_ON(ret == -EINTR);
- }
-
- return ret;
}
static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
if (!btree_path_node(path, path->level))
goto out;
- ret = bch2_trans_cond_resched(trans);
- if (ret)
- goto err;
-
btree_node_unlock(path, path->level);
path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
path->level++;
}
}
+void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
+void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
+ unsigned, struct bpos, unsigned);
+void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
+ enum btree_id, struct bpos,
+ unsigned, unsigned, unsigned);
+void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
+
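+/*
+ * Hint that this iterator's btree_path no longer needs to be preserved, e.g.
+ * for reuse across a transaction restart:
+ */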
+static inline void set_btree_iter_dontneed(struct btree_iter *iter)
+{
+ iter->path->preserve = false;
+}
+
+void *bch2_trans_kmalloc(struct btree_trans *, size_t);
+void bch2_trans_begin(struct btree_trans *);
+
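+/*
+ * Peek at the btree node at the iterator's position, restarting the
+ * transaction and retrying whenever the peek returns -EINTR:
+ */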
+static inline struct btree *
+__btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter *iter)
+{
+ struct btree *b;
+
+ while (b = bch2_btree_iter_peek_node(iter),
+ PTR_ERR_OR_ZERO(b) == -EINTR)
+ bch2_trans_begin(trans);
+
+ return b;
+}
+
#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
_locks_want, _depth, _flags, _b, _ret) \
for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \
- _start, _locks_want, _depth, _flags), \
- _b = bch2_btree_iter_peek_node(&(_iter)); \
+ _start, _locks_want, _depth, _flags); \
+ (_b) = __btree_iter_peek_node_and_restart((_trans), &(_iter)),\
!((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b); \
(_b) = bch2_btree_iter_next_node(&(_iter)))
__for_each_btree_node(_trans, _iter, _btree_id, _start, \
0, 0, _flags, _b, _ret)
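+/* Extract an errno from a bkey_s_c whose .k pointer may encode an error: */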
+static inline int bkey_err(struct bkey_s_c k)
+{
+ return PTR_ERR_OR_ZERO(k.k);
+}
+
static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
unsigned flags)
{
	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_peek_slot(iter)
		: bch2_btree_iter_peek(iter);
}
-static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
- unsigned flags)
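+/*
+ * Like __bch2_btree_iter_peek(), but restarts the transaction and retries the
+ * peek on -EINTR:
+ */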
+static inline struct bkey_s_c
+__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned flags)
{
- return flags & BTREE_ITER_SLOTS
- ? bch2_btree_iter_next_slot(iter)
- : bch2_btree_iter_next(iter);
-}
+ struct bkey_s_c k;
-static inline int bkey_err(struct bkey_s_c k)
-{
- return PTR_ERR_OR_ZERO(k.k);
+ while (k = __bch2_btree_iter_peek(iter, flags),
+ bkey_err(k) == -EINTR)
+ bch2_trans_begin(trans);
+
+ return k;
}
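+/*
+ * for_each_btree_key() handles transaction restarts transparently: on -EINTR
+ * the transaction is restarted and the lookup retried. The _norestart
+ * variants below instead return -EINTR through _ret, for iteration nested
+ * inside a larger transaction where the caller must observe the restart
+ * itself.
+ */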
#define for_each_btree_key(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)), \
- (_k) = __bch2_btree_iter_peek(&(_iter), _flags); \
+ (_start), (_flags)); \
+ (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
!((_ret) = bkey_err(_k)) && (_k).k; \
- (_k) = __bch2_btree_iter_next(&(_iter), _flags))
+ bch2_btree_iter_advance(&(_iter)))
-#define for_each_btree_key_continue(_iter, _flags, _k, _ret) \
- for ((_k) = __bch2_btree_iter_peek(&(_iter), _flags); \
+#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = __bch2_btree_iter_peek(&(_iter), _flags), \
!((_ret) = bkey_err(_k)) && (_k).k; \
- (_k) = __bch2_btree_iter_next(&(_iter), _flags))
+ bch2_btree_iter_advance(&(_iter)))
-/* new multiple iterator interface: */
-
-void bch2_dump_trans_paths_updates(struct btree_trans *);
+#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret) \
+ for (; \
+ (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
-void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
-void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
- unsigned, struct bpos, unsigned);
-void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
- enum btree_id, struct bpos,
- unsigned, unsigned, unsigned);
-void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
+#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
+ for (; \
+ (_k) = __bch2_btree_iter_peek(&(_iter), _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
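+/*
+ * Illustrative sketch (not from this patch) of a _norestart call site, where
+ * the caller handles -EINTR itself; 'retry' and the surrounding transaction
+ * setup are assumed context:
+ *
+ *	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
+ *				     start, 0, k, ret) {
+ *		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+ *			break;
+ *		...
+ *	}
+ *	bch2_trans_iter_exit(trans, &iter);
+ *	if (ret == -EINTR)
+ *		goto retry;
+ */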
-static inline void set_btree_iter_dontneed(struct btree_iter *iter)
-{
- iter->path->preserve = false;
-}
+/* new multiple iterator interface: */
-void *bch2_trans_kmalloc(struct btree_trans *, size_t);
-void bch2_trans_begin(struct btree_trans *);
+void bch2_dump_trans_paths_updates(struct btree_trans *);
void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
void bch2_trans_exit(struct btree_trans *);
pos.snapshot++;
- for_each_btree_key(trans, iter, btree_id, pos,
+ for_each_btree_key_norestart(trans, iter, btree_id, pos,
BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
if (bkey_cmp(k.k->p, pos))
break;
if (ret)
return ret;
- for_each_btree_key(trans, iter, BTREE_ID_dirents,
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_dirents,
SPOS(dir.inum, 0, snapshot), 0, k, ret) {
if (k.k->p.inode > dir.inum)
break;
if (ret)
goto err;
- for_each_btree_key(&trans, iter, BTREE_ID_dirents,
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_dirents,
SPOS(inum.inum, ctx->pos, snapshot), 0, k, ret) {
if (k.k->p.inode > inum.inum)
break;
struct btree_iter iter;
struct bkey_s_c r_k;
- for_each_btree_key(trans, iter,
+ for_each_btree_key_norestart(trans, iter,
BTREE_ID_reflink, POS(0, idx + offset),
BTREE_ITER_SLOTS, r_k, ret2) {
if (bkey_cmp(bkey_start_pos(r_k.k),
bch2_trans_copy_iter(&copy, iter);
- for_each_btree_key_continue(copy, 0, k, ret) {
+ for_each_btree_key_continue_norestart(copy, 0, k, ret) {
unsigned offset = 0;
if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
if (err)
goto err;
- for_each_btree_key(&trans, iter, BTREE_ID_extents,
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, err) {
if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
if (ret)
goto err;
- for_each_btree_key(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
if (ret)
goto err;
- for_each_btree_key(&trans, iter, BTREE_ID_extents,
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
break;
if (ret)
goto err;
- for_each_btree_key(&trans, iter, BTREE_ID_extents,
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
SPOS(inode->v.i_ino, offset >> 9, snapshot),
BTREE_ITER_SLOTS, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
bch2_trans_copy_iter(&iter, extent_iter);
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
+ for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, old, ret) {
s64 sectors = min(new->k.p.offset, old.k->p.offset) -
max(bkey_start_offset(&new->k),
bkey_start_offset(old.k));
if (orig->k.type == KEY_TYPE_inline_data)
bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
- for_each_btree_key(trans, reflink_iter, BTREE_ID_reflink,
+ for_each_btree_key_norestart(trans, reflink_iter, BTREE_ID_reflink,
POS(0, c->reflink_hint),
BTREE_ITER_INTENT|BTREE_ITER_SLOTS, k, ret) {
if (reflink_iter.pos.inode) {
struct bkey_s_c k;
int ret;
- for_each_btree_key_continue(*iter, 0, k, ret) {
+ for_each_btree_key_continue_norestart(*iter, 0, k, ret) {
if (bkey_cmp(iter->pos, end) >= 0)
break;
if (ret)
return ret;
- for_each_btree_key(trans, *iter, desc.btree_id,
+ for_each_btree_key_norestart(trans, *iter, desc.btree_id,
SPOS(inum.inum, desc.hash_key(info, key), snapshot),
BTREE_ITER_SLOTS|flags, k, ret) {
if (iter->pos.inode != inum.inum)
if (ret)
return ret;
- for_each_btree_key(trans, *iter, desc.btree_id,
+ for_each_btree_key_norestart(trans, *iter, desc.btree_id,
SPOS(inum.inum, desc.hash_key(info, key), snapshot),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (iter->pos.inode != inum.inum)
bch2_btree_iter_advance(&iter);
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) {
+ for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, k, ret) {
if (k.k->type != desc.key_type &&
k.k->type != KEY_TYPE_hash_whiteout)
break;
if (ret)
return ret;
- for_each_btree_key(trans, iter, desc.btree_id,
+ for_each_btree_key_norestart(trans, iter, desc.btree_id,
SPOS(inum.inum,
desc.hash_bkey(info, bkey_i_to_s_c(insert)),
snapshot),
if (ret)
goto err;
- for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_xattrs,
SPOS(inum, offset, snapshot), 0, k, ret) {
BUG_ON(k.k->p.inode < inum);