goto out;
}
- if (bkey_cmp(*discard_pos_done, iter.pos) &&
+ if (!bkey_eq(*discard_pos_done, iter.pos) &&
ca->mi.discard && !c->opts.nochanges) {
/*
* This works without any other locks because this is the only
BTREE_ITER_SLOTS, k, ret) {
struct bch_alloc_v4 a;
- if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+ if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
break;
if (ca->new_fs_bucket_idx &&
return bkey_cmp_left_packed(b, l, &r);
}
+static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
+{
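+	/* XOR the fields and OR together: the result is zero iff l == r */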
+ return !((l.inode ^ r.inode) |
+ (l.offset ^ r.offset) |
+ (l.snapshot ^ r.snapshot));
+}
+
+static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
+{
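+	/* lexicographic order: inode, then offset, then snapshot */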
+ return l.inode != r.inode ? l.inode < r.inode :
+ l.offset != r.offset ? l.offset < r.offset :
+ l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
+}
+
+static __always_inline bool bpos_le(struct bpos l, struct bpos r)
+{
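+	/* like bpos_lt(), but true when the positions are equal */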
+ return l.inode != r.inode ? l.inode < r.inode :
+ l.offset != r.offset ? l.offset < r.offset :
+ l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
+}
+
+static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
+{
+ return bpos_lt(r, l);
+}
+
+static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
+{
+ return bpos_le(r, l);
+}
+
static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
{
	return cmp_int(l.inode, r.inode) ?:
	       cmp_int(l.offset, r.offset) ?:
	       cmp_int(l.snapshot, r.snapshot);
}
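For reference, each new predicate is a drop-in replacement for the corresponding three-way bpos_cmp() idiom. A minimal sanity-check sketch (hypothetical helper, not part of the patch) of the intended equivalences:

	static void bpos_helpers_check(struct bpos l, struct bpos r)
	{
		BUG_ON(bpos_eq(l, r) != !bpos_cmp(l, r));
		BUG_ON(bpos_lt(l, r) != (bpos_cmp(l, r) < 0));
		BUG_ON(bpos_le(l, r) != (bpos_cmp(l, r) <= 0));
		BUG_ON(bpos_gt(l, r) != (bpos_cmp(l, r) > 0));
		BUG_ON(bpos_ge(l, r) != (bpos_cmp(l, r) >= 0));
	}

The dedicated predicates let the compiler emit short, mostly branchless sequences instead of materializing a -1/0/1 result and then testing it.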
+static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
+{
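+	/* bkey_* position comparisons ignore the snapshot field */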
+ return !((l.inode ^ r.inode) |
+ (l.offset ^ r.offset));
+}
+
+static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode
+ ? l.inode < r.inode
+ : l.offset < r.offset;
+}
+
+static __always_inline bool bkey_le(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode
+ ? l.inode < r.inode
+ : l.offset <= r.offset;
+}
+
+static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
+{
+ return bkey_lt(r, l);
+}
+
+static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
+{
+ return bkey_le(r, l);
+}
+
static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
{
	return cmp_int(l.inode, r.inode) ?:
	       cmp_int(l.offset, r.offset);
}
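Note the bkey_* variants compare only inode and offset: positions that differ only in snapshot are equal under bkey_eq() but not under bpos_eq(). An illustrative pair (hypothetical values, using the SPOS() position constructor):

	struct bpos a = SPOS(1, 8, 1);
	struct bpos b = SPOS(1, 8, 2);
	/* bkey_eq(a, b) is true; bpos_eq(a, b) is false */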
static inline struct bpos bpos_min(struct bpos l, struct bpos r)
{
- return bpos_cmp(l, r) < 0 ? l : r;
+ return bpos_lt(l, r) ? l : r;
}
static inline struct bpos bpos_max(struct bpos l, struct bpos r)
{
- return bpos_cmp(l, r) > 0 ? l : r;
+ return bpos_gt(l, r) ? l : r;
}
void bch2_bpos_swab(struct bpos *);
}
if (type != BKEY_TYPE_btree &&
- !bkey_cmp(k.k->p, POS_MAX)) {
+ bkey_eq(k.k->p, POS_MAX)) {
prt_printf(err, "key at POS_MAX");
return -EINVAL;
}
int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
struct printbuf *err)
{
- if (bpos_cmp(k.k->p, b->data->min_key) < 0) {
+ if (bpos_lt(k.k->p, b->data->min_key)) {
prt_printf(err, "key before start of btree node");
return -EINVAL;
}
- if (bpos_cmp(k.k->p, b->data->max_key) > 0) {
+ if (bpos_gt(k.k->p, b->data->max_key)) {
prt_printf(err, "key past end of btree node");
return -EINVAL;
}
void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
{
- if (!bpos_cmp(pos, POS_MIN))
+ if (bpos_eq(pos, POS_MIN))
prt_printf(out, "POS_MIN");
- else if (!bpos_cmp(pos, POS_MAX))
+ else if (bpos_eq(pos, POS_MAX))
prt_printf(out, "POS_MAX");
- else if (!bpos_cmp(pos, SPOS_MAX))
+ else if (bpos_eq(pos, SPOS_MAX))
prt_printf(out, "SPOS_MAX");
else {
if (pos.inode == U64_MAX)
{
return l->type == r->type &&
!bversion_cmp(l->version, r->version) &&
- !bpos_cmp(l->p, bkey_start_pos(r));
+ bpos_eq(l->p, bkey_start_pos(r));
}
bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
n = bkey_unpack_key(b, _n);
- if (bpos_cmp(n.p, k.k->p) < 0) {
+ if (bpos_lt(n.p, k.k->p)) {
printk(KERN_ERR "Key skipped backwards\n");
continue;
}
- if (!bkey_deleted(k.k) &&
- !bpos_cmp(n.p, k.k->p))
+ if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
printk(KERN_ERR "Duplicate keys\n");
}
goto start;
while (1) {
if (rw_aux_to_bkey(b, t, j) == k) {
- BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k,
+ BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
bkey_unpack_pos(b, k)));
start:
if (++j == t->size)
while (l + 1 != r) {
unsigned m = (l + r) >> 1;
- if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
+ if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
l = m;
else
r = m;
struct bkey_packed *k[MAX_BSETS];
unsigned i;
- EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0);
- EBUG_ON(bpos_cmp(*search, b->data->max_key) > 0);
+ EBUG_ON(bpos_lt(*search, b->data->min_key));
+ EBUG_ON(bpos_gt(*search, b->data->max_key));
bset_aux_tree_verify(b);
memset(iter, 0, sizeof(*iter));
{
if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
b->c.level != BTREE_NODE_LEVEL(b->data) ||
- bpos_cmp(b->data->max_key, b->key.k.p) ||
+ !bpos_eq(b->data->max_key, b->key.k.p) ||
(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- bpos_cmp(b->data->min_key,
+ !bpos_eq(b->data->min_key,
bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
btree_bad_header(c, b);
}
if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
- if (bpos_cmp(expected_start, bp->v.min_key)) {
+ if (!bpos_eq(expected_start, bp->v.min_key)) {
bch2_topology_error(c);
if (bkey_deleted(&prev->k->k)) {
}
}
- if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
+ if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
bch2_topology_error(c);
printbuf_reset(&buf1);
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
if (prev &&
- bpos_cmp(expected_start, cur->data->min_key) > 0 &&
+ bpos_gt(expected_start, cur->data->min_key) &&
BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
/* cur overwrites prev: */
- if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key,
- cur->data->min_key) >= 0, c,
+ if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
+ cur->data->min_key), c,
"btree node overwritten by next node at btree %s level %u:\n"
" node %s\n"
" next %s",
goto out;
}
- if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
+ if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
bpos_predecessor(cur->data->min_key)), c,
"btree node with incorrect max_key at btree %s level %u:\n"
" node %s\n"
} else {
/* prev overwrites cur: */
- if (mustfix_fsck_err_on(bpos_cmp(expected_start,
- cur->data->max_key) >= 0, c,
+ if (mustfix_fsck_err_on(bpos_ge(expected_start,
+ cur->data->max_key), c,
"btree node overwritten by prev node at btree %s level %u:\n"
" prev %s\n"
" node %s",
goto out;
}
- if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
+ if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
"btree node with incorrect min_key at btree %s level %u:\n"
" prev %s\n"
" node %s",
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
bch2_bpos_to_text(&buf2, b->key.k.p);
- if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
+ if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
"btree node with incorrect max_key at btree %s level %u:\n"
" %s\n"
" expected %s",
bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
- BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+ BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+ BUG_ON(bpos_gt(k.k->p, b->data->max_key));
bch2_btree_and_journal_iter_advance(&iter);
bch2_bkey_buf_reassemble(&cur_k, c, k);
bkey_init(&prev.k->k);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
- BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+ BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+ BUG_ON(bpos_gt(k.k->p, b->data->max_key));
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
false, &k, true);
six_lock_read(&b->c.lock, NULL, NULL);
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->min_key);
- if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
+ if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
"btree root with incorrect min_key: %s", buf.buf)) {
bch_err(c, "repair unimplemented");
ret = -BCH_ERR_fsck_repair_unimplemented;
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->max_key);
- if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
+ if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
"btree root with incorrect max_key: %s", buf.buf)) {
bch_err(c, "repair unimplemented");
ret = -BCH_ERR_fsck_repair_unimplemented;
enum bch_data_type type;
int ret;
- if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+ if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
return 1;
bch2_alloc_to_v4(k, &old);
struct bkey l = bkey_unpack_key(b, p);
struct bkey r = bkey_unpack_key(b, k);
- BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
+ BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
}
#endif
}
bch2_btree_build_aux_trees(b);
for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
- BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
- BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+ BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+ BUG_ON(bpos_gt(k.k->p, b->data->max_key));
}
}
b->data->max_key = b->key.k.p;
}
- btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
+ btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
"incorrect min_key: got %s should be %s",
(printbuf_reset(&buf1),
bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
}
- btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
+ btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
BTREE_ERR_MUST_RETRY, c, ca, b, i,
"incorrect max key %s",
(printbuf_reset(&buf1),
{
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_node_type_is_extents(btree_id) &&
- bpos_cmp(bn->min_key, POS_MIN) &&
+ !bpos_eq(bn->min_key, POS_MIN) &&
write)
bn->min_key = bpos_nosnap_predecessor(bn->min_key);
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_node_type_is_extents(btree_id) &&
- bpos_cmp(bn->min_key, POS_MIN) &&
+ !bpos_eq(bn->min_key, POS_MIN) &&
!write)
bn->min_key = bpos_nosnap_successor(bn->min_key);
}
struct bpos pos = iter->pos;
if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
- bkey_cmp(pos, POS_MAX))
+ !bkey_eq(pos, POS_MAX))
pos = bkey_successor(iter, pos);
return pos;
}
static inline bool btree_path_pos_before_node(struct btree_path *path,
struct btree *b)
{
- return bpos_cmp(path->pos, b->data->min_key) < 0;
+ return bpos_lt(path->pos, b->data->min_key);
}
static inline bool btree_path_pos_after_node(struct btree_path *path,
struct btree *b)
{
- return bpos_cmp(b->key.k.p, path->pos) < 0;
+ return bpos_gt(path->pos, b->key.k.p);
}
static inline bool btree_path_pos_in_node(struct btree_path *path,
ck = (void *) path->l[0].b;
BUG_ON(ck->key.btree_id != path->btree_id ||
- bkey_cmp(ck->key.pos, path->pos));
+ !bkey_eq(ck->key.pos, path->pos));
if (!locked)
btree_node_unlock(trans, path, 0);
BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
iter->pos.snapshot != iter->snapshot);
- BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
- bkey_cmp(iter->pos, iter->k.p) > 0);
+ BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
+ bkey_gt(iter->pos, iter->k.p));
}
static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
if (ret)
goto out;
- if (!bkey_cmp(prev.k->p, k.k->p) &&
+ if (bkey_eq(prev.k->p, k.k->p) &&
bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
prev.k->p.snapshot) > 0) {
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
continue;
if (!key_cache) {
- if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
- bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
+ if (bkey_ge(pos, path->l[0].b->data->min_key) &&
+ bkey_le(pos, path->l[0].b->key.k.p))
return;
} else {
- if (!bkey_cmp(pos, path->pos))
+ if (bkey_eq(pos, path->pos))
return;
}
}
_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
- EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
+ EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
- if (!k.k || bpos_cmp(path->pos, k.k->p))
+ if (!k.k || !bpos_eq(path->pos, k.k->p))
goto hole;
} else {
struct bkey_cached *ck = (void *) path->l[0].b;
EBUG_ON(ck &&
(path->btree_id != ck->key.btree_id ||
- bkey_cmp(path->pos, ck->key.pos)));
+ !bkey_eq(path->pos, ck->key.pos)));
EBUG_ON(!ck || !ck->valid);
*u = ck->k->k;
if (!b)
goto out;
- BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
+ BUG_ON(bpos_lt(b->key.k.p, iter->pos));
bkey_init(&iter->k);
iter->k.p = iter->pos = b->key.k.p;
b = btree_path_node(path, path->level + 1);
- if (!bpos_cmp(iter->pos, b->key.k.p)) {
+ if (bpos_eq(iter->pos, b->key.k.p)) {
__btree_path_set_level_up(trans, path, path->level++);
} else {
/*
{
if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
struct bpos pos = iter->k.p;
- bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
- ? bpos_cmp(pos, SPOS_MAX)
- : bkey_cmp(pos, SPOS_MAX)) != 0;
+ bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_eq(pos, SPOS_MAX)
+ : bkey_eq(pos, SPOS_MAX));
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
pos = bkey_successor(iter, pos);
inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
- bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
- ? bpos_cmp(pos, POS_MIN)
- : bkey_cmp(pos, POS_MIN)) != 0;
+ bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_eq(pos, POS_MIN)
+ : bkey_eq(pos, POS_MIN));
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
pos = bkey_predecessor(iter, pos);
continue;
if (i->btree_id > iter->btree_id)
break;
- if (bpos_cmp(i->k->k.p, iter->path->pos) < 0)
+ if (bpos_lt(i->k->k.p, iter->path->pos))
continue;
if (i->key_cache_already_flushed)
continue;
- if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
+ if (!ret || bpos_lt(i->k->k.p, ret->k.p))
ret = i->k;
}
{
struct bkey_i *k;
- if (bpos_cmp(iter->path->pos, iter->journal_pos) < 0)
+ if (bpos_lt(iter->path->pos, iter->journal_pos))
iter->journal_idx = 0;
k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
next_update = btree_trans_peek_updates(iter);
if (next_update &&
- bpos_cmp(next_update->k.p,
- k.k ? k.k->p : l->b->key.k.p) <= 0) {
+ bpos_le(next_update->k.p,
+ k.k ? k.k->p : l->b->key.k.p)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
}
* whiteout, with a real key at the same position, since
* in the btree deleted keys sort before non deleted.
*/
- search_key = bpos_cmp(search_key, k.k->p)
+ search_key = !bpos_eq(search_key, k.k->p)
? k.k->p
: bpos_successor(k.k->p);
continue;
if (likely(k.k)) {
break;
- } else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) {
+ } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
/* Advance to next leaf node: */
search_key = bpos_successor(l->b->key.k.p);
} else {
*/
if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
iter_pos = k.k->p;
- else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ else if (bkey_gt(bkey_start_pos(k.k), iter->pos))
iter_pos = bkey_start_pos(k.k);
else
iter_pos = iter->pos;
- if (bkey_cmp(iter_pos, end) > 0) {
+ if (bkey_gt(iter_pos, end)) {
bch2_btree_iter_set_pos(iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
if (iter->update_path &&
- bkey_cmp(iter->update_path->pos, k.k->p)) {
+ !bkey_eq(iter->update_path->pos, k.k->p)) {
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->update_path = NULL;
/* Check if we should go up to the parent node: */
if (!k.k ||
(iter->advanced &&
- !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
+ bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
iter->pos = path_l(iter->path)->b->key.k.p;
btree_path_set_level_up(trans, iter->path);
iter->advanced = false;
if (iter->path->level != iter->min_depth &&
(iter->advanced ||
!k.k ||
- bpos_cmp(iter->pos, k.k->p))) {
+ !bpos_eq(iter->pos, k.k->p))) {
btree_path_set_level_down(trans, iter->path, iter->min_depth);
iter->pos = bpos_successor(iter->pos);
iter->advanced = false;
if (iter->path->level == iter->min_depth &&
iter->advanced &&
k.k &&
- !bpos_cmp(iter->pos, k.k->p)) {
+ bpos_eq(iter->pos, k.k->p)) {
iter->pos = bpos_successor(iter->pos);
iter->advanced = false;
continue;
if (iter->advanced &&
iter->path->level == iter->min_depth &&
- bpos_cmp(k.k->p, iter->pos))
+ !bpos_eq(k.k->p, iter->pos))
iter->advanced = false;
BUG_ON(iter->advanced);
&iter->path->l[0], &iter->k);
if (!k.k ||
((iter->flags & BTREE_ITER_IS_EXTENTS)
- ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
- : bpos_cmp(k.k->p, search_key) > 0))
+ ? bpos_ge(bkey_start_pos(k.k), search_key)
+ : bpos_gt(k.k->p, search_key)))
k = btree_path_level_prev(trans, iter->path,
&iter->path->l[0], &iter->k);
* longer at the same _key_ (not pos), return
* that candidate
*/
- if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
+ if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
bch2_path_put_nokeep(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
iter->path = saved_path;
}
break;
- } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
+ } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
/* Advance to previous leaf node: */
search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
} else {
}
}
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+ EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
/* Extents can straddle iter->pos: */
- if (bkey_cmp(k.k->p, iter->pos) < 0)
+ if (bkey_lt(k.k->p, iter->pos))
iter->pos = k.k->p;
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
struct bkey_i *next_update;
if ((next_update = btree_trans_peek_updates(iter)) &&
- !bpos_cmp(next_update->k.p, iter->pos)) {
+ bpos_eq(next_update->k.p, iter->pos)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
goto out;
next = k.k ? bkey_start_pos(k.k) : POS_MAX;
- if (bkey_cmp(iter->pos, next) < 0) {
+ if (bkey_lt(iter->pos, next)) {
bkey_init(&iter->k);
iter->k.p = iter->pos;
if (!(flags & BTREE_ITER_SLOTS))
return bch2_btree_iter_peek_upto(iter, end);
- if (bkey_cmp(iter->pos, end) > 0)
+ if (bkey_gt(iter->pos, end))
return bkey_s_c_null;
return bch2_btree_iter_peek_slot(iter);
const struct bkey_cached *ck = obj;
const struct bkey_cached_key *key = arg->key;
- return cmp_int(ck->key.btree_id, key->btree_id) ?:
- bpos_cmp(ck->key.pos, key->pos);
+ return ck->key.btree_id != key->btree_id ||
+ !bpos_eq(ck->key.pos, key->pos);
}
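(The rhashtable compare callback only has to distinguish match from mismatch, never produce an ordering, so the boolean form suffices: any nonzero return is treated as "no match".)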
static const struct rhashtable_params bch2_btree_key_cache_params = {
BUG_ON(ret);
if (ck->key.btree_id != path->btree_id ||
- bpos_cmp(ck->key.pos, path->pos)) {
+ !bpos_eq(ck->key.pos, path->pos)) {
six_unlock_type(&ck->c.lock, lock_want);
goto retry;
}
return ret;
if (ck->key.btree_id != path->btree_id ||
- bpos_cmp(ck->key.pos, path->pos)) {
+ !bpos_eq(ck->key.pos, path->pos)) {
six_unlock_type(&ck->c.lock, lock_want);
goto retry;
}
break;
bp = bkey_s_c_to_btree_ptr_v2(k);
- if (bpos_cmp(next_node, bp.v->min_key)) {
+ if (!bpos_eq(next_node, bp.v->min_key)) {
bch2_dump_btree_node(c, b);
bch2_bpos_to_text(&buf1, next_node);
bch2_bpos_to_text(&buf2, bp.v->min_key);
bch2_btree_node_iter_advance(&iter, b);
if (bch2_btree_node_iter_end(&iter)) {
- if (bpos_cmp(k.k->p, b->key.k.p)) {
+ if (!bpos_eq(k.k->p, b->key.k.p)) {
bch2_dump_btree_node(c, b);
bch2_bpos_to_text(&buf1, b->key.k.p);
bch2_bpos_to_text(&buf2, k.k->p);
while (!bch2_keylist_empty(keys)) {
struct bkey_i *k = bch2_keylist_front(keys);
- if (bpos_cmp(k->k.p, b->key.k.p) > 0)
+ if (bpos_gt(k->k.p, b->key.k.p))
break;
bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
struct keylist *keys)
{
if (!bch2_keylist_empty(keys) &&
- bpos_cmp(bch2_keylist_front(keys)->k.p,
- b->data->max_key) <= 0) {
+ bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
struct btree_node_iter node_iter;
bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
b = path->l[level].b;
- if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
- (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
+ if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
+ (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
b->sib_u64s[sib] = U16_MAX;
return 0;
}
next = m;
}
- if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
+ if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
bch2_bpos_to_text(&buf1, prev->data->max_key);
iter2.flags & BTREE_ITER_INTENT);
BUG_ON(iter2.path->level != b->c.level);
- BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
+ BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
btree_path_set_level_up(trans, iter2.path);
EBUG_ON(btree_node_just_written(b));
EBUG_ON(bset_written(b, btree_bset_last(b)));
EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
- EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
- EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
+ EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
+ EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
EBUG_ON(insert->k.u64s >
bch_btree_keys_u64s_remaining(trans->c, b));
static inline void btree_insert_entry_checks(struct btree_trans *trans,
struct btree_insert_entry *i)
{
- BUG_ON(bpos_cmp(i->k->k.p, i->path->pos));
+ BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
BUG_ON(i->cached != i->path->cached);
BUG_ON(i->level != i->path->level);
BUG_ON(i->btree_id != i->path->btree_id);
if (!k.k)
break;
- if (bkey_cmp(pos, k.k->p))
+ if (!bkey_eq(pos, k.k->p))
break;
if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
if (!k.k)
goto out;
- if (!bkey_cmp(k.k->p, bkey_start_pos(&insert->k))) {
+ if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
ret = extent_front_merge(trans, &iter, k, &insert, flags);
if (ret)
goto next;
}
- while (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) > 0) {
- bool front_split = bkey_cmp(bkey_start_pos(k.k), start) < 0;
- bool back_split = bkey_cmp(k.k->p, insert->k.p) > 0;
+ while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
+ bool front_split = bkey_lt(bkey_start_pos(k.k), start);
+ bool back_split = bkey_gt(k.k->p, insert->k.p);
/*
* If we're going to be splitting a compressed extent, note it
goto err;
}
- if (bkey_cmp(k.k->p, insert->k.p) <= 0) {
+ if (bkey_le(k.k->p, insert->k.p)) {
update = bch2_trans_kmalloc(trans, sizeof(*update));
if ((ret = PTR_ERR_OR_ZERO(update)))
goto err;
for_each_btree_key_norestart(trans, iter, btree_id, pos,
BTREE_ITER_ALL_SNAPSHOTS|
BTREE_ITER_NOPRESERVE, k, ret) {
- if (bkey_cmp(k.k->p, pos))
+ if (!bkey_eq(k.k->p, pos))
break;
if (bch2_snapshot_is_ancestor(trans->c, snapshot,
EBUG_ON(!path->should_be_locked);
EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
- EBUG_ON(bpos_cmp(k->k.p, path->pos));
+ EBUG_ON(!bpos_eq(k->k.p, path->pos));
n = (struct btree_insert_entry) {
.flags = flags,
btree_id_cached(trans->c, path->btree_id)) {
if (!iter->key_cache_path ||
!iter->key_cache_path->should_be_locked ||
- bpos_cmp(iter->key_cache_path->pos, k->k.p)) {
+ !bpos_eq(iter->key_cache_path->pos, k->k.p)) {
if (!iter->key_cache_path)
iter->key_cache_path =
bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
if (ret)
goto err;
- if (bkey_cmp(iter.pos, end) >= 0)
+ if (bkey_ge(iter.pos, end))
break;
bkey_init(&delete.k);
darray_init(&s);
- if (!bkey_cmp(old_pos, new_pos))
+ if (bkey_eq(old_pos, new_pos))
return 0;
if (!snapshot_t(c, old_pos.snapshot)->children[0])
if (ret)
break;
- if (bkey_cmp(old_pos, k.k->p))
+ if (!bkey_eq(old_pos, k.k->p))
break;
if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
if (ret)
break;
next:
- while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+ while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
bch2_keylist_pop_front(keys);
if (bch2_keylist_empty(keys))
goto out;
if (ret)
return ret;
- if (!bpos_cmp(SPOS_MAX, i->from))
+ if (bpos_eq(SPOS_MAX, i->from))
return i->ret;
bch2_trans_init(&trans, i->c, 0, 0);
break;
bch2_btree_node_to_text(&i->buf, i->c, b);
- i->from = bpos_cmp(SPOS_MAX, b->key.k.p)
+ i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
? bpos_successor(b->key.k.p)
: b->key.k.p;
}
if (ret)
break;
- if (bpos_cmp(l->b->key.k.p, i->prev_node) > 0) {
+ if (bpos_gt(l->b->key.k.p, i->prev_node)) {
bch2_btree_node_to_text(&i->buf, i->c, l->b);
i->prev_node = l->b->key.k.p;
}
bkey_init(&new_src->k);
new_src->k.p = src_iter.pos;
- if (bkey_cmp(dst_pos, src_iter.pos) <= 0 &&
- bkey_cmp(src_iter.pos, dst_iter.pos) < 0) {
+ if (bkey_le(dst_pos, src_iter.pos) &&
+ bkey_lt(src_iter.pos, dst_iter.pos)) {
/*
* We have a hash collision for the new dst key,
* and new_src - the key we're deleting - is between
{
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
- if (!bkey_cmp(k.k->p, POS_MIN)) {
+ if (bkey_eq(k.k->p, POS_MIN)) {
prt_printf(err, "stripe at POS_MIN");
return -EINVAL;
}
for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
- if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
+ if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
if (start_pos.offset) {
start_pos = min_pos;
bch2_btree_iter_set_pos(&iter, start_pos);
for_each_btree_key_norestart(trans, iter,
BTREE_ID_reflink, POS(0, idx + offset),
BTREE_ITER_SLOTS, r_k, ret2) {
- if (bkey_cmp(bkey_start_pos(r_k.k),
- POS(0, idx + sectors)) >= 0)
+ if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
break;
/* extent_update_to_keys(), for the reflink_v update */
for_each_btree_key_continue_norestart(copy, 0, k, ret) {
unsigned offset = 0;
- if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
+ if (bkey_ge(bkey_start_pos(k.k), *end))
break;
- if (bkey_cmp(bkey_start_pos(&insert->k),
- bkey_start_pos(k.k)) > 0)
+ if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
offset = bkey_start_offset(&insert->k) -
bkey_start_offset(k.k);
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_node_type_is_extents(btree_id) &&
- bkey_cmp(bp.v->min_key, POS_MIN))
+ !bkey_eq(bp.v->min_key, POS_MIN))
bp.v->min_key = write
? bpos_nosnap_predecessor(bp.v->min_key)
: bpos_nosnap_successor(bp.v->min_key);
int val_u64s_delta;
u64 sub;
- if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
+ if (bkey_le(where, bkey_start_pos(k.k)))
return 0;
- EBUG_ON(bkey_cmp(where, k.k->p) > 0);
+ EBUG_ON(bkey_gt(where, k.k->p));
sub = where.offset - bkey_start_offset(k.k);
int val_u64s_delta;
u64 len = 0;
- if (bkey_cmp(where, k.k->p) >= 0)
+ if (bkey_ge(where, k.k->p))
return 0;
- EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
+ EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
len = where.offset - bkey_start_offset(k.k);
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
const struct bkey *m)
{
- int cmp1 = bkey_cmp(k->p, m->p) < 0;
- int cmp2 = bkey_cmp(bkey_start_pos(k),
- bkey_start_pos(m)) > 0;
+ int cmp1 = bkey_lt(k->p, m->p);
+ int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));
return (cmp1 << 1) + cmp2;
}
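The two booleans pack into a two-bit code indexing the four overlap cases. A sketch of the mapping, assuming the usual enum bch_extent_overlap values (ALL = 0, BACK = 1, FRONT = 2, MIDDLE = 3):

	/* cmp1: k ends before m ends; cmp2: k starts after m starts */
	/* 0b00 -> BCH_EXTENT_OVERLAP_ALL    (k covers all of m)      */
	/* 0b01 -> BCH_EXTENT_OVERLAP_BACK   (k overlaps the back)    */
	/* 0b10 -> BCH_EXTENT_OVERLAP_FRONT  (k overlaps the front)   */
	/* 0b11 -> BCH_EXTENT_OVERLAP_MIDDLE (k sits inside m)        */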
for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, err) {
- if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
+ if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
break;
if (k.k->p.snapshot != snapshot ||
goto err;
for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
- if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+ if (bkey_ge(bkey_start_pos(k.k), end))
break;
if (bkey_extent_is_data(k.k)) {
break;
if (insert &&
- bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
+ bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
break;
reassemble:
bch2_bkey_buf_reassemble(©, c, k);
if (insert &&
- bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
+ bkey_lt(bkey_start_pos(k.k), move_pos))
bch2_cut_front(move_pos, copy.k);
copy.k->k.p.offset += shift >> 9;
if (ret)
continue;
- if (bkey_cmp(atomic_end, copy.k->k.p)) {
+ if (!bkey_eq(atomic_end, copy.k->k.p)) {
if (insert) {
move_pos = atomic_end;
move_pos.offset -= shift >> 9;
POS(inode->v.i_ino, start_sector),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
+ while (!ret && bkey_lt(iter.pos, end_pos)) {
s64 i_sectors_delta = 0;
struct disk_reservation disk_res = { 0 };
struct quota_res quota_res = { 0 };
if (ret)
goto err;
- if (!k.k || bkey_cmp(k.k->p, POS(0, inode_nr))) {
+ if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
ret = -ENOENT;
goto err;
}
};
int ret = 0;
- if (bkey_cmp(s->pos, pos))
+ if (!bkey_eq(s->pos, pos))
s->ids.nr = 0;
pos.snapshot = n.equiv;
for_each_btree_key_norestart(trans, iter, desc.btree_id,
POS(hash_k.k->p.inode, hash),
BTREE_ITER_SLOTS, k, ret) {
- if (!bkey_cmp(k.k->p, hash_k.k->p))
+ if (bkey_eq(k.k->p, hash_k.k->p))
break;
if (fsck_err_on(k.k->type == desc.key_type &&
BUG_ON(!iter->path->should_be_locked);
#if 0
- if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
+ if (bkey_gt(prev.k->k.p, bkey_start_pos(k.k))) {
char buf1[200];
char buf2[200];
again:
while ((k = bch2_btree_iter_peek(iter)).k &&
!(ret = bkey_err(k)) &&
- bkey_cmp(k.k->p, POS(0, max)) < 0) {
+ bkey_lt(k.k->p, POS(0, max))) {
if (pos < iter->pos.offset)
goto found_slot;
(!new_compressed && bch2_bkey_sectors_compressed(old))))
*usage_increasing = true;
- if (bkey_cmp(old.k->p, new->k.p) >= 0) {
+ if (bkey_ge(old.k->p, new->k.p)) {
/*
* Check if there's already data above where we're
* going to be writing to - this means we're definitely
bch2_btree_iter_set_snapshot(iter, snapshot);
k = bch2_btree_iter_peek(iter);
- if (bkey_cmp(iter->pos, end_pos) >= 0) {
+ if (bkey_ge(iter->pos, end_pos)) {
bch2_btree_iter_set_pos(iter, end_pos);
break;
}
if (ec_ob)
bch2_ob_add_backpointer(c, ec_ob, &sk.k->k);
- if (bkey_cmp(iter.pos, k->k.p) >= 0)
+ if (bkey_ge(iter.pos, k->k.p))
bch2_keylist_pop_front(&op->insert_keys);
else
bch2_cut_front(iter.pos, k);
EBUG_ON(op->cl.parent);
BUG_ON(!op->nr_replicas);
BUG_ON(!op->write_point.v);
- BUG_ON(!bkey_cmp(op->pos, POS_MAX));
+ BUG_ON(bkey_eq(op->pos, POS_MAX));
op->start_time = local_clock();
bch2_keylist_init(&op->insert_keys, op->inline_keys);
struct bkey_i *where;
for_each_keylist_key(l, where)
- if (bkey_cmp(insert->k.p, where->k.p) < 0)
+ if (bpos_lt(insert->k.p, where->k.p))
break;
memmove_u64s_up((u64 *) where + insert->k.u64s,
for_each_keylist_key(l, k)
BUG_ON(bkey_next(k) != l->top &&
- bpos_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
+ bpos_ge(k->k.p, bkey_next(k)->k.p));
}
#endif
if (ret)
goto err;
- if (!k.k || bkey_cmp(k.k->p, pos)) {
+ if (!k.k || !bkey_eq(k.k->p, pos)) {
ret = -ENOENT;
goto err;
}
if (ret)
break;
- if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+ if (bkey_ge(bkey_start_pos(k.k), end))
break;
ctxt->stats->pos = iter.pos;
(k = idx_to_key(keys, *idx),
k->btree_id == btree_id &&
k->level == level &&
- bpos_cmp(k->k->k.p, end_pos) <= 0)) {
- if (bpos_cmp(k->k->k.p, pos) >= 0 &&
- !k->overwritten)
+ bpos_le(k->k->k.p, end_pos))) {
+ if (bpos_ge(k->k->k.p, pos) && !k->overwritten)
return k->k;
(*idx)++;
if (idx < keys->size &&
keys->d[idx].btree_id == btree &&
keys->d[idx].level == level &&
- !bpos_cmp(keys->d[idx].k->k.p, pos))
+ bpos_eq(keys->d[idx].k->k.p, pos))
keys->d[idx].overwritten = true;
}
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
- if (!bpos_cmp(iter->pos, SPOS_MAX))
+ if (bpos_eq(iter->pos, SPOS_MAX))
iter->at_end = true;
else
iter->pos = bpos_successor(iter->pos);
return bkey_s_c_null;
while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
- bpos_cmp(btree_k.k->p, iter->pos) < 0)
+ bpos_lt(btree_k.k->p, iter->pos))
bch2_journal_iter_advance_btree(iter);
while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
- bpos_cmp(journal_k.k->p, iter->pos) < 0)
+ bpos_lt(journal_k.k->p, iter->pos))
bch2_journal_iter_advance(&iter->journal);
ret = journal_k.k &&
- (!btree_k.k || bpos_cmp(journal_k.k->p, btree_k.k->p) <= 0)
+ (!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
? journal_k
: btree_k;
- if (ret.k && iter->b && bpos_cmp(ret.k->p, iter->b->data->max_key) > 0)
+ if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
ret = bkey_s_c_null;
if (ret.k) {
while (src + 1 < keys->d + keys->nr &&
src[0].btree_id == src[1].btree_id &&
src[0].level == src[1].level &&
- !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
+ bpos_eq(src[0].k->k.p, src[1].k->k.p))
src++;
*dst++ = *src++;
int ret;
for_each_btree_key_continue_norestart(*iter, 0, k, ret) {
- if (bkey_cmp(iter->pos, end) >= 0)
+ if (bkey_ge(iter->pos, end))
break;
if (bkey_extent_is_data(k.k))
return k;
}
- if (bkey_cmp(iter->pos, end) >= 0)
+ if (bkey_ge(iter->pos, end))
bch2_btree_iter_set_pos(iter, end);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
while ((ret == 0 ||
bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
- bkey_cmp(dst_iter.pos, dst_end) < 0) {
+ bkey_lt(dst_iter.pos, dst_end)) {
struct disk_reservation disk_res = { 0 };
bch2_trans_begin(&trans);
if (ret)
continue;
- if (bkey_cmp(src_want, src_iter.pos) < 0) {
+ if (bkey_lt(src_want, src_iter.pos)) {
ret = bch2_fpunch_at(&trans, &dst_iter, dst_inum,
min(dst_end.offset,
dst_iter.pos.offset +
bch2_trans_iter_exit(&trans, &dst_iter);
bch2_trans_iter_exit(&trans, &src_iter);
- BUG_ON(!ret && bkey_cmp(dst_iter.pos, dst_end));
- BUG_ON(bkey_cmp(dst_iter.pos, dst_end) > 0);
+ BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
+ BUG_ON(bkey_gt(dst_iter.pos, dst_end));
dst_done = dst_iter.pos.offset - dst_start.offset;
new_i_size = min(dst_iter.pos.offset << 9, new_i_size);
struct bkey_s_c_snapshot s;
u32 i, id;
- if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0 ||
- bkey_cmp(k.k->p, POS(0, 1)) < 0) {
+ if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+ bkey_lt(k.k->p, POS(0, 1))) {
prt_printf(err, "bad pos");
return -EINVAL;
}
struct bch_fs *c = trans->c;
u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
- if (bkey_cmp(k.k->p, *last_pos))
+ if (!bkey_eq(k.k->p, *last_pos))
equiv_seen->nr = 0;
*last_pos = k.k->p;
int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
{
- if (bkey_cmp(k.k->p, SUBVOL_POS_MIN) < 0 ||
- bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0) {
+ if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
+ bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
prt_printf(err, "invalid pos");
return -EINVAL;
}
for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
- if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
+ if (bkey_gt(k.k->p, SUBVOL_POS_MAX))
break;
/*