bcachefs: New bpos_cmp(), bkey_cmp() replacements
author Kent Overstreet <kent.overstreet@linux.dev>
Thu, 24 Nov 2022 08:12:22 +0000 (03:12 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:47 +0000 (17:09 -0400)
This patch introduces:
 - bpos_eq()
 - bpos_lt()
 - bpos_le()
 - bpos_gt()
 - bpos_ge()

and equivalent replacements for bkey_cmp().

Looking at the generated assembly, these could probably be improved
further, but we already see a significant code size improvement with
this patch.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
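
For reference, a minimal standalone sketch of the change: the equality and
less-than helpers as they appear in the bkey.h hunk below, plus the call-site
conversions the rest of the patch applies. The struct bpos here is a
simplified stand-in for illustration only, not the real bcachefs definition.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for struct bpos, for illustration only. */
struct bpos {
        uint64_t inode;
        uint64_t offset;
        uint32_t snapshot;
};

/* Branchless equality: XOR each field, OR the results, test for zero. */
static inline bool bpos_eq(struct bpos l, struct bpos r)
{
        return !((l.inode    ^ r.inode) |
                 (l.offset   ^ r.offset) |
                 (l.snapshot ^ r.snapshot));
}

/* Lexicographic less-than over (inode, offset, snapshot). */
static inline bool bpos_lt(struct bpos l, struct bpos r)
{
        return l.inode    != r.inode    ? l.inode    < r.inode :
               l.offset   != r.offset   ? l.offset   < r.offset :
               l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
}

/*
 * Call-site conversions applied throughout the patch:
 *
 *   !bpos_cmp(a, b)       ->  bpos_eq(a, b)
 *   bpos_cmp(a, b)        ->  !bpos_eq(a, b)    (when used as a condition)
 *   bpos_cmp(a, b) <  0   ->  bpos_lt(a, b)
 *   bpos_cmp(a, b) <= 0   ->  bpos_le(a, b)
 *   bpos_cmp(a, b) >  0   ->  bpos_gt(a, b)
 *   bpos_cmp(a, b) >= 0   ->  bpos_ge(a, b)
 *
 * bkey_eq()/bkey_lt()/bkey_le()/bkey_gt()/bkey_ge() replace bkey_cmp() the
 * same way; they compare only inode and offset and ignore snapshot.
 */
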
31 files changed:
fs/bcachefs/alloc_background.c
fs/bcachefs/alloc_foreground.c
fs/bcachefs/bkey.h
fs/bcachefs/bkey_methods.c
fs/bcachefs/bkey_methods.h
fs/bcachefs/bset.c
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_io.h
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/data_update.c
fs/bcachefs/debug.c
fs/bcachefs/dirent.c
fs/bcachefs/ec.c
fs/bcachefs/extent_update.c
fs/bcachefs/extents.c
fs/bcachefs/extents.h
fs/bcachefs/fs-io.c
fs/bcachefs/fsck.c
fs/bcachefs/inode.c
fs/bcachefs/io.c
fs/bcachefs/keylist.c
fs/bcachefs/move.c
fs/bcachefs/recovery.c
fs/bcachefs/reflink.c
fs/bcachefs/subvolume.c

fs/bcachefs/alloc_background.c
index ffcfb9f1916e9d75ccf3cfbe43a70ef5ecfecb94..a0b9fa30260afaeca97c233a5f9cf4460c551f8e 100644
@@ -982,7 +982,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
                goto out;
        }
 
-       if (bkey_cmp(*discard_pos_done, iter.pos) &&
+       if (!bkey_eq(*discard_pos_done, iter.pos) &&
            ca->mi.discard && !c->opts.nochanges) {
                /*
                 * This works without any other locks because this is the only
fs/bcachefs/alloc_foreground.c
index 9a4a62211755eadfc77c8635c3530719a0fe2672..dd47eeb1efc580d4a6fc7acc9cf05df35bb43aac 100644
@@ -399,7 +399,7 @@ again:
                           BTREE_ITER_SLOTS, k, ret) {
                struct bch_alloc_v4 a;
 
-               if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+               if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
                        break;
 
                if (ca->new_fs_bucket_idx &&
fs/bcachefs/bkey.h
index df8189476016c8a299ffcc9ee785bf40b8fb98d5..dc2b91bc67f3710c2182f5fb98f2f0126a6b3223 100644
@@ -144,6 +144,37 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b,
        return bkey_cmp_left_packed(b, l, &r);
 }
 
+static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
+{
+       return  !((l.inode      ^ r.inode) |
+                 (l.offset     ^ r.offset) |
+                 (l.snapshot   ^ r.snapshot));
+}
+
+static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
+{
+       return  l.inode != r.inode ? l.inode < r.inode :
+               l.offset != r.offset ? l.offset < r.offset :
+               l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
+}
+
+static __always_inline bool bpos_le(struct bpos l, struct bpos r)
+{
+       return  l.inode != r.inode ? l.inode < r.inode :
+               l.offset != r.offset ? l.offset < r.offset :
+               l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
+}
+
+static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
+{
+       return bpos_lt(r, l);
+}
+
+static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
+{
+       return bpos_le(r, l);
+}
+
 static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
 {
        return  cmp_int(l.inode,    r.inode) ?:
@@ -151,6 +182,36 @@ static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
                cmp_int(l.snapshot, r.snapshot);
 }
 
+static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
+{
+       return  !((l.inode      ^ r.inode) |
+                 (l.offset     ^ r.offset));
+}
+
+static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
+{
+       return  l.inode != r.inode
+               ? l.inode < r.inode
+               : l.offset < r.offset;
+}
+
+static __always_inline bool bkey_le(struct bpos l, struct bpos r)
+{
+       return  l.inode != r.inode
+               ? l.inode < r.inode
+               : l.offset <= r.offset;
+}
+
+static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
+{
+       return bkey_lt(r, l);
+}
+
+static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
+{
+       return bkey_le(r, l);
+}
+
 static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
 {
        return  cmp_int(l.inode,    r.inode) ?:
@@ -159,12 +220,12 @@ static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
 
 static inline struct bpos bpos_min(struct bpos l, struct bpos r)
 {
-       return bpos_cmp(l, r) < 0 ? l : r;
+       return bpos_lt(l, r) ? l : r;
 }
 
 static inline struct bpos bpos_max(struct bpos l, struct bpos r)
 {
-       return bpos_cmp(l, r) > 0 ? l : r;
+       return bpos_gt(l, r) ? l : r;
 }
 
 void bch2_bpos_swab(struct bpos *);
fs/bcachefs/bkey_methods.c
index 141754db5fa1c0d22344972c4ff6bb7107abc3ce..7fcd6ca40b9311dc32aa71bb00e0a0142ed9e052 100644
@@ -245,7 +245,7 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
        }
 
        if (type != BKEY_TYPE_btree &&
-           !bkey_cmp(k.k->p, POS_MAX)) {
+           bkey_eq(k.k->p, POS_MAX)) {
                prt_printf(err, "key at POS_MAX");
                return -EINVAL;
        }
@@ -264,12 +264,12 @@ int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
 int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
                            struct printbuf *err)
 {
-       if (bpos_cmp(k.k->p, b->data->min_key) < 0) {
+       if (bpos_lt(k.k->p, b->data->min_key)) {
                prt_printf(err, "key before start of btree node");
                return -EINVAL;
        }
 
-       if (bpos_cmp(k.k->p, b->data->max_key) > 0) {
+       if (bpos_gt(k.k->p, b->data->max_key)) {
                prt_printf(err, "key past end of btree node");
                return -EINVAL;
        }
@@ -279,11 +279,11 @@ int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
 
 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
 {
-       if (!bpos_cmp(pos, POS_MIN))
+       if (bpos_eq(pos, POS_MIN))
                prt_printf(out, "POS_MIN");
-       else if (!bpos_cmp(pos, POS_MAX))
+       else if (bpos_eq(pos, POS_MAX))
                prt_printf(out, "POS_MAX");
-       else if (!bpos_cmp(pos, SPOS_MAX))
+       else if (bpos_eq(pos, SPOS_MAX))
                prt_printf(out, "SPOS_MAX");
        else {
                if (pos.inode == U64_MAX)
fs/bcachefs/bkey_methods.h
index 0c74ba335e6406ba46fedad7cd56016ed2723f3b..7c907b7fd0d74da5708fae1250bc9589233d79e0 100644
@@ -60,7 +60,7 @@ static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct b
 {
        return l->type == r->type &&
                !bversion_cmp(l->version, r->version) &&
-               !bpos_cmp(l->p, bkey_start_pos(r));
+               bpos_eq(l->p, bkey_start_pos(r));
 }
 
 bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
fs/bcachefs/bset.c
index aa8508efca00ce8237353b82bbddc62e3591908c..50a1c9d8ebabae865a8fed0bc2681201b31af2f7 100644
@@ -83,13 +83,12 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 
                n = bkey_unpack_key(b, _n);
 
-               if (bpos_cmp(n.p, k.k->p) < 0) {
+               if (bpos_lt(n.p, k.k->p)) {
                        printk(KERN_ERR "Key skipped backwards\n");
                        continue;
                }
 
-               if (!bkey_deleted(k.k) &&
-                   !bpos_cmp(n.p, k.k->p))
+               if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
                        printk(KERN_ERR "Duplicate keys\n");
        }
 
@@ -530,7 +529,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
        goto start;
        while (1) {
                if (rw_aux_to_bkey(b, t, j) == k) {
-                       BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k,
+                       BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
                                        bkey_unpack_pos(b, k)));
 start:
                        if (++j == t->size)
@@ -1065,7 +1064,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,
        while (l + 1 != r) {
                unsigned m = (l + r) >> 1;
 
-               if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
+               if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
                        l = m;
                else
                        r = m;
@@ -1318,8 +1317,8 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
        struct bkey_packed *k[MAX_BSETS];
        unsigned i;
 
-       EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0);
-       EBUG_ON(bpos_cmp(*search, b->data->max_key) > 0);
+       EBUG_ON(bpos_lt(*search, b->data->min_key));
+       EBUG_ON(bpos_gt(*search, b->data->max_key));
        bset_aux_tree_verify(b);
 
        memset(iter, 0, sizeof(*iter));
fs/bcachefs/btree_cache.c
index 90be4c7325f7f5d8dd607b2ec4683f74afe06bb8..0ac8636edba2a9e73d24828bcef8bc53c5dc5936 100644
@@ -793,9 +793,9 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b)
 {
        if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
            b->c.level != BTREE_NODE_LEVEL(b->data) ||
-           bpos_cmp(b->data->max_key, b->key.k.p) ||
+           !bpos_eq(b->data->max_key, b->key.k.p) ||
            (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
-            bpos_cmp(b->data->min_key,
+            !bpos_eq(b->data->min_key,
                      bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
                btree_bad_header(c, b);
 }
fs/bcachefs/btree_gc.c
index 3395fa56c724f605cfa982d7577ce20806391361..f5b46f382340aba2052c19e840192e0fd8bc9a64 100644
@@ -76,7 +76,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
        if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
                struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
 
-               if (bpos_cmp(expected_start, bp->v.min_key)) {
+               if (!bpos_eq(expected_start, bp->v.min_key)) {
                        bch2_topology_error(c);
 
                        if (bkey_deleted(&prev->k->k)) {
@@ -106,7 +106,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
                }
        }
 
-       if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
+       if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
                bch2_topology_error(c);
 
                printbuf_reset(&buf1);
@@ -274,12 +274,12 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
        bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
 
        if (prev &&
-           bpos_cmp(expected_start, cur->data->min_key) > 0 &&
+           bpos_gt(expected_start, cur->data->min_key) &&
            BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
                /* cur overwrites prev: */
 
-               if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key,
-                                                cur->data->min_key) >= 0, c,
+               if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
+                                               cur->data->min_key), c,
                                "btree node overwritten by next node at btree %s level %u:\n"
                                "  node %s\n"
                                "  next %s",
@@ -289,7 +289,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
                        goto out;
                }
 
-               if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
+               if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
                                                 bpos_predecessor(cur->data->min_key)), c,
                                "btree node with incorrect max_key at btree %s level %u:\n"
                                "  node %s\n"
@@ -301,8 +301,8 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
        } else {
                /* prev overwrites cur: */
 
-               if (mustfix_fsck_err_on(bpos_cmp(expected_start,
-                                                cur->data->max_key) >= 0, c,
+               if (mustfix_fsck_err_on(bpos_ge(expected_start,
+                                               cur->data->max_key), c,
                                "btree node overwritten by prev node at btree %s level %u:\n"
                                "  prev %s\n"
                                "  node %s",
@@ -312,7 +312,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
                        goto out;
                }
 
-               if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
+               if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
                                "btree node with incorrect min_key at btree %s level %u:\n"
                                "  prev %s\n"
                                "  node %s",
@@ -336,7 +336,7 @@ static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
        bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
        bch2_bpos_to_text(&buf2, b->key.k.p);
 
-       if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
+       if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
                        "btree node with incorrect max_key at btree %s level %u:\n"
                        "  %s\n"
                        "  expected %s",
@@ -374,8 +374,8 @@ again:
        bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
 
        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-               BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-               BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+               BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+               BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 
                bch2_btree_and_journal_iter_advance(&iter);
                bch2_bkey_buf_reassemble(&cur_k, c, k);
@@ -912,8 +912,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
        bkey_init(&prev.k->k);
 
        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-               BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-               BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+               BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+               BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 
                ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
                                       false, &k, true);
@@ -1018,7 +1018,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
        six_lock_read(&b->c.lock, NULL, NULL);
        printbuf_reset(&buf);
        bch2_bpos_to_text(&buf, b->data->min_key);
-       if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
+       if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
                        "btree root with incorrect min_key: %s", buf.buf)) {
                bch_err(c, "repair unimplemented");
                ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1027,7 +1027,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
 
        printbuf_reset(&buf);
        bch2_bpos_to_text(&buf, b->data->max_key);
-       if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
+       if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
                        "btree root with incorrect max_key: %s", buf.buf)) {
                bch_err(c, "repair unimplemented");
                ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1341,7 +1341,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
        enum bch_data_type type;
        int ret;
 
-       if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+       if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
                return 1;
 
        bch2_alloc_to_v4(k, &old);
fs/bcachefs/btree_io.c
index 8dbe930c1eb249d2e9717a1bcad390aedc135335..9dedac2c78858a1b54436160a4f6bc9b7efdfef7 100644
@@ -77,7 +77,7 @@ static void verify_no_dups(struct btree *b,
                struct bkey l = bkey_unpack_key(b, p);
                struct bkey r = bkey_unpack_key(b, k);
 
-               BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
+               BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
        }
 #endif
 }
@@ -645,8 +645,8 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
        bch2_btree_build_aux_trees(b);
 
        for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
-               BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-               BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+               BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+               BUG_ON(bpos_gt(k.k->p, b->data->max_key));
        }
 }
 
@@ -744,7 +744,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                                b->data->max_key = b->key.k.p;
                        }
 
-                       btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
+                       btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
                                     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
                                     "incorrect min_key: got %s should be %s",
                                     (printbuf_reset(&buf1),
@@ -753,7 +753,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                                      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
                }
 
-               btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
+               btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
                             BTREE_ERR_MUST_RETRY, c, ca, b, i,
                             "incorrect max key %s",
                             (printbuf_reset(&buf1),
fs/bcachefs/btree_io.h
index 4b1810ad7d912dd15f28498256b061ed57a86ece..a720dd74139b5766d4c439abc739e8e3ec13112e 100644
@@ -201,7 +201,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 {
        if (version < bcachefs_metadata_version_inode_btree_change &&
            btree_node_type_is_extents(btree_id) &&
-           bpos_cmp(bn->min_key, POS_MIN) &&
+           !bpos_eq(bn->min_key, POS_MIN) &&
            write)
                bn->min_key = bpos_nosnap_predecessor(bn->min_key);
 
@@ -218,7 +218,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 
        if (version < bcachefs_metadata_version_inode_btree_change &&
            btree_node_type_is_extents(btree_id) &&
-           bpos_cmp(bn->min_key, POS_MIN) &&
+           !bpos_eq(bn->min_key, POS_MIN) &&
            !write)
                bn->min_key = bpos_nosnap_successor(bn->min_key);
 }
fs/bcachefs/btree_iter.c
index 238ba10d34e42167e9f4abf0a0e260b66704120e..8a18b55cab261cc74daf1c1f632d2fc5412b5aca 100644
@@ -93,7 +93,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
        struct bpos pos = iter->pos;
 
        if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
-           bkey_cmp(pos, POS_MAX))
+           !bkey_eq(pos, POS_MAX))
                pos = bkey_successor(iter, pos);
        return pos;
 }
@@ -101,13 +101,13 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 static inline bool btree_path_pos_before_node(struct btree_path *path,
                                              struct btree *b)
 {
-       return bpos_cmp(path->pos, b->data->min_key) < 0;
+       return bpos_lt(path->pos, b->data->min_key);
 }
 
 static inline bool btree_path_pos_after_node(struct btree_path *path,
                                             struct btree *b)
 {
-       return bpos_cmp(b->key.k.p, path->pos) < 0;
+       return bpos_gt(path->pos, b->key.k.p);
 }
 
 static inline bool btree_path_pos_in_node(struct btree_path *path,
@@ -133,7 +133,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
 
        ck = (void *) path->l[0].b;
        BUG_ON(ck->key.btree_id != path->btree_id ||
-              bkey_cmp(ck->key.pos, path->pos));
+              !bkey_eq(ck->key.pos, path->pos));
 
        if (!locked)
                btree_node_unlock(trans, path, 0);
@@ -278,8 +278,8 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
        BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
               iter->pos.snapshot != iter->snapshot);
 
-       BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
-              bkey_cmp(iter->pos, iter->k.p) > 0);
+       BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
+              bkey_gt(iter->pos, iter->k.p));
 }
 
 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
@@ -313,7 +313,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
        if (ret)
                goto out;
 
-       if (!bkey_cmp(prev.k->p, k.k->p) &&
+       if (bkey_eq(prev.k->p, k.k->p) &&
            bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
                                      prev.k->p.snapshot) > 0) {
                struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
@@ -355,11 +355,11 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
                        continue;
 
                if (!key_cache) {
-                       if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
-                           bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
+                       if (bkey_ge(pos, path->l[0].b->data->min_key) &&
+                           bkey_le(pos, path->l[0].b->key.k.p))
                                return;
                } else {
-                       if (!bkey_cmp(pos, path->pos))
+                       if (bkey_eq(pos, path->pos))
                                return;
                }
        }
@@ -1571,16 +1571,16 @@ struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *
                _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
                k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
 
-               EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
+               EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
 
-               if (!k.k || bpos_cmp(path->pos, k.k->p))
+               if (!k.k || !bpos_eq(path->pos, k.k->p))
                        goto hole;
        } else {
                struct bkey_cached *ck = (void *) path->l[0].b;
 
                EBUG_ON(ck &&
                        (path->btree_id != ck->key.btree_id ||
-                        bkey_cmp(path->pos, ck->key.pos)));
+                        !bkey_eq(path->pos, ck->key.pos)));
                EBUG_ON(!ck || !ck->valid);
 
                *u = ck->k->k;
@@ -1638,7 +1638,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
        if (!b)
                goto out;
 
-       BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
+       BUG_ON(bpos_lt(b->key.k.p, iter->pos));
 
        bkey_init(&iter->k);
        iter->k.p = iter->pos = b->key.k.p;
@@ -1689,7 +1689,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 
        b = btree_path_node(path, path->level + 1);
 
-       if (!bpos_cmp(iter->pos, b->key.k.p)) {
+       if (bpos_eq(iter->pos, b->key.k.p)) {
                __btree_path_set_level_up(trans, path, path->level++);
        } else {
                /*
@@ -1732,9 +1732,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 {
        if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
                struct bpos pos = iter->k.p;
-               bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
-                           ? bpos_cmp(pos, SPOS_MAX)
-                           : bkey_cmp(pos, SPOS_MAX)) != 0;
+               bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+                            ? bpos_eq(pos, SPOS_MAX)
+                            : bkey_eq(pos, SPOS_MAX));
 
                if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
                        pos = bkey_successor(iter, pos);
@@ -1752,9 +1752,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 {
        struct bpos pos = bkey_start_pos(&iter->k);
-       bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
-                   ? bpos_cmp(pos, POS_MIN)
-                   : bkey_cmp(pos, POS_MIN)) != 0;
+       bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+                    ? bpos_eq(pos, POS_MIN)
+                    : bkey_eq(pos, POS_MIN));
 
        if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
                pos = bkey_predecessor(iter, pos);
@@ -1773,11 +1773,11 @@ struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
                        continue;
                if (i->btree_id > iter->btree_id)
                        break;
-               if (bpos_cmp(i->k->k.p, iter->path->pos) < 0)
+               if (bpos_lt(i->k->k.p, iter->path->pos))
                        continue;
                if (i->key_cache_already_flushed)
                        continue;
-               if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
+               if (!ret || bpos_lt(i->k->k.p, ret->k.p))
                        ret = i->k;
        }
 
@@ -1797,7 +1797,7 @@ struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
 {
        struct bkey_i *k;
 
-       if (bpos_cmp(iter->path->pos, iter->journal_pos) < 0)
+       if (bpos_lt(iter->path->pos, iter->journal_pos))
                iter->journal_idx = 0;
 
        k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
@@ -1936,8 +1936,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
                next_update = btree_trans_peek_updates(iter);
 
                if (next_update &&
-                   bpos_cmp(next_update->k.p,
-                            k.k ? k.k->p : l->b->key.k.p) <= 0) {
+                   bpos_le(next_update->k.p,
+                           k.k ? k.k->p : l->b->key.k.p)) {
                        iter->k = next_update->k;
                        k = bkey_i_to_s_c(next_update);
                }
@@ -1950,7 +1950,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
                         * whiteout, with a real key at the same position, since
                         * in the btree deleted keys sort before non deleted.
                         */
-                       search_key = bpos_cmp(search_key, k.k->p)
+                       search_key = !bpos_eq(search_key, k.k->p)
                                ? k.k->p
                                : bpos_successor(k.k->p);
                        continue;
@@ -1958,7 +1958,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 
                if (likely(k.k)) {
                        break;
-               } else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) {
+               } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
                        /* Advance to next leaf node: */
                        search_key = bpos_successor(l->b->key.k.p);
                } else {
@@ -2008,19 +2008,19 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
                 */
                if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
                        iter_pos = k.k->p;
-               else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+               else if (bkey_gt(bkey_start_pos(k.k), iter->pos))
                        iter_pos = bkey_start_pos(k.k);
                else
                        iter_pos = iter->pos;
 
-               if (bkey_cmp(iter_pos, end) > 0) {
+               if (bkey_gt(iter_pos, end)) {
                        bch2_btree_iter_set_pos(iter, end);
                        k = bkey_s_c_null;
                        goto out_no_locked;
                }
 
                if (iter->update_path &&
-                   bkey_cmp(iter->update_path->pos, k.k->p)) {
+                   !bkey_eq(iter->update_path->pos, k.k->p)) {
                        bch2_path_put_nokeep(trans, iter->update_path,
                                             iter->flags & BTREE_ITER_INTENT);
                        iter->update_path = NULL;
@@ -2143,7 +2143,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
                /* Check if we should go up to the parent node: */
                if (!k.k ||
                    (iter->advanced &&
-                    !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
+                    bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
                        iter->pos = path_l(iter->path)->b->key.k.p;
                        btree_path_set_level_up(trans, iter->path);
                        iter->advanced = false;
@@ -2159,7 +2159,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
                if (iter->path->level != iter->min_depth &&
                    (iter->advanced ||
                     !k.k ||
-                    bpos_cmp(iter->pos, k.k->p))) {
+                    !bpos_eq(iter->pos, k.k->p))) {
                        btree_path_set_level_down(trans, iter->path, iter->min_depth);
                        iter->pos = bpos_successor(iter->pos);
                        iter->advanced = false;
@@ -2170,7 +2170,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
                if (iter->path->level == iter->min_depth &&
                    iter->advanced &&
                    k.k &&
-                   !bpos_cmp(iter->pos, k.k->p)) {
+                   bpos_eq(iter->pos, k.k->p)) {
                        iter->pos = bpos_successor(iter->pos);
                        iter->advanced = false;
                        continue;
@@ -2178,7 +2178,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 
                if (iter->advanced &&
                    iter->path->level == iter->min_depth &&
-                   bpos_cmp(k.k->p, iter->pos))
+                   !bpos_eq(k.k->p, iter->pos))
                        iter->advanced = false;
 
                BUG_ON(iter->advanced);
@@ -2248,8 +2248,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
                                          &iter->path->l[0], &iter->k);
                if (!k.k ||
                    ((iter->flags & BTREE_ITER_IS_EXTENTS)
-                    ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
-                    : bpos_cmp(k.k->p, search_key) > 0))
+                    ? bpos_ge(bkey_start_pos(k.k), search_key)
+                    : bpos_gt(k.k->p, search_key)))
                        k = btree_path_level_prev(trans, iter->path,
                                                  &iter->path->l[0], &iter->k);
 
@@ -2263,7 +2263,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
                                 * longer at the same _key_ (not pos), return
                                 * that candidate
                                 */
-                               if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
+                               if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
                                        bch2_path_put_nokeep(trans, iter->path,
                                                      iter->flags & BTREE_ITER_INTENT);
                                        iter->path = saved_path;
@@ -2298,7 +2298,7 @@ got_key:
                        }
 
                        break;
-               } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
+               } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
                        /* Advance to previous leaf node: */
                        search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
                } else {
@@ -2309,10 +2309,10 @@ got_key:
                }
        }
 
-       EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+       EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
 
        /* Extents can straddle iter->pos: */
-       if (bkey_cmp(k.k->p, iter->pos) < 0)
+       if (bkey_lt(k.k->p, iter->pos))
                iter->pos = k.k->p;
 
        if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
@@ -2377,7 +2377,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
                struct bkey_i *next_update;
 
                if ((next_update = btree_trans_peek_updates(iter)) &&
-                   !bpos_cmp(next_update->k.p, iter->pos)) {
+                   bpos_eq(next_update->k.p, iter->pos)) {
                        iter->k = next_update->k;
                        k = bkey_i_to_s_c(next_update);
                        goto out;
@@ -2433,7 +2433,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
                next = k.k ? bkey_start_pos(k.k) : POS_MAX;
 
-               if (bkey_cmp(iter->pos, next) < 0) {
+               if (bkey_lt(iter->pos, next)) {
                        bkey_init(&iter->k);
                        iter->k.p = iter->pos;
 
fs/bcachefs/btree_iter.h
index 3f46c60b748baec1c47846c84ab482ac2c1a1a5b..3cf0b453a4c02e6fec363f6ca507fd6952e88bc0 100644
@@ -478,7 +478,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
        if (!(flags & BTREE_ITER_SLOTS))
                return bch2_btree_iter_peek_upto(iter, end);
 
-       if (bkey_cmp(iter->pos, end) > 0)
+       if (bkey_gt(iter->pos, end))
                return bkey_s_c_null;
 
        return bch2_btree_iter_peek_slot(iter);
fs/bcachefs/btree_key_cache.c
index 1ac91221cc95b6a2383e5590c8644839959c972b..0ae5d893a4f79805f09aab50a905f875cf9d2285 100644
@@ -27,8 +27,8 @@ static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
        const struct bkey_cached *ck = obj;
        const struct bkey_cached_key *key = arg->key;
 
-       return cmp_int(ck->key.btree_id, key->btree_id) ?:
-               bpos_cmp(ck->key.pos, key->pos);
+       return ck->key.btree_id != key->btree_id ||
+               !bpos_eq(ck->key.pos, key->pos);
 }
 
 static const struct rhashtable_params bch2_btree_key_cache_params = {
@@ -476,7 +476,7 @@ retry:
                BUG_ON(ret);
 
                if (ck->key.btree_id != path->btree_id ||
-                   bpos_cmp(ck->key.pos, path->pos)) {
+                   !bpos_eq(ck->key.pos, path->pos)) {
                        six_unlock_type(&ck->c.lock, lock_want);
                        goto retry;
                }
@@ -550,7 +550,7 @@ retry:
                        return ret;
 
                if (ck->key.btree_id != path->btree_id ||
-                   bpos_cmp(ck->key.pos, path->pos)) {
+                   !bpos_eq(ck->key.pos, path->pos)) {
                        six_unlock_type(&ck->c.lock, lock_want);
                        goto retry;
                }
fs/bcachefs/btree_update_interior.c
index 03e016758af3b2516ac4509af1f34b2e9ef9e83f..e184b857c4c49ea868177ff578ca6750371d4e9f 100644
@@ -71,7 +71,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
                        break;
                bp = bkey_s_c_to_btree_ptr_v2(k);
 
-               if (bpos_cmp(next_node, bp.v->min_key)) {
+               if (!bpos_eq(next_node, bp.v->min_key)) {
                        bch2_dump_btree_node(c, b);
                        bch2_bpos_to_text(&buf1, next_node);
                        bch2_bpos_to_text(&buf2, bp.v->min_key);
@@ -81,7 +81,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
                bch2_btree_node_iter_advance(&iter, b);
 
                if (bch2_btree_node_iter_end(&iter)) {
-                       if (bpos_cmp(k.k->p, b->key.k.p)) {
+                       if (!bpos_eq(k.k->p, b->key.k.p)) {
                                bch2_dump_btree_node(c, b);
                                bch2_bpos_to_text(&buf1, b->key.k.p);
                                bch2_bpos_to_text(&buf2, k.k->p);
@@ -1328,7 +1328,7 @@ __bch2_btree_insert_keys_interior(struct btree_update *as,
        while (!bch2_keylist_empty(keys)) {
                struct bkey_i *k = bch2_keylist_front(keys);
 
-               if (bpos_cmp(k->k.p, b->key.k.p) > 0)
+               if (bpos_gt(k->k.p, b->key.k.p))
                        break;
 
                bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
@@ -1445,8 +1445,7 @@ static void btree_split_insert_keys(struct btree_update *as,
                                    struct keylist *keys)
 {
        if (!bch2_keylist_empty(keys) &&
-           bpos_cmp(bch2_keylist_front(keys)->k.p,
-                    b->data->max_key) <= 0) {
+           bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
                struct btree_node_iter node_iter;
 
                bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
@@ -1770,8 +1769,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 
        b = path->l[level].b;
 
-       if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
-           (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
+       if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
+           (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
                b->sib_u64s[sib] = U16_MAX;
                return 0;
        }
@@ -1804,7 +1803,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
                next = m;
        }
 
-       if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
+       if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
                struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
 
                bch2_bpos_to_text(&buf1, prev->data->max_key);
@@ -2097,7 +2096,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
                                iter2.flags & BTREE_ITER_INTENT);
 
                BUG_ON(iter2.path->level != b->c.level);
-               BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
+               BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
 
                btree_path_set_level_up(trans, iter2.path);
 
fs/bcachefs/btree_update_leaf.c
index 8db474c6146e4a4a01195a2378beaef08519ab1b..323f2942b11d7005a4e7bbce363f04177d49846b 100644
@@ -92,8 +92,8 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
        EBUG_ON(btree_node_just_written(b));
        EBUG_ON(bset_written(b, btree_bset_last(b)));
        EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
-       EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
-       EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
+       EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
+       EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
        EBUG_ON(insert->k.u64s >
                bch_btree_keys_u64s_remaining(trans->c, b));
 
@@ -257,7 +257,7 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
 static inline void btree_insert_entry_checks(struct btree_trans *trans,
                                             struct btree_insert_entry *i)
 {
-       BUG_ON(bpos_cmp(i->k->k.p, i->path->pos));
+       BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
        BUG_ON(i->cached        != i->path->cached);
        BUG_ON(i->level         != i->path->level);
        BUG_ON(i->btree_id      != i->path->btree_id);
@@ -1141,7 +1141,7 @@ static noinline int __check_pos_snapshot_overwritten(struct btree_trans *trans,
                if (!k.k)
                        break;
 
-               if (bkey_cmp(pos, k.k->p))
+               if (!bkey_eq(pos, k.k->p))
                        break;
 
                if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
@@ -1242,7 +1242,7 @@ int bch2_trans_update_extent(struct btree_trans *trans,
        if (!k.k)
                goto out;
 
-       if (!bkey_cmp(k.k->p, bkey_start_pos(&insert->k))) {
+       if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
                if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
                        ret = extent_front_merge(trans, &iter, k, &insert, flags);
                        if (ret)
@@ -1252,9 +1252,9 @@ int bch2_trans_update_extent(struct btree_trans *trans,
                goto next;
        }
 
-       while (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) > 0) {
-               bool front_split = bkey_cmp(bkey_start_pos(k.k), start) < 0;
-               bool back_split  = bkey_cmp(k.k->p, insert->k.p) > 0;
+       while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
+               bool front_split = bkey_lt(bkey_start_pos(k.k), start);
+               bool back_split  = bkey_gt(k.k->p, insert->k.p);
 
                /*
                 * If we're going to be splitting a compressed extent, note it
@@ -1313,7 +1313,7 @@ int bch2_trans_update_extent(struct btree_trans *trans,
                                goto err;
                }
 
-               if (bkey_cmp(k.k->p, insert->k.p) <= 0) {
+               if (bkey_le(k.k->p, insert->k.p)) {
                        update = bch2_trans_kmalloc(trans, sizeof(*update));
                        if ((ret = PTR_ERR_OR_ZERO(update)))
                                goto err;
@@ -1407,7 +1407,7 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
        for_each_btree_key_norestart(trans, iter, btree_id, pos,
                           BTREE_ITER_ALL_SNAPSHOTS|
                           BTREE_ITER_NOPRESERVE, k, ret) {
-               if (bkey_cmp(k.k->p, pos))
+               if (!bkey_eq(k.k->p, pos))
                        break;
 
                if (bch2_snapshot_is_ancestor(trans->c, snapshot,
@@ -1463,7 +1463,7 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
 
        EBUG_ON(!path->should_be_locked);
        EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
-       EBUG_ON(bpos_cmp(k->k.p, path->pos));
+       EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
        n = (struct btree_insert_entry) {
                .flags          = flags,
@@ -1573,7 +1573,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
            btree_id_cached(trans->c, path->btree_id)) {
                if (!iter->key_cache_path ||
                    !iter->key_cache_path->should_be_locked ||
-                   bpos_cmp(iter->key_cache_path->pos, k->k.p)) {
+                   !bpos_eq(iter->key_cache_path->pos, k->k.p)) {
                        if (!iter->key_cache_path)
                                iter->key_cache_path =
                                        bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
@@ -1682,7 +1682,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
                if (ret)
                        goto err;
 
-               if (bkey_cmp(iter.pos, end) >= 0)
+               if (bkey_ge(iter.pos, end))
                        break;
 
                bkey_init(&delete.k);
fs/bcachefs/data_update.c
index 9d1290ff179a934ef49bb652358b521e22f097c4..b4480852e935f6a9fd7aeda934f52297a6706849 100644
@@ -30,7 +30,7 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
 
        darray_init(&s);
 
-       if (!bkey_cmp(old_pos, new_pos))
+       if (bkey_eq(old_pos, new_pos))
                return 0;
 
        if (!snapshot_t(c, old_pos.snapshot)->children[0])
@@ -45,7 +45,7 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
                if (ret)
                        break;
 
-               if (bkey_cmp(old_pos, k.k->p))
+               if (!bkey_eq(old_pos, k.k->p))
                        break;
 
                if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
@@ -244,7 +244,7 @@ err:
                if (ret)
                        break;
 next:
-               while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+               while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
                        bch2_keylist_pop_front(keys);
                        if (bch2_keylist_empty(keys))
                                goto out;
fs/bcachefs/debug.c
index 16be8d3db2ad9e2daa64aa2412bc26c371302155..d3e769b1eb3e000089b8e6ff438acbb16e079eee 100644
@@ -306,7 +306,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
        if (ret)
                return ret;
 
-       if (!bpos_cmp(SPOS_MAX, i->from))
+       if (bpos_eq(SPOS_MAX, i->from))
                return i->ret;
 
        bch2_trans_init(&trans, i->c, 0, 0);
@@ -317,7 +317,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
                        break;
 
                bch2_btree_node_to_text(&i->buf, i->c, b);
-               i->from = bpos_cmp(SPOS_MAX, b->key.k.p)
+               i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
                        ? bpos_successor(b->key.k.p)
                        : b->key.k.p;
        }
@@ -368,7 +368,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
                if (ret)
                        break;
 
-               if (bpos_cmp(l->b->key.k.p, i->prev_node) > 0) {
+               if (bpos_gt(l->b->key.k.p, i->prev_node)) {
                        bch2_btree_node_to_text(&i->buf, i->c, l->b);
                        i->prev_node = l->b->key.k.p;
                }
fs/bcachefs/dirent.c
index 288f46b55876b72bf0ca004904a6d67452956a90..c2126f39369bed737b6225cb5bcd6f39ee7b51a9 100644
@@ -350,8 +350,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
                bkey_init(&new_src->k);
                new_src->k.p = src_iter.pos;
 
-               if (bkey_cmp(dst_pos, src_iter.pos) <= 0 &&
-                   bkey_cmp(src_iter.pos, dst_iter.pos) < 0) {
+               if (bkey_le(dst_pos, src_iter.pos) &&
+                   bkey_lt(src_iter.pos, dst_iter.pos)) {
                        /*
                         * We have a hash collision for the new dst key,
                         * and new_src - the key we're deleting - is between
fs/bcachefs/ec.c
index 2dcca5c7fcec4873a5c2918aa167be3383441c06..503a47b39ad14d6968f86db71bbf2a988c39a261 100644
@@ -107,7 +107,7 @@ int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
 {
        const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
 
-       if (!bkey_cmp(k.k->p, POS_MIN)) {
+       if (bkey_eq(k.k->p, POS_MIN)) {
                prt_printf(err, "stripe at POS_MIN");
                return -EINVAL;
        }
@@ -724,7 +724,7 @@ static int ec_stripe_bkey_insert(struct btree_trans *trans,
 
        for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
                           BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-               if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
+               if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
                        if (start_pos.offset) {
                                start_pos = min_pos;
                                bch2_btree_iter_set_pos(&iter, start_pos);
fs/bcachefs/extent_update.c
index 2fd5d9672a44287b42a10acfcdc81c5865830c07..21d6f88c7397cd96d27e3a978c83c46266d11922 100644
@@ -73,8 +73,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
                for_each_btree_key_norestart(trans, iter,
                                   BTREE_ID_reflink, POS(0, idx + offset),
                                   BTREE_ITER_SLOTS, r_k, ret2) {
-                       if (bkey_cmp(bkey_start_pos(r_k.k),
-                                    POS(0, idx + sectors)) >= 0)
+                       if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
                                break;
 
                        /* extent_update_to_keys(), for the reflink_v update */
@@ -132,11 +131,10 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
        for_each_btree_key_continue_norestart(copy, 0, k, ret) {
                unsigned offset = 0;
 
-               if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
+               if (bkey_ge(bkey_start_pos(k.k), *end))
                        break;
 
-               if (bkey_cmp(bkey_start_pos(&insert->k),
-                            bkey_start_pos(k.k)) > 0)
+               if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
                        offset = bkey_start_offset(&insert->k) -
                                bkey_start_offset(k.k);
 
fs/bcachefs/extents.c
index bb1b862bfa655c567b63a136260abd824f6d53c0..e3bc39bee197927a2f4363d45c44f17e2bc550ce 100644
@@ -227,7 +227,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
 
        if (version < bcachefs_metadata_version_inode_btree_change &&
            btree_node_type_is_extents(btree_id) &&
-           bkey_cmp(bp.v->min_key, POS_MIN))
+           !bkey_eq(bp.v->min_key, POS_MIN))
                bp.v->min_key = write
                        ? bpos_nosnap_predecessor(bp.v->min_key)
                        : bpos_nosnap_successor(bp.v->min_key);
@@ -1211,10 +1211,10 @@ int bch2_cut_front_s(struct bpos where, struct bkey_s k)
        int val_u64s_delta;
        u64 sub;
 
-       if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
+       if (bkey_le(where, bkey_start_pos(k.k)))
                return 0;
 
-       EBUG_ON(bkey_cmp(where, k.k->p) > 0);
+       EBUG_ON(bkey_gt(where, k.k->p));
 
        sub = where.offset - bkey_start_offset(k.k);
 
@@ -1291,10 +1291,10 @@ int bch2_cut_back_s(struct bpos where, struct bkey_s k)
        int val_u64s_delta;
        u64 len = 0;
 
-       if (bkey_cmp(where, k.k->p) >= 0)
+       if (bkey_ge(where, k.k->p))
                return 0;
 
-       EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
+       EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
 
        len = where.offset - bkey_start_offset(k.k);
 
fs/bcachefs/extents.h
index 224df17206cb4f77c3f40c5665de5aba51947aec..21dbdf96bd592314f753446f80e9a7ac71d49f30 100644
@@ -636,9 +636,8 @@ enum bch_extent_overlap {
 static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
                                                          const struct bkey *m)
 {
-       int cmp1 = bkey_cmp(k->p, m->p) < 0;
-       int cmp2 = bkey_cmp(bkey_start_pos(k),
-                           bkey_start_pos(m)) > 0;
+       int cmp1 = bkey_lt(k->p, m->p);
+       int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));
 
        return (cmp1 << 1) + cmp2;
 }
fs/bcachefs/fs-io.c
index 4dd5ebafe74259bc7985877a09e33afa88e92232..0bb8b39140ec10d0e125863feb17d78c05d55202 100644
@@ -2043,7 +2043,7 @@ retry:
        for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
                           SPOS(inum.inum, offset, snapshot),
                           BTREE_ITER_SLOTS, k, err) {
-               if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
+               if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
                        break;
 
                if (k.k->p.snapshot != snapshot ||
@@ -2532,7 +2532,7 @@ retry:
                goto err;
 
        for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
-               if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+               if (bkey_ge(bkey_start_pos(k.k), end))
                        break;
 
                if (bkey_extent_is_data(k.k)) {
@@ -2970,13 +2970,13 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
                        break;
 
                if (insert &&
-                   bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
+                   bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
                        break;
 reassemble:
                bch2_bkey_buf_reassemble(&copy, c, k);
 
                if (insert &&
-                   bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
+                   bkey_lt(bkey_start_pos(k.k), move_pos))
                        bch2_cut_front(move_pos, copy.k);
 
                copy.k->k.p.offset += shift >> 9;
@@ -2986,7 +2986,7 @@ reassemble:
                if (ret)
                        continue;
 
-               if (bkey_cmp(atomic_end, copy.k->k.p)) {
+               if (!bkey_eq(atomic_end, copy.k->k.p)) {
                        if (insert) {
                                move_pos = atomic_end;
                                move_pos.offset -= shift >> 9;
@@ -3064,7 +3064,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                        POS(inode->v.i_ino, start_sector),
                        BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
-       while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
+       while (!ret && bkey_lt(iter.pos, end_pos)) {
                s64 i_sectors_delta = 0;
                struct disk_reservation disk_res = { 0 };
                struct quota_res quota_res = { 0 };
fs/bcachefs/fsck.c
index f4f0e0cec85d7c536fa8c242fed641dcf801fb5e..7db1486a1143e886711afe8c094fc3b425f30f50 100644
@@ -133,7 +133,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
        if (ret)
                goto err;
 
-       if (!k.k || bkey_cmp(k.k->p, POS(0, inode_nr))) {
+       if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
                ret = -ENOENT;
                goto err;
        }
@@ -527,7 +527,7 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
        };
        int ret = 0;
 
-       if (bkey_cmp(s->pos, pos))
+       if (!bkey_eq(s->pos, pos))
                s->ids.nr = 0;
 
        pos.snapshot = n.equiv;
@@ -825,7 +825,7 @@ static int hash_check_key(struct btree_trans *trans,
        for_each_btree_key_norestart(trans, iter, desc.btree_id,
                                     POS(hash_k.k->p.inode, hash),
                                     BTREE_ITER_SLOTS, k, ret) {
-               if (!bkey_cmp(k.k->p, hash_k.k->p))
+               if (bkey_eq(k.k->p, hash_k.k->p))
                        break;
 
                if (fsck_err_on(k.k->type == desc.key_type &&
@@ -1199,7 +1199,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 
        BUG_ON(!iter->path->should_be_locked);
 #if 0
-       if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
+       if (bkey_gt(prev.k->k.p, bkey_start_pos(k.k))) {
                char buf1[200];
                char buf2[200];
 
fs/bcachefs/inode.c
index 827a0b04b00f0626eeae3d1b402badba19576610..b4f09d77148d62583c0cf72715f70a90016c5f00 100644
@@ -543,7 +543,7 @@ int bch2_inode_create(struct btree_trans *trans,
 again:
        while ((k = bch2_btree_iter_peek(iter)).k &&
               !(ret = bkey_err(k)) &&
-              bkey_cmp(k.k->p, POS(0, max)) < 0) {
+              bkey_lt(k.k->p, POS(0, max))) {
                if (pos < iter->pos.offset)
                        goto found_slot;
 
fs/bcachefs/io.c
index 5fe049d64e278fa085adc0a997736b8e17b7445d..491fad4dfb28e0fea687735434c11e5e9e9ba33d 100644
@@ -237,7 +237,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
                     (!new_compressed && bch2_bkey_sectors_compressed(old))))
                        *usage_increasing = true;
 
-               if (bkey_cmp(old.k->p, new->k.p) >= 0) {
+               if (bkey_ge(old.k->p, new->k.p)) {
                        /*
                         * Check if there's already data above where we're
                         * going to be writing to - this means we're definitely
@@ -420,7 +420,7 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
                bch2_btree_iter_set_snapshot(iter, snapshot);
 
                k = bch2_btree_iter_peek(iter);
-               if (bkey_cmp(iter->pos, end_pos) >= 0) {
+               if (bkey_ge(iter->pos, end_pos)) {
                        bch2_btree_iter_set_pos(iter, end_pos);
                        break;
                }
@@ -518,7 +518,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
                if (ec_ob)
                        bch2_ob_add_backpointer(c, ec_ob, &sk.k->k);
 
-               if (bkey_cmp(iter.pos, k->k.p) >= 0)
+               if (bkey_ge(iter.pos, k->k.p))
                        bch2_keylist_pop_front(&op->insert_keys);
                else
                        bch2_cut_front(iter.pos, k);
@@ -1398,7 +1398,7 @@ void bch2_write(struct closure *cl)
        EBUG_ON(op->cl.parent);
        BUG_ON(!op->nr_replicas);
        BUG_ON(!op->write_point.v);
-       BUG_ON(!bkey_cmp(op->pos, POS_MAX));
+       BUG_ON(bkey_eq(op->pos, POS_MAX));
 
        op->start_time = local_clock();
        bch2_keylist_init(&op->insert_keys, op->inline_keys);
fs/bcachefs/keylist.c
index 5e85055b0f9382df6ef9ababd31975c16f50cd98..29e51bde8313419cd44dcc83d4b339d84abd6935 100644
@@ -36,7 +36,7 @@ void bch2_keylist_add_in_order(struct keylist *l, struct bkey_i *insert)
        struct bkey_i *where;
 
        for_each_keylist_key(l, where)
-               if (bkey_cmp(insert->k.p, where->k.p) < 0)
+               if (bpos_lt(insert->k.p, where->k.p))
                        break;
 
        memmove_u64s_up((u64 *) where + insert->k.u64s,
@@ -63,6 +63,6 @@ void bch2_verify_keylist_sorted(struct keylist *l)
 
        for_each_keylist_key(l, k)
                BUG_ON(bkey_next(k) != l->top &&
-                      bpos_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
+                      bpos_ge(k->k.p, bkey_next(k)->k.p));
 }
 #endif
index a66fbc1faa7b869fa71df127665d3bbae5b8ca11..9125cea080bd938dcb68bed20903222ca9bba4bf 100644 (file)
@@ -340,7 +340,7 @@ static int lookup_inode(struct btree_trans *trans, struct bpos pos,
        if (ret)
                goto err;
 
-       if (!k.k || bkey_cmp(k.k->p, pos)) {
+       if (!k.k || !bkey_eq(k.k->p, pos)) {
                ret = -ENOENT;
                goto err;
        }
@@ -446,7 +446,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
                if (ret)
                        break;
 
-               if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+               if (bkey_ge(bkey_start_pos(k.k), end))
                        break;
 
                ctxt->stats->pos = iter.pos;
index 8d767e787d6bf2e9b1ded703c93cae050d1eff85..7c9f4a97bc0311f350670505cf98546268726064 100644 (file)
@@ -132,9 +132,8 @@ search:
               (k = idx_to_key(keys, *idx),
                k->btree_id == btree_id &&
                k->level == level &&
-               bpos_cmp(k->k->k.p, end_pos) <= 0)) {
-               if (bpos_cmp(k->k->k.p, pos) >= 0 &&
-                   !k->overwritten)
+               bpos_le(k->k->k.p, end_pos))) {
+               if (bpos_ge(k->k->k.p, pos) && !k->overwritten)
                        return k->k;
 
                (*idx)++;
@@ -295,7 +294,7 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
        if (idx < keys->size &&
            keys->d[idx].btree_id       == btree &&
            keys->d[idx].level          == level &&
-           !bpos_cmp(keys->d[idx].k->k.p, pos))
+           bpos_eq(keys->d[idx].k->k.p, pos))
                keys->d[idx].overwritten = true;
 }
 
@@ -354,7 +353,7 @@ static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
 
 void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
 {
-       if (!bpos_cmp(iter->pos, SPOS_MAX))
+       if (bpos_eq(iter->pos, SPOS_MAX))
                iter->at_end = true;
        else
                iter->pos = bpos_successor(iter->pos);
@@ -368,19 +367,19 @@ again:
                return bkey_s_c_null;
 
        while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
-              bpos_cmp(btree_k.k->p, iter->pos) < 0)
+              bpos_lt(btree_k.k->p, iter->pos))
                bch2_journal_iter_advance_btree(iter);
 
        while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
-              bpos_cmp(journal_k.k->p, iter->pos) < 0)
+              bpos_lt(journal_k.k->p, iter->pos))
                bch2_journal_iter_advance(&iter->journal);
 
        ret = journal_k.k &&
-               (!btree_k.k || bpos_cmp(journal_k.k->p, btree_k.k->p) <= 0)
+               (!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
                ? journal_k
                : btree_k;
 
-       if (ret.k && iter->b && bpos_cmp(ret.k->p, iter->b->data->max_key) > 0)
+       if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
                ret = bkey_s_c_null;
 
        if (ret.k) {
@@ -528,7 +527,7 @@ static int journal_keys_sort(struct bch_fs *c)
                while (src + 1 < keys->d + keys->nr &&
                       src[0].btree_id  == src[1].btree_id &&
                       src[0].level     == src[1].level &&
-                      !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
+                      bpos_eq(src[0].k->k.p, src[1].k->k.p))
                        src++;
 
                *dst++ = *src++;
index 0d4c004d7f9dba82132403ff7f4e9ca0dfbb343e..aebed671c43a9b5ac5639e2ea69cdb53e6b8f3c7 100644 (file)
@@ -252,14 +252,14 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
        int ret;
 
        for_each_btree_key_continue_norestart(*iter, 0, k, ret) {
-               if (bkey_cmp(iter->pos, end) >= 0)
+               if (bkey_ge(iter->pos, end))
                        break;
 
                if (bkey_extent_is_data(k.k))
                        return k;
        }
 
-       if (bkey_cmp(iter->pos, end) >= 0)
+       if (bkey_ge(iter->pos, end))
                bch2_btree_iter_set_pos(iter, end);
        return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
 }
@@ -301,7 +301,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 
        while ((ret == 0 ||
                bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
-              bkey_cmp(dst_iter.pos, dst_end) < 0) {
+              bkey_lt(dst_iter.pos, dst_end)) {
                struct disk_reservation disk_res = { 0 };
 
                bch2_trans_begin(&trans);
@@ -334,7 +334,7 @@ s64 bch2_remap_range(struct bch_fs *c,
                if (ret)
                        continue;
 
-               if (bkey_cmp(src_want, src_iter.pos) < 0) {
+               if (bkey_lt(src_want, src_iter.pos)) {
                        ret = bch2_fpunch_at(&trans, &dst_iter, dst_inum,
                                        min(dst_end.offset,
                                            dst_iter.pos.offset +
@@ -386,8 +386,8 @@ s64 bch2_remap_range(struct bch_fs *c,
        bch2_trans_iter_exit(&trans, &dst_iter);
        bch2_trans_iter_exit(&trans, &src_iter);
 
-       BUG_ON(!ret && bkey_cmp(dst_iter.pos, dst_end));
-       BUG_ON(bkey_cmp(dst_iter.pos, dst_end) > 0);
+       BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
+       BUG_ON(bkey_gt(dst_iter.pos, dst_end));
 
        dst_done = dst_iter.pos.offset - dst_start.offset;
        new_i_size = min(dst_iter.pos.offset << 9, new_i_size);
index 0e3b6ae3835a23c8cc9019d75651d16f17bfa21d..e37ffaad58837c3cfe8a6d8f90f9da4fd5b25f95 100644 (file)
@@ -30,8 +30,8 @@ int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
        struct bkey_s_c_snapshot s;
        u32 i, id;
 
-       if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0 ||
-           bkey_cmp(k.k->p, POS(0, 1)) < 0) {
+       if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+           bkey_lt(k.k->p, POS(0, 1))) {
                prt_printf(err, "bad pos");
                return -EINVAL;
        }
@@ -592,7 +592,7 @@ static int snapshot_delete_key(struct btree_trans *trans,
        struct bch_fs *c = trans->c;
        u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
 
-       if (bkey_cmp(k.k->p, *last_pos))
+       if (!bkey_eq(k.k->p, *last_pos))
                equiv_seen->nr = 0;
        *last_pos = k.k->p;
 
@@ -770,8 +770,8 @@ static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
                           int rw, struct printbuf *err)
 {
-       if (bkey_cmp(k.k->p, SUBVOL_POS_MIN) < 0 ||
-           bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0) {
+       if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
+           bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
                prt_printf(err, "invalid pos");
                return -EINVAL;
        }
@@ -1028,7 +1028,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
 
        for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
                           BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-               if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
+               if (bkey_gt(k.k->p, SUBVOL_POS_MAX))
                        break;
 
                /*