bcachefs: convert bch2_btree_insert_at() usage to bch2_trans_commit()
author Kent Overstreet <kent.overstreet@gmail.com>
Thu, 14 Mar 2019 00:49:16 +0000 (20:49 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:18 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
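
The conversion pattern is the same at every call site: instead of a one-shot bch2_btree_insert_at() call, callers now set up a btree_trans, obtain iterators from it, queue updates with bch2_trans_update(), and commit with bch2_trans_commit(). The sketch below is illustrative only, pieced together from the call sites in this patch; c, pos, k, disk_res, journal_seq and flags stand in for whatever the real call site passes, BTREE_ID_EXTENTS is just an example btree, and the -EINTR retry handling some sites need is omitted.

	/* before: one-shot insert through the old interface */
	struct btree_iter iter;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, pos, BTREE_ITER_INTENT);
	ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
				   BTREE_INSERT_ENTRY(&iter, k));
	bch2_btree_iter_unlock(&iter);

	/* after: queue the update in a transaction, then commit it */
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_trans_init(&trans, c);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, pos, BTREE_ITER_INTENT);
	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
	ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
	bch2_trans_exit(&trans);
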
16 files changed:
fs/bcachefs/acl.c
fs/bcachefs/alloc_background.c
fs/bcachefs/btree_update.h
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/dirent.c
fs/bcachefs/ec.c
fs/bcachefs/fsck.c
fs/bcachefs/inode.c
fs/bcachefs/io.c
fs/bcachefs/journal_io.c
fs/bcachefs/migrate.c
fs/bcachefs/move.c
fs/bcachefs/quota.c
fs/bcachefs/str_hash.h
fs/bcachefs/tests.c
fs/bcachefs/xattr.c

diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index bcfc9fdce35e4141e343df352c587773c9dccc94..c7f6bcb87387bfe2afa3b716a4ff7f7260799fb8 100644
@@ -268,8 +268,8 @@ int bch2_set_acl_trans(struct btree_trans *trans,
                if (IS_ERR(xattr))
                        return PTR_ERR(xattr);
 
-               ret = __bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
-                                     inode_u->bi_inum, &xattr->k_i, 0);
+               ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
+                                   inode_u->bi_inum, &xattr->k_i, 0);
        } else {
                struct xattr_search_key search =
                        X_SEARCH(acl_to_xattr_type(type), "", 0);
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 2f1a8e70ad885450a86e2563887c06d07ce20cd3..c11136506352e8af0360a3097322a4ac162165b1 100644
@@ -310,10 +310,53 @@ int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
        return 0;
 }
 
-static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
+int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
+{
+       struct btree_trans trans;
+       struct btree_iter *iter;
+       struct bch_dev *ca;
+       int ret;
+
+       if (k->k.p.inode >= c->sb.nr_devices ||
+           !c->devs[k->k.p.inode])
+               return 0;
+
+       ca = bch_dev_bkey_exists(c, k->k.p.inode);
+
+       if (k->k.p.offset >= ca->mi.nbuckets)
+               return 0;
+
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
+                                  BTREE_ITER_INTENT);
+
+       ret = bch2_btree_iter_traverse(iter);
+       if (ret)
+               goto err;
+
+       /* check buckets_written with btree node locked: */
+       if (test_bit(k->k.p.offset, ca->buckets_written)) {
+               ret = 0;
+               goto err;
+       }
+
+       bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
+
+       ret = bch2_trans_commit(&trans, NULL, NULL,
+                               BTREE_INSERT_NOFAIL|
+                               BTREE_INSERT_JOURNAL_REPLAY|
+                               BTREE_INSERT_NOMARK);
+err:
+       bch2_trans_exit(&trans);
+       return ret;
+}
+
+static int __bch2_alloc_write_key(struct btree_trans *trans, struct bch_dev *ca,
                                  size_t b, struct btree_iter *iter,
                                  u64 *journal_seq, unsigned flags)
 {
+       struct bch_fs *c = trans->c;
 #if 0
        __BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
 #else
@@ -349,14 +392,15 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
 
        bch2_btree_iter_cond_resched(iter);
 
-       ret = bch2_btree_insert_at(c, NULL, journal_seq,
+       bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+
+       ret = bch2_trans_commit(trans, NULL, journal_seq,
                                   BTREE_INSERT_NOCHECK_RW|
                                   BTREE_INSERT_NOFAIL|
                                   BTREE_INSERT_USE_RESERVE|
                                   BTREE_INSERT_USE_ALLOC_RESERVE|
                                   BTREE_INSERT_NOMARK|
-                                  flags,
-                                  BTREE_INSERT_ENTRY(iter, &a->k_i));
+                                  flags);
        if (ret)
                return ret;
 
@@ -370,42 +414,6 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
        return 0;
 }
 
-int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
-{
-       struct bch_dev *ca;
-       struct btree_iter iter;
-       int ret;
-
-       if (k->k.p.inode >= c->sb.nr_devices ||
-           !c->devs[k->k.p.inode])
-               return 0;
-
-       ca = bch_dev_bkey_exists(c, k->k.p.inode);
-
-       if (k->k.p.offset >= ca->mi.nbuckets)
-               return 0;
-
-       bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, k->k.p,
-                            BTREE_ITER_INTENT);
-
-       ret = bch2_btree_iter_traverse(&iter);
-       if (ret)
-               goto err;
-
-       /* check buckets_written with btree node locked: */
-
-       ret = test_bit(k->k.p.offset, ca->buckets_written)
-               ? 0
-               : bch2_btree_insert_at(c, NULL, NULL,
-                                      BTREE_INSERT_NOFAIL|
-                                      BTREE_INSERT_JOURNAL_REPLAY|
-                                      BTREE_INSERT_NOMARK,
-                                      BTREE_INSERT_ENTRY(&iter, k));
-err:
-       bch2_btree_iter_unlock(&iter);
-       return ret;
-}
-
 int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
 {
        struct bch_dev *ca;
@@ -415,12 +423,15 @@ int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
        *wrote = false;
 
        for_each_rw_member(ca, c, i) {
-               struct btree_iter iter;
+               struct btree_trans trans;
+               struct btree_iter *iter;
                struct bucket_array *buckets;
                size_t b;
 
-               bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
-                                    BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+               bch2_trans_init(&trans, c);
+
+               iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
+                                          BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
                down_read(&ca->bucket_lock);
                buckets = bucket_array(ca);
@@ -431,7 +442,7 @@ int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
                        if (!buckets->b[b].mark.dirty)
                                continue;
 
-                       ret = __bch2_alloc_write_key(c, ca, b, &iter, NULL,
+                       ret = __bch2_alloc_write_key(&trans, ca, b, iter, NULL,
                                                     nowait
                                                     ? BTREE_INSERT_NOWAIT
                                                     : 0);
@@ -441,7 +452,8 @@ int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
                        *wrote = true;
                }
                up_read(&ca->bucket_lock);
-               bch2_btree_iter_unlock(&iter);
+
+               bch2_trans_exit(&trans);
 
                if (ret) {
                        percpu_ref_put(&ca->io_ref);
@@ -887,7 +899,8 @@ static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
        }
 }
 
-static int bch2_invalidate_one_bucket2(struct bch_fs *c, struct bch_dev *ca,
+static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
+                                      struct bch_dev *ca,
                                       struct btree_iter *iter,
                                       u64 *journal_seq, unsigned flags)
 {
@@ -897,6 +910,7 @@ static int bch2_invalidate_one_bucket2(struct bch_fs *c, struct bch_dev *ca,
        /* hack: */
        __BKEY_PADDED(k, 8) alloc_key;
 #endif
+       struct bch_fs *c = trans->c;
        struct bkey_i_alloc *a;
        struct bkey_alloc_unpacked u;
        struct bucket_mark m;
@@ -959,6 +973,8 @@ retry:
        a->k.p = iter->pos;
        bch2_alloc_pack(a, u);
 
+       bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+
        /*
         * XXX:
         * when using deferred btree updates, we have journal reclaim doing
@@ -966,16 +982,15 @@ retry:
         * progress, and here the allocator is requiring space in the journal -
         * so we need a journal pre-reservation:
         */
-       ret = bch2_btree_insert_at(c, NULL,
-                       invalidating_cached_data ? journal_seq : NULL,
-                       BTREE_INSERT_ATOMIC|
-                       BTREE_INSERT_NOUNLOCK|
-                       BTREE_INSERT_NOCHECK_RW|
-                       BTREE_INSERT_NOFAIL|
-                       BTREE_INSERT_USE_RESERVE|
-                       BTREE_INSERT_USE_ALLOC_RESERVE|
-                       flags,
-                       BTREE_INSERT_ENTRY(iter, &a->k_i));
+       ret = bch2_trans_commit(trans, NULL,
+                               invalidating_cached_data ? journal_seq : NULL,
+                               BTREE_INSERT_ATOMIC|
+                               BTREE_INSERT_NOUNLOCK|
+                               BTREE_INSERT_NOCHECK_RW|
+                               BTREE_INSERT_NOFAIL|
+                               BTREE_INSERT_USE_RESERVE|
+                               BTREE_INSERT_USE_ALLOC_RESERVE|
+                               flags);
        if (ret == -EINTR)
                goto retry;
 
@@ -1049,23 +1064,27 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
  */
 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        u64 journal_seq = 0;
        int ret = 0;
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
-                            BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
+                                  POS(ca->dev_idx, 0),
+                                  BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
        /* Only use nowait if we've already invalidated at least one bucket: */
        while (!ret &&
               !fifo_full(&ca->free_inc) &&
               ca->alloc_heap.used)
-               ret = bch2_invalidate_one_bucket2(c, ca, &iter, &journal_seq,
+               ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
                                BTREE_INSERT_GC_LOCK_HELD|
                                (!fifo_empty(&ca->free_inc)
                                 ? BTREE_INSERT_NOWAIT : 0));
 
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 
        /* If we used NOWAIT, don't return the error: */
        if (!fifo_empty(&ca->free_inc))
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 3e6a616b818220d2cbc30cc2d04abe4af7711732..2bdb8b532aad6a295bdfc462fd6d613277248604 100644
@@ -21,8 +21,6 @@ void bch2_deferred_update_free(struct bch_fs *,
 struct deferred_update *
 bch2_deferred_update_alloc(struct bch_fs *, enum btree_id, unsigned);
 
-/* Normal update interface: */
-
 struct btree_insert {
        struct bch_fs           *c;
        struct disk_reservation *disk_res;
@@ -35,8 +33,6 @@ struct btree_insert {
        struct btree_insert_entry  *entries;
 };
 
-int __bch2_btree_insert_at(struct btree_insert *);
-
 #define BTREE_INSERT_ENTRY(_iter, _k)                                  \
        ((struct btree_insert_entry) {                                  \
                .iter           = (_iter),                              \
@@ -50,30 +46,6 @@ int __bch2_btree_insert_at(struct btree_insert *);
                .deferred       = true,                                 \
        })
 
-/**
- * bch_btree_insert_at - insert one or more keys at iterator positions
- * @iter:              btree iterator
- * @insert_key:                key to insert
- * @disk_res:          disk reservation
- * @hook:              extent insert callback
- *
- * Return values:
- * -EINTR: locking changed, this function should be called again. Only returned
- *  if passed BTREE_INSERT_ATOMIC.
- * -EROFS: filesystem read only
- * -EIO: journal or btree node IO error
- */
-#define bch2_btree_insert_at(_c, _disk_res, _journal_seq, _flags, ...) \
-       __bch2_btree_insert_at(&(struct btree_insert) {                 \
-               .c              = (_c),                                 \
-               .disk_res       = (_disk_res),                          \
-               .journal_seq    = (_journal_seq),                       \
-               .flags          = (_flags),                             \
-               .nr             = COUNT_ARGS(__VA_ARGS__),              \
-               .entries        = (struct btree_insert_entry[]) {       \
-                       __VA_ARGS__                                     \
-               }})
-
 enum {
        __BTREE_INSERT_ATOMIC,
        __BTREE_INSERT_NOUNLOCK,
@@ -125,7 +97,7 @@ enum {
 #define BCH_HASH_SET_MUST_CREATE       (1 << __BCH_HASH_SET_MUST_CREATE)
 #define BCH_HASH_SET_MUST_REPLACE      (1 << __BCH_HASH_SET_MUST_REPLACE)
 
-int bch2_btree_delete_at(struct btree_iter *, unsigned);
+int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
 
 int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
                     struct disk_reservation *, u64 *, int flags);
@@ -138,8 +110,6 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
 int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
                               struct btree *, struct bkey_i_btree_ptr *);
 
-/* new transactional interface: */
-
 static inline void
 bch2_trans_update(struct btree_trans *trans,
                  struct btree_insert_entry entry)
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 21822bda67fc132dc018dc8fc55ebaa7932f2697..d2f57b6b924d36cd3c1467a8c552dc15baf249dd 100644
@@ -631,7 +631,7 @@ static inline void btree_insert_entry_checks(struct bch_fs *c,
  * -EROFS: filesystem read only
  * -EIO: journal or btree node IO error
  */
-int __bch2_btree_insert_at(struct btree_insert *trans)
+static int __bch2_btree_insert_at(struct btree_insert *trans)
 {
        struct bch_fs *c = trans->c;
        struct btree_insert_entry *i;
@@ -847,17 +847,18 @@ int bch2_trans_commit(struct btree_trans *trans,
        return __bch2_btree_insert_at(&insert);
 }
 
-int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
+int bch2_btree_delete_at(struct btree_trans *trans,
+                        struct btree_iter *iter, unsigned flags)
 {
        struct bkey_i k;
 
        bkey_init(&k.k);
        k.k.p = iter->pos;
 
-       return bch2_btree_insert_at(iter->c, NULL, NULL,
-                                   BTREE_INSERT_NOFAIL|
-                                   BTREE_INSERT_USE_RESERVE|flags,
-                                   BTREE_INSERT_ENTRY(iter, &k));
+       bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &k));
+       return bch2_trans_commit(trans, NULL, NULL,
+                                BTREE_INSERT_NOFAIL|
+                                BTREE_INSERT_USE_RESERVE|flags);
 }
 
 /**
@@ -872,14 +873,19 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
                     struct disk_reservation *disk_res,
                     u64 *journal_seq, int flags)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        int ret;
 
-       bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
-                            BTREE_ITER_INTENT);
-       ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
-                                  BTREE_INSERT_ENTRY(&iter, k));
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
+                                  BTREE_ITER_INTENT);
+
+       bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
+
+       ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
+       bch2_trans_exit(&trans);
 
        return ret;
 }
@@ -893,16 +899,18 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
                            struct bpos start, struct bpos end,
                            u64 *journal_seq)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        int ret = 0;
 
-       bch2_btree_iter_init(&iter, c, id, start,
-                            BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
 
-       while ((k = bch2_btree_iter_peek(&iter)).k &&
+       iter = bch2_trans_get_iter(&trans, id, start, BTREE_ITER_INTENT);
+
+       while ((k = bch2_btree_iter_peek(iter)).k &&
               !(ret = btree_iter_err(k)) &&
-              bkey_cmp(iter.pos, end) < 0) {
+              bkey_cmp(iter->pos, end) < 0) {
                unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
                /* really shouldn't be using a bare, unpadded bkey_i */
                struct bkey_i delete;
@@ -919,24 +927,25 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
                 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
                 * bkey_start_pos(k.k)).
                 */
-               delete.k.p = iter.pos;
+               delete.k.p = iter->pos;
 
-               if (iter.flags & BTREE_ITER_IS_EXTENTS) {
+               if (iter->flags & BTREE_ITER_IS_EXTENTS) {
                        /* create the biggest key we can */
                        bch2_key_resize(&delete.k, max_sectors);
                        bch2_cut_back(end, &delete.k);
-                       bch2_extent_trim_atomic(&delete, &iter);
+                       bch2_extent_trim_atomic(&delete, iter);
                }
 
-               ret = bch2_btree_insert_at(c, NULL, journal_seq,
-                                          BTREE_INSERT_NOFAIL,
-                                          BTREE_INSERT_ENTRY(&iter, &delete));
+               bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &delete));
+
+               ret = bch2_trans_commit(&trans, NULL, journal_seq,
+                                       BTREE_INSERT_NOFAIL);
                if (ret)
                        break;
 
-               bch2_btree_iter_cond_resched(&iter);
+               bch2_btree_iter_cond_resched(iter);
        }
 
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
        return ret;
 }
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index 80d37c56827202a7f2a9423a04503c12c98f6b81..dc3883204d8098677a0e156a8939ae3e5bcc0ca4 100644
@@ -151,8 +151,8 @@ int __bch2_dirent_create(struct btree_trans *trans,
        if (ret)
                return ret;
 
-       return __bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
-                              dir_inum, &dirent->k_i, flags);
+       return bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
+                            dir_inum, &dirent->k_i, flags);
 }
 
 int bch2_dirent_create(struct bch_fs *c, u64 dir_inum,
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index fc73823f6358ebfa1433d0b4b9422f285891edca..a989ba172faaa89473076f303096ec85452e9eb6 100644
@@ -629,36 +629,12 @@ void bch2_stripes_heap_insert(struct bch_fs *c,
 
 /* stripe deletion */
 
-static void ec_stripe_delete(struct bch_fs *c, size_t idx)
+static int ec_stripe_delete(struct bch_fs *c, size_t idx)
 {
-       struct btree_iter iter;
-       struct bch_stripe *v = NULL;
-       struct bkey_s_c k;
-       struct bkey_i delete;
-       u64 journal_seq = 0;
-
-       bch2_btree_iter_init(&iter, c, BTREE_ID_EC,
-                            POS(0, idx),
-                            BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-       k = bch2_btree_iter_peek_slot(&iter);
-       if (btree_iter_err(k) || k.k->type != KEY_TYPE_stripe)
-               goto out;
-
-       v = kmalloc(bkey_val_bytes(k.k), GFP_KERNEL);
-       BUG_ON(!v);
-       memcpy(v, bkey_s_c_to_stripe(k).v, bkey_val_bytes(k.k));
-
-       bkey_init(&delete.k);
-       delete.k.p = iter.pos;
-
-       bch2_btree_insert_at(c, NULL, &journal_seq,
-                            BTREE_INSERT_NOFAIL|
-                            BTREE_INSERT_USE_RESERVE|
-                            BTREE_INSERT_NOUNLOCK,
-                            BTREE_INSERT_ENTRY(&iter, &delete));
-out:
-       bch2_btree_iter_unlock(&iter);
-       kfree(v);
+       return bch2_btree_delete_range(c, BTREE_ID_EC,
+                                      POS(0, idx),
+                                      POS(0, idx + 1),
+                                      NULL);
 }
 
 static void ec_stripe_delete_work(struct work_struct *work)
@@ -690,39 +666,46 @@ static void ec_stripe_delete_work(struct work_struct *work)
 static int ec_stripe_bkey_insert(struct bch_fs *c,
                                 struct bkey_i_stripe *stripe)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        int ret;
 
-       /* XXX: start pos hint */
+       bch2_trans_init(&trans, c);
 retry:
-       for_each_btree_key(&iter, c, BTREE_ID_EC, POS_MIN,
-                          BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
-               if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
-                       bch2_btree_iter_unlock(&iter);
-                       return -ENOSPC;
-               }
+       bch2_trans_begin(&trans);
+
+       /* XXX: start pos hint */
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
+                                  BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+       for_each_btree_key_continue(iter, BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
+               if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0)
+                       break;
 
                if (bkey_deleted(k.k))
                        goto found_slot;
        }
 
-       return bch2_btree_iter_unlock(&iter) ?: -ENOSPC;
+       ret = -ENOSPC;
+       goto out;
 found_slot:
-       ret = ec_stripe_mem_alloc(c, &iter);
+       ret = ec_stripe_mem_alloc(c, iter);
 
        if (ret == -EINTR)
                goto retry;
        if (ret)
                return ret;
 
-       stripe->k.p = iter.pos;
+       stripe->k.p = iter->pos;
 
-       ret = bch2_btree_insert_at(c, NULL, NULL,
-                                  BTREE_INSERT_NOFAIL|
-                                  BTREE_INSERT_USE_RESERVE,
-                                  BTREE_INSERT_ENTRY(&iter, &stripe->k_i));
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &stripe->k_i));
+
+       ret = bch2_trans_commit(&trans, NULL, NULL,
+                               BTREE_INSERT_NOFAIL|
+                               BTREE_INSERT_USE_RESERVE);
+out:
+       bch2_trans_exit(&trans);
 
        return ret;
 }
@@ -749,23 +732,26 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
                                 struct ec_stripe_buf *s,
                                 struct bkey *pos)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        struct bkey_s_extent e;
        struct bch_extent_ptr *ptr;
        BKEY_PADDED(k) tmp;
        int ret = 0, dev, idx;
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-                            bkey_start_pos(pos),
-                            BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
 
-       while ((k = bch2_btree_iter_peek(&iter)).k &&
-              !btree_iter_err(k) &&
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+                                  bkey_start_pos(pos),
+                                  BTREE_ITER_INTENT);
+
+       while ((k = bch2_btree_iter_peek(iter)).k &&
+              !(ret = btree_iter_err(k)) &&
               bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
                idx = extent_matches_stripe(c, &s->key.v, k);
                if (idx < 0) {
-                       bch2_btree_iter_next(&iter);
+                       bch2_btree_iter_next(iter);
                        continue;
                }
 
@@ -783,18 +769,21 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
 
                extent_stripe_ptr_add(e, s, ptr, idx);
 
-               ret = bch2_btree_insert_at(c, NULL, NULL,
-                               BTREE_INSERT_ATOMIC|
-                               BTREE_INSERT_NOFAIL|
-                               BTREE_INSERT_USE_RESERVE,
-                               BTREE_INSERT_ENTRY(&iter, &tmp.k));
+               bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.k));
+
+               ret = bch2_trans_commit(&trans, NULL, NULL,
+                                       BTREE_INSERT_ATOMIC|
+                                       BTREE_INSERT_NOFAIL|
+                                       BTREE_INSERT_USE_RESERVE);
                if (ret == -EINTR)
                        ret = 0;
                if (ret)
                        break;
        }
 
-       return bch2_btree_iter_unlock(&iter) ?: ret;
+       bch2_trans_exit(&trans);
+
+       return ret;
 }
 
 /*
@@ -1163,13 +1152,14 @@ unlock:
        mutex_unlock(&c->ec_new_stripe_lock);
 }
 
-static int __bch2_stripe_write_key(struct bch_fs *c,
+static int __bch2_stripe_write_key(struct btree_trans *trans,
                                   struct btree_iter *iter,
                                   struct stripe *m,
                                   size_t idx,
                                   struct bkey_i_stripe *new_key,
                                   unsigned flags)
 {
+       struct bch_fs *c = trans->c;
        struct bkey_s_c k;
        unsigned i;
        int ret;
@@ -1195,14 +1185,16 @@ static int __bch2_stripe_write_key(struct bch_fs *c,
 
        spin_unlock(&c->ec_stripes_heap_lock);
 
-       return bch2_btree_insert_at(c, NULL, NULL,
-                                  BTREE_INSERT_NOFAIL|flags,
-                                  BTREE_INSERT_ENTRY(iter, &new_key->k_i));
+       bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &new_key->k_i));
+
+       return bch2_trans_commit(trans, NULL, NULL,
+                                BTREE_INSERT_NOFAIL|flags);
 }
 
 int bch2_stripes_write(struct bch_fs *c, bool *wrote)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct genradix_iter giter;
        struct bkey_i_stripe *new_key;
        struct stripe *m;
@@ -1211,14 +1203,16 @@ int bch2_stripes_write(struct bch_fs *c, bool *wrote)
        new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
        BUG_ON(!new_key);
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_EC, POS_MIN,
-                            BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
+                                  BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
        genradix_for_each(&c->stripes[0], giter, m) {
                if (!m->dirty)
                        continue;
 
-               ret = __bch2_stripe_write_key(c, &iter, m, giter.pos,
+               ret = __bch2_stripe_write_key(&trans, iter, m, giter.pos,
                                        new_key, BTREE_INSERT_NOCHECK_RW);
                if (ret)
                        break;
@@ -1226,7 +1220,7 @@ int bch2_stripes_write(struct bch_fs *c, bool *wrote)
                *wrote = true;
        }
 
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 
        kfree(new_key);
 
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index e79846a96f9c4ebd970981933448148fe70a4ae7..2561773cd6dc233d90d12457e529e5856854d039 100644
@@ -152,7 +152,7 @@ static void hash_check_set_inode(struct hash_check *h, struct bch_fs *c,
 }
 
 static int hash_redo_key(const struct bch_hash_desc desc,
-                        struct hash_check *h, struct bch_fs *c,
+                        struct btree_trans *trans, struct hash_check *h,
                         struct btree_iter *k_iter, struct bkey_s_c k,
                         u64 hashed)
 {
@@ -165,15 +165,16 @@ static int hash_redo_key(const struct bch_hash_desc desc,
 
        bkey_reassemble(tmp, k);
 
-       ret = bch2_btree_delete_at(k_iter, 0);
+       ret = bch2_btree_delete_at(trans, k_iter, 0);
        if (ret)
                goto err;
 
        bch2_btree_iter_unlock(k_iter);
 
-       bch2_hash_set(desc, &h->info, c, k_iter->pos.inode, NULL, tmp,
-                     BTREE_INSERT_NOFAIL|
-                     BCH_HASH_SET_MUST_CREATE);
+       bch2_hash_set(trans, desc, &h->info, k_iter->pos.inode,
+                     tmp, BCH_HASH_SET_MUST_CREATE);
+       ret = bch2_trans_commit(trans, NULL, NULL,
+                               BTREE_INSERT_NOFAIL);
 err:
        kfree(tmp);
        return ret;
@@ -272,9 +273,10 @@ static bool key_has_correct_hash(const struct bch_hash_desc desc,
 }
 
 static int hash_check_key(const struct bch_hash_desc desc,
-                         struct hash_check *h, struct bch_fs *c,
+                         struct btree_trans *trans, struct hash_check *h,
                          struct btree_iter *k_iter, struct bkey_s_c k)
 {
+       struct bch_fs *c = trans->c;
        char buf[200];
        u64 hashed;
        int ret = 0;
@@ -300,7 +302,7 @@ static int hash_check_key(const struct bch_hash_desc desc,
                        hashed, h->chain->pos.offset,
                        (bch2_bkey_val_to_text(&PBUF(buf), c,
                                               k), buf))) {
-               ret = hash_redo_key(desc, h, c, k_iter, k, hashed);
+               ret = hash_redo_key(desc, trans, h, k_iter, k, hashed);
                if (ret) {
                        bch_err(c, "hash_redo_key err %i", ret);
                        return ret;
@@ -313,9 +315,10 @@ fsck_err:
        return ret;
 }
 
-static int check_dirent_hash(struct hash_check *h, struct bch_fs *c,
+static int check_dirent_hash(struct btree_trans *trans, struct hash_check *h,
                             struct btree_iter *iter, struct bkey_s_c *k)
 {
+       struct bch_fs *c = trans->c;
        struct bkey_i_dirent *d = NULL;
        int ret = -EINVAL;
        char buf[200];
@@ -360,9 +363,9 @@ static int check_dirent_hash(struct hash_check *h, struct bch_fs *c,
 
        if (fsck_err(c, "dirent with junk at end, was %s (%zu) now %s (%u)",
                     buf, strlen(buf), d->v.d_name, len)) {
-               ret = bch2_btree_insert_at(c, NULL, NULL,
-                                          BTREE_INSERT_NOFAIL,
-                                          BTREE_INSERT_ENTRY(iter, &d->k_i));
+               bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &d->k_i));
+
+               ret = bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
                if (ret)
                        goto err;
 
@@ -384,8 +387,8 @@ err_redo:
                     k->k->p.offset, hash, h->chain->pos.offset,
                     (bch2_bkey_val_to_text(&PBUF(buf), c,
                                            *k), buf))) {
-               ret = hash_redo_key(bch2_dirent_hash_desc,
-                                   h, c, iter, *k, hash);
+               ret = hash_redo_key(bch2_dirent_hash_desc, trans,
+                                   h, iter, *k, hash);
                if (ret)
                        bch_err(c, "hash_redo_key err %i", ret);
                else
@@ -532,7 +535,7 @@ static int check_dirents(struct bch_fs *c)
                                mode_to_type(w.inode.bi_mode),
                                (bch2_bkey_val_to_text(&PBUF(buf), c,
                                                       k), buf))) {
-                       ret = bch2_btree_delete_at(iter, 0);
+                       ret = bch2_btree_delete_at(&trans, iter, 0);
                        if (ret)
                                goto err;
                        continue;
@@ -541,7 +544,7 @@ static int check_dirents(struct bch_fs *c)
                if (w.first_this_inode && w.have_inode)
                        hash_check_set_inode(&h, c, &w.inode);
 
-               ret = check_dirent_hash(&h, c, iter, &k);
+               ret = check_dirent_hash(&trans, &h, iter, &k);
                if (ret > 0) {
                        ret = 0;
                        continue;
@@ -623,9 +626,11 @@ static int check_dirents(struct bch_fs *c)
                        bkey_reassemble(&n->k_i, d.s_c);
                        n->v.d_type = mode_to_type(target.bi_mode);
 
-                       ret = bch2_btree_insert_at(c, NULL, NULL,
-                                       BTREE_INSERT_NOFAIL,
-                                       BTREE_INSERT_ENTRY(iter, &n->k_i));
+                       bch2_trans_update(&trans,
+                               BTREE_INSERT_ENTRY(iter, &n->k_i));
+
+                       ret = bch2_trans_commit(&trans, NULL, NULL,
+                                               BTREE_INSERT_NOFAIL);
                        kfree(n);
                        if (ret)
                                goto err;
@@ -669,7 +674,7 @@ static int check_xattrs(struct bch_fs *c)
                if (fsck_err_on(!w.have_inode, c,
                                "xattr for missing inode %llu",
                                k.k->p.inode)) {
-                       ret = bch2_btree_delete_at(iter, 0);
+                       ret = bch2_btree_delete_at(&trans, iter, 0);
                        if (ret)
                                goto err;
                        continue;
@@ -678,7 +683,7 @@ static int check_xattrs(struct bch_fs *c)
                if (w.first_this_inode && w.have_inode)
                        hash_check_set_inode(&h, c, &w.inode);
 
-               ret = hash_check_key(bch2_xattr_hash_desc, &h, c, iter, k);
+               ret = hash_check_key(bch2_xattr_hash_desc, &trans, &h, iter, k);
                if (ret)
                        goto fsck_err;
        }
@@ -1163,12 +1168,13 @@ fsck_err:
        return ret;
 }
 
-static int check_inode(struct bch_fs *c,
+static int check_inode(struct btree_trans *trans,
                       struct bch_inode_unpacked *lostfound_inode,
                       struct btree_iter *iter,
                       struct bkey_s_c_inode inode,
                       struct nlink *link)
 {
+       struct bch_fs *c = trans->c;
        struct bch_inode_unpacked u;
        bool do_update = false;
        int ret = 0;
@@ -1259,10 +1265,10 @@ static int check_inode(struct bch_fs *c,
                struct bkey_inode_buf p;
 
                bch2_inode_pack(&p, &u);
+               bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
 
-               ret = bch2_btree_insert_at(c, NULL, NULL,
-                                         BTREE_INSERT_NOFAIL,
-                                         BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
+               ret = bch2_trans_commit(trans, NULL, NULL,
+                                       BTREE_INSERT_NOFAIL);
                if (ret && ret != -EINTR)
                        bch_err(c, "error in fs gc: error %i "
                                "updating inode", ret);
@@ -1277,25 +1283,29 @@ static int bch2_gc_walk_inodes(struct bch_fs *c,
                               nlink_table *links,
                               u64 range_start, u64 range_end)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        struct nlink *link, zero_links = { 0, 0 };
        struct genradix_iter nlinks_iter;
        int ret = 0, ret2 = 0;
        u64 nlinks_pos;
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(range_start, 0), 0);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
+                                  POS(range_start, 0), 0);
        nlinks_iter = genradix_iter_init(links, 0);
 
-       while ((k = bch2_btree_iter_peek(&iter)).k &&
-              !btree_iter_err(k)) {
+       while ((k = bch2_btree_iter_peek(iter)).k &&
+              !(ret2 = btree_iter_err(k))) {
 peek_nlinks:   link = genradix_iter_peek(&nlinks_iter, links);
 
-               if (!link && (!k.k || iter.pos.inode >= range_end))
+               if (!link && (!k.k || iter->pos.inode >= range_end))
                        break;
 
                nlinks_pos = range_start + nlinks_iter.pos;
-               if (iter.pos.inode > nlinks_pos) {
+               if (iter->pos.inode > nlinks_pos) {
                        /* Should have been caught by dirents pass: */
                        need_fsck_err_on(link && link->count, c,
                                "missing inode %llu (nlink %u)",
@@ -1304,7 +1314,7 @@ peek_nlinks:      link = genradix_iter_peek(&nlinks_iter, links);
                        goto peek_nlinks;
                }
 
-               if (iter.pos.inode < nlinks_pos || !link)
+               if (iter->pos.inode < nlinks_pos || !link)
                        link = &zero_links;
 
                if (k.k && k.k->type == KEY_TYPE_inode) {
@@ -1312,9 +1322,9 @@ peek_nlinks:      link = genradix_iter_peek(&nlinks_iter, links);
                         * Avoid potential deadlocks with iter for
                         * truncate/rm/etc.:
                         */
-                       bch2_btree_iter_unlock(&iter);
+                       bch2_btree_iter_unlock(iter);
 
-                       ret = check_inode(c, lostfound_inode, &iter,
+                       ret = check_inode(&trans, lostfound_inode, iter,
                                          bkey_s_c_to_inode(k), link);
                        BUG_ON(ret == -EINTR);
                        if (ret)
@@ -1326,14 +1336,15 @@ peek_nlinks:    link = genradix_iter_peek(&nlinks_iter, links);
                                nlinks_pos, link->count);
                }
 
-               if (nlinks_pos == iter.pos.inode)
+               if (nlinks_pos == iter->pos.inode)
                        genradix_iter_advance(&nlinks_iter, links);
 
-               bch2_btree_iter_next(&iter);
-               bch2_btree_iter_cond_resched(&iter);
+               bch2_btree_iter_next(iter);
+               bch2_btree_iter_cond_resched(iter);
        }
 fsck_err:
-       ret2 = bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
+
        if (ret2)
                bch_err(c, "error in fs gc: btree error %i while walking inodes", ret2);
 
@@ -1379,12 +1390,18 @@ static int check_inode_nlinks(struct bch_fs *c,
 noinline_for_stack
 static int check_inodes_fast(struct bch_fs *c)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        struct bkey_s_c_inode inode;
        int ret = 0;
 
-       for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, 0, k) {
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
+                                  POS_MIN, 0);
+
+       for_each_btree_key_continue(iter, 0, k) {
                if (k.k->type != KEY_TYPE_inode)
                        continue;
 
@@ -1394,14 +1411,19 @@ static int check_inodes_fast(struct bch_fs *c)
                    (BCH_INODE_I_SIZE_DIRTY|
                     BCH_INODE_I_SECTORS_DIRTY|
                     BCH_INODE_UNLINKED)) {
-                       ret = check_inode(c, NULL, &iter, inode, NULL);
+                       ret = check_inode(&trans, NULL, iter, inode, NULL);
                        BUG_ON(ret == -EINTR);
                        if (ret)
                                break;
                }
        }
 
-       return bch2_btree_iter_unlock(&iter) ?: ret;
+       if (!ret)
+               ret = bch2_btree_iter_unlock(iter);
+
+       bch2_trans_exit(&trans);
+
+       return ret;
 }
 
 /*
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 6acb487312a8d50562ca0b5627f29333798db0fd..811c917cba84d802ce2ebbd89b5d94f9ae93f3d8 100644
@@ -367,7 +367,8 @@ int bch2_inode_create(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
 
 int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_i_inode_generation delete;
        struct bpos start = POS(inode_nr, 0);
        struct bpos end = POS(inode_nr + 1, 0);
@@ -390,17 +391,17 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
        if (ret)
                return ret;
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(inode_nr, 0),
-                            BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(inode_nr, 0),
+                                  BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
        do {
-               struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
+               struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
                u32 bi_generation = 0;
 
                ret = btree_iter_err(k);
-               if (ret) {
-                       bch2_btree_iter_unlock(&iter);
-                       return ret;
-               }
+               if (ret)
+                       break;
 
                bch2_fs_inconsistent_on(k.k->type != KEY_TYPE_inode, c,
                                        "inode %llu not found when deleting",
@@ -431,13 +432,15 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
                        delete.v.bi_generation = cpu_to_le32(bi_generation);
                }
 
-               ret = bch2_btree_insert_at(c, NULL, NULL,
-                               BTREE_INSERT_ATOMIC|
-                               BTREE_INSERT_NOFAIL,
-                               BTREE_INSERT_ENTRY(&iter, &delete.k_i));
+               bch2_trans_update(&trans,
+                                 BTREE_INSERT_ENTRY(iter, &delete.k_i));
+
+               ret = bch2_trans_commit(&trans, NULL, NULL,
+                                       BTREE_INSERT_ATOMIC|
+                                       BTREE_INSERT_NOFAIL);
        } while (ret == -EINTR);
 
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
        return ret;
 }
 
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 092500591b8fa4255ef84ca098bca6df07b0ce31..f4c49bf8245695a3a76a8ecd42f94bb026db81e5 100644
@@ -294,36 +294,43 @@ static void bch2_write_done(struct closure *cl)
 int bch2_write_index_default(struct bch_write_op *op)
 {
        struct bch_fs *c = op->c;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct keylist *keys = &op->insert_keys;
-       struct btree_iter iter;
        int ret;
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-                            bkey_start_pos(&bch2_keylist_front(keys)->k),
-                            BTREE_ITER_INTENT);
+       BUG_ON(bch2_keylist_empty(keys));
+       bch2_verify_keylist_sorted(keys);
+
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+                                  bkey_start_pos(&bch2_keylist_front(keys)->k),
+                                  BTREE_ITER_INTENT);
 
        do {
                BKEY_PADDED(k) split;
 
                bkey_copy(&split.k, bch2_keylist_front(keys));
 
-               bch2_extent_trim_atomic(&split.k, &iter);
+               bch2_extent_trim_atomic(&split.k, iter);
 
-               ret = bch2_btree_insert_at(c, &op->res,
-                               op_journal_seq(op),
-                               BTREE_INSERT_NOFAIL|
-                               BTREE_INSERT_USE_RESERVE,
-                               BTREE_INSERT_ENTRY(&iter, &split.k));
+               bch2_trans_update(&trans,
+                                 BTREE_INSERT_ENTRY(iter, &split.k));
+
+               ret = bch2_trans_commit(&trans, &op->res, op_journal_seq(op),
+                                       BTREE_INSERT_NOFAIL|
+                                       BTREE_INSERT_USE_RESERVE);
                if (ret)
                        break;
 
-               if (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) < 0)
-                       bch2_cut_front(iter.pos, bch2_keylist_front(keys));
+               if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
+                       bch2_cut_front(iter->pos, bch2_keylist_front(keys));
                else
                        bch2_keylist_pop_front(keys);
        } while (!bch2_keylist_empty(keys));
 
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 
        return ret;
 }
@@ -1403,7 +1410,8 @@ static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
 static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
 {
        struct bch_fs *c = rbio->c;
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        struct bkey_i_extent *e;
        BKEY_PADDED(k) new;
@@ -1414,10 +1422,13 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
        if (rbio->pick.crc.compression_type)
                return;
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, rbio->pos,
-                            BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
 retry:
-       k = bch2_btree_iter_peek(&iter);
+       bch2_trans_begin(&trans);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, rbio->pos,
+                                  BTREE_ITER_INTENT);
+       k = bch2_btree_iter_peek(iter);
        if (IS_ERR_OR_NULL(k.k))
                goto out;
 
@@ -1453,15 +1464,15 @@ retry:
        if (!bch2_extent_narrow_crcs(e, new_crc))
                goto out;
 
-       ret = bch2_btree_insert_at(c, NULL, NULL,
-                                  BTREE_INSERT_ATOMIC|
-                                  BTREE_INSERT_NOFAIL|
-                                  BTREE_INSERT_NOWAIT,
-                                  BTREE_INSERT_ENTRY(&iter, &e->k_i));
+       bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &e->k_i));
+       ret = bch2_trans_commit(&trans, NULL, NULL,
+                               BTREE_INSERT_ATOMIC|
+                               BTREE_INSERT_NOFAIL|
+                               BTREE_INSERT_NOWAIT);
        if (ret == -EINTR)
                goto retry;
 out:
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 }
 
 static bool should_narrow_crcs(struct bkey_s_c k,
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 9997a2793e0a4d5c42b721143a2248ca2747dd07..d20672a37fd36c22adea298478728ed642981197 100644
@@ -825,6 +825,8 @@ fsck_err:
 
 static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
 {
+       struct btree_trans trans;
+       struct btree_iter *iter;
        /*
         * We might cause compressed extents to be
         * split, so we need to pass in a
@@ -833,20 +835,21 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
        struct disk_reservation disk_res =
                bch2_disk_reservation_init(c, 0);
        BKEY_PADDED(k) split;
-       struct btree_iter iter;
        int ret;
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-                            bkey_start_pos(&k->k),
-                            BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+                                  bkey_start_pos(&k->k),
+                                  BTREE_ITER_INTENT);
        do {
-               ret = bch2_btree_iter_traverse(&iter);
+               ret = bch2_btree_iter_traverse(iter);
                if (ret)
                        break;
 
                bkey_copy(&split.k, k);
-               bch2_cut_front(iter.pos, &split.k);
-               bch2_extent_trim_atomic(&split.k, &iter);
+               bch2_cut_front(iter->pos, &split.k);
+               bch2_extent_trim_atomic(&split.k, iter);
 
                ret = bch2_disk_reservation_add(c, &disk_res,
                                split.k.k.size *
@@ -854,13 +857,13 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
                                BCH_DISK_RESERVATION_NOFAIL);
                BUG_ON(ret);
 
-               ret = bch2_btree_insert_at(c, &disk_res, NULL,
-                                          BTREE_INSERT_ATOMIC|
-                                          BTREE_INSERT_NOFAIL|
-                                          BTREE_INSERT_JOURNAL_REPLAY,
-                                          BTREE_INSERT_ENTRY(&iter, &split.k));
+               bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &split.k));
+               ret = bch2_trans_commit(&trans, &disk_res, NULL,
+                                       BTREE_INSERT_ATOMIC|
+                                       BTREE_INSERT_NOFAIL|
+                                       BTREE_INSERT_JOURNAL_REPLAY);
        } while ((!ret || ret == -EINTR) &&
-                bkey_cmp(k->k.p, iter.pos));
+                bkey_cmp(k->k.p, iter->pos));
 
        bch2_disk_reservation_put(c, &disk_res);
 
@@ -873,9 +876,9 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
         * before journal replay finishes
         */
        bch2_mark_key(c, bkey_i_to_s_c(k), false, -((s64) k->k.size),
-                     gc_pos_btree_node(iter.l[0].b),
+                     gc_pos_btree_node(iter->l[0].b),
                      NULL, 0, 0);
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 
        return ret;
 }
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index b97a5a8f3910de40c32e149a7b406b7dff204196..955831a508240dd6198448b09025cd3041e86294 100644
@@ -36,25 +36,29 @@ static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s k,
 
 static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 {
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        BKEY_PADDED(key) tmp;
-       struct btree_iter iter;
        int ret = 0;
 
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+                                  POS_MIN, BTREE_ITER_PREFETCH);
+
        mutex_lock(&c->replicas_gc_lock);
        bch2_replicas_gc_start(c, (1 << BCH_DATA_USER)|(1 << BCH_DATA_CACHED));
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-                            POS_MIN, BTREE_ITER_PREFETCH);
 
-       while ((k = bch2_btree_iter_peek(&iter)).k &&
+       while ((k = bch2_btree_iter_peek(iter)).k &&
               !(ret = btree_iter_err(k))) {
                if (!bkey_extent_is_data(k.k) ||
                    !bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
                        ret = bch2_mark_bkey_replicas(c, k);
                        if (ret)
                                break;
-                       bch2_btree_iter_next(&iter);
+                       bch2_btree_iter_next(iter);
                        continue;
                }
 
@@ -72,12 +76,14 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
                 */
                bch2_extent_normalize(c, bkey_i_to_s(&tmp.key));
 
-               iter.pos = bkey_start_pos(&tmp.key.k);
+               /* XXX not sketchy at all */
+               iter->pos = bkey_start_pos(&tmp.key.k);
+
+               bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.key));
 
-               ret = bch2_btree_insert_at(c, NULL, NULL,
-                                          BTREE_INSERT_ATOMIC|
-                                          BTREE_INSERT_NOFAIL,
-                                          BTREE_INSERT_ENTRY(&iter, &tmp.key));
+               ret = bch2_trans_commit(&trans, NULL, NULL,
+                                       BTREE_INSERT_ATOMIC|
+                                       BTREE_INSERT_NOFAIL);
 
                /*
                 * don't want to leave ret == -EINTR, since if we raced and
@@ -90,11 +96,11 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
                        break;
        }
 
-       bch2_btree_iter_unlock(&iter);
-
        bch2_replicas_gc_end(c, ret);
        mutex_unlock(&c->replicas_gc_lock);
 
+       bch2_trans_exit(&trans);
+
        return ret;
 }
 
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 5a35f76006cf089637abaeddfb0bebbd0ad7e14d..8c453ae315259a477a88ec366caff5cc145cd5ae 100644
@@ -54,18 +54,21 @@ struct moving_context {
 static int bch2_migrate_index_update(struct bch_write_op *op)
 {
        struct bch_fs *c = op->c;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct migrate_write *m =
                container_of(op, struct migrate_write, op);
        struct keylist *keys = &op->insert_keys;
-       struct btree_iter iter;
        int ret = 0;
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-                            bkey_start_pos(&bch2_keylist_front(keys)->k),
-                            BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+                                  bkey_start_pos(&bch2_keylist_front(keys)->k),
+                                  BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
        while (1) {
-               struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
+               struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
                struct bkey_i_extent *insert, *new =
                        bkey_i_to_extent(bch2_keylist_front(keys));
                BKEY_PADDED(k) _new, _insert;
@@ -74,10 +77,9 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
                bool did_work = false;
                int nr;
 
-               if (btree_iter_err(k)) {
-                       ret = bch2_btree_iter_unlock(&iter);
+               ret = btree_iter_err(k);
+               if (ret)
                        break;
-               }
 
                if (bversion_cmp(k.k->version, new->k.version) ||
                    !bkey_extent_is_data(k.k) ||
@@ -96,7 +98,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
                bkey_copy(&_new.k, bch2_keylist_front(keys));
                new = bkey_i_to_extent(&_new.k);
 
-               bch2_cut_front(iter.pos, &insert->k_i);
+               bch2_cut_front(iter->pos, &insert->k_i);
                bch2_cut_back(new->k.p, &insert->k);
                bch2_cut_back(insert->k.p, &new->k);
 
@@ -138,12 +140,6 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
                if (insert->k.size < k.k->size &&
                    bch2_extent_is_compressed(k) &&
                    nr > 0) {
-                       /*
-                        * can't call bch2_disk_reservation_add() with btree
-                        * locks held, at least not without a song and dance
-                        */
-                       bch2_btree_iter_unlock(&iter);
-
                        ret = bch2_disk_reservation_add(c, &op->res,
                                        keylist_sectors(keys) * nr, 0);
                        if (ret)
@@ -153,13 +149,15 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
                        goto next;
                }
 
-               ret = bch2_btree_insert_at(c, &op->res,
+               bch2_trans_update(&trans,
+                               BTREE_INSERT_ENTRY(iter, &insert->k_i));
+
+               ret = bch2_trans_commit(&trans, &op->res,
                                op_journal_seq(op),
                                BTREE_INSERT_ATOMIC|
                                BTREE_INSERT_NOFAIL|
                                BTREE_INSERT_USE_RESERVE|
-                               m->data_opts.btree_insert_flags,
-                               BTREE_INSERT_ENTRY(&iter, &insert->k_i));
+                               m->data_opts.btree_insert_flags);
                if (!ret)
                        atomic_long_inc(&c->extent_migrate_done);
                if (ret == -EINTR)
@@ -167,25 +165,25 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
                if (ret)
                        break;
 next:
-               while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+               while (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) >= 0) {
                        bch2_keylist_pop_front(keys);
                        if (bch2_keylist_empty(keys))
                                goto out;
                }
 
-               bch2_cut_front(iter.pos, bch2_keylist_front(keys));
+               bch2_cut_front(iter->pos, bch2_keylist_front(keys));
                continue;
 nomatch:
                if (m->ctxt)
-                       atomic64_add(k.k->p.offset - iter.pos.offset,
+                       atomic64_add(k.k->p.offset - iter->pos.offset,
                                     &m->ctxt->stats->sectors_raced);
                atomic_long_inc(&c->extent_migrate_raced);
                trace_move_race(&new->k);
-               bch2_btree_iter_next_slot(&iter);
+               bch2_btree_iter_next_slot(iter);
                goto next;
        }
 out:
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
        return ret;
 }
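
The migrate path now holds one btree_trans for the whole index-update loop: each pass stages the rewritten extent with bch2_trans_update(), commits it with bch2_trans_commit() (the insert flags move from bch2_btree_insert_at() to the commit call), and bch2_trans_exit() replaces the final iterator unlock. Since the commit is BTREE_INSERT_ATOMIC it can return -EINTR when the transaction must be restarted, which is why the loop keeps its -EINTR path. A minimal sketch of that commit-and-retry shape, assuming only the bch2_trans_*() calls visible in this patch; commit_one_key() is a hypothetical helper, and a real caller would normally redo its lookups before retrying:

/*
 * Sketch only: stage and commit one update, retrying on transaction
 * restart (-EINTR).  Hypothetical helper, not part of this patch.
 */
static int commit_one_key(struct bch_fs *c, enum btree_id id,
			  struct bpos pos, struct bkey_i *insert)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;

	bch2_trans_init(&trans, c);

	iter = bch2_trans_get_iter(&trans, id, pos, BTREE_ITER_INTENT);

	do {
		/* re-stage the update on each attempt */
		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, insert));

		ret = bch2_trans_commit(&trans, NULL, NULL,
					BTREE_INSERT_ATOMIC|
					BTREE_INSERT_NOFAIL);
	} while (ret == -EINTR);

	bch2_trans_exit(&trans);
	return ret;
}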
 
index 113a2ca88ffcb5662ca05e03c8557c404110a3f4..492ab73c39e7393f537002ddd382cd588360c80a 100644 (file)
@@ -708,7 +708,8 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
                          struct qc_dqblk *qdq)
 {
        struct bch_fs *c = sb->s_fs_info;
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        struct bkey_i_quota new_quota;
        int ret;
@@ -719,9 +720,11 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
        bkey_quota_init(&new_quota.k_i);
        new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_QUOTAS, new_quota.k.p,
-                            BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-       k = bch2_btree_iter_peek_slot(&iter);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_QUOTAS, new_quota.k.p,
+                                  BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+       k = bch2_btree_iter_peek_slot(iter);
 
        ret = btree_iter_err(k);
        if (unlikely(ret))
@@ -743,9 +746,11 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
        if (qdq->d_fieldmask & QC_INO_HARD)
                new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
 
-       ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-                                  BTREE_INSERT_ENTRY(&iter, &new_quota.k_i));
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new_quota.k_i));
+
+       ret = bch2_trans_commit(&trans, NULL, NULL, 0);
+
+       bch2_trans_exit(&trans);
 
        if (ret)
                return ret;
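
bch2_set_quota() is the simplest shape of the conversion: take the iterator from the transaction, peek the slot, check btree_iter_err() on the returned key, build the new key, then stage and commit it, tearing the transaction down with bch2_trans_exit() before acting on ret. The same read-modify-write pattern reduced to a sketch, with the qc_dqblk field handling elided; only calls already used in this hunk are assumed, and the helper name is made up:

/* Sketch of the single-slot read-modify-write in bch2_set_quota(). */
static int set_quota_key_sketch(struct bch_fs *c, struct bpos pos)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i_quota new_quota;
	int ret;

	bkey_quota_init(&new_quota.k_i);
	new_quota.k.p = pos;

	bch2_trans_init(&trans, c);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_QUOTAS, new_quota.k.p,
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(iter);

	ret = btree_iter_err(k);
	if (ret)
		goto out;

	/* ...fill in new_quota.v from the existing key and the caller... */

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new_quota.k_i));
	ret = bch2_trans_commit(&trans, NULL, NULL, 0);
out:
	bch2_trans_exit(&trans);
	return ret;
}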
index c5bce01bf34cc232529b5c5a9086ed4877a2cd11..ffa7af0820eafc723b11ca062fd8eac36c2f3052 100644 (file)
@@ -213,10 +213,10 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
 }
 
 static __always_inline
-int __bch2_hash_set(struct btree_trans *trans,
-                   const struct bch_hash_desc desc,
-                   const struct bch_hash_info *info,
-                   u64 inode, struct bkey_i *insert, int flags)
+int bch2_hash_set(struct btree_trans *trans,
+                 const struct bch_hash_desc desc,
+                 const struct bch_hash_info *info,
+                 u64 inode, struct bkey_i *insert, int flags)
 {
        struct btree_iter *iter, *slot = NULL;
        struct bkey_s_c k;
@@ -267,17 +267,6 @@ found:
        return 0;
 }
 
-static inline int bch2_hash_set(const struct bch_hash_desc desc,
-                              const struct bch_hash_info *info,
-                              struct bch_fs *c, u64 inode,
-                              u64 *journal_seq,
-                              struct bkey_i *insert, int flags)
-{
-       return bch2_trans_do(c, journal_seq, flags|BTREE_INSERT_ATOMIC,
-                       __bch2_hash_set(&trans, desc, info,
-                                       inode, insert, flags));
-}
-
 static __always_inline
 int bch2_hash_delete_at(struct btree_trans *trans,
                        const struct bch_hash_desc desc,
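
In str_hash.h the transactional __bch2_hash_set() takes over the bch2_hash_set() name, and the old non-transactional wrapper, which spun up its own transaction via bch2_trans_do(), is deleted. Callers that already run inside a transaction, such as bch2_xattr_set() below, just drop the underscores. A caller with no transaction of its own would now open-code what the deleted wrapper did, roughly as follows (a sketch; it assumes bch2_trans_do() keeps the signature the removed wrapper relied on, i.e. it declares and manages trans itself):

/* Sketch: one-shot hash set for a caller without a transaction. */
static inline int hash_set_one_shot(struct bch_fs *c,
				    const struct bch_hash_desc desc,
				    const struct bch_hash_info *info,
				    u64 inode, u64 *journal_seq,
				    struct bkey_i *insert, int flags)
{
	return bch2_trans_do(c, journal_seq, flags|BTREE_INSERT_ATOMIC,
			bch2_hash_set(&trans, desc, info,
				      inode, insert, flags));
}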
index bcbe782260f066fca6e1bd2d9b8269ebf41beded..652e22125dcf1625b31e9aac2c49afc8c6741ee8 100644 (file)
@@ -28,57 +28,63 @@ static void delete_test_keys(struct bch_fs *c)
 
 static void test_delete(struct bch_fs *c, u64 nr)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_i_cookie k;
        int ret;
 
        bkey_cookie_init(&k.k_i);
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
-                            BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
+                                  BTREE_ITER_INTENT);
 
-       ret = bch2_btree_iter_traverse(&iter);
+       ret = bch2_btree_iter_traverse(iter);
        BUG_ON(ret);
 
-       ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-                                  BTREE_INSERT_ENTRY(&iter, &k.k_i));
+       bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+       ret = bch2_trans_commit(&trans, NULL, NULL, 0);
        BUG_ON(ret);
 
        pr_info("deleting once");
-       ret = bch2_btree_delete_at(&iter, 0);
+       ret = bch2_btree_delete_at(&trans, iter, 0);
        BUG_ON(ret);
 
        pr_info("deleting twice");
-       ret = bch2_btree_delete_at(&iter, 0);
+       ret = bch2_btree_delete_at(&trans, iter, 0);
        BUG_ON(ret);
 
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 }
 
 static void test_delete_written(struct bch_fs *c, u64 nr)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_i_cookie k;
        int ret;
 
        bkey_cookie_init(&k.k_i);
 
-       bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
-                            BTREE_ITER_INTENT);
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
+                                  BTREE_ITER_INTENT);
 
-       ret = bch2_btree_iter_traverse(&iter);
+       ret = bch2_btree_iter_traverse(iter);
        BUG_ON(ret);
 
-       ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-                                  BTREE_INSERT_ENTRY(&iter, &k.k_i));
+       bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+       ret = bch2_trans_commit(&trans, NULL, NULL, 0);
        BUG_ON(ret);
 
        bch2_journal_flush_all_pins(&c->journal);
 
-       ret = bch2_btree_delete_at(&iter, 0);
+       ret = bch2_btree_delete_at(&trans, iter, 0);
        BUG_ON(ret);
 
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 }
 
 static void test_iterate(struct bch_fs *c, u64 nr)
@@ -415,26 +421,29 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
        u64 i;
 
        for (i = 0; i < nr; i++) {
-               struct btree_iter iter;
+               struct btree_trans trans;
+               struct btree_iter *iter;
                struct bkey_s_c k;
 
-               bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
-                                    POS(0, test_rand()), 0);
+               bch2_trans_init(&trans, c);
 
-               k = bch2_btree_iter_peek(&iter);
+               iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
+                                          POS(0, test_rand()), 0);
+
+               k = bch2_btree_iter_peek(iter);
 
                if (!(i & 3) && k.k) {
                        struct bkey_i_cookie k;
 
                        bkey_cookie_init(&k.k_i);
-                       k.k.p = iter.pos;
+                       k.k.p = iter->pos;
 
-                       ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-                                                  BTREE_INSERT_ENTRY(&iter, &k.k_i));
+                       bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+                       ret = bch2_trans_commit(&trans, NULL, NULL, 0);
                        BUG_ON(ret);
                }
 
-               bch2_btree_iter_unlock(&iter);
+               bch2_trans_exit(&trans);
        }
 
 }
@@ -457,7 +466,8 @@ static void rand_delete(struct bch_fs *c, u64 nr)
 
 static void seq_insert(struct bch_fs *c, u64 nr)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        struct bkey_i_cookie insert;
        int ret;
@@ -465,18 +475,22 @@ static void seq_insert(struct bch_fs *c, u64 nr)
 
        bkey_cookie_init(&insert.k_i);
 
-       for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
-                          BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
-               insert.k.p = iter.pos;
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
+                                  BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+       for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+               insert.k.p = iter->pos;
 
-               ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-                               BTREE_INSERT_ENTRY(&iter, &insert.k_i));
+               bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
+               ret = bch2_trans_commit(&trans, NULL, NULL, 0);
                BUG_ON(ret);
 
                if (++i == nr)
                        break;
        }
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 }
 
 static void seq_lookup(struct bch_fs *c, u64 nr)
@@ -491,21 +505,26 @@ static void seq_lookup(struct bch_fs *c, u64 nr)
 
 static void seq_overwrite(struct bch_fs *c, u64 nr)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        int ret;
 
-       for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
-                          BTREE_ITER_INTENT, k) {
+       bch2_trans_init(&trans, c);
+
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
+                                  BTREE_ITER_INTENT);
+
+       for_each_btree_key_continue(iter, 0, k) {
                struct bkey_i_cookie u;
 
                bkey_reassemble(&u.k_i, k);
 
-               ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-                                          BTREE_INSERT_ENTRY(&iter, &u.k_i));
+               bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
+               ret = bch2_trans_commit(&trans, NULL, NULL, 0);
                BUG_ON(ret);
        }
-       bch2_btree_iter_unlock(&iter);
+       bch2_trans_exit(&trans);
 }
 
 static void seq_delete(struct bch_fs *c, u64 nr)
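
The tests swap for_each_btree_key() over a stack iterator for for_each_btree_key_continue() over an iterator obtained from the transaction (already positioned by bch2_trans_get_iter()), and each per-key insert becomes a staged update plus commit inside the loop. For reference, the converted seq_overwrite() loop shape written out without diff markers, as a sketch that mirrors the hunk above rather than adding anything new:

/* Sketch of the converted iterate-and-commit loop used by the tests. */
static void overwrite_loop_sketch(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_init(&trans, c);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
				   BTREE_ITER_INTENT);

	for_each_btree_key_continue(iter, 0, k) {
		struct bkey_i_cookie u;

		bkey_reassemble(&u.k_i, k);

		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
		BUG_ON(ret);
	}
	bch2_trans_exit(&trans);
}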
index f31eec2f1fcecf46fa60081bb7ba9ecec7b6b2cd..545e743972fb4c8145e126f2cf317bfc438bda77 100644 (file)
@@ -180,7 +180,7 @@ int bch2_xattr_set(struct btree_trans *trans, u64 inum,
                memcpy(xattr->v.x_name, name, namelen);
                memcpy(xattr_val(&xattr->v), value, size);
 
-               ret = __bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
+               ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
                              inum, &xattr->k_i,
                              (flags & XATTR_CREATE ? BCH_HASH_SET_MUST_CREATE : 0)|
                              (flags & XATTR_REPLACE ? BCH_HASH_SET_MUST_REPLACE : 0));