bcachefs: Refactor delete_dead_snapshots()
author Kent Overstreet <kent.overstreet@linux.dev>
Mon, 27 May 2024 02:22:30 +0000 (22:22 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Tue, 28 May 2024 15:29:26 +0000 (11:29 -0400)
Consolidate per-key work into delete_dead_snapshots_process_key(), so we
now walk all keys once, not twice.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/snapshot.c

index 629900a5e6411c2ee78e9be528646c973b500b09..466fa3e6a4b656aae48fc1ce357b09aa1c08e7a6 100644 (file)
@@ -1351,7 +1351,7 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
  * that key to snapshot leaf nodes, where we can mutate it
  */
 
-static int snapshot_delete_key(struct btree_trans *trans,
+static int delete_dead_snapshots_process_key(struct btree_trans *trans,
                               struct btree_iter *iter,
                               struct bkey_s_c k,
                               snapshot_id_list *deleted,
@@ -1360,26 +1360,26 @@ static int snapshot_delete_key(struct btree_trans *trans,
 {
        struct bch_fs *c = trans->c;
        u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+       if (!equiv) /* key for invalid snapshot node, but we chose not to delete */
+               return 0;
 
        if (!bkey_eq(k.k->p, *last_pos))
                equiv_seen->nr = 0;
-       *last_pos = k.k->p;
 
-       if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
-           snapshot_list_has_id(equiv_seen, equiv)) {
+       if (snapshot_list_has_id(deleted, k.k->p.snapshot))
                return bch2_btree_delete_at(trans, iter,
                                            BTREE_UPDATE_internal_snapshot_node);
-       } else {
-               return snapshot_list_add(c, equiv_seen, equiv);
-       }
-}
 
-static int move_key_to_correct_snapshot(struct btree_trans *trans,
-                              struct btree_iter *iter,
-                              struct bkey_s_c k)
-{
-       struct bch_fs *c = trans->c;
-       u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+       if (!bpos_eq(*last_pos, k.k->p) &&
+           snapshot_list_has_id(equiv_seen, equiv))
+               return bch2_btree_delete_at(trans, iter,
+                                           BTREE_UPDATE_internal_snapshot_node);
+
+       *last_pos = k.k->p;
+
+       int ret = snapshot_list_add_nodup(c, equiv_seen, equiv);
+       if (ret)
+               return ret;
 
        /*
         * When we have a linear chain of snapshot nodes, we consider
@@ -1389,21 +1389,20 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
         *
         * If there are multiple keys in different snapshots at the same
         * position, we're only going to keep the one in the newest
-        * snapshot - the rest have been overwritten and are redundant,
-        * and for the key we're going to keep we need to move it to the
-        * equivalance class ID if it's not there already.
+        * snapshot (we delete the others above) - the rest have been
+        * overwritten and are redundant, and for the key we're going to keep we
+        * need to move it to the equivalence class ID if it's not there
+        * already.
         */
        if (equiv != k.k->p.snapshot) {
                struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
-               struct btree_iter new_iter;
-               int ret;
-
-               ret = PTR_ERR_OR_ZERO(new);
+               int ret = PTR_ERR_OR_ZERO(new);
                if (ret)
                        return ret;
 
                new->k.p.snapshot = equiv;
 
+               struct btree_iter new_iter;
                bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
                                     BTREE_ITER_all_snapshots|
                                     BTREE_ITER_cached|
@@ -1538,7 +1537,6 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
        struct btree_trans *trans;
        snapshot_id_list deleted = { 0 };
        snapshot_id_list deleted_interior = { 0 };
-       u32 id;
        int ret = 0;
 
        if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
@@ -1585,33 +1583,20 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
        if (ret)
                goto err;
 
-       for (id = 0; id < BTREE_ID_NR; id++) {
+       for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
                struct bpos last_pos = POS_MIN;
                snapshot_id_list equiv_seen = { 0 };
                struct disk_reservation res = { 0 };
 
-               if (!btree_type_has_snapshots(id))
-                       continue;
-
-               /*
-                * deleted inodes btree is maintained by a trigger on the inodes
-                * btree - no work for us to do here, and it's not safe to scan
-                * it because we'll see out of date keys due to the btree write
-                * buffer:
-                */
-               if (id == BTREE_ID_deleted_inodes)
+               if (!btree_type_has_snapshots(btree))
                        continue;
 
                ret = for_each_btree_key_commit(trans, iter,
-                               id, POS_MIN,
-                               BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
-                               &res, NULL, BCH_TRANS_COMMIT_no_enospc,
-                       snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
-                     for_each_btree_key_commit(trans, iter,
-                               id, POS_MIN,
+                               btree, POS_MIN,
                                BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
                                &res, NULL, BCH_TRANS_COMMIT_no_enospc,
-                       move_key_to_correct_snapshot(trans, &iter, k));
+                       delete_dead_snapshots_process_key(trans, &iter, k, &deleted,
+                                                         &equiv_seen, &last_pos));
 
                bch2_disk_reservation_put(c, &res);
                darray_exit(&equiv_seen);