bcachefs: Factor out btree_key_can_insert()
authorKent Overstreet <kent.overstreet@gmail.com>
Fri, 3 Aug 2018 23:41:44 +0000 (19:41 -0400)
committerKent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:08 +0000 (17:08 -0400)
Working on getting rid of all the reasons bch2_insert_fixup_extent() can
fail/stop partway, which is needed for other refactorings.

One of the reasons we could have to bail out is that, when splitting a
compressed extent, we might need to add to our disk reservation - but we
can check that before actually starting the insert.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/extents.c
fs/bcachefs/extents.h

index 438ef0c0762339b557727d1c64caa64692eaa845..2ca3b1f0236f86abc250e9bcb6f60008699f1abf 100644 (file)
@@ -448,7 +448,6 @@ enum btree_insert_ret {
        /* extent spanned multiple leaf nodes: have to traverse to next node: */
        BTREE_INSERT_NEED_TRAVERSE,
        /* write lock held for too long */
-       BTREE_INSERT_NEED_RESCHED,
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
        BTREE_INSERT_JOURNAL_RES_FULL,
index 3d0c6f5c98ada9e22495defc8309febc82e44a1a..32126b02ce3ad8d3a6612d6de6b077bf7591d9f6 100644 (file)
@@ -297,6 +297,30 @@ static inline int btree_trans_cmp(struct btree_insert_entry l,
 
 /* Normal update interface: */
 
+static enum btree_insert_ret
+btree_key_can_insert(struct btree_insert *trans,
+                     struct btree_insert_entry *insert,
+                     unsigned *u64s)
+{
+       struct bch_fs *c = trans->c;
+       struct btree *b = insert->iter->l[0].b;
+       enum btree_insert_ret ret;
+
+       if (unlikely(btree_node_fake(b)))
+               return BTREE_INSERT_BTREE_NODE_FULL;
+
+       ret = !btree_node_is_extents(b)
+               ? BTREE_INSERT_OK
+               : bch2_extent_can_insert(trans, insert, u64s);
+       if (ret)
+               return ret;
+
+       if (*u64s > bch_btree_keys_u64s_remaining(c, b))
+               return BTREE_INSERT_BTREE_NODE_FULL;
+
+       return BTREE_INSERT_OK;
+}
+
 /*
  * Get journal reservation, take write locks, and attempt to do btree update(s):
  */
@@ -336,24 +360,34 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
                goto out;
        }
 
+       /*
+        * Check if the insert will fit in the leaf node with the write lock
+        * held, otherwise another thread could write the node changing the
+        * amount of space available:
+        */
        u64s = 0;
        trans_for_each_entry(trans, i) {
                /* Multiple inserts might go to same leaf: */
                if (!same_leaf_as_prev(trans, i))
                        u64s = 0;
 
-               /*
-                * bch2_btree_node_insert_fits() must be called under write lock:
-                * with only an intent lock, another thread can still call
-                * bch2_btree_node_write(), converting an unwritten bset to a
-                * written one
-                */
                u64s += i->k->k.u64s + i->extra_res;
-               if (!bch2_btree_node_insert_fits(c,
-                               i->iter->l[0].b, u64s)) {
+               switch (btree_key_can_insert(trans, i, &u64s)) {
+               case BTREE_INSERT_OK:
+                       break;
+               case BTREE_INSERT_BTREE_NODE_FULL:
                        ret = -EINTR;
                        *split = i->iter;
                        goto out;
+               case BTREE_INSERT_ENOSPC:
+                       ret = -ENOSPC;
+                       goto out;
+               case BTREE_INSERT_NEED_GC_LOCK:
+                       ret = -EINTR;
+                       *cycle_gc_lock = true;
+                       goto out;
+               default:
+                       BUG();
                }
        }
 
@@ -373,7 +407,6 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
                        break;
                case BTREE_INSERT_JOURNAL_RES_FULL:
                case BTREE_INSERT_NEED_TRAVERSE:
-               case BTREE_INSERT_NEED_RESCHED:
                        ret = -EINTR;
                        break;
                case BTREE_INSERT_BTREE_NODE_FULL:
@@ -383,10 +416,6 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
                case BTREE_INSERT_ENOSPC:
                        ret = -ENOSPC;
                        break;
-               case BTREE_INSERT_NEED_GC_LOCK:
-                       ret = -EINTR;
-                       *cycle_gc_lock = true;
-                       break;
                default:
                        BUG();
                }
index 276545dfa2462dd5274511bf2741209893e24d92..02a49d9845fb41c6eaacde68131a6162c9f1c9b3 100644 (file)
@@ -1113,8 +1113,6 @@ static bool bch2_extent_merge_inline(struct bch_fs *,
                                     struct bkey_packed *,
                                     bool);
 
-#define MAX_LOCK_HOLD_TIME     (5 * NSEC_PER_MSEC)
-
 static enum btree_insert_ret
 extent_insert_should_stop(struct extent_insert_state *s)
 {
@@ -1287,23 +1285,41 @@ extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
        return __extent_insert_advance_pos(s, next_pos, k);
 }
 
-static enum btree_insert_ret
-extent_insert_check_split_compressed(struct extent_insert_state *s,
-                                    struct bkey_s_c k,
-                                    enum bch_extent_overlap overlap)
+enum btree_insert_ret
+bch2_extent_can_insert(struct btree_insert *trans,
+                      struct btree_insert_entry *insert,
+                      unsigned *u64s)
 {
-       struct bch_fs *c = s->trans->c;
-       unsigned sectors;
+       struct btree_iter_level *l = &insert->iter->l[0];
+       struct btree_node_iter node_iter = l->iter;
+       enum bch_extent_overlap overlap;
+       struct bkey_packed *_k;
+       struct bkey unpacked;
+       struct bkey_s_c k;
+       int sectors;
+
+       _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
+                                             KEY_TYPE_DISCARD);
+       if (!_k)
+               return BTREE_INSERT_OK;
+
+       k = bkey_disassemble(l->b, _k, &unpacked);
+
+       overlap = bch2_extent_overlap(&insert->k->k, k.k);
+
+       /* account for having to split existing extent: */
+       if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
+               *u64s += _k->u64s;
 
        if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
            (sectors = bch2_extent_is_compressed(k))) {
                int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
 
-               if (s->trans->flags & BTREE_INSERT_NOFAIL)
+               if (trans->flags & BTREE_INSERT_NOFAIL)
                        flags |= BCH_DISK_RESERVATION_NOFAIL;
 
-               switch (bch2_disk_reservation_add(c,
-                               s->trans->disk_res,
+               switch (bch2_disk_reservation_add(trans->c,
+                               trans->disk_res,
                                sectors * bch2_extent_nr_dirty_ptrs(k),
                                flags)) {
                case 0:
@@ -1471,10 +1487,6 @@ __bch2_delete_fixup_extent(struct extent_insert_state *s)
 
                overlap = bch2_extent_overlap(&insert->k, k.k);
 
-               ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
-               if (ret)
-                       break;
-
                ret = extent_insert_advance_pos(s, k.s_c);
                if (ret)
                        break;
@@ -1550,10 +1562,6 @@ __bch2_insert_fixup_extent(struct extent_insert_state *s)
 
                overlap = bch2_extent_overlap(&insert->k, k.k);
 
-               ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
-               if (ret)
-                       break;
-
                if (!k.k->size)
                        goto squash;
 
index 0598d630969772396fda49b44f6fdaeec3cf9c1b..fddf25c3fa4bc2703d818e95b2015eb71adb1e3a 100644 (file)
@@ -63,8 +63,10 @@ int bch2_extent_pick_ptr(struct bch_fs *, struct bkey_s_c,
                         struct extent_pick_ptr *);
 
 enum btree_insert_ret
-bch2_insert_fixup_extent(struct btree_insert *,
-                       struct btree_insert_entry *);
+bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *,
+                      unsigned *);
+enum btree_insert_ret
+bch2_insert_fixup_extent(struct btree_insert *, struct btree_insert_entry *);
 
 bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
 void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent,