bcachefs: Don't set accessed bit on btree node fill
Author: Kent Overstreet <kent.overstreet@linux.dev>
Date: Fri, 25 Nov 2022 21:04:42 +0000 (16:04 -0500)
Committer: Kent Overstreet <kent.overstreet@linux.dev>
Commit date: Sun, 22 Oct 2023 21:09:47 +0000 (17:09 -0400)
Btree nodes shouldn't have their accessed bit set when entering the
btree cache by being read in from disk - this fixes linear scans
thrashing the cache.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_cache.c

index 91ddbc7b84897ef7b00cbd3b8c319b7c50d22f7a..90be4c7325f7f5d8dd607b2ec4683f74afe06bb8 100644 (file)
@@ -707,6 +707,12 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
        if (IS_ERR(b))
                return b;
 
+       /*
+        * Btree nodes read in from disk should not have the accessed bit set
+        * initially, so that linear scans don't thrash the cache:
+        */
+       clear_btree_node_accessed(b);
+
        bkey_copy(&b->key, k);
        if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
                /* raced with another fill: */
@@ -843,6 +849,10 @@ retry:
                        trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
                        return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
                }
+
+               /* avoid atomic set bit if it's not needed: */
+               if (!btree_node_accessed(b))
+                       set_btree_node_accessed(b);
        }
 
        if (unlikely(btree_node_read_in_flight(b))) {
@@ -880,10 +890,6 @@ retry:
                prefetch(p + L1_CACHE_BYTES * 2);
        }
 
-       /* avoid atomic set bit if it's not needed: */
-       if (!btree_node_accessed(b))
-               set_btree_node_accessed(b);
-
        if (unlikely(btree_node_read_error(b))) {
                six_unlock_type(&b->c.lock, lock_type);
                return ERR_PTR(-EIO);