bcachefs: kill bch2_dev_bkey_exists() in bkey_pick_read_device()
author	Kent Overstreet <kent.overstreet@linux.dev>
Wed, 1 May 2024 00:49:13 +0000 (20:49 -0400)
committer	Kent Overstreet <kent.overstreet@linux.dev>
Wed, 8 May 2024 21:29:23 +0000 (17:29 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/extents.c

index 7b889f93878bb4ce2ac3cf2fb2307f9f8cfc081e..b785712ae9b266591b1af1ca817dabc75a99424f 100644
@@ -71,6 +71,12 @@ void bch2_mark_io_failure(struct bch_io_failures *failed,
        }
 }
 
+static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
+{
+       struct bch_dev *ca = bch2_dev_rcu(c, dev);
+       return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
+}
+
 /*
  * returns true if p1 is better than p2:
  */
@@ -79,11 +85,8 @@ static inline bool ptr_better(struct bch_fs *c,
                              const struct extent_ptr_decoded p2)
 {
        if (likely(!p1.idx && !p2.idx)) {
-               struct bch_dev *dev1 = bch2_dev_bkey_exists(c, p1.ptr.dev);
-               struct bch_dev *dev2 = bch2_dev_bkey_exists(c, p2.ptr.dev);
-
-               u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
-               u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
+               u64 l1 = dev_latency(c, p1.ptr.dev);
+               u64 l2 = dev_latency(c, p2.ptr.dev);
 
                /* Pick at random, biased in favor of the faster device: */
 
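The biased-random selection that follows the comment above falls outside this hunk's context, so it is not shown here. As a rough illustration of the idea only (pick_first() and the use of rand() are stand-ins, not bcachefs code), a pointer whose device reports latency l1 would win against one reporting l2 with probability l2 / (l1 + l2); the S64_MAX fallback in the new dev_latency() makes a device that has gone away look infinitely slow, so it essentially always loses the comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Illustrative userspace sketch of latency-biased selection between two
 * read pointers.  Lower latency wins more often, but not always, so the
 * slower device still sees enough reads to keep its latency estimate
 * current.  A missing device reports INT64_MAX (mirroring dev_latency()'s
 * S64_MAX fallback) and therefore essentially never wins.
 */
static bool pick_first(uint64_t l1, uint64_t l2)
{
	if (!l1 && !l2)
		return rand() & 1;	/* no latency data yet: coin flip */

	/* uniform-ish value in [0, l1 + l2); crude, but fine for a sketch */
	uint64_t r = (uint64_t) rand() % (l1 + l2);

	/* true with probability l2 / (l1 + l2) */
	return r >= l1;
}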
@@ -109,21 +112,21 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        struct bch_dev_io_failures *f;
-       struct bch_dev *ca;
        int ret = 0;
 
        if (k.k->type == KEY_TYPE_error)
                return -EIO;
 
+       rcu_read_lock();
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                /*
                 * Unwritten extent: no need to actually read, treat it as a
                 * hole and return 0s:
                 */
-               if (p.ptr.unwritten)
-                       return 0;
-
-               ca = bch2_dev_bkey_exists(c, p.ptr.dev);
+               if (p.ptr.unwritten) {
+                       ret = 0;
+                       break;
+               }
 
                /*
                 * If there are any dirty pointers it's an error if we can't
@@ -132,7 +135,9 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
                if (!ret && !p.ptr.cached)
                        ret = -EIO;
 
-               if (p.ptr.cached && dev_ptr_stale(ca, &p.ptr))
+               struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+
+               if (p.ptr.cached && (!ca || dev_ptr_stale(ca, &p.ptr)))
                        continue;
 
                f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
@@ -141,12 +146,13 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
                                ? f->idx
                                : f->idx + 1;
 
-               if (!p.idx &&
-                   !bch2_dev_is_readable(ca))
+               if (!p.idx && !ca)
                        p.idx++;
 
-               if (bch2_force_reconstruct_read &&
-                   !p.idx && p.has_ec)
+               if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
+                       p.idx++;
+
+               if (!p.idx && !bch2_dev_is_readable(ca))
                        p.idx++;
 
                if (p.idx >= (unsigned) p.has_ec + 1)
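To make the reordered checks above easier to follow, here is a simplified standalone model of how p.idx walks through the ways a single pointer can be serviced: idx 0 reads the replica on the pointer's device directly, idx 1 reconstructs the data from the erasure-coding stripe (only possible when has_ec). The struct and function names below are illustrative, not bcachefs APIs:

#include <stdbool.h>

/* Illustrative model of the p.idx bookkeeping; not the kernel structures */
struct model_ptr {
	unsigned	idx;		/* starting index, bumped by earlier I/O failures */
	bool		has_ec;		/* pointer is part of an erasure-coded stripe */
	bool		dev_present;	/* bch2_dev_rcu() returned a device */
	bool		dev_readable;	/* device is in a readable state */
	bool		force_reconstruct; /* the bch2_force_reconstruct_read debug knob */
};

/* Returns the chosen idx, or -1 if this pointer cannot be used at all */
static int model_pick_idx(struct model_ptr p)
{
	if (!p.idx && !p.dev_present)
		p.idx++;		/* device is gone: a direct read is impossible */

	if (!p.idx && p.has_ec && p.force_reconstruct)
		p.idx++;		/* debug knob: always exercise EC reconstruction */

	if (!p.idx && !p.dev_readable)
		p.idx++;		/* device exists but must not be read from */

	if (p.idx >= (unsigned) p.has_ec + 1)
		return -1;		/* direct read and (optional) EC path both exhausted */

	return p.idx;
}

Note that because the new !ca check comes first and every later test is gated on !p.idx, bch2_dev_is_readable() is never reached with a NULL ca.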
@@ -158,6 +164,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
                *pick = p;
                ret = 1;
        }
+       rcu_read_unlock();
 
        return ret;
 }
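For reference, the function's return convention is unchanged by this patch: a negative error for a KEY_TYPE_error key or unreachable dirty data, 0 when nothing needs to be read (the unwritten-extent case above), and 1 when *pick has been filled in. A hypothetical caller might consume it as sketched below; read_one_extent() and submit_extent_read() are made up for illustration, while bch2_bkey_pick_read_device() and zero_fill_bio() are real:

/*
 * Hypothetical caller, for illustration only -- not the real bcachefs
 * read path.  It shows how the three-way return value is meant to be
 * consumed.
 */
static int read_one_extent(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_io_failures *failed,
			   struct bio *bio)
{
	struct extent_ptr_decoded pick;
	int ret = bch2_bkey_pick_read_device(c, k, failed, &pick);

	if (ret < 0)		/* error key, or dirty data with no usable device */
		return ret;

	if (!ret) {		/* nothing to read (e.g. unwritten extent): treat as a hole */
		zero_fill_bio(bio);
		return 0;
	}

	/* ret == 1: pick describes the device/offset (or EC path) to read from */
	return submit_extent_read(c, &pick, bio);	/* made-up helper */
}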