bcachefs: ptr_stale() -> dev_ptr_stale()
author Kent Overstreet <kent.overstreet@linux.dev>
Wed, 1 May 2024 00:56:54 +0000 (20:56 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Wed, 8 May 2024 21:29:23 +0000 (17:29 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bcachefs.h
fs/bcachefs/btree_gc.c
fs/bcachefs/buckets.h
fs/bcachefs/ec.c
fs/bcachefs/extents.c
fs/bcachefs/io_read.c

diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 22d303813d31f31397e689cc5071ed5dab55f309..ab415f0dd14ee05bba0a2b170d1ffe97f86a397c 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -573,7 +573,7 @@ struct bch_dev {
         * Buckets:
         * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
         * gc_lock, for device resize - holding any is sufficient for access:
-        * Or rcu_read_lock(), but only for ptr_stale():
+        * Or rcu_read_lock(), but only for dev_ptr_stale():
         */
        struct bucket_array __rcu *buckets_gc;
        struct bucket_gens __rcu *bucket_gens;
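
The comment this hunk touches describes two ways of reaching the per-bucket arrays. A minimal sketch of both access modes, assuming the dev_ptr_stale_rcu() helper added later in this patch; the example_* functions are hypothetical and not part of the patch:

/* Full access: hold one of the protecting locks (c->mark_lock shown here;
 * bucket_lock or gc_lock for device resize would also do). */
static void example_scan_buckets(struct bch_fs *c, struct bch_dev *ca)
{
        percpu_down_read(&c->mark_lock);
        /* ... safe to walk ca->bucket_gens / ca->buckets_gc here ... */
        percpu_up_read(&c->mark_lock);
}

/* Staleness check only: rcu_read_lock() is enough, which is exactly
 * what dev_ptr_stale() does around dev_ptr_stale_rcu(). */
static bool example_ptr_is_stale(struct bch_dev *ca,
                                 const struct bch_extent_ptr *ptr)
{
        bool stale;

        rcu_read_lock();
        stale = dev_ptr_stale_rcu(ca, ptr) != 0;
        rcu_read_unlock();

        return stale;
}
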
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 07b343e7ddeb99264bcf98e2805680e18e588a43..05ebf1186be102e126de7dfabd4eca5ffce3a623 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -1257,7 +1257,7 @@ static int gc_btree_gens_key(struct btree_trans *trans,
        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 
-               if (ptr_stale(ca, ptr) > 16) {
+               if (dev_ptr_stale(ca, ptr) > 16) {
                        percpu_up_read(&c->mark_lock);
                        goto update;
                }
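
dev_ptr_stale() returns how many generations the bucket has advanced past the pointer (0 when the pointer is current), so this GC pass rewrites any key whose pointers have fallen more than 16 generations behind, keeping the 8-bit bucket gens from wrapping out from under old pointers. Hypothetical values, for illustration only:

/*
 * bucket gen 200, ptr->gen 200  ->  dev_ptr_stale() == 0   (current, nothing to do)
 * bucket gen 200, ptr->gen 195  ->  dev_ptr_stale() == 5   (stale, but within 16: left alone)
 * bucket gen 200, ptr->gen 180  ->  dev_ptr_stale() == 20  (> 16: the key is rewritten)
 */
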
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 0a54faf7c50a640100725941d3a3e8d00272cee5..617ffde2fb7ad96f3c4c0e0b101df98a8cba7b51 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -170,17 +170,19 @@ static inline int gen_after(u8 a, u8 b)
        return r > 0 ? r : 0;
 }
 
+static inline u8 dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
+{
+       return gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+}
+
 /**
- * ptr_stale() - check if a pointer points into a bucket that has been
+ * dev_ptr_stale() - check if a pointer points into a bucket that has been
  * invalidated.
  */
-static inline u8 ptr_stale(struct bch_dev *ca,
-                          const struct bch_extent_ptr *ptr)
+static inline u8 dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
 {
-       u8 ret;
-
        rcu_read_lock();
-       ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+       u8 ret = dev_ptr_stale_rcu(ca, ptr);
        rcu_read_unlock();
 
        return ret;
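
dev_ptr_stale_rcu() reads the bucket's current generation under RCU and runs it through gen_after(), whose tail is visible at the top of this hunk. A standalone userspace sketch of that arithmetic (assuming gen_after() takes the signed 8-bit difference, consistent with the "return r > 0 ? r : 0" line above; the values are made up):

#include <stdint.h>
#include <stdio.h>

/* Same shape as gen_after(): signed 8-bit difference, clamped at 0. */
static int gen_after(uint8_t a, uint8_t b)
{
        int r = (int8_t) (a - b);

        return r > 0 ? r : 0;
}

int main(void)
{
        printf("%d\n", gen_after(12, 10));  /* 2: bucket reused twice since the ptr was written */
        printf("%d\n", gen_after(10, 10));  /* 0: pointer gen matches bucket gen, not stale */
        printf("%d\n", gen_after(1, 255));  /* 2: generations wrap modulo 256 */
        printf("%d\n", gen_after(130, 1));  /* 0: gaps of 128+ wrap negative, so staleness must be kept small */
        return 0;
}
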
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index be8bddb38957a01e229d1266f42f4e2fbc4a2173..99a8c8fe8e3b423d4274d6f5ae2077a5773b3b5c 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -714,7 +714,7 @@ static void ec_block_endio(struct bio *bio)
                               bch2_blk_status_to_str(bio->bi_status)))
                clear_bit(ec_bio->idx, ec_bio->buf->valid);
 
-       if (ptr_stale(ca, ptr)) {
+       if (dev_ptr_stale(ca, ptr)) {
                bch_err_ratelimited(ca->fs,
                                    "error %s stripe: stale pointer after io",
                                    bio_data_dir(bio) == READ ? "reading from" : "writing to");
@@ -738,7 +738,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
                : BCH_DATA_parity;
        int rw = op_is_write(opf);
 
-       if (ptr_stale(ca, ptr)) {
+       if (dev_ptr_stale(ca, ptr)) {
                bch_err_ratelimited(c,
                                    "error %s stripe: stale pointer",
                                    rw == READ ? "reading from" : "writing to");
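
Both erasure-coding paths check the pointer twice: ec_block_io() before submitting, so no IO is issued against a bucket that has already been reused, and ec_block_endio() after completion, so a bucket invalidated while the IO was in flight marks the stripe block invalid. A condensed sketch of that pattern; example_stripe_block_ok() is hypothetical and collapses the submit/endio split into one function:

static bool example_stripe_block_ok(struct bch_dev *ca,
                                    const struct bch_extent_ptr *ptr)
{
        /* Before submitting: skip buckets that have already been reused. */
        if (dev_ptr_stale(ca, ptr))
                return false;

        /* ... the bio runs asynchronously; the bucket may be invalidated meanwhile ... */

        /* After completion: if the bucket went stale mid-flight, the data
         * just read (or written) for this stripe block can't be trusted. */
        if (dev_ptr_stale(ca, ptr))
                return false;

        return true;
}
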
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index dc78ace56181018a7355697b0e42d81e2e6ecccf..a12fffb32f61b1a597d368ff7fbb74a649773d77 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -132,7 +132,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
                if (!ret && !p.ptr.cached)
                        ret = -EIO;
 
-               if (p.ptr.cached && ptr_stale(ca, &p.ptr))
+               if (p.ptr.cached && dev_ptr_stale(ca, &p.ptr))
                        continue;
 
                f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
@@ -874,7 +874,7 @@ bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
        bkey_for_each_ptr(ptrs, ptr)
                if (bch2_dev_in_target(c, ptr->dev, target) &&
                    (!ptr->cached ||
-                    !ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr)))
+                    !dev_ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr)))
                        return true;
 
        return false;
@@ -981,7 +981,7 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
 {
        bch2_bkey_drop_ptrs(k, ptr,
                ptr->cached &&
-               ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr));
+               dev_ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr));
 
        return bkey_deleted(k.k);
 }
@@ -1005,7 +1005,7 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
                        prt_str(out, " cached");
                if (ptr->unwritten)
                        prt_str(out, " unwritten");
-               if (bucket_valid(ca, b) && ptr_stale(ca, ptr))
+               if (bucket_valid(ca, b) && dev_ptr_stale_rcu(ca, ptr))
                        prt_printf(out, " stale");
        }
        rcu_read_unlock();
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 1091b066639e4d0a7ed8bc522d2d85525898c0fe..0107b2fb4d3331a8d38c23334a80579f03f7d998 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -697,7 +697,7 @@ static void bch2_read_endio(struct bio *bio)
        }
 
        if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
-           ptr_stale(ca, &rbio->pick.ptr)) {
+           dev_ptr_stale(ca, &rbio->pick.ptr)) {
                trace_and_count(c, read_reuse_race, &rbio->bio);
 
                if (rbio->flags & BCH_READ_RETRY_IF_STALE)
@@ -841,7 +841,7 @@ retry_pick:
         */
        if ((flags & BCH_READ_IN_RETRY) &&
            !pick.ptr.cached &&
-           unlikely(ptr_stale(ca, &pick.ptr))) {
+           unlikely(dev_ptr_stale(ca, &pick.ptr))) {
                read_from_stale_dirty_pointer(trans, k, pick.ptr);
                bch2_mark_io_failure(failed, &pick);
                goto retry_pick;
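
The read path treats stale cached and stale dirty pointers differently: a stale cached pointer just means the cached copy's bucket was reclaimed, so the read quietly falls back to another replica (or retries, as in these two hunks), while a stale dirty pointer is unexpected and is reported via read_from_stale_dirty_pointer() before the pick is retried. A hedged sketch of that rule; example_skip_ptr_for_read() is hypothetical:

static bool example_skip_ptr_for_read(struct bch_dev *ca,
                                      const struct bch_extent_ptr *ptr)
{
        if (!dev_ptr_stale(ca, ptr))
                return false;           /* pointer is current: usable */

        if (ptr->cached)
                return true;            /* stale cache copy: quietly skip it */

        /*
         * Stale dirty pointer: shouldn't happen; the caller logs it and
         * marks this device as failed for the current read before retrying.
         */
        return true;
}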