* Buckets:
* Per-bucket arrays are protected by c->mark_lock, bucket_lock and
* gc_lock, for device resize - holding any is sufficient for access:
- * Or rcu_read_lock(), but only for ptr_stale():
+ * Or rcu_read_lock(), but only for dev_ptr_stale():
*/
struct bucket_array __rcu *buckets_gc;
struct bucket_gens __rcu *bucket_gens;
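
The comment above gives the access rule for these per-bucket arrays: holding any one of c->mark_lock, bucket_lock or gc_lock permits full access, while rcu_read_lock() on its own is only enough for the staleness check. A minimal sketch of the two call forms that rule allows, using hypothetical wrapper names (the helpers they call, dev_ptr_stale() and dev_ptr_stale_rcu(), are defined further down in this patch):

/* No lock held: dev_ptr_stale() takes rcu_read_lock() internally */
static bool example_ptr_stale_lockless(struct bch_fs *c,
				       const struct bch_extent_ptr *ptr)
{
	return dev_ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr) != 0;
}

/* Caller already inside an RCU read-side section: use the _rcu variant */
static bool example_ptr_stale_under_rcu(struct bch_dev *ca,
					const struct bch_extent_ptr *ptr)
{
	return dev_ptr_stale_rcu(ca, ptr) != 0;
}
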
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
- if (ptr_stale(ca, ptr) > 16) {
+ if (dev_ptr_stale(ca, ptr) > 16) {
percpu_up_read(&c->mark_lock);
goto update;
}
return r > 0 ? r : 0;
}
+static inline u8 dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
+{
+ return gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+}
+
/**
- * ptr_stale() - check if a pointer points into a bucket that has been
+ * dev_ptr_stale() - check if a pointer points into a bucket that has been
* invalidated.
*/
-static inline u8 ptr_stale(struct bch_dev *ca,
- const struct bch_extent_ptr *ptr)
+static inline u8 dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
- u8 ret;
-
rcu_read_lock();
- ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+ u8 ret = dev_ptr_stale_rcu(ca, ptr);
rcu_read_unlock();
return ret;
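
One detail worth noting about these helpers: the return value is not just a boolean. gen_after() (the "return r > 0 ? r : 0;" line in an earlier hunk appears to be its tail) yields how many generations the bucket has advanced past the pointer's, which is why one caller above compares the result against 16 rather than only testing truth. A hedged sketch with a hypothetical caller name:

/* Hypothetical caller, not part of this patch */
static void example_check_staleness(struct bch_dev *ca,
				    const struct bch_extent_ptr *ptr)
{
	u8 stale = dev_ptr_stale(ca, ptr);

	if (!stale)
		return;		/* bucket generation unchanged: pointer valid */

	if (stale > 16) {
		/*
		 * Bucket has been reused many times since the pointer was
		 * written; the hunk near the top of this section treats
		 * this as grounds to go to its update path.
		 */
	}
}
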
bch2_blk_status_to_str(bio->bi_status)))
clear_bit(ec_bio->idx, ec_bio->buf->valid);
- if (ptr_stale(ca, ptr)) {
+ if (dev_ptr_stale(ca, ptr)) {
bch_err_ratelimited(ca->fs,
"error %s stripe: stale pointer after io",
bio_data_dir(bio) == READ ? "reading from" : "writing to");
: BCH_DATA_parity;
int rw = op_is_write(opf);
- if (ptr_stale(ca, ptr)) {
+ if (dev_ptr_stale(ca, ptr)) {
bch_err_ratelimited(c,
"error %s stripe: stale pointer",
rw == READ ? "reading from" : "writing to");
if (!ret && !p.ptr.cached)
ret = -EIO;
- if (p.ptr.cached && ptr_stale(ca, &p.ptr))
+ if (p.ptr.cached && dev_ptr_stale(ca, &p.ptr))
continue;
f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
bkey_for_each_ptr(ptrs, ptr)
if (bch2_dev_in_target(c, ptr->dev, target) &&
(!ptr->cached ||
- !ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr)))
+ !dev_ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr)))
return true;
return false;
{
bch2_bkey_drop_ptrs(k, ptr,
ptr->cached &&
- ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr));
+ dev_ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr));
return bkey_deleted(k.k);
}
prt_str(out, " cached");
if (ptr->unwritten)
prt_str(out, " unwritten");
- if (bucket_valid(ca, b) && ptr_stale(ca, ptr))
+ if (bucket_valid(ca, b) && dev_ptr_stale_rcu(ca, ptr))
prt_printf(out, " stale");
}
rcu_read_unlock();
}
if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
- ptr_stale(ca, &rbio->pick.ptr)) {
+ dev_ptr_stale(ca, &rbio->pick.ptr)) {
trace_and_count(c, read_reuse_race, &rbio->bio);
if (rbio->flags & BCH_READ_RETRY_IF_STALE)
*/
if ((flags & BCH_READ_IN_RETRY) &&
!pick.ptr.cached &&
- unlikely(ptr_stale(ca, &pick.ptr))) {
+ unlikely(dev_ptr_stale(ca, &pick.ptr))) {
read_from_stale_dirty_pointer(trans, k, pick.ptr);
bch2_mark_io_failure(failed, &pick);
goto retry_pick;