bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
bch2_journal_flush_async(&c->journal, NULL);
- if (s->ca)
- percpu_ref_put(&s->ca->ref);
+ bch2_dev_put(s->ca);
if (ca)
- percpu_ref_get(&ca->ref);
+ bch2_dev_get(ca);
s->ca = ca;
s->need_journal_commit_this_dev = 0;
}
invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
if (ret < 0) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
break;
}
}
ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
if (ret) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
bch_err_fn(c, ret);
return ret;
}
rcu_read_lock();
ca = rcu_dereference(c->devs[dev]);
if (ca)
- percpu_ref_get(&ca->ref);
+ bch2_dev_get(ca);
rcu_read_unlock();
if (!ca)
continue;
if (!ca->mi.durability && *have_cache) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
continue;
}
ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
if (!IS_ERR(ob))
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
if (IS_ERR(ob)) {
ret = PTR_ERR(ob);
#undef copy_stripe_field
#undef copy_field
fsck_err:
- if (ca)
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
bch_err_fn(c, ret);
-
percpu_up_write(&c->mark_lock);
printbuf_exit(&buf);
return ret;
ca->usage_gc = alloc_percpu(struct bch_dev_usage);
if (!ca->usage_gc) {
bch_err(c, "error allocating ca->usage_gc");
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return -BCH_ERR_ENOMEM_gc_start;
}
NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
bch2_alloc_write_key(trans, &iter, k)));
if (ret) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
break;
}
}
ca->mi.nbuckets * sizeof(struct bucket),
GFP_KERNEL|__GFP_ZERO);
if (!buckets) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
bch_err(c, "error allocating ca->buckets[gc]");
return -BCH_ERR_ENOMEM_gc_alloc_start;
}
ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
if (!ca->oldest_gen) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
ret = -BCH_ERR_ENOMEM_gc_gens;
goto err;
}
for_each_online_member(c, ca) {
int ret = bch2_trans_mark_dev_sb(c, ca, flags);
if (ret) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
}
sizeof(unsigned long),
GFP_KERNEL|__GFP_ZERO);
if (!ca->buckets_nouse) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return -BCH_ERR_ENOMEM_buckets_nouse;
}
}
rcu_read_lock();
ca = rcu_dereference(c->devs[dev]);
if (ca)
- percpu_ref_get(&ca->ref);
+ bch2_dev_get(ca);
rcu_read_unlock();
if (!ca)
return -EINVAL;
ret = bch2_dev_offline(c, ca, arg.flags);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
if (ret)
bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
arg.d[i].fragmented = src.d[i].fragmented;
}
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
}
goto err;
}
err:
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
if (arg.flags & BCH_READ_DEV) {
ca = bch2_device_lookup(c, arg.dev, arg.flags);
-
- if (IS_ERR(ca)) {
- ret = PTR_ERR(ca);
- goto err;
- }
+ ret = PTR_ERR_OR_ZERO(ca);
+ if (ret)
+ goto err_unlock;
sb = ca->disk_sb.sb;
} else {
ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
vstruct_bytes(sb));
err:
- if (!IS_ERR_OR_NULL(ca))
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
+err_unlock:
mutex_unlock(&c->sb_lock);
return ret;
}
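
Note on the hunk above: the unwind path is split in two. The err label drops the device ref via the NULL-safe bch2_dev_put(), while the new err_unlock label serves the lookup-failure path, where no ref was ever taken. A minimal sketch of the resulting control flow (labels and calls are from the hunk; the function body is elided):

    ret = PTR_ERR_OR_ZERO(ca);
    if (ret)
        goto err_unlock;        /* lookup failed: no device ref held */
    /* ... use ca, fall through to drop the ref ... */
err:
    bch2_dev_put(ca);           /* NULL-safe: drops the ref from the lookup */
err_unlock:
    mutex_unlock(&c->sb_lock);
    return ret;
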
ret = bch2_dev_resize(c, ca, arg.nbuckets);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
if (c->opts.nocow_enabled)
bch2_bucket_nocow_unlock(&c->nocow_locks,
PTR_BUCKET_POS(c, ptr), 0);
- percpu_ref_put(&bch2_dev_bkey_exists(c, ptr->dev)->ref);
+ bch2_dev_put(bch2_dev_bkey_exists(c, ptr->dev));
}
bch2_bkey_buf_exit(&update->k, c);
m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
bkey_for_each_ptr(ptrs, ptr)
- percpu_ref_get(&bch2_dev_bkey_exists(c, ptr->dev)->ref);
+ bch2_dev_get(bch2_dev_bkey_exists(c, ptr->dev));
unsigned durability_have = 0, durability_removing = 0;
if ((1U << i) & ptrs_locked)
bch2_bucket_nocow_unlock(&c->nocow_locks,
PTR_BUCKET_POS(c, &p.ptr), 0);
- percpu_ref_put(&bch2_dev_bkey_exists(c, p.ptr.dev)->ref);
+ bch2_dev_put(bch2_dev_bkey_exists(c, p.ptr.dev));
i++;
}
ca = bch2_dev_lookup(c, val);
if (!IS_ERR(ca)) {
*res = dev_to_target(ca->dev_idx);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return 0;
}
for (struct bch_dev *_ca = NULL; \
(_ca = __bch2_next_dev((_c), _ca, (_mask)));)
-static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
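+/* Take a reference on ca->ref; the caller must know ca is still live (e.g. via RCU): */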
+static inline void bch2_dev_get(struct bch_dev *ca)
+{
+ percpu_ref_get(&ca->ref);
+}
+
+static inline void __bch2_dev_put(struct bch_dev *ca)
+{
+ percpu_ref_put(&ca->ref);
+}
+
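+/* NULL-safe counterpart to bch2_dev_get(): */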
+static inline void bch2_dev_put(struct bch_dev *ca)
{
- rcu_read_lock();
if (ca)
- percpu_ref_put(&ca->ref);
+ __bch2_dev_put(ca);
+}
+
+static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
+{
+ rcu_read_lock();
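+ /* drop the ref held on the previous device, if any, before advancing */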
+ bch2_dev_put(ca);
if ((ca = __bch2_next_dev(c, ca, NULL)))
- percpu_ref_get(&ca->ref);
+ bch2_dev_get(ca);
rcu_read_unlock();
return ca;
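
Taken together, these helpers give the lookup/use/release pattern the chardev and allocator hunks above now follow. A minimal sketch of a caller, assuming a hypothetical use_device() and error code that are not part of this patch:

    static int example(struct bch_fs *c, unsigned dev)
    {
        struct bch_dev *ca;
        int ret;

        rcu_read_lock();
        ca = rcu_dereference(c->devs[dev]);
        if (ca)
            bch2_dev_get(ca);           /* take the ref while ca is known live */
        rcu_read_unlock();

        if (!ca)
            return -EINVAL;             /* hypothetical error code */

        ret = use_device(c, ca);        /* hypothetical stand-in for real work */
        bch2_dev_put(ca);               /* NULL-safe; pairs with the get above */
        return ret;
    }
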
ret = bch2_dev_sysfs_online(c, ca);
if (ret) {
bch_err(c, "error creating sysfs objects");
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
goto err;
}
}
* We consume a reference to ca->ref, regardless of whether we succeed
* or fail:
*/
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot remove without losing data");