unsigned i;
for_each_member_device(ca, c, i) {
- struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
free += bucket_to_sector(ca,
__dev_buckets_free(ca, stats)) << 9;
if (gc_count != c->gc_count)
ca->inc_gen_really_needs_gc = 0;
- available = max_t(s64, 0, dev_buckets_available(c, ca) -
+ available = max_t(s64, 0, dev_buckets_available(ca) -
ca->inc_gen_really_needs_gc);
if (available > fifo_free(&ca->free_inc) ||
return ret;
}
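
For context, the free-space accumulation at the top of this hunk converts buckets to bytes in two steps: bucket_to_sector() scales the bucket count by the device's bucket size (in 512-byte sectors), and << 9 converts sectors to bytes. A standalone sketch of the arithmetic, with hypothetical values, and with bucket_to_sector() assumed to be a plain multiply:

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the helper: buckets -> 512-byte sectors. */
static uint64_t bucket_to_sector_demo(uint64_t bucket_size_sectors,
				      uint64_t nr_buckets)
{
	return nr_buckets * bucket_size_sectors;
}

int main(void)
{
	uint64_t bucket_size  = 1024;	/* hypothetical: 1024-sector (512KiB) buckets */
	uint64_t buckets_free = 3000;	/* hypothetical __dev_buckets_free() result */

	/* sectors << 9 == sectors * 512 == bytes */
	uint64_t bytes = bucket_to_sector_demo(bucket_size, buckets_free) << 9;

	printf("%llu free bytes\n", (unsigned long long) bytes);
	return 0;
}
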
-void bch2_dev_stripe_increment(struct bch_fs *c, struct bch_dev *ca,
+void bch2_dev_stripe_increment(struct bch_dev *ca,
struct dev_stripe_state *stripe)
{
u64 *v = stripe->next_alloc + ca->dev_idx;
- u64 free_space = dev_buckets_free(c, ca);
+ u64 free_space = dev_buckets_free(ca);
u64 free_space_inv = free_space
? div64_u64(1ULL << 48, free_space)
: 1ULL << 48;
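
The 1ULL << 48 constant here is a fixed-point scale: each allocation advances the device's next_alloc key by an amount inversely proportional to its free space, so devices with more room end up selected proportionally more often. A minimal userspace model of that behavior (simplified: plain division instead of div64_u64(), and no overflow handling or key rescaling):

#include <stdint.h>
#include <stdio.h>

#define NR_DEVS 3

/* Hypothetical standalone model of struct dev_stripe_state. */
struct stripe_demo {
	uint64_t next_alloc[NR_DEVS];
};

/* Pick the device with the smallest next_alloc key. */
static int pick_dev(struct stripe_demo *s)
{
	int best = 0;
	for (int i = 1; i < NR_DEVS; i++)
		if (s->next_alloc[i] < s->next_alloc[best])
			best = i;
	return best;
}

/* Mirror of the increment above: advance by ~2^48 / free_space. */
static void stripe_increment(struct stripe_demo *s, int dev,
			     uint64_t free_space)
{
	uint64_t inv = free_space ? (1ULL << 48) / free_space : 1ULL << 48;
	s->next_alloc[dev] += inv;
}

int main(void)
{
	uint64_t free_space[NR_DEVS] = { 1000, 2000, 4000 }; /* hypothetical */
	int picks[NR_DEVS] = { 0 };
	struct stripe_demo s = { { 0 } };

	for (int i = 0; i < 7000; i++) {
		int dev = pick_dev(&s);
		picks[dev]++;
		stripe_increment(&s, dev, free_space[dev]);
	}

	/* Expect picks roughly proportional to free space: ~1000/2000/4000 */
	for (int i = 0; i < NR_DEVS; i++)
		printf("dev %d: %d allocations\n", i, picks[i]);
	return 0;
}
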
add_new_bucket(c, ptrs, devs_may_alloc,
nr_effective, have_cache, flags, ob);
- bch2_dev_stripe_increment(c, ca, stripe);
+ bch2_dev_stripe_increment(ca, stripe);
if (*nr_effective >= nr_replicas)
return ALLOC_SUCCESS;
struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *,
struct dev_stripe_state *,
struct bch_devs_mask *);
-void bch2_dev_stripe_increment(struct bch_fs *, struct bch_dev *,
- struct dev_stripe_state *);
+void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
long bch2_bucket_alloc_new_fs(struct bch_dev *);
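
With the bch_fs argument gone, these entry points need only the device. An illustrative caller (a hypothetical function, not part of this patch):

static void example(struct bch_dev *ca, struct dev_stripe_state *stripe)
{
	struct bch_dev_usage u = bch2_dev_usage_read(ca);

	if (__dev_buckets_available(ca, u))
		bch2_dev_stripe_increment(ca, stripe);
}
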
return ret;
}
-struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
+struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
struct bch_dev_usage ret;
/* Device usage: */
-struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);
+struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);
void bch2_dev_usage_from_buckets(struct bch_fs *);
/*
* Number of reclaimable buckets - only for use by the allocator thread:
*/
-static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
+static inline u64 dev_buckets_available(struct bch_dev *ca)
{
- return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
+ return __dev_buckets_available(ca, bch2_dev_usage_read(ca));
}
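
dev_buckets_available() is a convenience wrapper that reads fresh stats; __dev_buckets_available() does the actual computation from a caller-supplied snapshot. Its body is not shown in this excerpt, but the open-coded expression this patch replaces in show_dev_alloc_debug() below implies the following shape (a reconstruction, not a verbatim copy; the real helper may warn or clamp differently):

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage stats)
{
	u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

	/* Clamp in case a racy read leaves buckets_unavailable > total: */
	return stats.buckets_unavailable < total
		? total - stats.buckets_unavailable
		: 0;
}
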
static inline u64 __dev_buckets_free(struct bch_dev *ca,
fifo_used(&ca->free_inc);
}
-static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
+static inline u64 dev_buckets_free(struct bch_dev *ca)
{
- return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
+ return __dev_buckets_free(ca, bch2_dev_usage_read(ca));
}
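
dev_buckets_free() mirrors dev_buckets_available() but calls __dev_buckets_free(), which, per the truncated hunk above, also counts buckets already queued on the allocator's in-memory FIFOs. A sketch: only the fifo_used(&ca->free_inc) term is visible in this excerpt, so the middle term here is an assumption:

static inline u64 __dev_buckets_free(struct bch_dev *ca,
				     struct bch_dev_usage stats)
{
	return __dev_buckets_available(ca, stats) +
		fifo_used(&ca->free[RESERVE_NONE]) +	/* assumed term */
		fifo_used(&ca->free_inc);
}
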
/* Filesystem usage: */
if (IS_ERR(ca))
return PTR_ERR(ca);
- src = bch2_dev_usage_read(c, ca);
+ src = bch2_dev_usage_read(ca);
arg.state = ca->mi.state;
arg.bucket_size = ca->mi.bucket_size;
sectors > ja->sectors_free)
continue;
- bch2_dev_stripe_increment(c, ca, &j->wp.stripe);
+ bch2_dev_stripe_increment(ca, &j->wp.stripe);
bch2_bkey_append_ptr(&w->key,
(struct bch_extent_ptr) {
u64 fragmented = 0;
for_each_rw_member(ca, c, dev_idx) {
- struct bch_dev_usage usage = bch2_dev_usage_read(c, ca);
+ struct bch_dev_usage usage = bch2_dev_usage_read(ca);
fragmented_allowed += ((__dev_buckets_available(ca, usage) *
ca->mi.bucket_size) >> 1);
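
The copygc heuristic above budgets fragmentation at half of each rw device's available space, measured in sectors (available buckets times sectors per bucket). A worked example with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t buckets_available = 10000;	/* hypothetical __dev_buckets_available() result */
	uint64_t bucket_size = 1024;		/* hypothetical sectors per bucket */

	/* Same expression as the hunk above: half the available space. */
	uint64_t fragmented_allowed = (buckets_available * bucket_size) >> 1;

	/* 5120000 sectors = 2500 MiB (one sector = 512 bytes) */
	printf("fragmented_allowed = %llu sectors (%llu MiB)\n",
	       (unsigned long long) fragmented_allowed,
	       (unsigned long long) (fragmented_allowed >> 11));
	return 0;
}
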
static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
{
struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
unsigned i, nr[BCH_DATA_NR];
memset(nr, 0, sizeof(nr));
stats.buckets[BCH_DATA_user],
stats.buckets[BCH_DATA_cached],
stats.buckets_ec,
- ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
+ __dev_buckets_available(ca, stats),
stats.sectors[BCH_DATA_sb],
stats.sectors[BCH_DATA_journal],
stats.sectors[BCH_DATA_btree],