struct bio_set dio_write_bioset;
struct bio_set dio_read_bioset;
+
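+ /* Stats for computing the average btree node write size in sysfs: */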
+ atomic64_t btree_writes_nr;
+ atomic64_t btree_writes_sectors;
struct bio_list btree_write_error_list;
struct work_struct btree_write_error_work;
spinlock_t btree_write_error_lock;
b->written += sectors_to_write;
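+ /* Account this write in the global btree write statistics: */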
+ atomic64_inc(&c->btree_writes_nr);
+ atomic64_add(sectors_to_write, &c->btree_writes_sectors);
+
/* XXX: submitting IO with btree locks held: */
bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, k.k);
bch2_bkey_buf_exit(&k, c);
read_attribute(io_latency_stats_write);
read_attribute(congested);
+read_attribute(btree_avg_write_size);
+
read_attribute(bucket_quantiles_last_read);
read_attribute(bucket_quantiles_last_write);
read_attribute(bucket_quantiles_fragmentation);
return ret;
}
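+/*
+ * Mean size of a btree node write, in bytes, computed from the counters
+ * updated in the btree write path.
+ */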
+static size_t bch2_btree_avg_write_size(struct bch_fs *c)
+{
+ u64 nr = atomic64_read(&c->btree_writes_nr);
+ u64 sectors = atomic64_read(&c->btree_writes_sectors);
+
+ /* sysfs_hprint() expects bytes; the counter is in 512-byte sectors: */
+ return nr ? div64_u64(sectors, nr) << 9 : 0;
+}
+
static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
struct bch_fs_usage_online *fs_usage = bch2_fs_usage_read(c);
sysfs_print(block_size, block_bytes(c));
sysfs_print(btree_node_size, btree_bytes(c));
sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
+ sysfs_hprint(btree_avg_write_size, bch2_btree_avg_write_size(c));
sysfs_print(read_realloc_races,
atomic_long_read(&c->read_realloc_races));
&sysfs_block_size,
&sysfs_btree_node_size,
&sysfs_btree_cache_size,
+ &sysfs_btree_avg_write_size,
&sysfs_journal_write_delay_ms,
&sysfs_journal_reclaim_delay_ms,