	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);
+
+		if (!c->blocked_allocate_open_bucket)
+			c->blocked_allocate_open_bucket = local_clock();
+
		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	if (cl)
		closure_wait(&c->freelist_wait, cl);
+	if (!c->blocked_allocate)
+		c->blocked_allocate = local_clock();
+
	spin_unlock(&c->freelist_lock);
	trace_bucket_alloc_fail(ca, reserve);
	bucket_io_clock_reset(c, ca, bucket, WRITE);
	spin_unlock(&ob->lock);
+	if (c->blocked_allocate_open_bucket) {
+		bch2_time_stats_update(
+			&c->times[BCH_TIME_blocked_allocate_open_bucket],
+			c->blocked_allocate_open_bucket);
+		c->blocked_allocate_open_bucket = 0;
+	}
+
+	if (c->blocked_allocate) {
+		bch2_time_stats_update(
+			&c->times[BCH_TIME_blocked_allocate],
+			c->blocked_allocate);
+		c->blocked_allocate = 0;
+	}
+
	spin_unlock(&c->freelist_lock);
	bch2_wake_allocator(ca);
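These hunks stamp c->blocked_allocate / c->blocked_allocate_open_bucket with local_clock() the first time an allocation has to wait (under c->freelist_lock), then feed that start time to bch2_time_stats_update() once a bucket is finally handed out, so each stat records a single interval covering the whole stall rather than one sample per retry. Below is a minimal standalone sketch of the same pattern, assuming a simplified hypothetical blocked_stats type in place of the real bch2 time-stats machinery:

/*
 * Minimal sketch of the blocked-time accounting added above.  The
 * blocked_stats type and the helpers around it are hypothetical
 * stand-ins; the real bch2_time_stats_update() tracks more than a
 * count and a running total.
 */
#include <linux/sched/clock.h>	/* local_clock() */
#include <linux/spinlock.h>
#include <linux/types.h>

struct blocked_stats {
	u64	count;
	u64	total_ns;
};

static void blocked_stats_update(struct blocked_stats *s, u64 start)
{
	s->count++;
	s->total_ns += local_clock() - start;
}

struct alloc_state {
	spinlock_t		lock;		/* stands in for c->freelist_lock */
	u64			blocked_start;	/* 0 means "not currently blocked" */
	struct blocked_stats	blocked;
};

/* Allocation failed and the caller is about to wait: note when we first blocked. */
static void alloc_blocked(struct alloc_state *a)
{
	lockdep_assert_held(&a->lock);

	if (!a->blocked_start)
		a->blocked_start = local_clock();
}

/* Allocation finally succeeded: account the whole blocked interval once. */
static void alloc_unblocked(struct alloc_state *a)
{
	lockdep_assert_held(&a->lock);

	if (a->blocked_start) {
		blocked_stats_update(&a->blocked, a->blocked_start);
		a->blocked_start = 0;
	}
}

Because the start timestamp is only cleared on the success path, repeated failed attempts keep the original value and the recorded duration spans the entire time the allocator was blocked.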
	x(data_promote)				\
	x(journal_write)			\
	x(journal_delay)			\
-	x(journal_blocked)			\
-	x(journal_flush_seq)
+	x(journal_flush_seq)			\
+	x(blocked_journal)			\
+	x(blocked_allocate)			\
+	x(blocked_allocate_open_bucket)

enum bch_time_stats {
#define x(name)		BCH_TIME_##name,
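BCH_TIME_STATS() is an x-macro list: each x(name) entry expands here into a BCH_TIME_##name enumerator, so adding blocked_allocate and blocked_allocate_open_bucket to the list is all that is needed for them to index c->times[]. A small self-contained illustration of the pattern follows; the TIME_NR terminator and the name-string table are assumptions about how such a list is typically consumed, not the exact bcachefs definitions:

/* Illustrative only: a cut-down x-macro list in the style of BCH_TIME_STATS(). */
#define TIME_STATS()			\
	x(journal_flush_seq)		\
	x(blocked_journal)		\
	x(blocked_allocate)		\
	x(blocked_allocate_open_bucket)

/* Expansion 1: enumerators usable as array indexes, e.g. times[TIME_blocked_allocate]. */
enum time_stats {
#define x(name)	TIME_##name,
	TIME_STATS()
#undef x
	TIME_NR
};

/* Expansion 2: matching human-readable names, kept in sync with the enum for free. */
static const char * const time_stat_names[] = {
#define x(name)	#name,
	TIME_STATS()
#undef x
};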
	/* ALLOCATOR */
	spinlock_t		freelist_lock;
	struct closure_waitlist	freelist_wait;
+	u64			blocked_allocate;
+	u64			blocked_allocate_open_bucket;
	u8			open_buckets_freelist;
	u8			open_buckets_nr_free;
	struct closure_waitlist	open_buckets_wait;
	c->journal.write_time	= &c->times[BCH_TIME_journal_write];
	c->journal.delay_time	= &c->times[BCH_TIME_journal_delay];
-	c->journal.blocked_time	= &c->times[BCH_TIME_journal_blocked];
+	c->journal.blocked_time	= &c->times[BCH_TIME_blocked_journal];
	c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq];

	bch2_fs_btree_cache_init_early(&c->btree_cache);