{
size_t nr_dirty = READ_ONCE(c->btree_key_cache.nr_dirty);
- size_t nr_keys = READ_ONCE(c->btree_key_cache.nr_dirty);
+ /* Fix copy-paste bug: nr_keys must read .nr_keys, not .nr_dirty */
+ size_t nr_keys = READ_ONCE(c->btree_key_cache.nr_keys);
- size_t max_dirty = 1024 + (nr_keys * 3) / 4;
+ size_t max_dirty = 4096 + nr_keys / 2;
return max_t(ssize_t, 0, nr_dirty - max_dirty);
}
+/*
+ * Returns true when the key cache holds enough dirty keys (relative to the
+ * total number of cached keys) that the caller should block and let journal
+ * reclaim flush some before dirtying more.
+ */
+static inline bool bch2_btree_key_cache_must_wait(struct bch_fs *c)
+{
+ size_t nr_dirty = READ_ONCE(c->btree_key_cache.nr_dirty);
+ size_t nr_keys = READ_ONCE(c->btree_key_cache.nr_keys);
+ size_t max_dirty = 4096 + (nr_keys * 3) / 4;
+
+ return nr_dirty > max_dirty;
+}
+
struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *, enum btree_id, struct bpos);
BTREE_INSERT_ENOSPC,
BTREE_INSERT_NEED_MARK_REPLICAS,
BTREE_INSERT_NEED_JOURNAL_RES,
+ BTREE_INSERT_NEED_JOURNAL_RECLAIM,
};
enum btree_gc_coalesce_fail_reason {
BUG_ON(iter->level);
+ /*
+ * NOTE(review): dirtying a clean cached key presumably pins a journal
+ * entry until it is flushed; bail out here when reclaim is behind
+ * (bch2_btree_key_cache_must_wait()) so the caller can run reclaim
+ * first - confirm against the NEED_JOURNAL_RECLAIM handler.
+ */
+ if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
+ bch2_btree_key_cache_must_wait(trans->c))
+ return BTREE_INSERT_NEED_JOURNAL_RECLAIM;
+
if (u64s <= ck->u64s)
return BTREE_INSERT_OK;
trace_trans_restart_journal_res_get(trans->ip);
ret = -EINTR;
break;
+ case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
+ /* Drop transaction locks before blocking on journal reclaim. */
+ bch2_trans_unlock(trans);
+
+ /* Run reclaim until enough dirty cached keys have been flushed. */
+ while (bch2_btree_key_cache_must_wait(c)) {
+ mutex_lock(&c->journal.reclaim_lock);
+ bch2_journal_reclaim(&c->journal);
+ mutex_unlock(&c->journal.reclaim_lock);
+ }
+
+ /* If we relock without intervening changes, retry the commit... */
+ if (bch2_trans_relock(trans))
+ return 0;
+
+ /* ...otherwise restart the transaction from scratch. */
+ trace_trans_restart_journal_reclaim(trans->ip);
+ ret = -EINTR;
+ break;
default:
BUG_ON(ret >= 0);
break;
TP_ARGS(ip)
);
+/* Fired when a transaction restarts after waiting on journal reclaim. */
+DEFINE_EVENT(transaction_restart, trans_restart_journal_reclaim,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas,
TP_PROTO(unsigned long ip),
TP_ARGS(ip)