From c5f51cdd5f1c0368c73637bea045d6d20c6f87c2 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sun, 28 Mar 2021 20:57:59 -0400
Subject: [PATCH] bcachefs: Have journal reclaim thread flush more aggressively

This adds a new watermark for journal reclaim when flushing btree key
cache entries - it should try to stay ahead of where foreground threads
doing transaction commits will enter direct journal reclaim.

Signed-off-by: Kent Overstreet
Signed-off-by: Kent Overstreet
---
 fs/bcachefs/btree_key_cache.h | 9 +++++++++
 fs/bcachefs/journal_reclaim.c | 2 +-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
index 02715cd258ab0..4e1e5a9c76561 100644
--- a/fs/bcachefs/btree_key_cache.h
+++ b/fs/bcachefs/btree_key_cache.h
@@ -1,6 +1,15 @@
 #ifndef _BCACHEFS_BTREE_KEY_CACHE_H
 #define _BCACHEFS_BTREE_KEY_CACHE_H
 
+static inline size_t bch2_nr_btree_keys_want_flush(struct bch_fs *c)
+{
+	size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
+	size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
+	size_t max_dirty = nr_keys / 4;
+
+	return max_t(ssize_t, 0, nr_dirty - max_dirty);
+}
+
 static inline size_t bch2_nr_btree_keys_need_flush(struct bch_fs *c)
 {
 	size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 3957312d12f28..17af2bbeadeea 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -602,7 +602,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct)
 	if (fifo_free(&j->pin) <= 32)
 		min_nr = 1;
 
-	min_nr = max(min_nr, bch2_nr_btree_keys_need_flush(c));
+	min_nr = max(min_nr, bch2_nr_btree_keys_want_flush(c));
 
 	trace_journal_reclaim_start(c,
 				    min_nr,
-- 
2.30.2
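
For context, a minimal standalone sketch of the watermark arithmetic this
patch introduces (a userspace model, not kernel code). The 25% "want flush"
fraction is taken from the patch above; the higher "need flush" fraction
used for comparison is an assumed placeholder, since that helper's body is
not shown here.

#include <stdio.h>
#include <stddef.h>

/*
 * Model of the two key cache flush watermarks: the background reclaim
 * thread starts flushing once nr_dirty exceeds the lower "want flush"
 * mark (25% of cached keys, per this patch), so it stays ahead of the
 * point where foreground transaction commits would cross the higher
 * "need flush" mark and fall into direct journal reclaim.
 */
static size_t nr_keys_want_flush(size_t nr_dirty, size_t nr_keys)
{
	size_t max_dirty = nr_keys / 4;

	return nr_dirty > max_dirty ? nr_dirty - max_dirty : 0;
}

static size_t nr_keys_need_flush(size_t nr_dirty, size_t nr_keys)
{
	size_t max_dirty = (nr_keys * 3) / 4;	/* assumed threshold, for illustration only */

	return nr_dirty > max_dirty ? nr_dirty - max_dirty : 0;
}

int main(void)
{
	size_t nr_keys = 1000, nr_dirty = 400;

	/* At 40% dirty the reclaim thread already has keys to flush... */
	printf("want flush: %zu\n", nr_keys_want_flush(nr_dirty, nr_keys));
	/* ...while foreground commits are not yet forced into direct reclaim. */
	printf("need flush: %zu\n", nr_keys_need_flush(nr_dirty, nr_keys));
	return 0;
}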