From f3721e12d07ab3c3e400a1a635e999ef72780de4 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Fri, 16 Oct 2020 21:32:02 -0400
Subject: [PATCH] bcachefs: Perf improvements for bch_alloc_read()

On large filesystems reading in the alloc info takes a significant
amount of time. But we don't need to be calling into the fully general
bch2_mark_key() path, just open code what we need in
bch2_alloc_read_fn().

Signed-off-by: Kent Overstreet
Signed-off-by: Kent Overstreet
---
 fs/bcachefs/alloc_background.c | 26 ++++++++++++++++++++++----
 fs/bcachefs/btree_types.h      |  2 --
 fs/bcachefs/buckets.c          |  4 ++--
 fs/bcachefs/ec.c               |  1 -
 4 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index fd8b57c806cc5..9fa7184188c27 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -209,10 +209,25 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
 static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
			      unsigned level, struct bkey_s_c k)
 {
-	if (!level)
-		bch2_mark_key(c, k, 0, 0, NULL, 0,
-			      BTREE_TRIGGER_ALLOC_READ|
-			      BTREE_TRIGGER_NOATOMIC);
+	struct bch_dev *ca;
+	struct bucket *g;
+	struct bkey_alloc_unpacked u;
+
+	if (level || k.k->type != KEY_TYPE_alloc)
+		return 0;
+
+	ca = bch_dev_bkey_exists(c, k.k->p.inode);
+	g = __bucket(ca, k.k->p.offset, 0);
+	u = bch2_alloc_unpack(k);
+
+	g->_mark.gen		= u.gen;
+	g->_mark.data_type	= u.data_type;
+	g->_mark.dirty_sectors	= u.dirty_sectors;
+	g->_mark.cached_sectors	= u.cached_sectors;
+	g->io_time[READ]	= u.read_time;
+	g->io_time[WRITE]	= u.write_time;
+	g->oldest_gen		= u.oldest_gen;
+	g->gen_valid		= 1;
 
 	return 0;
 }
@@ -223,8 +238,11 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	unsigned i;
 	int ret = 0;
 
+	down_read(&c->gc_lock);
 	ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_ALLOC,
					  NULL, bch2_alloc_read_fn);
+	up_read(&c->gc_lock);
+
 	if (ret) {
 		bch_err(c, "error reading alloc info: %i", ret);
 		return ret;
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 297cf26ca13e5..b295e46de0590 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -602,7 +602,6 @@ enum btree_trigger_flags {
 	__BTREE_TRIGGER_GC,
 	__BTREE_TRIGGER_BUCKET_INVALIDATE,
-	__BTREE_TRIGGER_ALLOC_READ,
 	__BTREE_TRIGGER_NOATOMIC,
 };
 
@@ -614,7 +613,6 @@ enum btree_trigger_flags {
 #define BTREE_TRIGGER_GC		(1U << __BTREE_TRIGGER_GC)
 #define BTREE_TRIGGER_BUCKET_INVALIDATE	(1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
-#define BTREE_TRIGGER_ALLOC_READ	(1U << __BTREE_TRIGGER_ALLOC_READ)
 #define BTREE_TRIGGER_NOATOMIC		(1U << __BTREE_TRIGGER_NOATOMIC)
 
 static inline bool btree_node_type_needs_gc(enum btree_node_type type)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index a34a9fe5a21cf..7bc51f397c7b1 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -482,6 +482,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 	bch2_wake_allocator(ca);
 }
 
+__flatten
 void bch2_dev_usage_from_buckets(struct bch_fs *c)
 {
 	struct bch_dev *ca;
@@ -756,8 +757,7 @@ static int bch2_mark_alloc(struct bch_fs *c,
 		}
 	}));
 
-	if (!(flags & BTREE_TRIGGER_ALLOC_READ))
-		bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);
+	bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);
 
 	g->io_time[READ]	= u.read_time;
 	g->io_time[WRITE]	= u.write_time;
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 2120f0a9b424b..0b1d0d2c323b1 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -1497,7 +1497,6 @@ static int bch2_stripes_read_fn(struct bch_fs *c, enum btree_id id,
 		ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL) ?:
 			bch2_mark_key(c, k, 0, 0, NULL, 0,
-				      BTREE_TRIGGER_ALLOC_READ|
 				      BTREE_TRIGGER_NOATOMIC);
 		if (ret)
 			return ret;
-- 
2.30.2