From 494dcc57a7bf639c39364b5f84c1b6db39a0f83a Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Tue, 3 Jan 2023 17:32:16 -0500
Subject: [PATCH] bcachefs: Plumb saw_error through to btree_err()

The btree node read path has the ability to kick off an asynchronous
btree node rewrite if we saw and corrected an error. Previously this
was only used for errors that caused one of the replicas to be
unusable - this patch plumbs it through to all error paths, so that
normal fsck errors can be corrected.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/btree_io.c | 25 +++++++++++++++----------
 fs/bcachefs/btree_io.h |  2 +-
 fs/bcachefs/debug.c    |  4 ++--
 3 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 61603b3a4a5dd..700ce14baa244 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -608,7 +608,7 @@ fsck_err:
 									\
 	if (_ret != -BCH_ERR_fsck_fix)					\
 		goto fsck_err;						\
-	true;								\
+	*saw_error = true;						\
 })
 
 #define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
@@ -668,7 +668,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			 struct btree *b, struct bset *i,
 			 unsigned offset, unsigned sectors,
-			 int write, bool have_retry)
+			 int write, bool have_retry, bool *saw_error)
 {
 	unsigned version = le16_to_cpu(i->version);
 	const char *err;
@@ -805,7 +805,8 @@ static int bset_key_invalid(struct bch_fs *c, struct btree *b,
 }
 
 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
-			 struct bset *i, int write, bool have_retry)
+			 struct bset *i, int write,
+			 bool have_retry, bool *saw_error)
 {
 	unsigned version = le16_to_cpu(i->version);
 	struct bkey_packed *k, *prev = NULL;
@@ -892,7 +893,7 @@ fsck_err:
 }
 
 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
-			      struct btree *b, bool have_retry)
+			      struct btree *b, bool have_retry, bool *saw_error)
 {
 	struct btree_node_entry *bne;
 	struct sort_iter *iter;
@@ -1003,14 +1004,14 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			     le16_to_cpu(i->version));
 
 		ret = validate_bset(c, ca, b, i, b->written, sectors,
-				    READ, have_retry);
+				    READ, have_retry, saw_error);
 		if (ret)
 			goto fsck_err;
 
 		if (!b->written)
 			btree_node_set_format(b, b->data->format);
 
-		ret = validate_bset_keys(c, b, i, READ, have_retry);
+		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
 		if (ret)
 			goto fsck_err;
 
@@ -1205,7 +1206,7 @@ start:
 				       &failed, &rb->pick) > 0;
 
 		if (!bio->bi_status &&
-		    !bch2_btree_node_read_done(c, ca, b, can_retry)) {
+		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
 			if (retry)
 				bch_info(c, "retry success");
 			break;
@@ -1311,6 +1312,7 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
 	unsigned i, written = 0, written2 = 0;
 	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2 ?
 		bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
+	bool _saw_error = false, *saw_error = &_saw_error;
 
 	for (i = 0; i < ra->nr; i++) {
 		struct btree_node *bn = ra->buf[i];
@@ -1397,13 +1399,15 @@ fsck_err:
 
 	if (best >= 0) {
 		memcpy(b->data, ra->buf[best], btree_bytes(c));
-		ret = bch2_btree_node_read_done(c, NULL, b, false);
+		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
 	} else {
 		ret = -1;
 	}
 
 	if (ret)
 		set_btree_node_read_error(b);
+	else if (*saw_error)
+		bch2_btree_node_rewrite_async(c, b);
 
 	for (i = 0; i < ra->nr; i++) {
 		mempool_free(ra->buf[i], &c->btree_bounce_pool);
@@ -1780,6 +1784,7 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
 				   struct bset *i, unsigned sectors)
 {
 	struct printbuf buf = PRINTBUF;
+	bool saw_error;
 	int ret;
 
 	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
@@ -1791,8 +1796,8 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
 	if (ret)
 		return ret;
 
-	ret = validate_bset_keys(c, b, i, WRITE, false) ?:
-		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false);
+	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
+		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
 	if (ret) {
 		bch2_inconsistent_error(c);
 		dump_stack();
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index a720dd74139b5..c43fb60b8c82c 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -129,7 +129,7 @@ void bch2_btree_build_aux_trees(struct btree *);
 void bch2_btree_init_next(struct btree_trans *, struct btree *);
 
 int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *,
-			      struct btree *, bool);
+			      struct btree *, bool, bool *);
 void bch2_btree_node_read(struct bch_fs *, struct btree *, bool);
 int bch2_btree_root_read(struct bch_fs *, enum btree_id,
 			 const struct bkey_i *, unsigned);
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index d3e769b1eb3e0..ab210296223b1 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -39,7 +39,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
 	struct bset *sorted, *inmemory = &b->data->keys;
 	struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
 	struct bio *bio;
-	bool failed = false;
+	bool failed = false, saw_error = false;
 
 	if (!bch2_dev_get_ioref(ca, READ))
 		return false;
@@ -60,7 +60,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
 	memcpy(n_ondisk, n_sorted, btree_bytes(c));
 
 	v->written = 0;
-	if (bch2_btree_node_read_done(c, ca, v, false))
+	if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error)
 		return false;
 
 	n_sorted = c->verify_data->data;
-- 
2.30.2
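
Note (not part of the patch above): the following is a minimal, self-contained sketch of the
out-parameter pattern this commit plumbs through the read path. The names here (struct node,
validate(), read_node(), etc.) are hypothetical stand-ins, not bcachefs APIs; in the real code,
validate_bset()/validate_bset_keys() set *saw_error via btree_err(), and callers such as
btree_node_read_all_replicas_done() check the flag and kick off bch2_btree_node_rewrite_async().

/* Illustrative sketch only - hypothetical names, not bcachefs code. */
#include <stdbool.h>
#include <stdio.h>

struct node {
	int good;	/* node is readable at all */
	int fixable;	/* node has an error we can repair in memory */
};

/* Validation sets *saw_error when it repairs something in place. */
static int validate(struct node *n, bool *saw_error)
{
	if (!n->good)
		return -1;		/* unreadable: caller must retry or give up */
	if (n->fixable) {
		n->fixable = 0;		/* "fsck fix": corrected in memory */
		*saw_error = true;	/* remember the on-disk copy is still bad */
	}
	return 0;
}

/* Read path: a successful read that corrected errors triggers a rewrite. */
static void read_node(struct node *n)
{
	bool saw_error = false;

	if (validate(n, &saw_error)) {
		printf("read error\n");
		return;
	}
	if (saw_error)
		printf("kicking off rewrite of repaired node\n");	/* cf. bch2_btree_node_rewrite_async() */
	else
		printf("clean read\n");
}

int main(void)
{
	struct node clean = { .good = 1, .fixable = 0 };
	struct node fixed = { .good = 1, .fixable = 1 };

	read_node(&clean);	/* prints "clean read" */
	read_node(&fixed);	/* prints "kicking off rewrite of repaired node" */
	return 0;
}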