4080401
[linux.git] /
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "btree_update.h"
6 #include "buckets.h"
7 #include "darray.h"
8 #include "dirent.h"
9 #include "error.h"
10 #include "fs-common.h"
11 #include "fsck.h"
12 #include "inode.h"
13 #include "keylist.h"
14 #include "subvolume.h"
15 #include "super.h"
16 #include "xattr.h"
17
18 #include <linux/bsearch.h>
19 #include <linux/dcache.h> /* struct qstr */
20
21 #define QSTR(n) { { { .len = strlen(n) } }, .name = n }
22
23 /*
24  * XXX: this is handling transaction restarts without returning
25  * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
26  */
27 static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
28                                     u32 snapshot)
29 {
30         struct btree_iter iter;
31         struct bkey_s_c k;
32         u64 sectors = 0;
33         int ret;
34
35         for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
36                                 SPOS(inum, 0, snapshot),
37                                 POS(inum, U64_MAX),
38                                 0, k, ret)
39                 if (bkey_extent_is_allocation(k.k))
40                         sectors += k.k->size;
41
42         bch2_trans_iter_exit(trans, &iter);
43
44         return ret ?: sectors;
45 }
46
47 static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
48                                     u32 snapshot)
49 {
50         struct btree_iter iter;
51         struct bkey_s_c k;
52         struct bkey_s_c_dirent d;
53         u64 subdirs = 0;
54         int ret;
55
56         for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
57                                 SPOS(inum, 0, snapshot),
58                                 POS(inum, U64_MAX),
59                                 0, k, ret) {
60                 if (k.k->type != KEY_TYPE_dirent)
61                         continue;
62
63                 d = bkey_s_c_to_dirent(k);
64                 if (d.v->d_type == DT_DIR)
65                         subdirs++;
66         }
67         bch2_trans_iter_exit(trans, &iter);
68
69         return ret ?: subdirs;
70 }
71
72 static int __snapshot_lookup_subvol(struct btree_trans *trans, u32 snapshot,
73                                     u32 *subvol)
74 {
75         struct bch_snapshot s;
76         int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots,
77                                           POS(0, snapshot), 0,
78                                           snapshot, &s);
79         if (!ret)
80                 *subvol = le32_to_cpu(s.subvol);
81         else if (bch2_err_matches(ret, ENOENT))
82                 bch_err(trans->c, "snapshot %u not fonud", snapshot);
83         return ret;
84
85 }
86
87 static int __subvol_lookup(struct btree_trans *trans, u32 subvol,
88                            u32 *snapshot, u64 *inum)
89 {
90         struct bch_subvolume s;
91         int ret;
92
93         ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
94
95         *snapshot = le32_to_cpu(s.snapshot);
96         *inum = le64_to_cpu(s.inode);
97         return ret;
98 }
99
100 static int subvol_lookup(struct btree_trans *trans, u32 subvol,
101                          u32 *snapshot, u64 *inum)
102 {
103         return lockrestart_do(trans, __subvol_lookup(trans, subvol, snapshot, inum));
104 }
105
106 static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
107                               struct bch_inode_unpacked *inode)
108 {
109         struct btree_iter iter;
110         struct bkey_s_c k;
111         int ret;
112
113         bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
114                              POS(0, inode_nr),
115                              BTREE_ITER_ALL_SNAPSHOTS);
116         k = bch2_btree_iter_peek(&iter);
117         ret = bkey_err(k);
118         if (ret)
119                 goto err;
120
121         if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
122                 ret = -BCH_ERR_ENOENT_inode;
123                 goto err;
124         }
125
126         ret = bch2_inode_unpack(k, inode);
127 err:
128         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
129                 bch_err(trans->c, "error fetching inode %llu: %s",
130                         inode_nr, bch2_err_str(ret));
131         bch2_trans_iter_exit(trans, &iter);
132         return ret;
133 }
134
135 static int __lookup_inode(struct btree_trans *trans, u64 inode_nr,
136                           struct bch_inode_unpacked *inode,
137                           u32 *snapshot)
138 {
139         struct btree_iter iter;
140         struct bkey_s_c k;
141         int ret;
142
143         k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
144                                SPOS(0, inode_nr, *snapshot), 0);
145         ret = bkey_err(k);
146         if (ret)
147                 goto err;
148
149         ret = bkey_is_inode(k.k)
150                 ? bch2_inode_unpack(k, inode)
151                 : -BCH_ERR_ENOENT_inode;
152         if (!ret)
153                 *snapshot = iter.pos.snapshot;
154 err:
155         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
156                 bch_err(trans->c, "error fetching inode %llu:%u: %s",
157                         inode_nr, *snapshot, bch2_err_str(ret));
158         bch2_trans_iter_exit(trans, &iter);
159         return ret;
160 }
161
162 static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
163                         struct bch_inode_unpacked *inode,
164                         u32 *snapshot)
165 {
166         return lockrestart_do(trans, __lookup_inode(trans, inode_nr, inode, snapshot));
167 }
168
169 static int __lookup_dirent(struct btree_trans *trans,
170                            struct bch_hash_info hash_info,
171                            subvol_inum dir, struct qstr *name,
172                            u64 *target, unsigned *type)
173 {
174         struct btree_iter iter;
175         struct bkey_s_c_dirent d;
176         int ret;
177
178         ret = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
179                                &hash_info, dir, name, 0);
180         if (ret)
181                 return ret;
182
183         d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
184         *target = le64_to_cpu(d.v->d_inum);
185         *type = d.v->d_type;
186         bch2_trans_iter_exit(trans, &iter);
187         return 0;
188 }
189
190 static int __write_inode(struct btree_trans *trans,
191                          struct bch_inode_unpacked *inode,
192                          u32 snapshot)
193 {
194         struct btree_iter iter;
195         int ret;
196
197         bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
198                             SPOS(0, inode->bi_inum, snapshot),
199                             BTREE_ITER_INTENT);
200
201         ret   = bch2_btree_iter_traverse(&iter) ?:
202                 bch2_inode_write(trans, &iter, inode);
203         bch2_trans_iter_exit(trans, &iter);
204         return ret;
205 }
206
207 static int write_inode(struct btree_trans *trans,
208                        struct bch_inode_unpacked *inode,
209                        u32 snapshot)
210 {
211         int ret = commit_do(trans, NULL, NULL,
212                                   BTREE_INSERT_NOFAIL|
213                                   BTREE_INSERT_LAZY_RW,
214                                   __write_inode(trans, inode, snapshot));
215         if (ret)
216                 bch_err(trans->c, "error in fsck: error updating inode: %s",
217                         bch2_err_str(ret));
218         return ret;
219 }
220
221 static int fsck_inode_rm(struct btree_trans *trans, u64 inum, u32 snapshot)
222 {
223         struct bch_fs *c = trans->c;
224         struct btree_iter iter = { NULL };
225         struct bkey_i_inode_generation delete;
226         struct bch_inode_unpacked inode_u;
227         struct bkey_s_c k;
228         int ret;
229
230         do {
231                 ret   = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
232                                                       SPOS(inum, 0, snapshot),
233                                                       SPOS(inum, U64_MAX, snapshot),
234                                                       0, NULL) ?:
235                         bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
236                                                       SPOS(inum, 0, snapshot),
237                                                       SPOS(inum, U64_MAX, snapshot),
238                                                       0, NULL) ?:
239                         bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
240                                                       SPOS(inum, 0, snapshot),
241                                                       SPOS(inum, U64_MAX, snapshot),
242                                                       0, NULL);
243         } while (ret == -BCH_ERR_transaction_restart_nested);
244         if (ret)
245                 goto err;
246 retry:
247         bch2_trans_begin(trans);
248
249         k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
250                                SPOS(0, inum, snapshot), BTREE_ITER_INTENT);
251         ret = bkey_err(k);
252         if (ret)
253                 goto err;
254
255         if (!bkey_is_inode(k.k)) {
256                 bch2_fs_inconsistent(c,
257                                      "inode %llu:%u not found when deleting",
258                                      inum, snapshot);
259                 ret = -EIO;
260                 goto err;
261         }
262
263         bch2_inode_unpack(k, &inode_u);
264
265         /* Subvolume root? */
266         if (inode_u.bi_subvol)
267                 bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum);
268
269         bkey_inode_generation_init(&delete.k_i);
270         delete.k.p = iter.pos;
271         delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
272
273         ret   = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
274                 bch2_trans_commit(trans, NULL, NULL,
275                                 BTREE_INSERT_NOFAIL);
276 err:
277         bch2_trans_iter_exit(trans, &iter);
278         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
279                 goto retry;
280
281         return ret ?: -BCH_ERR_transaction_restart_nested;
282 }
283
284 static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
285 {
286         struct bch_fs *c = trans->c;
287         struct btree_iter iter;
288         struct bch_inode_unpacked dir_inode;
289         struct bch_hash_info dir_hash_info;
290         int ret;
291
292         ret = lookup_first_inode(trans, pos.inode, &dir_inode);
293         if (ret)
294                 goto err;
295
296         dir_hash_info = bch2_hash_info_init(c, &dir_inode);
297
298         bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
299
300         ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
301                                   &dir_hash_info, &iter,
302                                   BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
303         bch2_trans_iter_exit(trans, &iter);
304 err:
305         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
306                 bch_err_fn(c, ret);
307         return ret;
308 }
309
310 /* Get lost+found, create if it doesn't exist: */
311 static int lookup_lostfound(struct btree_trans *trans, u32 subvol,
312                             struct bch_inode_unpacked *lostfound)
313 {
314         struct bch_fs *c = trans->c;
315         struct bch_inode_unpacked root;
316         struct bch_hash_info root_hash_info;
317         struct qstr lostfound_str = QSTR("lost+found");
318         subvol_inum root_inum = { .subvol = subvol };
319         u64 inum = 0;
320         unsigned d_type = 0;
321         u32 snapshot;
322         int ret;
323
324         ret = __subvol_lookup(trans, subvol, &snapshot, &root_inum.inum);
325         if (ret)
326                 return ret;
327
328         ret = __lookup_inode(trans, root_inum.inum, &root, &snapshot);
329         if (ret)
330                 return ret;
331
332         root_hash_info = bch2_hash_info_init(c, &root);
333
334         ret = __lookup_dirent(trans, root_hash_info, root_inum,
335                             &lostfound_str, &inum, &d_type);
336         if (bch2_err_matches(ret, ENOENT)) {
337                 bch_notice(c, "creating lost+found");
338                 goto create_lostfound;
339         }
340
341         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
342                 bch_err(c, "error looking up lost+found: %s", bch2_err_str(ret));
343         if (ret)
344                 return ret;
345
346         if (d_type != DT_DIR) {
347                 bch_err(c, "error looking up lost+found: not a directory");
348                 return ret;
349         }
350
351         /*
352          * The check_dirents pass has already run, dangling dirents
353          * shouldn't exist here:
354          */
355         return __lookup_inode(trans, inum, lostfound, &snapshot);
356
357 create_lostfound:
358         bch2_inode_init_early(c, lostfound);
359
360         ret = bch2_create_trans(trans, root_inum, &root,
361                                 lostfound, &lostfound_str,
362                                 0, 0, S_IFDIR|0700, 0, NULL, NULL,
363                                 (subvol_inum) { }, 0);
364         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
365                 bch_err(c, "error creating lost+found: %s", bch2_err_str(ret));
366         return ret;
367 }
368
369 static int __reattach_inode(struct btree_trans *trans,
370                           struct bch_inode_unpacked *inode,
371                           u32 inode_snapshot)
372 {
373         struct bch_hash_info dir_hash;
374         struct bch_inode_unpacked lostfound;
375         char name_buf[20];
376         struct qstr name;
377         u64 dir_offset = 0;
378         u32 subvol;
379         int ret;
380
381         ret = __snapshot_lookup_subvol(trans, inode_snapshot, &subvol);
382         if (ret)
383                 return ret;
384
385         ret = lookup_lostfound(trans, subvol, &lostfound);
386         if (ret)
387                 return ret;
388
389         if (S_ISDIR(inode->bi_mode)) {
390                 lostfound.bi_nlink++;
391
392                 ret = __write_inode(trans, &lostfound, U32_MAX);
393                 if (ret)
394                         return ret;
395         }
396
397         dir_hash = bch2_hash_info_init(trans->c, &lostfound);
398
399         snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
400         name = (struct qstr) QSTR(name_buf);
401
402         ret = bch2_dirent_create(trans,
403                                  (subvol_inum) {
404                                         .subvol = subvol,
405                                         .inum = lostfound.bi_inum,
406                                  },
407                                  &dir_hash,
408                                  inode_d_type(inode),
409                                  &name, inode->bi_inum, &dir_offset,
410                                  BCH_HASH_SET_MUST_CREATE);
411         if (ret)
412                 return ret;
413
414         inode->bi_dir           = lostfound.bi_inum;
415         inode->bi_dir_offset    = dir_offset;
416
417         return __write_inode(trans, inode, inode_snapshot);
418 }
419
420 static int reattach_inode(struct btree_trans *trans,
421                           struct bch_inode_unpacked *inode,
422                           u32 inode_snapshot)
423 {
424         int ret = commit_do(trans, NULL, NULL,
425                                   BTREE_INSERT_LAZY_RW|
426                                   BTREE_INSERT_NOFAIL,
427                         __reattach_inode(trans, inode, inode_snapshot));
428         if (ret) {
429                 bch_err(trans->c, "error reattaching inode %llu: %s",
430                         inode->bi_inum, bch2_err_str(ret));
431                 return ret;
432         }
433
434         return ret;
435 }
436
437 static int remove_backpointer(struct btree_trans *trans,
438                               struct bch_inode_unpacked *inode)
439 {
440         struct btree_iter iter;
441         struct bkey_s_c_dirent d;
442         int ret;
443
444         d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
445                                      POS(inode->bi_dir, inode->bi_dir_offset), 0,
446                                      dirent);
447         ret =   bkey_err(d) ?:
448                 __remove_dirent(trans, d.k->p);
449         bch2_trans_iter_exit(trans, &iter);
450         return ret;
451 }
452
453 struct snapshots_seen_entry {
454         u32                             id;
455         u32                             equiv;
456 };
457
458 struct snapshots_seen {
459         struct bpos                     pos;
460         DARRAY(struct snapshots_seen_entry) ids;
461 };
462
463 static inline void snapshots_seen_exit(struct snapshots_seen *s)
464 {
465         darray_exit(&s->ids);
466 }
467
468 static inline void snapshots_seen_init(struct snapshots_seen *s)
469 {
470         memset(s, 0, sizeof(*s));
471 }
472
473 static int snapshots_seen_add(struct bch_fs *c, struct snapshots_seen *s, u32 id)
474 {
475         struct snapshots_seen_entry *i, n = { id, id };
476         int ret;
477
478         darray_for_each(s->ids, i) {
479                 if (n.equiv < i->equiv)
480                         break;
481
482                 if (i->equiv == n.equiv) {
483                         bch_err(c, "%s(): adding duplicate snapshot", __func__);
484                         return -EINVAL;
485                 }
486         }
487
488         ret = darray_insert_item(&s->ids, i - s->ids.data, n);
489         if (ret)
490                 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
491                         s->ids.size);
492         return ret;
493 }
494
495 static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
496                                  enum btree_id btree_id, struct bpos pos)
497 {
498         struct snapshots_seen_entry *i, n = {
499                 .id     = pos.snapshot,
500                 .equiv  = bch2_snapshot_equiv(c, pos.snapshot),
501         };
502         int ret = 0;
503
504         if (!bkey_eq(s->pos, pos))
505                 s->ids.nr = 0;
506
507         pos.snapshot = n.equiv;
508         s->pos = pos;
509
510         darray_for_each(s->ids, i)
511                 if (i->equiv == n.equiv) {
512                         if (fsck_err_on(i->id != n.id, c,
513                                         "snapshot deletion did not run correctly:\n"
514                                         "  duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
515                                         bch2_btree_ids[btree_id],
516                                         pos.inode, pos.offset,
517                                         i->id, n.id, n.equiv))
518                                 return -BCH_ERR_need_snapshot_cleanup;
519
520                         return 0;
521                 }
522
523         ret = darray_push(&s->ids, n);
524         if (ret)
525                 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
526                         s->ids.size);
527 fsck_err:
528         return ret;
529 }
530
531 /**
532  * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor,
533  * and @ancestor hasn't been overwritten in @seen
534  *
535  * That is, returns whether key in @ancestor snapshot is visible in @id snapshot
536  */
537 static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
538                                     u32 id, u32 ancestor)
539 {
540         ssize_t i;
541         u32 top = seen->ids.nr ? seen->ids.data[seen->ids.nr - 1].equiv : 0;
542
543         BUG_ON(id > ancestor);
544         BUG_ON(!bch2_snapshot_is_equiv(c, id));
545         BUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
546
547         /* @ancestor should be the snapshot most recently added to @seen */
548         BUG_ON(ancestor != seen->pos.snapshot);
549         BUG_ON(ancestor != top);
550
551         if (id == ancestor)
552                 return true;
553
554         if (!bch2_snapshot_is_ancestor(c, id, ancestor))
555                 return false;
556
557         for (i = seen->ids.nr - 2;
558              i >= 0 && seen->ids.data[i].equiv >= id;
559              --i)
560                 if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv) &&
561                     bch2_snapshot_is_ancestor(c, seen->ids.data[i].equiv, ancestor))
562                         return false;
563
564         return true;
565 }
566
567 /**
568  * ref_visible - given a key with snapshot id @src that points to a key with
569  * snapshot id @dst, test whether there is some snapshot in which @dst is
570  * visible.
571  *
572  * This assumes we're visiting @src keys in natural key order.
573  *
574  * @s   - list of snapshot IDs already seen at @src
575  * @src - snapshot ID of src key
576  * @dst - snapshot ID of dst key
577  */
578 static int ref_visible(struct bch_fs *c, struct snapshots_seen *s,
579                        u32 src, u32 dst)
580 {
581         return dst <= src
582                 ? key_visible_in_snapshot(c, s, dst, src)
583                 : bch2_snapshot_is_ancestor(c, src, dst);
584 }
585
586 static int ref_visible2(struct bch_fs *c,
587                         u32 src, struct snapshots_seen *src_seen,
588                         u32 dst, struct snapshots_seen *dst_seen)
589 {
590         src = bch2_snapshot_equiv(c, src);
591         dst = bch2_snapshot_equiv(c, dst);
592
593         if (dst > src) {
594                 swap(dst, src);
595                 swap(dst_seen, src_seen);
596         }
597         return key_visible_in_snapshot(c, src_seen, dst, src);
598 }
599
600 #define for_each_visible_inode(_c, _s, _w, _snapshot, _i)                               \
601         for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr &&        \
602              (_i)->snapshot <= (_snapshot); _i++)                                       \
603                 if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
604
605 struct inode_walker_entry {
606         struct bch_inode_unpacked inode;
607         u32                     snapshot;
608         u64                     count;
609 };
610
611 struct inode_walker {
612         bool                            first_this_inode;
613         u64                             cur_inum;
614
615         DARRAY(struct inode_walker_entry) inodes;
616 };
617
618 static void inode_walker_exit(struct inode_walker *w)
619 {
620         darray_exit(&w->inodes);
621 }
622
623 static struct inode_walker inode_walker_init(void)
624 {
625         return (struct inode_walker) { 0, };
626 }
627
628 static int add_inode(struct bch_fs *c, struct inode_walker *w,
629                      struct bkey_s_c inode)
630 {
631         struct bch_inode_unpacked u;
632
633         BUG_ON(bch2_inode_unpack(inode, &u));
634
635         return darray_push(&w->inodes, ((struct inode_walker_entry) {
636                 .inode          = u,
637                 .snapshot       = bch2_snapshot_equiv(c, inode.k->p.snapshot),
638         }));
639 }
640
641 static int __walk_inode(struct btree_trans *trans,
642                         struct inode_walker *w, struct bpos pos)
643 {
644         struct bch_fs *c = trans->c;
645         struct btree_iter iter;
646         struct bkey_s_c k;
647         u32 restart_count = trans->restart_count;
648         unsigned i;
649         int ret;
650
651         pos.snapshot = bch2_snapshot_equiv(c, pos.snapshot);
652
653         if (pos.inode == w->cur_inum)
654                 goto lookup_snapshot;
655
656         w->inodes.nr = 0;
657
658         for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, pos.inode),
659                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
660                 if (k.k->p.offset != pos.inode)
661                         break;
662
663                 if (bkey_is_inode(k.k))
664                         add_inode(c, w, k);
665         }
666         bch2_trans_iter_exit(trans, &iter);
667
668         if (ret)
669                 return ret;
670
671         w->cur_inum             = pos.inode;
672         w->first_this_inode     = true;
673
674         if (trans_was_restarted(trans, restart_count))
675                 return -BCH_ERR_transaction_restart_nested;
676
677 lookup_snapshot:
678         for (i = 0; i < w->inodes.nr; i++)
679                 if (bch2_snapshot_is_ancestor(c, pos.snapshot, w->inodes.data[i].snapshot))
680                         goto found;
681         return INT_MAX;
682 found:
683         BUG_ON(pos.snapshot > w->inodes.data[i].snapshot);
684
685         if (pos.snapshot != w->inodes.data[i].snapshot) {
686                 struct inode_walker_entry e = w->inodes.data[i];
687
688                 e.snapshot = pos.snapshot;
689                 e.count = 0;
690
691                 bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u",
692                          pos.inode, pos.snapshot, w->inodes.data[i].snapshot);
693
694                 while (i && w->inodes.data[i - 1].snapshot > pos.snapshot)
695                         --i;
696
697                 ret = darray_insert_item(&w->inodes, i, e);
698                 if (ret)
699                         return ret;
700         }
701
702         return i;
703 }
704
705 static int __get_visible_inodes(struct btree_trans *trans,
706                                 struct inode_walker *w,
707                                 struct snapshots_seen *s,
708                                 u64 inum)
709 {
710         struct bch_fs *c = trans->c;
711         struct btree_iter iter;
712         struct bkey_s_c k;
713         int ret;
714
715         w->inodes.nr = 0;
716
717         for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
718                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
719                 u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
720
721                 if (k.k->p.offset != inum)
722                         break;
723
724                 if (!ref_visible(c, s, s->pos.snapshot, equiv))
725                         continue;
726
727                 if (bkey_is_inode(k.k))
728                         add_inode(c, w, k);
729
730                 if (equiv >= s->pos.snapshot)
731                         break;
732         }
733         bch2_trans_iter_exit(trans, &iter);
734
735         return ret;
736 }
737
738 static int check_key_has_snapshot(struct btree_trans *trans,
739                                   struct btree_iter *iter,
740                                   struct bkey_s_c k)
741 {
742         struct bch_fs *c = trans->c;
743         struct printbuf buf = PRINTBUF;
744         int ret = 0;
745
746         if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
747                         "key in missing snapshot: %s",
748                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
749                 ret = bch2_btree_delete_at(trans, iter,
750                                             BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
751 fsck_err:
752         printbuf_exit(&buf);
753         return ret;
754 }
755
756 static int hash_redo_key(struct btree_trans *trans,
757                          const struct bch_hash_desc desc,
758                          struct bch_hash_info *hash_info,
759                          struct btree_iter *k_iter, struct bkey_s_c k)
760 {
761         struct bkey_i *delete;
762         struct bkey_i *tmp;
763
764         delete = bch2_trans_kmalloc(trans, sizeof(*delete));
765         if (IS_ERR(delete))
766                 return PTR_ERR(delete);
767
768         tmp = bch2_bkey_make_mut_noupdate(trans, k);
769         if (IS_ERR(tmp))
770                 return PTR_ERR(tmp);
771
772         bkey_init(&delete->k);
773         delete->k.p = k_iter->pos;
774         return  bch2_btree_iter_traverse(k_iter) ?:
775                 bch2_trans_update(trans, k_iter, delete, 0) ?:
776                 bch2_hash_set_snapshot(trans, desc, hash_info,
777                                        (subvol_inum) { 0, k.k->p.inode },
778                                        k.k->p.snapshot, tmp,
779                                        BCH_HASH_SET_MUST_CREATE,
780                                        BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
781                 bch2_trans_commit(trans, NULL, NULL,
782                                   BTREE_INSERT_NOFAIL|
783                                   BTREE_INSERT_LAZY_RW);
784 }
785
786 static int hash_check_key(struct btree_trans *trans,
787                           const struct bch_hash_desc desc,
788                           struct bch_hash_info *hash_info,
789                           struct btree_iter *k_iter, struct bkey_s_c hash_k)
790 {
791         struct bch_fs *c = trans->c;
792         struct btree_iter iter = { NULL };
793         struct printbuf buf = PRINTBUF;
794         struct bkey_s_c k;
795         u64 hash;
796         int ret = 0;
797
798         if (hash_k.k->type != desc.key_type)
799                 return 0;
800
801         hash = desc.hash_bkey(hash_info, hash_k);
802
803         if (likely(hash == hash_k.k->p.offset))
804                 return 0;
805
806         if (hash_k.k->p.offset < hash)
807                 goto bad_hash;
808
809         for_each_btree_key_norestart(trans, iter, desc.btree_id,
810                                      SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
811                                      BTREE_ITER_SLOTS, k, ret) {
812                 if (bkey_eq(k.k->p, hash_k.k->p))
813                         break;
814
815                 if (fsck_err_on(k.k->type == desc.key_type &&
816                                 !desc.cmp_bkey(k, hash_k), c,
817                                 "duplicate hash table keys:\n%s",
818                                 (printbuf_reset(&buf),
819                                  bch2_bkey_val_to_text(&buf, c, hash_k),
820                                  buf.buf))) {
821                         ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0) ?: 1;
822                         break;
823                 }
824
825                 if (bkey_deleted(k.k)) {
826                         bch2_trans_iter_exit(trans, &iter);
827                         goto bad_hash;
828                 }
829         }
830 out:
831         bch2_trans_iter_exit(trans, &iter);
832         printbuf_exit(&buf);
833         return ret;
834 bad_hash:
835         if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
836                      bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
837                      (printbuf_reset(&buf),
838                       bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
839                 ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
840                 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
841                         bch_err(c, "hash_redo_key err %s", bch2_err_str(ret));
842                 if (ret)
843                         return ret;
844                 ret = -BCH_ERR_transaction_restart_nested;
845         }
846 fsck_err:
847         goto out;
848 }
849
850 static int check_inode(struct btree_trans *trans,
851                        struct btree_iter *iter,
852                        struct bkey_s_c k,
853                        struct bch_inode_unpacked *prev,
854                        struct snapshots_seen *s,
855                        bool full)
856 {
857         struct bch_fs *c = trans->c;
858         struct bch_inode_unpacked u;
859         bool do_update = false;
860         int ret;
861
862         ret = check_key_has_snapshot(trans, iter, k);
863         if (ret < 0)
864                 goto err;
865         if (ret)
866                 return 0;
867
868         ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
869         if (ret)
870                 goto err;
871
872         /*
873          * if snapshot id isn't a leaf node, skip it - deletion in
874          * particular is not atomic, so on the internal snapshot nodes
875          * we can see inodes marked for deletion after a clean shutdown
876          */
877         if (bch2_snapshot_internal_node(c, k.k->p.snapshot))
878                 return 0;
879
880         if (!bkey_is_inode(k.k))
881                 return 0;
882
883         BUG_ON(bch2_inode_unpack(k, &u));
884
885         if (!full &&
886             !(u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|
887                             BCH_INODE_I_SECTORS_DIRTY|
888                             BCH_INODE_UNLINKED)))
889                 return 0;
890
891         if (prev->bi_inum != u.bi_inum)
892                 *prev = u;
893
894         if (fsck_err_on(prev->bi_hash_seed      != u.bi_hash_seed ||
895                         inode_d_type(prev)      != inode_d_type(&u), c,
896                         "inodes in different snapshots don't match")) {
897                 bch_err(c, "repair not implemented yet");
898                 return -EINVAL;
899         }
900
901         if (u.bi_flags & BCH_INODE_UNLINKED &&
902             (!c->sb.clean ||
903              fsck_err(c, "filesystem marked clean, but inode %llu unlinked",
904                       u.bi_inum))) {
905                 bch2_trans_unlock(trans);
906                 bch2_fs_lazy_rw(c);
907
908                 ret = fsck_inode_rm(trans, u.bi_inum, iter->pos.snapshot);
909                 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
910                         bch_err(c, "error in fsck: error while deleting inode: %s",
911                                 bch2_err_str(ret));
912                 return ret;
913         }
914
915         if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY &&
916             (!c->sb.clean ||
917              fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty",
918                       u.bi_inum))) {
919                 bch_verbose(c, "truncating inode %llu", u.bi_inum);
920
921                 bch2_trans_unlock(trans);
922                 bch2_fs_lazy_rw(c);
923
924                 /*
925                  * XXX: need to truncate partial blocks too here - or ideally
926                  * just switch units to bytes and that issue goes away
927                  */
928                 ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
929                                 SPOS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9,
930                                      iter->pos.snapshot),
931                                 POS(u.bi_inum, U64_MAX),
932                                 0, NULL);
933                 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
934                         bch_err(c, "error in fsck: error truncating inode: %s",
935                                 bch2_err_str(ret));
936                 if (ret)
937                         return ret;
938
939                 /*
940                  * We truncated without our normal sector accounting hook, just
941                  * make sure we recalculate it:
942                  */
943                 u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY;
944
945                 u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
946                 do_update = true;
947         }
948
949         if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY &&
950             (!c->sb.clean ||
951              fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty",
952                       u.bi_inum))) {
953                 s64 sectors;
954
955                 bch_verbose(c, "recounting sectors for inode %llu",
956                             u.bi_inum);
957
958                 sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
959                 if (sectors < 0) {
960                         bch_err(c, "error in fsck: error recounting inode sectors: %s",
961                                 bch2_err_str(sectors));
962                         return sectors;
963                 }
964
965                 u.bi_sectors = sectors;
966                 u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
967                 do_update = true;
968         }
969
970         if (u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) {
971                 u.bi_dir = 0;
972                 u.bi_dir_offset = 0;
973                 u.bi_flags &= ~BCH_INODE_BACKPTR_UNTRUSTED;
974                 do_update = true;
975         }
976
977         if (do_update) {
978                 ret = __write_inode(trans, &u, iter->pos.snapshot);
979                 if (ret)
980                         bch_err(c, "error in fsck: error updating inode: %s",
981                                 bch2_err_str(ret));
982         }
983 err:
984 fsck_err:
985         if (ret)
986                 bch_err_fn(c, ret);
987         return ret;
988 }
989
990 noinline_for_stack
991 static int check_inodes(struct bch_fs *c, bool full)
992 {
993         struct btree_trans trans;
994         struct btree_iter iter;
995         struct bch_inode_unpacked prev = { 0 };
996         struct snapshots_seen s;
997         struct bkey_s_c k;
998         int ret;
999
1000         snapshots_seen_init(&s);
1001         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
1002
1003         ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes,
1004                         POS_MIN,
1005                         BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1006                         NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
1007                 check_inode(&trans, &iter, k, &prev, &s, full));
1008
1009         bch2_trans_exit(&trans);
1010         snapshots_seen_exit(&s);
1011         if (ret)
1012                 bch_err_fn(c, ret);
1013         return ret;
1014 }
1015
1016 /*
1017  * Checking for overlapping extents needs to be reimplemented
1018  */
1019 #if 0
1020 static int fix_overlapping_extent(struct btree_trans *trans,
1021                                        struct bkey_s_c k, struct bpos cut_at)
1022 {
1023         struct btree_iter iter;
1024         struct bkey_i *u;
1025         int ret;
1026
1027         u = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1028         ret = PTR_ERR_OR_ZERO(u);
1029         if (ret)
1030                 return ret;
1031
1032         bkey_reassemble(u, k);
1033         bch2_cut_front(cut_at, u);
1034
1035
1036         /*
1037          * We don't want to go through the extent_handle_overwrites path:
1038          *
1039          * XXX: this is going to screw up disk accounting, extent triggers
1040          * assume things about extent overwrites - we should be running the
1041          * triggers manually here
1042          */
1043         bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, u->k.p,
1044                              BTREE_ITER_INTENT|BTREE_ITER_NOT_EXTENTS);
1045
1046         BUG_ON(iter.flags & BTREE_ITER_IS_EXTENTS);
1047         ret   = bch2_btree_iter_traverse(&iter) ?:
1048                 bch2_trans_update(trans, &iter, u, BTREE_TRIGGER_NORUN) ?:
1049                 bch2_trans_commit(trans, NULL, NULL,
1050                                   BTREE_INSERT_NOFAIL|
1051                                   BTREE_INSERT_LAZY_RW);
1052         bch2_trans_iter_exit(trans, &iter);
1053         return ret;
1054 }
1055 #endif
1056
1057 static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
1058                                                 struct btree_iter *iter,
1059                                                 struct bpos pos)
1060 {
1061         return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
1062 }
1063
1064 static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
1065                                    struct bkey_s_c_dirent d)
1066 {
1067         return  inode->bi_dir           == d.k->p.inode &&
1068                 inode->bi_dir_offset    == d.k->p.offset;
1069 }
1070
1071 static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
1072                                    struct bch_inode_unpacked *inode)
1073 {
1074         return d.v->d_type == DT_SUBVOL
1075                 ? le32_to_cpu(d.v->d_child_subvol)      == inode->bi_subvol
1076                 : le64_to_cpu(d.v->d_inum)              == inode->bi_inum;
1077 }
1078
1079 static int inode_backpointer_exists(struct btree_trans *trans,
1080                                     struct bch_inode_unpacked *inode,
1081                                     u32 snapshot)
1082 {
1083         struct btree_iter iter;
1084         struct bkey_s_c_dirent d;
1085         int ret;
1086
1087         d = dirent_get_by_pos(trans, &iter,
1088                         SPOS(inode->bi_dir, inode->bi_dir_offset, snapshot));
1089         ret = bkey_err(d);
1090         if (ret)
1091                 return bch2_err_matches(ret, ENOENT) ? 0 : ret;
1092
1093         ret = dirent_points_to_inode(d, inode);
1094         bch2_trans_iter_exit(trans, &iter);
1095         return ret;
1096 }
1097
1098 static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
1099 {
1100         struct bch_fs *c = trans->c;
1101         struct inode_walker_entry *i;
1102         u32 restart_count = trans->restart_count;
1103         int ret = 0;
1104         s64 count2;
1105
1106         darray_for_each(w->inodes, i) {
1107                 if (i->inode.bi_sectors == i->count)
1108                         continue;
1109
1110                 count2 = bch2_count_inode_sectors(trans, w->cur_inum, i->snapshot);
1111
1112                 if (i->count != count2) {
1113                         bch_err(c, "fsck counted i_sectors wrong: got %llu should be %llu",
1114                                 i->count, count2);
1115                         i->count = count2;
1116                         if (i->inode.bi_sectors == i->count)
1117                                 continue;
1118                 }
1119
1120                 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
1121                             "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
1122                             w->cur_inum, i->snapshot,
1123                             i->inode.bi_sectors, i->count)) {
1124                         i->inode.bi_sectors = i->count;
1125                         ret = write_inode(trans, &i->inode, i->snapshot);
1126                         if (ret)
1127                                 break;
1128                 }
1129         }
1130 fsck_err:
1131         if (ret)
1132                 bch_err_fn(c, ret);
1133         if (!ret && trans_was_restarted(trans, restart_count))
1134                 ret = -BCH_ERR_transaction_restart_nested;
1135         return ret;
1136 }
1137
1138 struct extent_end {
1139         u32                     snapshot;
1140         u64                     offset;
1141         struct snapshots_seen   seen;
1142 };
1143
1144 typedef DARRAY(struct extent_end) extent_ends;
1145
1146 static int get_print_extent(struct btree_trans *trans, struct bpos pos, struct printbuf *out)
1147 {
1148         struct btree_iter iter;
1149         struct bkey_s_c k;
1150         int ret;
1151
1152         k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_extents, pos,
1153                                BTREE_ITER_SLOTS|
1154                                BTREE_ITER_ALL_SNAPSHOTS|
1155                                BTREE_ITER_NOT_EXTENTS);
1156         ret = bkey_err(k);
1157         if (ret)
1158                 return ret;
1159
1160         bch2_bkey_val_to_text(out, trans->c, k);
1161         bch2_trans_iter_exit(trans, &iter);
1162         return 0;
1163 }
1164
1165 static int check_overlapping_extents(struct btree_trans *trans,
1166                               struct snapshots_seen *seen,
1167                               extent_ends *extent_ends,
1168                               struct bkey_s_c k,
1169                               struct btree_iter *iter)
1170 {
1171         struct bch_fs *c = trans->c;
1172         struct extent_end *i;
1173         struct printbuf buf = PRINTBUF;
1174         int ret = 0;
1175
1176         darray_for_each(*extent_ends, i) {
1177                 /* duplicate, due to transaction restart: */
1178                 if (i->offset   == k.k->p.offset &&
1179                     i->snapshot == k.k->p.snapshot)
1180                         continue;
1181
1182                 if (!ref_visible2(c,
1183                                   k.k->p.snapshot, seen,
1184                                   i->snapshot, &i->seen))
1185                         continue;
1186
1187                 if (i->offset <= bkey_start_offset(k.k))
1188                         continue;
1189
1190                 printbuf_reset(&buf);
1191                 prt_str(&buf, "overlapping extents:\n  ");
1192                 bch2_bkey_val_to_text(&buf, c, k);
1193                 prt_str(&buf, "\n  ");
1194
1195                 ret = get_print_extent(trans, SPOS(k.k->p.inode, i->offset, i->snapshot), &buf);
1196                 if (ret)
1197                         break;
1198
1199                 if (fsck_err(c, buf.buf)) {
1200                         struct bkey_i *update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1201                         if ((ret = PTR_ERR_OR_ZERO(update)))
1202                                 goto err;
1203                         bkey_reassemble(update, k);
1204                         ret = bch2_trans_update_extent(trans, iter, update, 0);
1205                         if (ret)
1206                                 goto err;
1207                 }
1208         }
1209 err:
1210 fsck_err:
1211         printbuf_exit(&buf);
1212         return ret;
1213 }
1214
1215 static int extent_ends_at(extent_ends *extent_ends,
1216                           struct snapshots_seen *seen,
1217                           struct bkey_s_c k)
1218 {
1219         struct extent_end *i, n = (struct extent_end) {
1220                 .snapshot       = k.k->p.snapshot,
1221                 .offset         = k.k->p.offset,
1222                 .seen           = *seen,
1223         };
1224
1225         n.seen.ids.data = kmemdup(seen->ids.data,
1226                               sizeof(seen->ids.data[0]) * seen->ids.size,
1227                               GFP_KERNEL);
1228         if (!n.seen.ids.data)
1229                 return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
1230
1231         darray_for_each(*extent_ends, i) {
1232                 if (i->snapshot == k.k->p.snapshot) {
1233                         snapshots_seen_exit(&i->seen);
1234                         *i = n;
1235                         return 0;
1236                 }
1237
1238                 if (i->snapshot >= k.k->p.snapshot)
1239                         break;
1240         }
1241
1242         return darray_insert_item(extent_ends, i - extent_ends->data, n);
1243 }
1244
1245 static void extent_ends_reset(extent_ends *extent_ends)
1246 {
1247         struct extent_end *i;
1248
1249         darray_for_each(*extent_ends, i)
1250                 snapshots_seen_exit(&i->seen);
1251
1252         extent_ends->nr = 0;
1253 }
1254
1255 static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
1256                         struct bkey_s_c k,
1257                         struct inode_walker *inode,
1258                         struct snapshots_seen *s,
1259                         extent_ends *extent_ends)
1260 {
1261         struct bch_fs *c = trans->c;
1262         struct inode_walker_entry *i;
1263         struct printbuf buf = PRINTBUF;
1264         struct bpos equiv;
1265         int ret = 0;
1266
1267         ret = check_key_has_snapshot(trans, iter, k);
1268         if (ret) {
1269                 ret = ret < 0 ? ret : 0;
1270                 goto out;
1271         }
1272
1273         equiv = k.k->p;
1274         equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1275
1276         ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1277         if (ret)
1278                 goto err;
1279
1280         if (k.k->type == KEY_TYPE_whiteout)
1281                 goto out;
1282
1283         if (inode->cur_inum != k.k->p.inode) {
1284                 ret = check_i_sectors(trans, inode);
1285                 if (ret)
1286                         goto err;
1287
1288                 extent_ends_reset(extent_ends);
1289         }
1290
1291         BUG_ON(!iter->path->should_be_locked);
1292
1293         ret = check_overlapping_extents(trans, s, extent_ends, k, iter);
1294         if (ret)
1295                 goto err;
1296
1297         ret = extent_ends_at(extent_ends, s, k);
1298         if (ret)
1299                 goto err;
1300
1301         ret = __walk_inode(trans, inode, equiv);
1302         if (ret < 0)
1303                 goto err;
1304
1305         if (fsck_err_on(ret == INT_MAX, c,
1306                         "extent in missing inode:\n  %s",
1307                         (printbuf_reset(&buf),
1308                          bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1309                 ret = bch2_btree_delete_at(trans, iter,
1310                                             BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1311                 goto out;
1312         }
1313
1314         if (ret == INT_MAX) {
1315                 ret = 0;
1316                 goto out;
1317         }
1318
1319         i = inode->inodes.data + ret;
1320         ret = 0;
1321
1322         if (fsck_err_on(!S_ISREG(i->inode.bi_mode) &&
1323                         !S_ISLNK(i->inode.bi_mode), c,
1324                         "extent in non regular inode mode %o:\n  %s",
1325                         i->inode.bi_mode,
1326                         (printbuf_reset(&buf),
1327                          bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1328                 ret = bch2_btree_delete_at(trans, iter,
1329                                             BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1330                 goto out;
1331         }
1332
1333         /*
1334          * Check inodes in reverse order, from oldest snapshots to newest, so
1335          * that we emit the fewest number of whiteouts necessary:
1336          */
1337         for (i = inode->inodes.data + inode->inodes.nr - 1;
1338              i >= inode->inodes.data;
1339              --i) {
1340                 if (i->snapshot > equiv.snapshot ||
1341                     !key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
1342                         continue;
1343
1344                 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
1345                                 k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
1346                                 !bkey_extent_is_reservation(k), c,
1347                                 "extent type past end of inode %llu:%u, i_size %llu\n  %s",
1348                                 i->inode.bi_inum, i->snapshot, i->inode.bi_size,
1349                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1350                         struct btree_iter iter2;
1351
1352                         bch2_trans_copy_iter(&iter2, iter);
1353                         bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
1354                         ret =   bch2_btree_iter_traverse(&iter2) ?:
1355                                 bch2_btree_delete_at(trans, &iter2,
1356                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1357                         bch2_trans_iter_exit(trans, &iter2);
1358                         if (ret)
1359                                 goto err;
1360
1361                         if (i->snapshot != equiv.snapshot) {
1362                                 ret = snapshots_seen_add(c, s, i->snapshot);
1363                                 if (ret)
1364                                         goto err;
1365                         }
1366                 }
1367         }
1368
1369         if (bkey_extent_is_allocation(k.k))
1370                 for_each_visible_inode(c, s, inode, equiv.snapshot, i)
1371                         i->count += k.k->size;
1372 #if 0
1373         bch2_bkey_buf_reassemble(&prev, c, k);
1374 #endif
1375
1376 out:
1377 err:
1378 fsck_err:
1379         printbuf_exit(&buf);
1380
1381         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
1382                 bch_err_fn(c, ret);
1383         return ret;
1384 }
1385
1386 /*
1387  * Walk extents: verify that extents have a corresponding S_ISREG inode, and
1388  * that i_size an i_sectors are consistent
1389  */
1390 noinline_for_stack
1391 static int check_extents(struct bch_fs *c)
1392 {
1393         struct inode_walker w = inode_walker_init();
1394         struct snapshots_seen s;
1395         struct btree_trans trans;
1396         struct btree_iter iter;
1397         struct bkey_s_c k;
1398         extent_ends extent_ends = { 0 };
1399         struct disk_reservation res = { 0 };
1400         int ret = 0;
1401
1402         snapshots_seen_init(&s);
1403         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
1404
1405         bch_verbose(c, "checking extents");
1406
1407         ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
1408                         POS(BCACHEFS_ROOT_INO, 0),
1409                         BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1410                         &res, NULL,
1411                         BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
1412                 bch2_disk_reservation_put(c, &res);
1413                 check_extent(&trans, &iter, k, &w, &s, &extent_ends);
1414         }));
1415
1416         bch2_disk_reservation_put(c, &res);
1417         extent_ends_reset(&extent_ends);
1418         darray_exit(&extent_ends);
1419         inode_walker_exit(&w);
1420         bch2_trans_exit(&trans);
1421         snapshots_seen_exit(&s);
1422
1423         if (ret)
1424                 bch_err_fn(c, ret);
1425         return ret;
1426 }
1427
1428 static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
1429 {
1430         struct bch_fs *c = trans->c;
1431         struct inode_walker_entry *i;
1432         u32 restart_count = trans->restart_count;
1433         int ret = 0;
1434         s64 count2;
1435
1436         darray_for_each(w->inodes, i) {
1437                 if (i->inode.bi_nlink == i->count)
1438                         continue;
1439
1440                 count2 = bch2_count_subdirs(trans, w->cur_inum, i->snapshot);
1441                 if (count2 < 0)
1442                         return count2;
1443
1444                 if (i->count != count2) {
1445                         bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
1446                                 i->count, count2);
1447                         i->count = count2;
1448                         if (i->inode.bi_nlink == i->count)
1449                                 continue;
1450                 }
1451
1452                 if (fsck_err_on(i->inode.bi_nlink != i->count, c,
1453                                 "directory %llu:%u with wrong i_nlink: got %u, should be %llu",
1454                                 w->cur_inum, i->snapshot, i->inode.bi_nlink, i->count)) {
1455                         i->inode.bi_nlink = i->count;
1456                         ret = write_inode(trans, &i->inode, i->snapshot);
1457                         if (ret)
1458                                 break;
1459                 }
1460         }
1461 fsck_err:
1462         if (ret)
1463                 bch_err_fn(c, ret);
1464         if (!ret && trans_was_restarted(trans, restart_count))
1465                 ret = -BCH_ERR_transaction_restart_nested;
1466         return ret;
1467 }
1468
1469 static int check_dirent_target(struct btree_trans *trans,
1470                                struct btree_iter *iter,
1471                                struct bkey_s_c_dirent d,
1472                                struct bch_inode_unpacked *target,
1473                                u32 target_snapshot)
1474 {
1475         struct bch_fs *c = trans->c;
1476         struct bkey_i_dirent *n;
1477         bool backpointer_exists = true;
1478         struct printbuf buf = PRINTBUF;
1479         int ret = 0;
1480
1481         if (!target->bi_dir &&
1482             !target->bi_dir_offset) {
1483                 target->bi_dir          = d.k->p.inode;
1484                 target->bi_dir_offset   = d.k->p.offset;
1485
1486                 ret = __write_inode(trans, target, target_snapshot);
1487                 if (ret)
1488                         goto err;
1489         }
1490
1491         if (!inode_points_to_dirent(target, d)) {
1492                 ret = inode_backpointer_exists(trans, target, d.k->p.snapshot);
1493                 if (ret < 0)
1494                         goto err;
1495
1496                 backpointer_exists = ret;
1497                 ret = 0;
1498
1499                 if (fsck_err_on(S_ISDIR(target->bi_mode) &&
1500                                 backpointer_exists, c,
1501                                 "directory %llu with multiple links",
1502                                 target->bi_inum)) {
1503                         ret = __remove_dirent(trans, d.k->p);
1504                         goto out;
1505                 }
1506
1507                 if (fsck_err_on(backpointer_exists &&
1508                                 !target->bi_nlink, c,
1509                                 "inode %llu type %s has multiple links but i_nlink 0",
1510                                 target->bi_inum, bch2_d_types[d.v->d_type])) {
1511                         target->bi_nlink++;
1512                         target->bi_flags &= ~BCH_INODE_UNLINKED;
1513
1514                         ret = __write_inode(trans, target, target_snapshot);
1515                         if (ret)
1516                                 goto err;
1517                 }
1518
1519                 if (fsck_err_on(!backpointer_exists, c,
1520                                 "inode %llu:%u has wrong backpointer:\n"
1521                                 "got       %llu:%llu\n"
1522                                 "should be %llu:%llu",
1523                                 target->bi_inum, target_snapshot,
1524                                 target->bi_dir,
1525                                 target->bi_dir_offset,
1526                                 d.k->p.inode,
1527                                 d.k->p.offset)) {
1528                         target->bi_dir          = d.k->p.inode;
1529                         target->bi_dir_offset   = d.k->p.offset;
1530
1531                         ret = __write_inode(trans, target, target_snapshot);
1532                         if (ret)
1533                                 goto err;
1534                 }
1535         }
1536
1537         if (fsck_err_on(d.v->d_type != inode_d_type(target), c,
1538                         "incorrect d_type: got %s, should be %s:\n%s",
1539                         bch2_d_type_str(d.v->d_type),
1540                         bch2_d_type_str(inode_d_type(target)),
1541                         (printbuf_reset(&buf),
1542                          bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
1543                 n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
1544                 ret = PTR_ERR_OR_ZERO(n);
1545                 if (ret)
1546                         goto err;
1547
1548                 bkey_reassemble(&n->k_i, d.s_c);
1549                 n->v.d_type = inode_d_type(target);
1550
1551                 ret = bch2_trans_update(trans, iter, &n->k_i, 0);
1552                 if (ret)
1553                         goto err;
1554
1555                 d = dirent_i_to_s_c(n);
1556         }
1557
1558         if (d.v->d_type == DT_SUBVOL &&
1559             target->bi_parent_subvol != le32_to_cpu(d.v->d_parent_subvol) &&
1560             (c->sb.version < bcachefs_metadata_version_subvol_dirent ||
1561              fsck_err(c, "dirent has wrong d_parent_subvol field: got %u, should be %u",
1562                       le32_to_cpu(d.v->d_parent_subvol),
1563                       target->bi_parent_subvol))) {
1564                 n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
1565                 ret = PTR_ERR_OR_ZERO(n);
1566                 if (ret)
1567                         goto err;
1568
1569                 bkey_reassemble(&n->k_i, d.s_c);
1570                 n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
1571
1572                 ret = bch2_trans_update(trans, iter, &n->k_i, 0);
1573                 if (ret)
1574                         goto err;
1575
1576                 d = dirent_i_to_s_c(n);
1577         }
1578 out:
1579 err:
1580 fsck_err:
1581         printbuf_exit(&buf);
1582
1583         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
1584                 bch_err_fn(c, ret);
1585         return ret;
1586 }
1587
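/*
 * Check a single dirent: it must live in an existing directory, sit at the
 * correct hash position, and point to a valid inode (or subvolume root); also
 * accumulates the subdirectory count for the parent directory.
 */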
1588 static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
1589                         struct bkey_s_c k,
1590                         struct bch_hash_info *hash_info,
1591                         struct inode_walker *dir,
1592                         struct inode_walker *target,
1593                         struct snapshots_seen *s)
1594 {
1595         struct bch_fs *c = trans->c;
1596         struct bkey_s_c_dirent d;
1597         struct inode_walker_entry *i;
1598         struct printbuf buf = PRINTBUF;
1599         struct bpos equiv;
1600         int ret = 0;
1601
1602         ret = check_key_has_snapshot(trans, iter, k);
1603         if (ret) {
1604                 ret = ret < 0 ? ret : 0;
1605                 goto out;
1606         }
1607
1608         equiv = k.k->p;
1609         equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1610
1611         ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1612         if (ret)
1613                 goto err;
1614
1615         if (k.k->type == KEY_TYPE_whiteout)
1616                 goto out;
1617
1618         if (dir->cur_inum != k.k->p.inode) {
1619                 ret = check_subdir_count(trans, dir);
1620                 if (ret)
1621                         goto err;
1622         }
1623
1624         BUG_ON(!iter->path->should_be_locked);
1625
1626         ret = __walk_inode(trans, dir, equiv);
1627         if (ret < 0)
1628                 goto err;
1629
1630         if (dir->first_this_inode)
1631                 *hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
1632         dir->first_this_inode = false;
1633
1634         if (fsck_err_on(ret == INT_MAX, c,
1635                         "dirent in nonexisting directory:\n%s",
1636                         (printbuf_reset(&buf),
1637                          bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1638                 ret = bch2_btree_delete_at(trans, iter,
1639                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1640                 goto out;
1641         }
1642
1643         if (ret == INT_MAX) {
1644                 ret = 0;
1645                 goto out;
1646         }
1647
1648         i = dir->inodes.data + ret;
1649         ret = 0;
1650
1651         if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
1652                         "dirent in non-directory inode type %s:\n%s",
1653                         bch2_d_type_str(inode_d_type(&i->inode)),
1654                         (printbuf_reset(&buf),
1655                          bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1656                 ret = bch2_btree_delete_at(trans, iter, 0);
1657                 goto out;
1658         }
1659
1660         ret = hash_check_key(trans, bch2_dirent_hash_desc, hash_info, iter, k);
1661         if (ret < 0)
1662                 goto err;
1663         if (ret) {
1664                 /* dirent has been deleted */
1665                 ret = 0;
1666                 goto out;
1667         }
1668
1669         if (k.k->type != KEY_TYPE_dirent)
1670                 goto out;
1671
1672         d = bkey_s_c_to_dirent(k);
1673
1674         if (d.v->d_type == DT_SUBVOL) {
1675                 struct bch_inode_unpacked subvol_root;
1676                 u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
1677                 u32 target_snapshot;
1678                 u64 target_inum;
1679
1680                 ret = __subvol_lookup(trans, target_subvol,
1681                                       &target_snapshot, &target_inum);
1682                 if (ret && !bch2_err_matches(ret, ENOENT))
1683                         goto err;
1684
1685                 if (fsck_err_on(ret, c,
1686                                 "dirent points to missing subvolume %u",
1687                                 target_subvol)) {
1688                         ret = __remove_dirent(trans, d.k->p);
1689                         goto err;
1690                 }
1691
1692                 ret = __lookup_inode(trans, target_inum,
1693                                    &subvol_root, &target_snapshot);
1694                 if (ret && !bch2_err_matches(ret, ENOENT))
1695                         goto err;
1696
1697                 if (fsck_err_on(ret, c,
1698                                 "subvolume %u points to missing subvolume root %llu",
1699                                 target_subvol,
1700                                 target_inum)) {
1701                         bch_err(c, "repair not implemented yet");
1702                         ret = -EINVAL;
1703                         goto err;
1704                 }
1705
1706                 if (fsck_err_on(subvol_root.bi_subvol != target_subvol, c,
1707                                 "subvol root %llu has wrong bi_subvol field: got %u, should be %u",
1708                                 target_inum,
1709                                 subvol_root.bi_subvol, target_subvol)) {
1710                         subvol_root.bi_subvol = target_subvol;
1711                         ret = __write_inode(trans, &subvol_root, target_snapshot);
1712                         if (ret)
1713                                 goto err;
1714                 }
1715
1716                 ret = check_dirent_target(trans, iter, d, &subvol_root,
1717                                           target_snapshot);
1718                 if (ret)
1719                         goto err;
1720         } else {
1721                 ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
1722                 if (ret)
1723                         goto err;
1724
1725                 if (fsck_err_on(!target->inodes.nr, c,
1726                                 "dirent points to missing inode: (equiv %u)\n%s",
1727                                 equiv.snapshot,
1728                                 (printbuf_reset(&buf),
1729                                  bch2_bkey_val_to_text(&buf, c, k),
1730                                  buf.buf))) {
1731                         ret = __remove_dirent(trans, d.k->p);
1732                         if (ret)
1733                                 goto err;
1734                 }
1735
1736                 darray_for_each(target->inodes, i) {
1737                         ret = check_dirent_target(trans, iter, d,
1738                                                   &i->inode, i->snapshot);
1739                         if (ret)
1740                                 goto err;
1741                 }
1742         }
1743
1744         if (d.v->d_type == DT_DIR)
1745                 for_each_visible_inode(c, s, dir, equiv.snapshot, i)
1746                         i->count++;
1747
1748 out:
1749 err:
1750 fsck_err:
1751         printbuf_exit(&buf);
1752
1753         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
1754                 bch_err_fn(c, ret);
1755         return ret;
1756 }
1757
1758 /*
1759  * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
1760  * validate d_type
1761  */
1762 noinline_for_stack
1763 static int check_dirents(struct bch_fs *c)
1764 {
1765         struct inode_walker dir = inode_walker_init();
1766         struct inode_walker target = inode_walker_init();
1767         struct snapshots_seen s;
1768         struct bch_hash_info hash_info;
1769         struct btree_trans trans;
1770         struct btree_iter iter;
1771         struct bkey_s_c k;
1772         int ret = 0;
1773
1774         bch_verbose(c, "checking dirents");
1775
1776         snapshots_seen_init(&s);
1777         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
1778
1779         ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_dirents,
1780                         POS(BCACHEFS_ROOT_INO, 0),
1781                         BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
1782                         k,
1783                         NULL, NULL,
1784                         BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
1785                 check_dirent(&trans, &iter, k, &hash_info, &dir, &target, &s));
1786
1787         bch2_trans_exit(&trans);
1788         snapshots_seen_exit(&s);
1789         inode_walker_exit(&dir);
1790         inode_walker_exit(&target);
1791
1792         if (ret)
1793                 bch_err_fn(c, ret);
1794         return ret;
1795 }
1796
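/*
 * Check a single xattr: it must belong to an existing inode and sit at the
 * correct hash position.
 */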
1797 static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
1798                        struct bkey_s_c k,
1799                        struct bch_hash_info *hash_info,
1800                        struct inode_walker *inode)
1801 {
1802         struct bch_fs *c = trans->c;
1803         int ret;
1804
1805         ret = check_key_has_snapshot(trans, iter, k);
1806         if (ret)
1807                 return ret;
1808
1809         ret = __walk_inode(trans, inode, k.k->p);
1810         if (ret < 0)
1811                 return ret;
1812
1813         if (inode->first_this_inode)
1814                 *hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
1815         inode->first_this_inode = false;
1816
1817         if (fsck_err_on(ret == INT_MAX, c,
1818                         "xattr for missing inode %llu",
1819                         k.k->p.inode))
1820                 return bch2_btree_delete_at(trans, iter, 0);
1821
1822         if (ret == INT_MAX)
1823                 return 0;
1824
1827         ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
1828 fsck_err:
1829         if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
1830                 bch_err_fn(c, ret);
1831         return ret;
1832 }
1833
1834 /*
1835  * Walk xattrs: verify that they all have a corresponding inode
1836  */
1837 noinline_for_stack
1838 static int check_xattrs(struct bch_fs *c)
1839 {
1840         struct inode_walker inode = inode_walker_init();
1841         struct bch_hash_info hash_info;
1842         struct btree_trans trans;
1843         struct btree_iter iter;
1844         struct bkey_s_c k;
1845         int ret = 0;
1846
1847         bch_verbose(c, "checking xattrs");
1848
1849         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
1850
1851         ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
1852                         POS(BCACHEFS_ROOT_INO, 0),
1853                         BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
1854                         k,
1855                         NULL, NULL,
1856                         BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
1857                 check_xattr(&trans, &iter, k, &hash_info, &inode));
1858
1859         bch2_trans_exit(&trans);
1860
1861         if (ret)
1862                 bch_err_fn(c, ret);
1863         return ret;
1864 }
1865
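/*
 * Ensure the root subvolume and root directory inode exist, recreating them if
 * they're missing or if the root inode isn't a directory.
 */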
1866 static int check_root_trans(struct btree_trans *trans)
1867 {
1868         struct bch_fs *c = trans->c;
1869         struct bch_inode_unpacked root_inode;
1870         u32 snapshot;
1871         u64 inum;
1872         int ret;
1873
1874         ret = __subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
1875         if (ret && !bch2_err_matches(ret, ENOENT))
1876                 return ret;
1877
1878         if (mustfix_fsck_err_on(ret, c, "root subvol missing")) {
1879                 struct bkey_i_subvolume root_subvol;
1880
1881                 snapshot        = U32_MAX;
1882                 inum            = BCACHEFS_ROOT_INO;
1883
1884                 bkey_subvolume_init(&root_subvol.k_i);
1885                 root_subvol.k.p.offset = BCACHEFS_ROOT_SUBVOL;
1886                 root_subvol.v.flags     = 0;
1887                 root_subvol.v.snapshot  = cpu_to_le32(snapshot);
1888                 root_subvol.v.inode     = cpu_to_le64(inum);
1889                 ret = commit_do(trans, NULL, NULL,
1890                                       BTREE_INSERT_NOFAIL|
1891                                       BTREE_INSERT_LAZY_RW,
1892                         __bch2_btree_insert(trans, BTREE_ID_subvolumes,
1893                                             &root_subvol.k_i, 0));
1894                 if (ret) {
1895                         bch_err(c, "error writing root subvol: %s", bch2_err_str(ret));
1896                         goto err;
1897                 }
1899         }
1900
1901         ret = __lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
1902         if (ret && !bch2_err_matches(ret, ENOENT))
1903                 return ret;
1904
1905         if (mustfix_fsck_err_on(ret, c, "root directory missing") ||
1906             mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), c,
1907                                 "root inode not a directory")) {
1908                 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
1909                                 0, NULL);
1910                 root_inode.bi_inum = inum;
1911
1912                 ret = __write_inode(trans, &root_inode, snapshot);
1913                 if (ret)
1914                         bch_err(c, "error writing root inode: %s", bch2_err_str(ret));
1915         }
1916 err:
1917 fsck_err:
1918         return ret;
1919 }
1920
1921 /* Get root directory, create if it doesn't exist: */
1922 noinline_for_stack
1923 static int check_root(struct bch_fs *c)
1924 {
1925         int ret;
1926
1927         bch_verbose(c, "checking root directory");
1928
1929         ret = bch2_trans_do(c, NULL, NULL,
1930                              BTREE_INSERT_NOFAIL|
1931                              BTREE_INSERT_LAZY_RW,
1932                 check_root_trans(&trans));
1933
1934         if (ret)
1935                 bch_err_fn(c, ret);
1936         return ret;
1937 }
1938
1939 struct pathbuf_entry {
1940         u64     inum;
1941         u32     snapshot;
1942 };
1943
1944 typedef DARRAY(struct pathbuf_entry) pathbuf;
1945
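/* Is (inum, snapshot) already on the path? If so, we've found a loop: */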
1946 static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
1947 {
1948         struct pathbuf_entry *i;
1949
1950         darray_for_each(*p, i)
1951                 if (i->inum     == inum &&
1952                     i->snapshot == snapshot)
1953                         return true;
1954
1955         return false;
1956 }
1957
1958 static int path_down(struct bch_fs *c, pathbuf *p,
1959                      u64 inum, u32 snapshot)
1960 {
1961         int ret = darray_push(p, ((struct pathbuf_entry) {
1962                 .inum           = inum,
1963                 .snapshot       = snapshot,
1964         }));
1965
1966         if (ret)
1967                 bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
1968                         p->size);
1969         return ret;
1970 }
1971
1972 /*
1973  * Check that a given inode is reachable from the root:
1974  *
1975  * XXX: we should also be verifying that inodes are in the right subvolumes
1976  */
1977 static int check_path(struct btree_trans *trans,
1978                       pathbuf *p,
1979                       struct bch_inode_unpacked *inode,
1980                       u32 snapshot)
1981 {
1982         struct bch_fs *c = trans->c;
1983         int ret = 0;
1984
1985         snapshot = bch2_snapshot_equiv(c, snapshot);
1986         p->nr = 0;
1987
1988         while (!(inode->bi_inum == BCACHEFS_ROOT_INO &&
1989                  inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)) {
1990                 struct btree_iter dirent_iter;
1991                 struct bkey_s_c_dirent d;
1992                 u32 parent_snapshot = snapshot;
1993
1994                 if (inode->bi_subvol) {
1995                         u64 inum;
1996
1997                         ret = subvol_lookup(trans, inode->bi_parent_subvol,
1998                                             &parent_snapshot, &inum);
1999                         if (ret)
2000                                 break;
2001                 }
2002
2003                 ret = lockrestart_do(trans,
2004                         PTR_ERR_OR_ZERO((d = dirent_get_by_pos(trans, &dirent_iter,
2005                                           SPOS(inode->bi_dir, inode->bi_dir_offset,
2006                                                parent_snapshot))).k));
2007                 if (ret && !bch2_err_matches(ret, ENOENT))
2008                         break;
2009
2010                 if (!ret && !dirent_points_to_inode(d, inode)) {
2011                         bch2_trans_iter_exit(trans, &dirent_iter);
2012                         ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
2013                 }
2014
2015                 if (bch2_err_matches(ret, ENOENT)) {
2016                         if (fsck_err(c, "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu",
2017                                      inode->bi_inum, snapshot,
2018                                      bch2_d_type_str(inode_d_type(inode)),
2019                                      inode->bi_nlink,
2020                                      inode->bi_dir,
2021                                      inode->bi_dir_offset))
2022                                 ret = reattach_inode(trans, inode, snapshot);
2023                         break;
2024                 }
2025
2026                 bch2_trans_iter_exit(trans, &dirent_iter);
2027
2028                 if (!S_ISDIR(inode->bi_mode))
2029                         break;
2030
2031                 ret = path_down(c, p, inode->bi_inum, snapshot);
2032                 if (ret) {
2033                         bch_err(c, "memory allocation failure");
2034                         return ret;
2035                 }
2036
2037                 snapshot = parent_snapshot;
2038
2039                 ret = lookup_inode(trans, inode->bi_dir, inode, &snapshot);
2040                 if (ret) {
2041                         /* Should have been caught in dirents pass */
2042                         bch_err(c, "error looking up parent directory: %i", ret);
2043                         break;
2044                 }
2045
2046                 if (path_is_dup(p, inode->bi_inum, snapshot)) {
2047                         struct pathbuf_entry *i;
2048
2049                         /* XXX print path */
2050                         bch_err(c, "directory structure loop");
2051
2052                         darray_for_each(*p, i)
2053                                 pr_err("%llu:%u", i->inum, i->snapshot);
2054                         pr_err("%llu:%u", inode->bi_inum, snapshot);
2055
2056                         if (!fsck_err(c, "directory structure loop"))
2057                                 return 0;
2058
2059                         ret = commit_do(trans, NULL, NULL,
2060                                               BTREE_INSERT_NOFAIL|
2061                                               BTREE_INSERT_LAZY_RW,
2062                                         remove_backpointer(trans, inode));
2063                         if (ret) {
2064                                 bch_err(c, "error removing dirent: %i", ret);
2065                                 break;
2066                         }
2067
2068                         ret = reattach_inode(trans, inode, snapshot);
2069                 }
2070         }
2071 fsck_err:
2072         if (ret)
2073                 bch_err_fn(c, ret);
2074         return ret;
2075 }
2076
2077 /*
2078  * Check for unreachable inodes, as well as loops in the directory structure:
2079  * After check_dirents(), if an inode backpointer doesn't exist that means it's
2080  * unreachable:
2081  */
2082 noinline_for_stack
2083 static int check_directory_structure(struct bch_fs *c)
2084 {
2085         struct btree_trans trans;
2086         struct btree_iter iter;
2087         struct bkey_s_c k;
2088         struct bch_inode_unpacked u;
2089         pathbuf path = { 0, };
2090         int ret;
2091
2092         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2093
2094         for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN,
2095                            BTREE_ITER_INTENT|
2096                            BTREE_ITER_PREFETCH|
2097                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
2098                 if (!bkey_is_inode(k.k))
2099                         continue;
2100
2101                 ret = bch2_inode_unpack(k, &u);
2102                 if (ret) {
2103                         /* Should have been caught earlier in fsck: */
2104                         bch_err(c, "error unpacking inode %llu: %i", k.k->p.offset, ret);
2105                         break;
2106                 }
2107
2108                 if (u.bi_flags & BCH_INODE_UNLINKED)
2109                         continue;
2110
2111                 ret = check_path(&trans, &path, &u, iter.pos.snapshot);
2112                 if (ret)
2113                         break;
2114         }
2115         bch2_trans_iter_exit(&trans, &iter);
2116         bch2_trans_exit(&trans);
2117         darray_exit(&path);
2118
2119         if (ret)
2120                 bch_err_fn(c, ret);
2121         return ret;
2122 }
2123
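/*
 * In-memory table of link counts, keyed by (inum, snapshot); built up and
 * processed one inode number range at a time so memory usage stays bounded.
 */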
2124 struct nlink_table {
2125         size_t          nr;
2126         size_t          size;
2127
2128         struct nlink {
2129                 u64     inum;
2130                 u32     snapshot;
2131                 u32     count;
2132         }               *d;
2133 };
2134
2135 static int add_nlink(struct bch_fs *c, struct nlink_table *t,
2136                      u64 inum, u32 snapshot)
2137 {
2138         if (t->nr == t->size) {
2139                 size_t new_size = max_t(size_t, 128UL, t->size * 2);
2140                 void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
2141
2142                 if (!d) {
2143                         bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
2144                                 new_size);
2145                         return -BCH_ERR_ENOMEM_fsck_add_nlink;
2146                 }
2147
2148                 if (t->d)
2149                         memcpy(d, t->d, t->size * sizeof(t->d[0]));
2150                 kvfree(t->d);
2151
2152                 t->d = d;
2153                 t->size = new_size;
2154         }
2155
2157         t->d[t->nr++] = (struct nlink) {
2158                 .inum           = inum,
2159                 .snapshot       = snapshot,
2160         };
2161
2162         return 0;
2163 }
2164
2165 static int nlink_cmp(const void *_l, const void *_r)
2166 {
2167         const struct nlink *l = _l;
2168         const struct nlink *r = _r;
2169
2170         return cmp_int(l->inum, r->inum) ?: cmp_int(l->snapshot, r->snapshot);
2171 }
2172
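/*
 * Called for each dirent: find the entries in the link table for this inode
 * number (there may be one per snapshot) and bump the count of each entry the
 * dirent references, per ref_visible().
 */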
2173 static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
2174                      struct nlink_table *links,
2175                      u64 range_start, u64 range_end, u64 inum, u32 snapshot)
2176 {
2177         struct nlink *link, key = {
2178                 .inum = inum, .snapshot = U32_MAX,
2179         };
2180
2181         if (inum < range_start || inum >= range_end)
2182                 return;
2183
2184         link = __inline_bsearch(&key, links->d, links->nr,
2185                                 sizeof(links->d[0]), nlink_cmp);
2186         if (!link)
2187                 return;
2188
2189         while (link > links->d && link[0].inum == link[-1].inum)
2190                 --link;
2191
2192         for (; link < links->d + links->nr && link->inum == inum; link++)
2193                 if (ref_visible(c, s, snapshot, link->snapshot)) {
2194                         link->count++;
2195                         if (link->snapshot >= snapshot)
2196                                 break;
2197                 }
2198 }
2199
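/*
 * First pass: walk the inodes btree, starting from @start, and record every
 * inode that can be hardlinked (non-directories with nonzero i_nlink) in the
 * link table. If we run out of memory, stop early and report where the next
 * range should begin via *end.
 */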
2200 noinline_for_stack
2201 static int check_nlinks_find_hardlinks(struct bch_fs *c,
2202                                        struct nlink_table *t,
2203                                        u64 start, u64 *end)
2204 {
2205         struct btree_trans trans;
2206         struct btree_iter iter;
2207         struct bkey_s_c k;
2208         struct bch_inode_unpacked u;
2209         int ret = 0;
2210
2211         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2212
2213         for_each_btree_key(&trans, iter, BTREE_ID_inodes,
2214                            POS(0, start),
2215                            BTREE_ITER_INTENT|
2216                            BTREE_ITER_PREFETCH|
2217                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
2218                 if (!bkey_is_inode(k.k))
2219                         continue;
2220
2221                 /* Should never fail, checked by bch2_inode_invalid: */
2222                 BUG_ON(bch2_inode_unpack(k, &u));
2223
2224                 /*
2225                  * Backpointer and directory structure checks are sufficient for
2226                  * directories, since they can't have hardlinks:
2227                  */
2228                 if (S_ISDIR(le16_to_cpu(u.bi_mode)))
2229                         continue;
2230
2231                 if (!u.bi_nlink)
2232                         continue;
2233
2234                 ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
2235                 if (ret) {
2236                         *end = k.k->p.offset;
2237                         ret = 0;
2238                         break;
2239                 }
2241         }
2242         bch2_trans_iter_exit(&trans, &iter);
2243         bch2_trans_exit(&trans);
2244
2245         if (ret)
2246                 bch_err(c, "error in fsck: btree error %i while walking inodes", ret);
2247
2248         return ret;
2249 }
2250
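/*
 * Second pass: walk all dirents and count the links pointing at inodes in the
 * current range. Dirents to directories and subvolumes are skipped; those are
 * covered by the backpointer and directory structure checks.
 */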
2251 noinline_for_stack
2252 static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
2253                                      u64 range_start, u64 range_end)
2254 {
2255         struct btree_trans trans;
2256         struct snapshots_seen s;
2257         struct btree_iter iter;
2258         struct bkey_s_c k;
2259         struct bkey_s_c_dirent d;
2260         int ret;
2261
2262         snapshots_seen_init(&s);
2263
2264         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2265
2266         for_each_btree_key(&trans, iter, BTREE_ID_dirents, POS_MIN,
2267                            BTREE_ITER_INTENT|
2268                            BTREE_ITER_PREFETCH|
2269                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
2270                 ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
2271                 if (ret)
2272                         break;
2273
2274                 switch (k.k->type) {
2275                 case KEY_TYPE_dirent:
2276                         d = bkey_s_c_to_dirent(k);
2277
2278                         if (d.v->d_type != DT_DIR &&
2279                             d.v->d_type != DT_SUBVOL)
2280                                 inc_link(c, &s, links, range_start, range_end,
2281                                          le64_to_cpu(d.v->d_inum),
2282                                          bch2_snapshot_equiv(c, d.k->p.snapshot));
2283                         break;
2284                 }
2285         }
2286         bch2_trans_iter_exit(&trans, &iter);
2287
2288         if (ret)
2289                 bch_err(c, "error in fsck: btree error %i while walking dirents", ret);
2290
2291         bch2_trans_exit(&trans);
2292         snapshots_seen_exit(&s);
2293         return ret;
2294 }
2295
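/*
 * Third pass helper: compare an inode's i_nlink against the count accumulated
 * in the link table and repair it if they differ. Returns 1 once we've walked
 * past the end of the current range.
 */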
2296 static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
2297                                      struct bkey_s_c k,
2298                                      struct nlink_table *links,
2299                                      size_t *idx, u64 range_end)
2300 {
2301         struct bch_fs *c = trans->c;
2302         struct bch_inode_unpacked u;
2303         struct nlink *link = &links->d[*idx];
2304         int ret = 0;
2305
2306         if (k.k->p.offset >= range_end)
2307                 return 1;
2308
2309         if (!bkey_is_inode(k.k))
2310                 return 0;
2311
2312         BUG_ON(bch2_inode_unpack(k, &u));
2313
2314         if (S_ISDIR(le16_to_cpu(u.bi_mode)))
2315                 return 0;
2316
2317         if (!u.bi_nlink)
2318                 return 0;
2319
2320         while ((cmp_int(link->inum, k.k->p.offset) ?:
2321                 cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
2322                 BUG_ON(*idx == links->nr);
2323                 link = &links->d[++*idx];
2324         }
2325
2326         if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c,
2327                         "inode %llu type %s has wrong i_nlink (%u, should be %u)",
2328                         u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
2329                         bch2_inode_nlink_get(&u), link->count)) {
2330                 bch2_inode_nlink_set(&u, link->count);
2331                 ret = __write_inode(trans, &u, k.k->p.snapshot);
2332         }
2333 fsck_err:
2334         return ret;
2335 }
2336
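/*
 * Third pass: walk inodes in the current range and fix any i_nlink counts that
 * don't match the link table.
 */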
2337 noinline_for_stack
2338 static int check_nlinks_update_hardlinks(struct bch_fs *c,
2339                                struct nlink_table *links,
2340                                u64 range_start, u64 range_end)
2341 {
2342         struct btree_trans trans;
2343         struct btree_iter iter;
2344         struct bkey_s_c k;
2345         size_t idx = 0;
2346         int ret = 0;
2347
2348         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2349
2350         ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes,
2351                         POS(0, range_start),
2352                         BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
2353                         NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
2354                 check_nlinks_update_inode(&trans, &iter, k, links, &idx, range_end));
2355
2356         bch2_trans_exit(&trans);
2357
2358         if (ret < 0) {
2359                 bch_err(c, "error in fsck: btree error %i while walking inodes", ret);
2360                 return ret;
2361         }
2362
2363         return 0;
2364 }
2365
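/*
 * Check i_nlink on non-directory inodes: for each range of inode numbers
 * (bounded by available memory), collect hardlinkable inodes into a table,
 * count the dirents pointing at them, and repair any i_nlink fields that don't
 * match.
 */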
2366 noinline_for_stack
2367 static int check_nlinks(struct bch_fs *c)
2368 {
2369         struct nlink_table links = { 0 };
2370         u64 this_iter_range_start, next_iter_range_start = 0;
2371         int ret = 0;
2372
2373         bch_verbose(c, "checking inode nlinks");
2374
2375         do {
2376                 this_iter_range_start = next_iter_range_start;
2377                 next_iter_range_start = U64_MAX;
2378
2379                 ret = check_nlinks_find_hardlinks(c, &links,
2380                                                   this_iter_range_start,
2381                                                   &next_iter_range_start);
                if (ret)
                        break;
2382
2383                 ret = check_nlinks_walk_dirents(c, &links,
2384                                           this_iter_range_start,
2385                                           next_iter_range_start);
2386                 if (ret)
2387                         break;
2388
2389                 ret = check_nlinks_update_hardlinks(c, &links,
2390                                          this_iter_range_start,
2391                                          next_iter_range_start);
2392                 if (ret)
2393                         break;
2394
2395                 links.nr = 0;
2396         } while (next_iter_range_start != U64_MAX);
2397
2398         kvfree(links.d);
2399
2400         if (ret)
2401                 bch_err_fn(c, ret);
2402         return ret;
2403 }
2404
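/*
 * Zero out the front_pad/back_pad fields of a reflink_p key: on filesystems
 * from before the reflink_p_fix metadata version these fields may not be
 * valid, so fix_reflink_p() clears them.
 */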
2405 static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
2406                              struct bkey_s_c k)
2407 {
2408         struct bkey_s_c_reflink_p p;
2409         struct bkey_i_reflink_p *u;
2410         int ret;
2411
2412         if (k.k->type != KEY_TYPE_reflink_p)
2413                 return 0;
2414
2415         p = bkey_s_c_to_reflink_p(k);
2416
2417         if (!p.v->front_pad && !p.v->back_pad)
2418                 return 0;
2419
2420         u = bch2_trans_kmalloc(trans, sizeof(*u));
2421         ret = PTR_ERR_OR_ZERO(u);
2422         if (ret)
2423                 return ret;
2424
2425         bkey_reassemble(&u->k_i, k);
2426         u->v.front_pad  = 0;
2427         u->v.back_pad   = 0;
2428
2429         return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
2430 }
2431
2432 noinline_for_stack
2433 static int fix_reflink_p(struct bch_fs *c)
2434 {
2435         struct btree_iter iter;
2436         struct bkey_s_c k;
2437         int ret;
2438
2439         if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
2440                 return 0;
2441
2442         bch_verbose(c, "fixing reflink_p keys");
2443
2444         ret = bch2_trans_run(c,
2445                 for_each_btree_key_commit(&trans, iter,
2446                                 BTREE_ID_extents, POS_MIN,
2447                                 BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
2448                                 BTREE_ITER_ALL_SNAPSHOTS, k,
2449                                 NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
2450                         fix_reflink_p_key(&trans, &iter, k)));
2451
2452         if (ret)
2453                 bch_err_fn(c, ret);
2454         return ret;
2455 }
2456
2457 /*
2458  * Checks for inconsistencies that shouldn't happen, unless we have a bug.
2459  * Doesn't fix them yet, mainly because they haven't yet been observed:
2460  */
2461 int bch2_fsck_full(struct bch_fs *c)
2462 {
2463         int ret;
2464 again:
2465         ret =   bch2_fs_check_snapshot_trees(c) ?:
2466                 bch2_fs_check_snapshots(c) ?:
2467                 bch2_fs_check_subvols(c) ?:
2468                 bch2_delete_dead_snapshots(c) ?:
2469                 check_inodes(c, true) ?:
2470                 check_extents(c) ?:
2471                 check_dirents(c) ?:
2472                 check_xattrs(c) ?:
2473                 check_root(c) ?:
2474                 check_directory_structure(c) ?:
2475                 check_nlinks(c) ?:
2476                 fix_reflink_p(c);
2477
2478         if (bch2_err_matches(ret, BCH_ERR_need_snapshot_cleanup)) {
2479                 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
2480                 goto again;
2481         }
2482
2483         return ret;
2484 }
2485
2486 int bch2_fsck_walk_inodes_only(struct bch_fs *c)
2487 {
2488         return  bch2_fs_check_snapshots(c) ?:
2489                 bch2_fs_check_subvols(c) ?:
2490                 bch2_delete_dead_snapshots(c) ?:
2491                 check_inodes(c, false);
2492 }