bch2_btree_node_free_index(as, NULL,
bkey_i_to_s_c(&old->key),
fs_usage);
- bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res,
- gc_pos_btree_root(b->btree_id));
+ bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res);
preempt_enable();
percpu_up_read(&c->mark_lock);
bkey_disassemble(b, k, &tmp),
fs_usage);
- bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res,
- gc_pos_btree_node(b));
+ bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res);
preempt_enable();
percpu_up_read(&c->mark_lock);
bch2_btree_node_free_index(as, NULL,
bkey_i_to_s_c(&b->key),
fs_usage);
- bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res,
- gc_pos_btree_root(b->btree_id));
+ bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res);
percpu_up_read(&c->mark_lock);
mutex_unlock(&c->btree_interior_update_lock);
int bch2_fs_usage_apply(struct bch_fs *c,
struct bch_fs_usage *fs_usage,
- struct disk_reservation *disk_res,
- struct gc_pos gc_pos)
+ struct disk_reservation *disk_res)
{
s64 added = fs_usage->s.data + fs_usage->s.reserved;
s64 should_not_have_added;
acc_u64s((u64 *) this_cpu_ptr(c->usage[0]),
(u64 *) fs_usage,
sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
-
- if (gc_visited(c, gc_pos)) {
- BUG_ON(!c->usage[1]);
- acc_u64s((u64 *) this_cpu_ptr(c->usage[1]),
- (u64 *) fs_usage,
- sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
- }
preempt_enable();
return ret;
bch2_btree_node_iter_advance(&node_iter, b);
}
- if (bch2_fs_usage_apply(c, fs_usage, trans->disk_res, pos) &&
+ if (bch2_fs_usage_apply(c, fs_usage, trans->disk_res) &&
!warned_disk_usage &&
!xchg(&warned_disk_usage, 1)) {
char buf[200];
struct bch_fs_usage *, u64, unsigned);
void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
- struct disk_reservation *, struct gc_pos);
+ struct disk_reservation *);
/* disk reservations: */
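
For reference, a minimal sketch of the call pattern after this change, assuming the updated prototype shown in the header hunk above is in scope; the wrapper function name here is hypothetical and only illustrates the reduced argument list:

/*
 * Illustrative only: bch2_fs_usage_apply() now takes just the filesystem,
 * the accumulated usage delta, and the disk reservation. The gc_pos
 * argument is gone, and accumulation into the gc copy of the usage
 * counters no longer happens in this path. apply_usage_example() is a
 * hypothetical wrapper, not a function in the tree.
 */
static int apply_usage_example(struct bch_fs *c,
			       struct bch_fs_usage *fs_usage,
			       struct disk_reservation *disk_res)
{
	return bch2_fs_usage_apply(c, fs_usage, disk_res);
}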