From: Kent Overstreet
Date: Sat, 11 Jul 2020 20:28:54 +0000 (-0400)
Subject: bcachefs: Make copygc thread global
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=e6d1161530bcd632ad10b6aa0ad511abb146dbcc;p=linux.git

bcachefs: Make copygc thread global

Per-device copygc threads don't move data to different devices and they
make fragmentation worse - they don't make much sense anymore.

Signed-off-by: Kent Overstreet
Signed-off-by: Kent Overstreet
---

diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index b1a8192f27512..d80e1edf8c440 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -41,29 +41,26 @@ static void pd_controllers_update(struct work_struct *work)
 					struct bch_fs,
 					pd_controllers_update);
 	struct bch_dev *ca;
+	s64 free = 0, fragmented = 0;
 	unsigned i;
 
 	for_each_member_device(ca, c, i) {
 		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
 
-		u64 free = bucket_to_sector(ca,
+		free += bucket_to_sector(ca,
 				__dev_buckets_free(ca, stats)) << 9;
 		/*
 		 * Bytes of internal fragmentation, which can be
 		 * reclaimed by copy GC
 		 */
-		s64 fragmented = (bucket_to_sector(ca,
+		fragmented += max_t(s64, 0, (bucket_to_sector(ca,
 					stats.buckets[BCH_DATA_user] +
 					stats.buckets[BCH_DATA_cached]) -
 				  (stats.sectors[BCH_DATA_user] +
-				   stats.sectors[BCH_DATA_cached])) << 9;
-
-		fragmented = max(0LL, fragmented);
-
-		bch2_pd_controller_update(&ca->copygc_pd,
-					  free, fragmented, -1);
+				   stats.sectors[BCH_DATA_cached])) << 9);
 	}
 
+	bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
 	schedule_delayed_work(&c->pd_controllers_update,
 			      c->pd_controllers_update_seconds * HZ);
 }
@@ -1191,7 +1188,7 @@ stop:
 void bch2_recalc_capacity(struct bch_fs *c)
 {
 	struct bch_dev *ca;
-	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
+	u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0;
 	unsigned bucket_size_max = 0;
 	unsigned long ra_pages = 0;
 	unsigned i, j;
@@ -1234,7 +1231,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
 		dev_reserve *= ca->mi.bucket_size;
 
-		ca->copygc_threshold = dev_reserve;
+		copygc_threshold += dev_reserve;
 
 		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
 					     ca->mi.first_bucket);
@@ -1253,6 +1250,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
 	reserved_sectors = min(reserved_sectors, capacity);
 
+	c->copygc_threshold = copygc_threshold;
 	c->capacity = capacity - reserved_sectors;
 
 	c->bucket_size_max = bucket_size_max;
@@ -1312,7 +1310,7 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
 	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
 		bch2_writepoint_stop(c, ca, &c->write_points[i]);
 
-	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
+	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
 	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
 	bch2_writepoint_stop(c, ca, &c->btree_write_point);
 
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 32f7e38c086e3..3ea28a79b8c93 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -956,8 +956,9 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
 		c->open_buckets_freelist = ob - c->open_buckets;
 	}
 
-	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
-	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
+	writepoint_init(&c->btree_write_point,		BCH_DATA_btree);
+	writepoint_init(&c->rebalance_write_point,	BCH_DATA_user);
+	writepoint_init(&c->copygc_write_point,		BCH_DATA_user);
 
 	for (wp = c->write_points;
 	     wp < c->write_points + c->write_points_nr; wp++) {
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 7fdcae5fa2255..baa8801c5412a 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -452,13 +452,6 @@ struct bch_dev {
 
 	alloc_heap		alloc_heap;
 
-	/* Copying GC: */
-	struct task_struct	*copygc_thread;
-	copygc_heap		copygc_heap;
-	struct bch_pd_controller copygc_pd;
-	struct write_point	copygc_write_point;
-	u64			copygc_threshold;
-
 	atomic64_t		rebalance_work;
 
 	struct journal_device	journal;
@@ -753,6 +746,13 @@ struct bch_fs {
 	/* REBALANCE */
 	struct bch_fs_rebalance	rebalance;
 
+	/* COPYGC */
+	struct task_struct	*copygc_thread;
+	copygc_heap		copygc_heap;
+	struct bch_pd_controller copygc_pd;
+	struct write_point	copygc_write_point;
+	u64			copygc_threshold;
+
 	/* STRIPES: */
 	GENRADIX(struct stripe) stripes[2];
 
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index fde5fba2841e1..c8a57b512b772 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -2009,7 +2009,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	alloc_fifo	free[RESERVE_NR];
 	alloc_fifo	free_inc;
 	alloc_heap	alloc_heap;
-	copygc_heap	copygc_heap;
 
 	size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
 			ca->mi.bucket_size / c->opts.btree_node_size);
@@ -2018,15 +2017,13 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 7);
 	size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
 				 btree_reserve * 2);
-	bool resize = ca->buckets[0] != NULL,
-	     start_copygc = ca->copygc_thread != NULL;
+	bool resize = ca->buckets[0] != NULL;
 	int ret = -ENOMEM;
 	unsigned i;
 
 	memset(&free, 0, sizeof(free));
 	memset(&free_inc, 0, sizeof(free_inc));
 	memset(&alloc_heap, 0, sizeof(alloc_heap));
-	memset(&copygc_heap, 0, sizeof(copygc_heap));
 
 	if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
 				  nbuckets * sizeof(struct bucket),
@@ -2039,14 +2036,13 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 		       copygc_reserve, GFP_KERNEL) ||
 	    !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
 	    !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
-	    !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
-	    !init_heap(&copygc_heap, copygc_reserve, GFP_KERNEL))
+	    !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
 		goto err;
 
 	buckets->first_bucket	= ca->mi.first_bucket;
 	buckets->nbuckets	= nbuckets;
 
-	bch2_copygc_stop(ca);
+	bch2_copygc_stop(c);
 
 	if (resize) {
 		down_write(&c->gc_lock);
@@ -2089,21 +2085,13 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	/* with gc lock held, alloc_heap can't be in use: */
 	swap(ca->alloc_heap, alloc_heap);
 
-	/* and we shut down copygc: */
-	swap(ca->copygc_heap, copygc_heap);
-
 	nbuckets = ca->mi.nbuckets;
 
 	if (resize)
 		up_write(&ca->bucket_lock);
 
-	if (start_copygc &&
-	    bch2_copygc_start(c, ca))
-		bch_err(ca, "error restarting copygc thread");
-
 	ret = 0;
 err:
-	free_heap(&copygc_heap);
 	free_heap(&alloc_heap);
 	free_fifo(&free_inc);
 	for (i = 0; i < RESERVE_NR; i++)
@@ -2120,7 +2108,6 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
 {
 	unsigned i;
 
-	free_heap(&ca->copygc_heap);
 	free_heap(&ca->alloc_heap);
 	free_fifo(&ca->free_inc);
 	for (i = 0; i < RESERVE_NR; i++)
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index b64b2fc9a896c..0f7fcfe29e0e3 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -121,6 +121,7 @@ struct disk_reservation {
 };
 
 struct copygc_heap_entry {
+	u8			dev;
 	u8			gen;
 	u32			sectors;
 	u64			offset;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 135bbc102b53f..c33b58dc5c502 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -43,13 +43,6 @@
 #define COPYGC_BUCKETS_PER_ITER(ca)					\
 	((ca)->free[RESERVE_MOVINGGC].size / 2)
 
-/*
- * Max sectors to move per iteration: Have to take into account internal
- * fragmentation from the multiple write points for each generation:
- */
-#define COPYGC_SECTORS_PER_ITER(ca)					\
-	((ca)->mi.bucket_size * COPYGC_BUCKETS_PER_ITER(ca))
-
 static inline int sectors_used_cmp(copygc_heap *heap,
 				   struct copygc_heap_entry l,
 				   struct copygc_heap_entry r)
@@ -62,18 +55,22 @@ static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
 	const struct copygc_heap_entry *l = _l;
 	const struct copygc_heap_entry *r = _r;
 
-	return cmp_int(l->offset, r->offset);
+	return cmp_int(l->dev, r->dev) ?:
+	       cmp_int(l->offset, r->offset);
 }
 
-static bool __copygc_pred(struct bch_dev *ca,
-			  struct bkey_s_c k)
+static int __copygc_pred(struct bch_fs *c, struct bkey_s_c k)
 {
-	copygc_heap *h = &ca->copygc_heap;
-	const struct bch_extent_ptr *ptr =
-		bch2_bkey_has_device(k, ca->dev_idx);
-
-	if (ptr) {
-		struct copygc_heap_entry search = { .offset = ptr->offset };
+	copygc_heap *h = &c->copygc_heap;
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const struct bch_extent_ptr *ptr;
+
+	bkey_for_each_ptr(ptrs, ptr) {
+		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+		struct copygc_heap_entry search = {
+			.dev	= ptr->dev,
+			.offset	= ptr->offset
+		};
 
 		ssize_t i = eytzinger0_find_le(h->data, h->used,
 					       sizeof(h->data[0]),
@@ -89,12 +86,13 @@ static bool __copygc_pred(struct bch_dev *ca,
 		BUG_ON(i != j);
 #endif
 
-		return (i >= 0 &&
-			ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
-			ptr->gen == h->data[i].gen);
+		if (i >= 0 &&
+		    ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
+		    ptr->gen == h->data[i].gen)
+			return ptr->dev;
 	}
 
-	return false;
+	return -1;
 }
 
 static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
@@ -102,14 +100,14 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
 				 struct bch_io_opts *io_opts,
 				 struct data_opts *data_opts)
 {
-	struct bch_dev *ca = arg;
-
-	if (!__copygc_pred(ca, k))
+	int dev_idx = __copygc_pred(c, k);
+	if (dev_idx < 0)
 		return DATA_SKIP;
 
-	data_opts->target		= dev_to_target(ca->dev_idx);
+	/* XXX: use io_opts for this inode */
+	data_opts->target		= dev_to_target(dev_idx);
 	data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE;
-	data_opts->rewrite_dev		= ca->dev_idx;
+	data_opts->rewrite_dev		= dev_idx;
 
 	return DATA_REWRITE;
 }
@@ -125,20 +123,21 @@ static bool have_copygc_reserve(struct bch_dev *ca)
 	return ret;
 }
 
-static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
+static void bch2_copygc(struct bch_fs *c)
 {
-	copygc_heap *h = &ca->copygc_heap;
+	copygc_heap *h = &c->copygc_heap;
 	struct copygc_heap_entry e, *i;
 	struct bucket_array *buckets;
 	struct bch_move_stats move_stats;
 	u64 sectors_to_move = 0, sectors_not_moved = 0;
+	u64 sectors_reserved = 0;
 	u64 buckets_to_move, buckets_not_moved = 0;
-	size_t b;
+	struct bch_dev *ca;
+	unsigned dev_idx;
+	size_t b, heap_size = 0;
 	int ret;
 
 	memset(&move_stats, 0, sizeof(move_stats));
-	closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
-
 	/*
 	 * Find buckets with lowest sector counts, skipping completely
 	 * empty buckets, by building a maxheap sorted by sector count,
@@ -147,38 +146,51 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 	 */
 	h->used = 0;
 
-	/*
-	 * We need bucket marks to be up to date - gc can't be recalculating
-	 * them:
-	 */
-	down_read(&c->gc_lock);
-	down_read(&ca->bucket_lock);
-	buckets = bucket_array(ca);
-
-	for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
-		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
-		struct copygc_heap_entry e;
-
-		if (m.owned_by_allocator ||
-		    m.data_type != BCH_DATA_user ||
-		    !bucket_sectors_used(m) ||
-		    bucket_sectors_used(m) >= ca->mi.bucket_size)
-			continue;
+	for_each_rw_member(ca, c, dev_idx)
+		heap_size += ca->mi.nbuckets >> 7;
 
-		e = (struct copygc_heap_entry) {
-			.gen		= m.gen,
-			.sectors	= bucket_sectors_used(m),
-			.offset		= bucket_to_sector(ca, b),
-		};
-		heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
+	if (h->size < heap_size) {
+		free_heap(&c->copygc_heap);
+		if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
+			bch_err(c, "error allocating copygc heap");
+			return;
+		}
+	}
+
+	for_each_rw_member(ca, c, dev_idx) {
+		closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
+
+		spin_lock(&ca->fs->freelist_lock);
+		sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
+		spin_unlock(&ca->fs->freelist_lock);
+
+		down_read(&ca->bucket_lock);
+		buckets = bucket_array(ca);
+
+		for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
+			struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
+			struct copygc_heap_entry e;
+
+			if (m.owned_by_allocator ||
+			    m.data_type != BCH_DATA_user ||
+			    !bucket_sectors_used(m) ||
+			    bucket_sectors_used(m) >= ca->mi.bucket_size)
+				continue;
+
+			e = (struct copygc_heap_entry) {
+				.gen		= m.gen,
+				.sectors	= bucket_sectors_used(m),
+				.offset		= bucket_to_sector(ca, b),
+			};
+			heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
+		}
+		up_read(&ca->bucket_lock);
 	}
-	up_read(&ca->bucket_lock);
-	up_read(&c->gc_lock);
 
 	for (i = h->data; i < h->data + h->used; i++)
 		sectors_to_move += i->sectors;
 
-	while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
+	while (sectors_to_move > sectors_reserved) {
 		BUG_ON(!heap_pop(h, e, -sectors_used_cmp, NULL));
 		sectors_to_move -= e.sectors;
 	}
@@ -192,24 +204,26 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 			sizeof(h->data[0]),
 			bucket_offset_cmp, NULL);
 
-	ret = bch2_move_data(c, &ca->copygc_pd.rate,
-			     writepoint_ptr(&ca->copygc_write_point),
+	ret = bch2_move_data(c, &c->copygc_pd.rate,
+			     writepoint_ptr(&c->copygc_write_point),
 			     POS_MIN, POS_MAX,
-			     copygc_pred, ca,
+			     copygc_pred, NULL,
 			     &move_stats);
 
-	down_read(&ca->bucket_lock);
-	buckets = bucket_array(ca);
-	for (i = h->data; i < h->data + h->used; i++) {
-		size_t b = sector_to_bucket(ca, i->offset);
-		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
-
-		if (i->gen == m.gen && bucket_sectors_used(m)) {
-			sectors_not_moved += bucket_sectors_used(m);
-			buckets_not_moved++;
+	for_each_rw_member(ca, c, dev_idx) {
+		down_read(&ca->bucket_lock);
+		buckets = bucket_array(ca);
+		for (i = h->data; i < h->data + h->used; i++) {
+			size_t b = sector_to_bucket(ca, i->offset);
+			struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
+
+			if (i->gen == m.gen && bucket_sectors_used(m)) {
+				sectors_not_moved += bucket_sectors_used(m);
+				buckets_not_moved++;
+			}
 		}
+		up_read(&ca->bucket_lock);
 	}
-	up_read(&ca->bucket_lock);
 
 	if (sectors_not_moved && !ret)
 		bch_warn_ratelimited(c,
@@ -220,7 +234,7 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 			 atomic64_read(&move_stats.keys_raced),
 			 atomic64_read(&move_stats.sectors_raced));
 
-	trace_copygc(ca,
+	trace_copygc(c,
 		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
 		     buckets_to_move, buckets_not_moved);
 }
@@ -239,20 +253,27 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
  * often and continually reduce the amount of fragmented space as the device
  * fills up. So, we increase the threshold by half the current free space.
  */
-unsigned long bch2_copygc_wait_amount(struct bch_dev *ca)
+unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 {
-	struct bch_fs *c = ca->fs;
-	struct bch_dev_usage usage = bch2_dev_usage_read(c, ca);
-	u64 fragmented_allowed = ca->copygc_threshold +
-		((__dev_buckets_available(ca, usage) * ca->mi.bucket_size) >> 1);
+	struct bch_dev *ca;
+	unsigned dev_idx;
+	u64 fragmented_allowed = c->copygc_threshold;
+	u64 fragmented = 0;
+
+	for_each_rw_member(ca, c, dev_idx) {
+		struct bch_dev_usage usage = bch2_dev_usage_read(c, ca);
+
+		fragmented_allowed += ((__dev_buckets_available(ca, usage) *
+					ca->mi.bucket_size) >> 1);
+		fragmented += usage.sectors_fragmented;
+	}
 
-	return max_t(s64, 0, fragmented_allowed - usage.sectors_fragmented);
+	return max_t(s64, 0, fragmented_allowed - fragmented);
 }
 
 static int bch2_copygc_thread(void *arg)
 {
-	struct bch_dev *ca = arg;
-	struct bch_fs *c = ca->fs;
+	struct bch_fs *c = arg;
 	struct io_clock *clock = &c->io_clock[WRITE];
 	unsigned long last, wait;
 
@@ -263,7 +284,7 @@ static int bch2_copygc_thread(void *arg)
 			break;
 
 		last = atomic_long_read(&clock->now);
-		wait = bch2_copygc_wait_amount(ca);
+		wait = bch2_copygc_wait_amount(c);
 
 		if (wait > clock->max_slop) {
 			bch2_kthread_io_clock_wait(clock, last + wait,
@@ -271,29 +292,29 @@ static int bch2_copygc_thread(void *arg)
 			continue;
 		}
 
-		bch2_copygc(c, ca);
+		bch2_copygc(c);
 	}
 
 	return 0;
 }
 
-void bch2_copygc_stop(struct bch_dev *ca)
+void bch2_copygc_stop(struct bch_fs *c)
 {
-	ca->copygc_pd.rate.rate = UINT_MAX;
-	bch2_ratelimit_reset(&ca->copygc_pd.rate);
+	c->copygc_pd.rate.rate = UINT_MAX;
+	bch2_ratelimit_reset(&c->copygc_pd.rate);
 
-	if (ca->copygc_thread) {
-		kthread_stop(ca->copygc_thread);
-		put_task_struct(ca->copygc_thread);
+	if (c->copygc_thread) {
+		kthread_stop(c->copygc_thread);
+		put_task_struct(c->copygc_thread);
 	}
-	ca->copygc_thread = NULL;
+	c->copygc_thread = NULL;
 }
 
-int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
+int bch2_copygc_start(struct bch_fs *c)
 {
 	struct task_struct *t;
 
-	if (ca->copygc_thread)
+	if (c->copygc_thread)
 		return 0;
 
 	if (c->opts.nochanges)
@@ -302,21 +323,21 @@ int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
 	if (bch2_fs_init_fault("copygc_start"))
 		return -ENOMEM;
 
-	t = kthread_create(bch2_copygc_thread, ca,
-			   "bch_copygc[%s]", ca->name);
+	t = kthread_create(bch2_copygc_thread, c,
+			   "bch_copygc[%s]", c->name);
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 
 	get_task_struct(t);
 
-	ca->copygc_thread = t;
-	wake_up_process(ca->copygc_thread);
+	c->copygc_thread = t;
+	wake_up_process(c->copygc_thread);
 
 	return 0;
 }
 
-void bch2_dev_copygc_init(struct bch_dev *ca)
+void bch2_fs_copygc_init(struct bch_fs *c)
 {
-	bch2_pd_controller_init(&ca->copygc_pd);
-	ca->copygc_pd.d_term = 0;
+	bch2_pd_controller_init(&c->copygc_pd);
+	c->copygc_pd.d_term = 0;
 }
diff --git a/fs/bcachefs/movinggc.h b/fs/bcachefs/movinggc.h
index dcd479632cf11..922738247d039 100644
--- a/fs/bcachefs/movinggc.h
+++ b/fs/bcachefs/movinggc.h
@@ -2,8 +2,8 @@
 #ifndef _BCACHEFS_MOVINGGC_H
 #define _BCACHEFS_MOVINGGC_H
 
-void bch2_copygc_stop(struct bch_dev *);
-int bch2_copygc_start(struct bch_fs *, struct bch_dev *);
-void bch2_dev_copygc_init(struct bch_dev *);
+void bch2_copygc_stop(struct bch_fs *);
+int bch2_copygc_start(struct bch_fs *);
+void bch2_fs_copygc_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_MOVINGGC_H */
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index cd1033228b9cd..6dc899be5bd2f 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -181,10 +181,7 @@ static void __bch2_fs_read_only(struct bch_fs *c)
 	int ret;
 
 	bch2_rebalance_stop(c);
-
-	for_each_member_device(ca, c, i)
-		bch2_copygc_stop(ca);
-
+	bch2_copygc_stop(c);
 	bch2_gc_thread_stop(c);
 
 	/*
@@ -364,8 +361,6 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c)
 
 static int bch2_fs_read_write_late(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
 	int ret;
 
 	ret = bch2_gc_thread_start(c);
@@ -374,13 +369,10 @@ static int bch2_fs_read_write_late(struct bch_fs *c)
 		return ret;
 	}
 
-	for_each_rw_member(ca, c, i) {
-		ret = bch2_copygc_start(c, ca);
-		if (ret) {
-			bch_err(c, "error starting copygc threads");
-			percpu_ref_put(&ca->io_ref);
-			return ret;
-		}
+	ret = bch2_copygc_start(c);
+	if (ret) {
+		bch_err(c, "error starting copygc thread");
+		return ret;
 	}
 
 	ret = bch2_rebalance_start(c);
@@ -504,6 +496,7 @@ static void bch2_fs_free(struct bch_fs *c)
 	kfree(c->replicas_gc.entries);
 	kfree(rcu_dereference_protected(c->disk_groups, 1));
 	kfree(c->journal_seq_blacklist_table);
+	free_heap(&c->copygc_heap);
 
 	if (c->journal_reclaim_wq)
 		destroy_workqueue(c->journal_reclaim_wq);
@@ -652,6 +645,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	for (i = 0; i < BCH_TIME_STAT_NR; i++)
 		bch2_time_stats_init(&c->times[i]);
 
+	bch2_fs_copygc_init(c);
 	bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
 	bch2_fs_allocator_background_init(c);
 	bch2_fs_allocator_foreground_init(c);
@@ -1076,10 +1070,6 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
 
 	init_rwsem(&ca->bucket_lock);
 
-	writepoint_init(&ca->copygc_write_point, BCH_DATA_user);
-
-	bch2_dev_copygc_init(ca);
-
 	INIT_WORK(&ca->io_error_work, bch2_io_error_work);
 
 	bch2_time_stats_init(&ca->io_latency[READ]);
@@ -1318,8 +1308,6 @@ static bool bch2_fs_may_start(struct bch_fs *c)
 
 static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
 {
-	bch2_copygc_stop(ca);
-
 	/*
 	 * The allocator thread itself allocates btree nodes, so stop it first:
 	 */
@@ -1340,9 +1328,6 @@ static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
 	if (bch2_dev_allocator_start(ca))
 		return "error starting allocator thread";
 
-	if (bch2_copygc_start(c, ca))
-		return "error starting copygc thread";
-
 	return NULL;
 }
 
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index ac8cf6dcec3d2..058e2137f0c93 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -379,6 +379,7 @@ SHOW(bch2_fs)
 
 	sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
 	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */
+	sysfs_pd_controller_show(copy_gc, &c->copygc_pd);
 
 	if (attr == &sysfs_rebalance_work)
 		return bch2_rebalance_work_show(c, buf);
@@ -460,14 +461,11 @@ STORE(bch2_fs)
 	}
 
 	if (attr == &sysfs_copy_gc_enabled) {
-		struct bch_dev *ca;
-		unsigned i;
 		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
 			?: (ssize_t) size;
 
-		for_each_member_device(ca, c, i)
-			if (ca->copygc_thread)
-				wake_up_process(ca->copygc_thread);
+		if (c->copygc_thread)
+			wake_up_process(c->copygc_thread);
 
 		return ret;
 	}
@@ -482,6 +480,7 @@ STORE(bch2_fs)
 	sysfs_strtoul(pd_controllers_update_seconds,
 		      c->pd_controllers_update_seconds);
 	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);
+	sysfs_pd_controller_store(copy_gc, &c->copygc_pd);
 
 	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);
 
@@ -607,6 +606,7 @@ struct attribute *bch2_fs_internal_files[] = {
 	&sysfs_rebalance_enabled,
 	&sysfs_rebalance_work,
 	sysfs_pd_controller_files(rebalance),
+	sysfs_pd_controller_files(copy_gc),
 
 	&sysfs_new_stripes,
 
@@ -882,7 +882,7 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 		stats.sectors[BCH_DATA_cached],
 		stats.sectors_ec,
 		stats.sectors_fragmented,
-		ca->copygc_threshold,
+		c->copygc_threshold,
 		c->freelist_wait.list.first ? "waiting" : "empty",
 		c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
 		BTREE_NODE_OPEN_BUCKET_RESERVE,
@@ -949,8 +949,6 @@ SHOW(bch2_dev)
 		return out.pos - buf;
 	}
 
-	sysfs_pd_controller_show(copy_gc, &ca->copygc_pd);
-
 	if (attr == &sysfs_cache_replacement_policy) {
 		bch2_string_opt_to_text(&out,
 					bch2_cache_replacement_policies,
@@ -1004,8 +1002,6 @@ STORE(bch2_dev)
 	struct bch_fs *c = ca->fs;
 	struct bch_member *mi;
 
-	sysfs_pd_controller_store(copy_gc, &ca->copygc_pd);
-
 	if (attr == &sysfs_discard) {
 		bool v = strtoul_or_return(buf);
 
@@ -1090,8 +1086,6 @@ struct attribute *bch2_dev_files[] = {
 	/* debug: */
 	&sysfs_alloc_debug,
 	&sysfs_wake_allocator,
-
-	sysfs_pd_controller_files(copy_gc),
 	NULL
 };
 
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index d109ef174fd02..5c57b6efaaf3a 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -470,10 +470,10 @@ TRACE_EVENT(move_data,
 );
 
 TRACE_EVENT(copygc,
-	TP_PROTO(struct bch_dev *ca,
+	TP_PROTO(struct bch_fs *c,
 		 u64 sectors_moved, u64 sectors_not_moved,
 		 u64 buckets_moved, u64 buckets_not_moved),
-	TP_ARGS(ca,
+	TP_ARGS(c,
 		sectors_moved, sectors_not_moved,
 		buckets_moved, buckets_not_moved),
 
@@ -486,7 +486,7 @@ TRACE_EVENT(copygc,
 	),
 
 	TP_fast_assign(
-		memcpy(__entry->uuid, ca->uuid.b, 16);
+		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
 		__entry->sectors_moved		= sectors_moved;
 		__entry->sectors_not_moved	= sectors_not_moved;
 		__entry->buckets_moved		= buckets_moved;