struct bch_fs,
pd_controllers_update);
struct bch_dev *ca;
+ s64 free = 0, fragmented = 0;
unsigned i;
for_each_member_device(ca, c, i) {
struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
- u64 free = bucket_to_sector(ca,
+ free += bucket_to_sector(ca,
__dev_buckets_free(ca, stats)) << 9;
/*
* Bytes of internal fragmentation, which can be
* reclaimed by copy GC
*/
- s64 fragmented = (bucket_to_sector(ca,
+ fragmented += max_t(s64, 0, (bucket_to_sector(ca,
stats.buckets[BCH_DATA_user] +
stats.buckets[BCH_DATA_cached]) -
(stats.sectors[BCH_DATA_user] +
- stats.sectors[BCH_DATA_cached])) << 9;
-
- fragmented = max(0LL, fragmented);
-
- bch2_pd_controller_update(&ca->copygc_pd,
- free, fragmented, -1);
+ stats.sectors[BCH_DATA_cached])) << 9);
}
+ bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
schedule_delayed_work(&c->pd_controllers_update,
c->pd_controllers_update_seconds * HZ);
}
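For reference, a minimal userspace sketch of the accounting above, with made-up device numbers and simplified stand-in types instead of bch_dev_usage and bucket_to_sector(). Free space and reclaimable fragmentation are summed across member devices; the per-device clamp mirrors the max_t() in the patch, so a transiently negative value on one device cannot cancel out real fragmentation on another, and the two totals then feed the single filesystem-wide PD controller.

#include <stdint.h>
#include <stdio.h>

struct dev_usage {
	uint64_t free_buckets;	/* buckets with no live data */
	uint64_t data_buckets;	/* user + cached buckets */
	uint64_t data_sectors;	/* live sectors in those buckets */
};

int main(void)
{
	/* two hypothetical devices, 128-sector (64 KiB) buckets */
	struct dev_usage devs[] = {
		{ .free_buckets = 1000, .data_buckets = 500, .data_sectors = 40000 },
		{ .free_buckets = 2000, .data_buckets = 300, .data_sectors = 38500 },
	};
	const uint64_t bucket_sectors = 128;
	int64_t free = 0, fragmented = 0;

	for (unsigned i = 0; i < 2; i++) {
		/* bytes of free space on this device */
		free += (devs[i].free_buckets * bucket_sectors) << 9;

		/* sectors reclaimable by copygc, clamped at 0 per device */
		int64_t f = (int64_t)(devs[i].data_buckets * bucket_sectors) -
			    (int64_t)devs[i].data_sectors;
		if (f < 0)
			f = 0;
		fragmented += f << 9;	/* sectors -> bytes */
	}

	printf("free=%lld fragmented=%lld\n",
	       (long long)free, (long long)fragmented);
	return 0;
}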
void bch2_recalc_capacity(struct bch_fs *c)
{
struct bch_dev *ca;
- u64 capacity = 0, reserved_sectors = 0, gc_reserve;
+ u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0;
unsigned bucket_size_max = 0;
unsigned long ra_pages = 0;
unsigned i, j;
dev_reserve *= ca->mi.bucket_size;
- ca->copygc_threshold = dev_reserve;
+ copygc_threshold += dev_reserve;
capacity += bucket_to_sector(ca, ca->mi.nbuckets -
ca->mi.first_bucket);
reserved_sectors = min(reserved_sectors, capacity);
+ c->copygc_threshold = copygc_threshold;
c->capacity = capacity - reserved_sectors;
c->bucket_size_max = bucket_size_max;
for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
bch2_writepoint_stop(c, ca, &c->write_points[i]);
- bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
+ bch2_writepoint_stop(c, ca, &c->copygc_write_point);
bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
bch2_writepoint_stop(c, ca, &c->btree_write_point);
alloc_fifo free[RESERVE_NR];
alloc_fifo free_inc;
alloc_heap alloc_heap;
- copygc_heap copygc_heap;
size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
ca->mi.bucket_size / c->opts.btree_node_size);
size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 7);
size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
btree_reserve * 2);
- bool resize = ca->buckets[0] != NULL,
- start_copygc = ca->copygc_thread != NULL;
+ bool resize = ca->buckets[0] != NULL;
int ret = -ENOMEM;
unsigned i;
memset(&free, 0, sizeof(free));
memset(&free_inc, 0, sizeof(free_inc));
memset(&alloc_heap, 0, sizeof(alloc_heap));
- memset(&copygc_heap, 0, sizeof(copygc_heap));
if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
nbuckets * sizeof(struct bucket),
copygc_reserve, GFP_KERNEL) ||
!init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
!init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
- !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
- !init_heap(&copygc_heap, copygc_reserve, GFP_KERNEL))
+ !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
goto err;
buckets->first_bucket = ca->mi.first_bucket;
buckets->nbuckets = nbuckets;
- bch2_copygc_stop(ca);
+ bch2_copygc_stop(c);
if (resize) {
down_write(&c->gc_lock);
/* with gc lock held, alloc_heap can't be in use: */
swap(ca->alloc_heap, alloc_heap);
- /* and we shut down copygc: */
- swap(ca->copygc_heap, copygc_heap);
-
nbuckets = ca->mi.nbuckets;
if (resize)
up_write(&ca->bucket_lock);
- if (start_copygc &&
- bch2_copygc_start(c, ca))
- bch_err(ca, "error restarting copygc thread");
-
ret = 0;
err:
- free_heap(&copygc_heap);
free_heap(&alloc_heap);
free_fifo(&free_inc);
for (i = 0; i < RESERVE_NR; i++)
{
unsigned i;
- free_heap(&ca->copygc_heap);
free_heap(&ca->alloc_heap);
free_fifo(&ca->free_inc);
for (i = 0; i < RESERVE_NR; i++)
#define COPYGC_BUCKETS_PER_ITER(ca) \
((ca)->free[RESERVE_MOVINGGC].size / 2)
-/*
- * Max sectors to move per iteration: Have to take into account internal
- * fragmentation from the multiple write points for each generation:
- */
-#define COPYGC_SECTORS_PER_ITER(ca) \
- ((ca)->mi.bucket_size * COPYGC_BUCKETS_PER_ITER(ca))
-
static inline int sectors_used_cmp(copygc_heap *heap,
struct copygc_heap_entry l,
struct copygc_heap_entry r)
const struct copygc_heap_entry *l = _l;
const struct copygc_heap_entry *r = _r;
- return cmp_int(l->offset, r->offset);
+ return cmp_int(l->dev, r->dev) ?:
+ cmp_int(l->offset, r->offset);
}
-static bool __copygc_pred(struct bch_dev *ca,
- struct bkey_s_c k)
+static int __copygc_pred(struct bch_fs *c, struct bkey_s_c k)
{
- copygc_heap *h = &ca->copygc_heap;
- const struct bch_extent_ptr *ptr =
- bch2_bkey_has_device(k, ca->dev_idx);
-
- if (ptr) {
- struct copygc_heap_entry search = { .offset = ptr->offset };
+ copygc_heap *h = &c->copygc_heap;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct copygc_heap_entry search = {
+ .dev = ptr->dev,
+ .offset = ptr->offset
+ };
ssize_t i = eytzinger0_find_le(h->data, h->used,
sizeof(h->data[0]),
BUG_ON(i != j);
#endif
- return (i >= 0 &&
- ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
- ptr->gen == h->data[i].gen);
+ if (i >= 0 &&
+ ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
+ ptr->gen == h->data[i].gen)
+ return ptr->dev;
}
- return false;
+ return -1;
}
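A simplified, self-contained sketch of the membership test above: a plain sorted array and a linear "find last entry <= key" stand in for the eytzinger-ordered heap and eytzinger0_find_le(), the structs are hypothetical stand-ins for copygc_heap_entry and bch_extent_ptr, and the explicit same-device comparison is added here for clarity.

#include <stdbool.h>
#include <stdint.h>
#include <sys/types.h>

struct heap_entry {
	unsigned dev;
	uint8_t	 gen;
	uint64_t offset;	/* start of the candidate bucket, in sectors */
};

struct extent_ptr {
	unsigned dev;
	uint8_t	 gen;
	uint64_t offset;
};

/* candidates are kept sorted by (dev, offset), as bucket_offset_cmp() orders them */
static ssize_t find_le(const struct heap_entry *e, size_t n,
		       unsigned dev, uint64_t offset)
{
	ssize_t ret = -1;

	for (size_t i = 0; i < n; i++)
		if (e[i].dev < dev ||
		    (e[i].dev == dev && e[i].offset <= offset))
			ret = i;
	return ret;
}

static bool ptr_in_copygc_bucket(const struct heap_entry *e, size_t n,
				 const struct extent_ptr *ptr,
				 uint64_t bucket_size)
{
	ssize_t i = find_le(e, n, ptr->dev, ptr->offset);

	return i >= 0 &&
		e[i].dev == ptr->dev &&
		ptr->offset < e[i].offset + bucket_size &&
		ptr->gen == e[i].gen;	/* a stale gen means the bucket was reused */
}

int main(void)
{
	struct heap_entry heap[] = {
		{ .dev = 0, .gen = 3, .offset = 1024 },
		{ .dev = 1, .gen = 7, .offset = 2048 },
	};
	struct extent_ptr p = { .dev = 1, .gen = 7, .offset = 2100 };

	/* bucket size 128: offset 2100 falls inside [2048, 2176) on dev 1 */
	return !ptr_in_copygc_bucket(heap, 2, &p, 128);
}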
static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
struct bch_io_opts *io_opts,
struct data_opts *data_opts)
{
- struct bch_dev *ca = arg;
-
- if (!__copygc_pred(ca, k))
+ int dev_idx = __copygc_pred(c, k);
+ if (dev_idx < 0)
return DATA_SKIP;
- data_opts->target = dev_to_target(ca->dev_idx);
+ /* XXX: use io_opts for this inode */
+ data_opts->target = dev_to_target(dev_idx);
data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE;
- data_opts->rewrite_dev = ca->dev_idx;
+ data_opts->rewrite_dev = dev_idx;
return DATA_REWRITE;
}
return ret;
}
-static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
+static void bch2_copygc(struct bch_fs *c)
{
- copygc_heap *h = &ca->copygc_heap;
+ copygc_heap *h = &c->copygc_heap;
struct copygc_heap_entry e, *i;
struct bucket_array *buckets;
struct bch_move_stats move_stats;
u64 sectors_to_move = 0, sectors_not_moved = 0;
+ u64 sectors_reserved = 0;
u64 buckets_to_move, buckets_not_moved = 0;
- size_t b;
+ struct bch_dev *ca;
+ unsigned dev_idx;
+ size_t b, heap_size = 0;
int ret;
memset(&move_stats, 0, sizeof(move_stats));
- closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
-
/*
* Find buckets with lowest sector counts, skipping completely
* empty buckets, by building a maxheap sorted by sector count,
*/
h->used = 0;
- /*
- * We need bucket marks to be up to date - gc can't be recalculating
- * them:
- */
- down_read(&c->gc_lock);
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
- struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
- struct copygc_heap_entry e;
-
- if (m.owned_by_allocator ||
- m.data_type != BCH_DATA_user ||
- !bucket_sectors_used(m) ||
- bucket_sectors_used(m) >= ca->mi.bucket_size)
- continue;
+ for_each_rw_member(ca, c, dev_idx)
+ heap_size += ca->mi.nbuckets >> 7;
- e = (struct copygc_heap_entry) {
- .gen = m.gen,
- .sectors = bucket_sectors_used(m),
- .offset = bucket_to_sector(ca, b),
- };
- heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
+ if (h->size < heap_size) {
+ free_heap(&c->copygc_heap);
+ if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
+ bch_err(c, "error allocating copygc heap");
+ return;
+ }
+ }
+
+ for_each_rw_member(ca, c, dev_idx) {
+ closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
+
+ spin_lock(&ca->fs->freelist_lock);
+ sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
+ spin_unlock(&ca->fs->freelist_lock);
+
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
+ struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
+ struct copygc_heap_entry e;
+
+ if (m.owned_by_allocator ||
+ m.data_type != BCH_DATA_user ||
+ !bucket_sectors_used(m) ||
+ bucket_sectors_used(m) >= ca->mi.bucket_size)
+ continue;
+
+ e = (struct copygc_heap_entry) {
+ .gen = m.gen,
+ .sectors = bucket_sectors_used(m),
+ .offset = bucket_to_sector(ca, b),
+ };
+ heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
+ }
+ up_read(&ca->bucket_lock);
}
- up_read(&ca->bucket_lock);
- up_read(&c->gc_lock);
for (i = h->data; i < h->data + h->used; i++)
sectors_to_move += i->sectors;
- while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
+ while (sectors_to_move > sectors_reserved) {
BUG_ON(!heap_pop(h, e, -sectors_used_cmp, NULL));
sectors_to_move -= e.sectors;
}
sizeof(h->data[0]),
bucket_offset_cmp, NULL);
- ret = bch2_move_data(c, &ca->copygc_pd.rate,
- writepoint_ptr(&ca->copygc_write_point),
+ ret = bch2_move_data(c, &c->copygc_pd.rate,
+ writepoint_ptr(&c->copygc_write_point),
POS_MIN, POS_MAX,
- copygc_pred, ca,
+ copygc_pred, NULL,
&move_stats);
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
- for (i = h->data; i < h->data + h->used; i++) {
- size_t b = sector_to_bucket(ca, i->offset);
- struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
-
- if (i->gen == m.gen && bucket_sectors_used(m)) {
- sectors_not_moved += bucket_sectors_used(m);
- buckets_not_moved++;
+ for_each_rw_member(ca, c, dev_idx) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+ for (i = h->data; i < h->data + h->used; i++) {
+ size_t b = sector_to_bucket(ca, i->offset);
+ struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
+
+ if (i->gen == m.gen && bucket_sectors_used(m)) {
+ sectors_not_moved += bucket_sectors_used(m);
+ buckets_not_moved++;
+ }
}
+ up_read(&ca->bucket_lock);
}
- up_read(&ca->bucket_lock);
if (sectors_not_moved && !ret)
bch_warn_ratelimited(c,
atomic64_read(&move_stats.keys_raced),
atomic64_read(&move_stats.sectors_raced));
- trace_copygc(ca,
+ trace_copygc(c,
atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
buckets_to_move, buckets_not_moved);
}
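The selection step in bch2_copygc() can be illustrated in isolation: keep the least-full candidate buckets, then drop the fullest ones until the remaining work fits in the movinggc reserve summed across devices. In this sketch a plain array and qsort() stand in for heap_add_or_replace()/heap_pop(), and the numbers are made up.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct candidate {
	uint64_t offset;	/* bucket start, in sectors */
	unsigned sectors_used;
};

static int by_sectors_used(const void *a, const void *b)
{
	const struct candidate *l = a, *r = b;

	return (l->sectors_used > r->sectors_used) -
	       (l->sectors_used < r->sectors_used);
}

int main(void)
{
	struct candidate c[] = {
		{ .offset = 1024, .sectors_used = 10 },
		{ .offset = 2048, .sectors_used = 90 },
		{ .offset = 3072, .sectors_used = 40 },
	};
	size_t n = sizeof(c) / sizeof(c[0]);
	uint64_t sectors_reserved = 60, sectors_to_move = 0;

	/* emptiest first: these are the cheapest buckets to evacuate */
	qsort(c, n, sizeof(c[0]), by_sectors_used);

	for (size_t i = 0; i < n; i++)
		sectors_to_move += c[i].sectors_used;

	/* drop the fullest candidates until the work fits in the reserve */
	while (n && sectors_to_move > sectors_reserved)
		sectors_to_move -= c[--n].sectors_used;

	printf("moving %zu buckets, %llu sectors\n",
	       n, (unsigned long long)sectors_to_move);
	return 0;
}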
* often and continually reduce the amount of fragmented space as the device
* fills up. So, we increase the threshold by half the current free space.
*/
-unsigned long bch2_copygc_wait_amount(struct bch_dev *ca)
+unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
- struct bch_fs *c = ca->fs;
- struct bch_dev_usage usage = bch2_dev_usage_read(c, ca);
- u64 fragmented_allowed = ca->copygc_threshold +
- ((__dev_buckets_available(ca, usage) * ca->mi.bucket_size) >> 1);
+ struct bch_dev *ca;
+ unsigned dev_idx;
+ u64 fragmented_allowed = c->copygc_threshold;
+ u64 fragmented = 0;
+
+ for_each_rw_member(ca, c, dev_idx) {
+ struct bch_dev_usage usage = bch2_dev_usage_read(c, ca);
+
+ fragmented_allowed += ((__dev_buckets_available(ca, usage) *
+ ca->mi.bucket_size) >> 1);
+ fragmented += usage.sectors_fragmented;
+ }
- return max_t(s64, 0, fragmented_allowed - usage.sectors_fragmented);
+ return max_t(s64, 0, fragmented_allowed - fragmented);
}
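A worked example of the wait calculation, with hypothetical numbers. Everything is in 512-byte sectors: copygc_threshold, __dev_buckets_available() * bucket_size and sectors_fragmented are all sector counts, and the write io_clock the thread sleeps on advances with write I/O.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t copygc_threshold	= 2 << 20;	/* 2M sectors, ~1 GiB reserved */
	int64_t buckets_avail_sectors	= 16 << 20;	/* 16M sectors, ~8 GiB free */
	int64_t sectors_fragmented	= 4 << 20;	/* 4M sectors reclaimable */

	/* allowance: the threshold plus half the remaining free space */
	int64_t fragmented_allowed = copygc_threshold +
		(buckets_avail_sectors >> 1);
	int64_t wait = fragmented_allowed - sectors_fragmented;

	if (wait < 0)
		wait = 0;

	/* 2M + 8M - 4M = 6M sectors: copygc sleeps until ~3 GiB more is written */
	printf("wait = %lld sectors\n", (long long)wait);
	return 0;
}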
static int bch2_copygc_thread(void *arg)
{
- struct bch_dev *ca = arg;
- struct bch_fs *c = ca->fs;
+ struct bch_fs *c = arg;
struct io_clock *clock = &c->io_clock[WRITE];
unsigned long last, wait;
break;
last = atomic_long_read(&clock->now);
- wait = bch2_copygc_wait_amount(ca);
+ wait = bch2_copygc_wait_amount(c);
if (wait > clock->max_slop) {
bch2_kthread_io_clock_wait(clock, last + wait,
continue;
}
- bch2_copygc(c, ca);
+ bch2_copygc(c);
}
return 0;
}
-void bch2_copygc_stop(struct bch_dev *ca)
+void bch2_copygc_stop(struct bch_fs *c)
{
- ca->copygc_pd.rate.rate = UINT_MAX;
- bch2_ratelimit_reset(&ca->copygc_pd.rate);
+ c->copygc_pd.rate.rate = UINT_MAX;
+ bch2_ratelimit_reset(&c->copygc_pd.rate);
- if (ca->copygc_thread) {
- kthread_stop(ca->copygc_thread);
- put_task_struct(ca->copygc_thread);
+ if (c->copygc_thread) {
+ kthread_stop(c->copygc_thread);
+ put_task_struct(c->copygc_thread);
}
- ca->copygc_thread = NULL;
+ c->copygc_thread = NULL;
}
-int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
+int bch2_copygc_start(struct bch_fs *c)
{
struct task_struct *t;
- if (ca->copygc_thread)
+ if (c->copygc_thread)
return 0;
if (c->opts.nochanges)
if (bch2_fs_init_fault("copygc_start"))
return -ENOMEM;
- t = kthread_create(bch2_copygc_thread, ca,
- "bch_copygc[%s]", ca->name);
+ t = kthread_create(bch2_copygc_thread, c,
+ "bch_copygc[%s]", c->name);
if (IS_ERR(t))
return PTR_ERR(t);
get_task_struct(t);
- ca->copygc_thread = t;
- wake_up_process(ca->copygc_thread);
+ c->copygc_thread = t;
+ wake_up_process(c->copygc_thread);
return 0;
}
-void bch2_dev_copygc_init(struct bch_dev *ca)
+void bch2_fs_copygc_init(struct bch_fs *c)
{
- bch2_pd_controller_init(&ca->copygc_pd);
- ca->copygc_pd.d_term = 0;
+ bch2_pd_controller_init(&c->copygc_pd);
+ c->copygc_pd.d_term = 0;
}