bcachefs: Move gc of bucket.oldest_gen to workqueue
Author: Kent Overstreet <kent.overstreet@linux.dev>
Sat, 20 Apr 2024 02:44:12 +0000 (22:44 -0400)
Committer: Kent Overstreet <kent.overstreet@linux.dev>
Wed, 8 May 2024 21:29:20 +0000 (17:29 -0400)
This is a nice cleanup - and we've also been having problems with
kthread creation in the mount path.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/alloc_background.c
fs/bcachefs/alloc_foreground.c
fs/bcachefs/bcachefs.h
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_gc.h
fs/bcachefs/super.c
fs/bcachefs/sysfs.c

index f07373b781745663934702fb5b2947ccf9e57491..6fa51ee16cc3368e48d738130973949f746bc8f7 100644 (file)
@@ -874,7 +874,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
                        bch2_do_invalidates(c);
 
                if (statechange(a->data_type == BCH_DATA_need_gc_gens))
-                       bch2_do_gc_gens(c);
+                       bch2_gc_gens_async(c);
        }
 
        if ((flags & BTREE_TRIGGER_gc) &&
index fb8825c4e7ad82a75cfb6b12af990975d2294cdc..6cb878f5e5ebb01cb5d68ee5e38aa85c11695785 100644 (file)
@@ -541,7 +541,7 @@ again:
                bch2_do_discards(c);
 
        if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
-               bch2_do_gc_gens(c);
+               bch2_gc_gens_async(c);
 
        if (should_invalidate_buckets(ca, *usage))
                bch2_do_invalidates(c);
index 7562446f2d2afc38aebfcaa2d9a033662a6b94c5..6e324c1fe924540c91eb369368947b1ed10b3ef9 100644 (file)
@@ -718,6 +718,7 @@ struct btree_trans_buf {
        x(discard_fast)                                                 \
        x(invalidate)                                                   \
        x(delete_dead_snapshots)                                        \
+       x(gc_gens)                                                      \
        x(snapshot_delete_pagecache)                                    \
        x(sysfs)                                                        \
        x(btree_write_buffer)
@@ -960,8 +961,7 @@ struct bch_fs {
        struct work_struct      discard_fast_work;
 
        /* GARBAGE COLLECTION */
-       struct task_struct      *gc_thread;
-       atomic_t                kick_gc;
+       struct work_struct      gc_gens_work;
        unsigned long           gc_count;
 
        enum btree_id           gc_gens_btree;
@@ -1118,7 +1118,6 @@ struct bch_fs {
        u64                     counters_on_mount[BCH_COUNTER_NR];
        u64 __percpu            *counters;
 
-       unsigned                btree_gc_periodic:1;
        unsigned                copy_gc_enabled:1;
        bool                    promote_whole_extents;
 
index 7549e806a6f48df28189ae6a664dbc95f15cd374..919fa1b9fc2e212bb06eb4f8e62fc70fccceae02 100644 (file)
@@ -1669,6 +1669,9 @@ static int gc_btree_gens_key(struct btree_trans *trans,
        struct bkey_i *u;
        int ret;
 
+       if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
+               return -EROFS;
+
        percpu_down_read(&c->mark_lock);
        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
@@ -1802,80 +1805,23 @@ err:
        return ret;
 }
 
-static int bch2_gc_thread(void *arg)
+static void bch2_gc_gens_work(struct work_struct *work)
 {
-       struct bch_fs *c = arg;
-       struct io_clock *clock = &c->io_clock[WRITE];
-       unsigned long last = atomic64_read(&clock->now);
-       unsigned last_kick = atomic_read(&c->kick_gc);
-
-       set_freezable();
-
-       while (1) {
-               while (1) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-
-                       if (kthread_should_stop()) {
-                               __set_current_state(TASK_RUNNING);
-                               return 0;
-                       }
-
-                       if (atomic_read(&c->kick_gc) != last_kick)
-                               break;
-
-                       if (c->btree_gc_periodic) {
-                               unsigned long next = last + c->capacity / 16;
-
-                               if (atomic64_read(&clock->now) >= next)
-                                       break;
-
-                               bch2_io_clock_schedule_timeout(clock, next);
-                       } else {
-                               schedule();
-                       }
-
-                       try_to_freeze();
-               }
-               __set_current_state(TASK_RUNNING);
-
-               last = atomic64_read(&clock->now);
-               last_kick = atomic_read(&c->kick_gc);
-
-               bch2_gc_gens(c);
-               debug_check_no_locks_held();
-       }
-
-       return 0;
+       struct bch_fs *c = container_of(work, struct bch_fs, gc_gens_work);
+       bch2_gc_gens(c);
+       bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
 }
 
-void bch2_gc_thread_stop(struct bch_fs *c)
+void bch2_gc_gens_async(struct bch_fs *c)
 {
-       struct task_struct *p;
-
-       p = c->gc_thread;
-       c->gc_thread = NULL;
-
-       if (p) {
-               kthread_stop(p);
-               put_task_struct(p);
-       }
+       if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) &&
+           !queue_work(c->write_ref_wq, &c->gc_gens_work))
+               bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
 }
 
-int bch2_gc_thread_start(struct bch_fs *c)
+void bch2_fs_gc_init(struct bch_fs *c)
 {
-       struct task_struct *p;
-
-       if (c->gc_thread)
-               return 0;
+       seqcount_init(&c->gc_pos_lock);
 
-       p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
-       if (IS_ERR(p)) {
-               bch_err_fn(c, PTR_ERR(p));
-               return PTR_ERR(p);
-       }
-
-       get_task_struct(p);
-       c->gc_thread = p;
-       wake_up_process(p);
-       return 0;
+       INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work);
 }
index 0d6c0a2df613e35492dc966af646d2b54791088b..15315aab93bd83b98243c4574f83bdba0b578d57 100644 (file)
@@ -7,9 +7,6 @@
 
 int bch2_check_topology(struct bch_fs *);
 int bch2_check_allocations(struct bch_fs *);
-int bch2_gc_gens(struct bch_fs *);
-void bch2_gc_thread_stop(struct bch_fs *);
-int bch2_gc_thread_start(struct bch_fs *);
 
 /*
  * For concurrent mark and sweep (with other index updates), we define a total
@@ -104,11 +101,8 @@ static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
        return ret;
 }
 
-static inline void bch2_do_gc_gens(struct bch_fs *c)
-{
-       atomic_inc(&c->kick_gc);
-       if (c->gc_thread)
-               wake_up_process(c->gc_thread);
-}
+int bch2_gc_gens(struct bch_fs *);
+void bch2_gc_gens_async(struct bch_fs *);
+void bch2_fs_gc_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_GC_H */
index adad2a7036295bf9034ccdebd320e64c03eed6d2..36833f86d50da532772d85dfbe4c0ea04fb05355 100644 (file)
@@ -264,7 +264,6 @@ static void __bch2_fs_read_only(struct bch_fs *c)
        bch2_open_buckets_stop(c, NULL, true);
        bch2_rebalance_stop(c);
        bch2_copygc_stop(c);
-       bch2_gc_thread_stop(c);
        bch2_fs_ec_flush(c);
 
        bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
@@ -486,12 +485,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
        }
 #endif
 
-       ret = bch2_gc_thread_start(c);
-       if (ret) {
-               bch_err(c, "error starting gc thread");
-               return ret;
-       }
-
        ret = bch2_journal_reclaim_start(&c->journal);
        if (ret)
                goto err;
@@ -780,6 +773,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        for (i = 0; i < BCH_TIME_STAT_NR; i++)
                bch2_time_stats_init(&c->times[i]);
 
+       bch2_fs_gc_init(c);
        bch2_fs_copygc_init(c);
        bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
        bch2_fs_btree_iter_init_early(c);
@@ -810,8 +804,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        INIT_LIST_HEAD(&c->fsck_error_msgs);
        mutex_init(&c->fsck_error_msgs_lock);
 
-       seqcount_init(&c->gc_pos_lock);
-
        seqcount_init(&c->usage_lock);
 
        sema_init(&c->io_in_flight, 128);
index 24d4c8ef25a59040473b3f04b923c616da973490..43edda74d3cbcf9df765a910385f0508f30827f5 100644 (file)
@@ -142,7 +142,6 @@ write_attribute(trigger_invalidates);
 write_attribute(trigger_journal_flush);
 write_attribute(prune_cache);
 write_attribute(btree_wakeup);
-rw_attribute(btree_gc_periodic);
 rw_attribute(gc_gens_pos);
 
 read_attribute(uuid);
@@ -408,8 +407,6 @@ SHOW(bch2_fs)
        if (attr == &sysfs_btree_write_stats)
                bch2_btree_write_stats_to_text(out, c);
 
-       sysfs_printf(btree_gc_periodic, "%u",   (int) c->btree_gc_periodic);
-
        if (attr == &sysfs_gc_gens_pos)
                bch2_gc_gens_pos_to_text(out, c);
 
@@ -485,14 +482,6 @@ STORE(bch2_fs)
 {
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
 
-       if (attr == &sysfs_btree_gc_periodic) {
-               ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
-                       ?: (ssize_t) size;
-
-               wake_up_process(c->gc_thread);
-               return ret;
-       }
-
        if (attr == &sysfs_copy_gc_enabled) {
                ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
                        ?: (ssize_t) size;