bcachefs: Extra kthread_should_stop() calls for copygc
author Kent Overstreet <kent.overstreet@linux.dev>
Tue, 28 Nov 2023 21:33:52 +0000 (16:33 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
Wed, 29 Nov 2023 03:58:23 +0000 (22:58 -0500)
This fixes a bug where going read-only was taking longer than it should
have, because copygc was forgetting to check kthread_should_stop().

Additionally: fix a missing is_kthread check in bch2_move_ratelimit().
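
For background, kthread_should_stop() expects to be called from a kernel
thread, and the move path can also be driven from non-kthread tasks, which
is why the checks are gated on PF_KTHREAD. Below is a minimal sketch of the
pattern, assuming a generic work loop; have_work() and do_work() are
hypothetical helpers, not bcachefs functions:

  #include <linux/kthread.h>
  #include <linux/sched.h>

  /*
   * Minimal sketch: only call kthread_should_stop() when current is
   * actually a kthread, and check it on every iteration so a stop
   * request (e.g. from going read-only) is noticed promptly.
   */
  static int example_work_loop(bool (*have_work)(void), void (*do_work)(void))
  {
          bool is_kthread = current->flags & PF_KTHREAD;

          while (have_work()) {
                  if (is_kthread && kthread_should_stop())
                          return 1;       /* stop requested: bail out early */

                  do_work();
          }
          return 0;
  }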

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/move.c
fs/bcachefs/movinggc.c

diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 67ac68f9dd3f1c8c91d855b7bb9aed125a3b1186..54830ee0ed886795233e939158d9b4f417d11f85 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -441,24 +441,26 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans,
 int bch2_move_ratelimit(struct moving_context *ctxt)
 {
        struct bch_fs *c = ctxt->trans->c;
+       bool is_kthread = current->flags & PF_KTHREAD;
        u64 delay;
 
        if (ctxt->wait_on_copygc && c->copygc_running) {
                bch2_moving_ctxt_flush_all(ctxt);
                wait_event_killable(c->copygc_running_wq,
                                    !c->copygc_running ||
-                                   kthread_should_stop());
+                                   (is_kthread && kthread_should_stop()));
        }
 
        do {
                delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
 
-               if ((current->flags & PF_KTHREAD) && kthread_should_stop())
+               if (is_kthread && kthread_should_stop())
                        return 1;
 
                if (delay)
                        move_ctxt_wait_event_timeout(ctxt,
-                                       freezing(current) || kthread_should_stop(),
+                                       freezing(current) ||
+                                       (is_kthread && kthread_should_stop()),
                                        delay);
 
                if (unlikely(freezing(current))) {
@@ -633,6 +635,7 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
 {
        struct btree_trans *trans = ctxt->trans;
        struct bch_fs *c = trans->c;
+       bool is_kthread = current->flags & PF_KTHREAD;
        struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
        struct btree_iter iter;
        struct bkey_buf sk;
@@ -678,6 +681,9 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
        }
 
        while (!(ret = bch2_move_ratelimit(ctxt))) {
+               if (is_kthread && kthread_should_stop())
+                       break;
+
                bch2_trans_begin(trans);
 
                ret = bch2_get_next_backpointer(trans, bucket, gen,
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 0a0576326c5b2d433fcd4aace513379972f57152..a84e79f79e5ec562fa8f9d072ef3250e60a8564f 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -207,7 +207,7 @@ static int bch2_copygc(struct moving_context *ctxt,
                goto err;
 
        darray_for_each(buckets, i) {
-               if (unlikely(freezing(current)))
+               if (kthread_should_stop() || freezing(current))
                        break;
 
                f = move_bucket_in_flight_add(buckets_in_flight, *i);