bcache: allow allocator to invalidate bucket in gc
author Dongsheng Yang <dongsheng.yang@easystack.cn>
Tue, 28 May 2024 12:09:12 +0000 (20:09 +0800)
committer Jens Axboe <axboe@kernel.dk>
Tue, 28 May 2024 12:55:59 +0000 (06:55 -0600)
Currently, if gc is running when the allocator finds free_inc empty,
the allocator has to wait for gc to finish. Until then, I/O is
blocked.

But in fact, some buckets are already reclaimable before gc runs,
and gc will never mark such buckets as unreclaimable.

So we can put these buckets into free_inc while gc is running, to
avoid blocking I/O.
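
Concretely, as the alloc.c hunk below shows, the check in
bch_can_invalidate_bucket() becomes: a bucket may be invalidated
either while the gc marks are valid (no gc in flight), or during gc
if gc itself observed the bucket as reclaimable at btree_gc_start()
and set b->reclaimable_in_gc. A condensed sketch of the new
condition:

	bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
	{
		/* Outside gc the marks are authoritative; during gc,
		 * only buckets flagged reclaimable at gc start may be
		 * invalidated.
		 */
		return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
		       ((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		       !atomic_read(&b->pin) && can_inc_bucket_gen(b));
	}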

Signed-off-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
Signed-off-by: Mingzhe Zou <mingzhe.zou@easystack.cn>
Signed-off-by: Coly Li <colyli@suse.de>
Link: https://lore.kernel.org/r/20240528120914.28705-2-colyli@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/md/bcache/alloc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c

index ce13c272c3872366eedd8a7d94894a81ea78d98c..32a46343097dd81e203eb18c2a74372bb1fef587 100644
@@ -129,12 +129,9 @@ static inline bool can_inc_bucket_gen(struct bucket *b)
 
 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
 {
-       BUG_ON(!ca->set->gc_mark_valid);
-
-       return (!GC_MARK(b) ||
-               GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
-               !atomic_read(&b->pin) &&
-               can_inc_bucket_gen(b);
+       return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
+              ((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
+              !atomic_read(&b->pin) && can_inc_bucket_gen(b));
 }
 
 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@@ -148,6 +145,7 @@ void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
        bch_inc_gen(ca, b);
        b->prio = INITIAL_PRIO;
        atomic_inc(&b->pin);
+       b->reclaimable_in_gc = 0;
 }
 
 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@@ -352,8 +350,7 @@ static int bch_allocator_thread(void *arg)
                 */
 
 retry_invalidate:
-               allocator_wait(ca, ca->set->gc_mark_valid &&
-                              !ca->invalidate_needs_gc);
+               allocator_wait(ca, !ca->invalidate_needs_gc);
                invalidate_buckets(ca);
 
                /*
index 4e6afa89921fe0b79c4a760163d66565a2092d53..1d33e40d26ea51f6e433978315df0213259a38ef 100644
@@ -200,6 +200,7 @@ struct bucket {
        uint8_t         gen;
        uint8_t         last_gc; /* Most out of date gen in the btree */
        uint16_t        gc_mark; /* Bitfield used by GC. See below for field */
+       uint16_t        reclaimable_in_gc:1;
 };
 
 /*
index d011a7154d3304d3deeb4cc0d2d48ea9728822fa..4e6ccf2c8a0bf3a048e88cba537801d6d8ec3dfd 100644
@@ -1741,18 +1741,20 @@ static void btree_gc_start(struct cache_set *c)
 
        mutex_lock(&c->bucket_lock);
 
-       c->gc_mark_valid = 0;
        c->gc_done = ZERO_KEY;
 
        ca = c->cache;
        for_each_bucket(b, ca) {
                b->last_gc = b->gen;
+               if (bch_can_invalidate_bucket(ca, b))
+                       b->reclaimable_in_gc = 1;
                if (!atomic_read(&b->pin)) {
                        SET_GC_MARK(b, 0);
                        SET_GC_SECTORS_USED(b, 0);
                }
        }
 
+       c->gc_mark_valid = 0;
        mutex_unlock(&c->bucket_lock);
 }
 
@@ -1809,6 +1811,9 @@ static void bch_btree_gc_finish(struct cache_set *c)
        for_each_bucket(b, ca) {
                c->need_gc      = max(c->need_gc, bucket_gc_gen(b));
 
+               if (b->reclaimable_in_gc)
+                       b->reclaimable_in_gc = 0;
+
                if (atomic_read(&b->pin))
                        continue;