bcachefs: Improve the nopromote tracepoint
authorKent Overstreet <kent.overstreet@linux.dev>
Wed, 20 Dec 2023 21:49:43 +0000 (16:49 -0500)
committerKent Overstreet <kent.overstreet@linux.dev>
Mon, 1 Jan 2024 16:47:42 +0000 (11:47 -0500)
Instead of returning NULL on failure, __promote_alloc() now returns the specific error code via ERR_PTR(), and a new nopromote_no_writes error is added, so the nopromote tracepoint can report exactly why a promote was not done.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/errcode.h
fs/bcachefs/io_read.c

index d7dc8f644a05c05f53a6ca9c5a7877af0b19ab30..716ff643dc85216bd9af202766560f53fb1c6763 100644
        x(BCH_ERR_nopromote,            nopromote_unwritten)                    \
        x(BCH_ERR_nopromote,            nopromote_congested)                    \
        x(BCH_ERR_nopromote,            nopromote_in_flight)                    \
+       x(BCH_ERR_nopromote,            nopromote_no_writes)                    \
        x(BCH_ERR_nopromote,            nopromote_enomem)
 
 enum bch_errcode {
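
The errcode.h hunk above adds nopromote_no_writes to an x-macro table of promote-failure subcodes that all share the BCH_ERR_nopromote class, so the tracepoint can name the exact reason while callers still match on the class. Below is a minimal, self-contained sketch of that x-macro pattern using illustrative names (MY_ERRCODES, MY_ERR_*); it is not the actual bcachefs header, which also uses the class column so an error can be matched against its parent class.

/*
 * Sketch only: expand one x-macro table into both an enum of private
 * error codes and a name table a tracepoint could print from.
 */
#include <stdio.h>

#define MY_ERRCODES()                                           \
        x(nopromote,            nopromote_unwritten)            \
        x(nopromote,            nopromote_congested)            \
        x(nopromote,            nopromote_in_flight)            \
        x(nopromote,            nopromote_no_writes)            \
        x(nopromote,            nopromote_enomem)

enum my_errcode {
        MY_ERR_START = 2048,
        MY_ERR_nopromote,
#define x(class, err)   MY_ERR_##err,
        MY_ERRCODES()
#undef x
        MY_ERR_MAX
};

static const char * const my_err_str[] = {
#define x(class, err)   [MY_ERR_##err - MY_ERR_START] = #err,
        MY_ERRCODES()
#undef x
};

int main(void)
{
        /* A tracepoint would print the specific subcode's name: */
        printf("%s\n", my_err_str[MY_ERR_nopromote_no_writes - MY_ERR_START]);
        return 0;
}
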
index 88aa004eade8692d2555b6d74025f37858959d5d..5c2d118eaf6feeba1c759596b76f1d47acb4b419 100644
@@ -172,11 +172,13 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
        int ret;
 
        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
-               return NULL;
+               return ERR_PTR(-BCH_ERR_nopromote_no_writes);
 
        op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_KERNEL);
-       if (!op)
+       if (!op) {
+               ret = -BCH_ERR_nopromote_enomem;
                goto err;
+       }
 
        op->start_time = local_clock();
        op->pos = pos;
@@ -188,23 +190,28 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
        *rbio = kzalloc(sizeof(struct bch_read_bio) +
                        sizeof(struct bio_vec) * pages,
                        GFP_KERNEL);
-       if (!*rbio)
+       if (!*rbio) {
+               ret = -BCH_ERR_nopromote_enomem;
                goto err;
+       }
 
        rbio_init(&(*rbio)->bio, opts);
        bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
 
-       if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
-                                GFP_KERNEL))
+       if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
+               ret = -BCH_ERR_nopromote_enomem;
                goto err;
+       }
 
        (*rbio)->bounce         = true;
        (*rbio)->split          = true;
        (*rbio)->kmalloc        = true;
 
        if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
-                                         bch_promote_params))
+                                         bch_promote_params)) {
+               ret = -BCH_ERR_nopromote_in_flight;
                goto err;
+       }
 
        bio = &op->write.op.wbio.bio;
        bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
@@ -223,9 +230,8 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
         * -BCH_ERR_ENOSPC_disk_reservation:
         */
        if (ret) {
-               ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
-                                       bch_promote_params);
-               BUG_ON(ret);
+               BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash,
+                                             bch_promote_params));
                goto err;
        }
 
@@ -239,7 +245,7 @@ err:
        *rbio = NULL;
        kfree(op);
        bch2_write_ref_put(c, BCH_WRITE_REF_promote);
-       return NULL;
+       return ERR_PTR(ret);
 }
 
 noinline
@@ -274,10 +280,9 @@ static struct promote_op *promote_alloc(struct btree_trans *trans,
                                  ? BTREE_ID_reflink
                                  : BTREE_ID_extents,
                                  k, pos, pick, opts, sectors, rbio);
-       if (!promote) {
-               ret = -BCH_ERR_nopromote_enomem;
+       ret = PTR_ERR_OR_ZERO(promote);
+       if (ret)
                goto nopromote;
-       }
 
        *bounce         = true;
        *read_full      = promote_full;
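
The io_read.c changes switch __promote_alloc() from returning NULL on failure to returning the reason encoded with ERR_PTR(), which promote_alloc() unpacks with PTR_ERR_OR_ZERO(). Below is a minimal userspace sketch of that kernel pattern; the helpers mirror <linux/err.h>, and fake_promote_alloc() with its error codes is a hypothetical stand-in, not a bcachefs API.

/*
 * Sketch only: encode a negative error code in the returned pointer so
 * the caller can recover the specific failure reason instead of just
 * seeing NULL.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error)        { return (void *) error; }
static inline long PTR_ERR(const void *ptr)    { return (long) ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
}
static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
        return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

/* Illustrative reason codes, not the real BCH_ERR_* values. */
enum { ERR_no_writes = 1, ERR_enomem, ERR_in_flight };

/* Hypothetical allocator: each failure path returns a distinct code. */
static void *fake_promote_alloc(int simulate_oom)
{
        void *op;

        if (simulate_oom)
                return ERR_PTR(-ERR_enomem);

        op = malloc(64);
        if (!op)
                return ERR_PTR(-ERR_enomem);
        return op;
}

int main(void)
{
        void *op = fake_promote_alloc(1);
        long ret = PTR_ERR_OR_ZERO(op);

        if (ret)        /* a tracepoint could log exactly why */
                printf("nopromote, reason code %ld\n", -ret);
        else
                free(op);
        return 0;
}

Returning ERR_PTR() rather than NULL is what lets promote_alloc() hand the tracepoint a distinct code for each failure path instead of collapsing every failure to -BCH_ERR_nopromote_enomem, as the old code did.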