bcachefs: Add some printks for error paths
author: Kent Overstreet <kent.overstreet@gmail.com>
Wed, 29 Apr 2020 16:57:04 +0000 (12:57 -0400)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:39 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_gc.c
fs/bcachefs/replicas.c

index e4c1b90f3cb59ec9bfecda643dbfa54604d6e355..59af44f7eab67045642452ffa8624ac513140e46 100644 (file)
@@ -701,8 +701,10 @@ static int bch2_gc_start(struct bch_fs *c,
 
        c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
                                         sizeof(u64), GFP_KERNEL);
-       if (!c->usage_gc)
+       if (!c->usage_gc) {
+               bch_err(c, "error allocating c->usage_gc");
                return -ENOMEM;
+       }
 
        for_each_member_device(ca, c, i) {
                BUG_ON(ca->buckets[1]);
@@ -713,19 +715,23 @@ static int bch2_gc_start(struct bch_fs *c,
                                GFP_KERNEL|__GFP_ZERO);
                if (!ca->buckets[1]) {
                        percpu_ref_put(&ca->ref);
+                       bch_err(c, "error allocating ca->buckets[gc]");
                        return -ENOMEM;
                }
 
                ca->usage[1] = alloc_percpu(struct bch_dev_usage);
                if (!ca->usage[1]) {
+                       bch_err(c, "error allocating ca->usage[gc]");
                        percpu_ref_put(&ca->ref);
                        return -ENOMEM;
                }
        }
 
        ret = bch2_ec_mem_alloc(c, true);
-       if (ret)
+       if (ret) {
+               bch_err(c, "error allocating ec gc mem");
                return ret;
+       }
 
        percpu_down_write(&c->mark_lock);
 
index f4851c8b8f8831f647188e9ef71257a4e1d61874..3e7c389f06ce84d13f54b290b91b04ede41435fb 100644 (file)
@@ -304,8 +304,10 @@ static int replicas_table_update(struct bch_fs *c,
        if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
            !(new_scratch  = kmalloc(scratch_bytes, GFP_NOIO)) ||
            (c->usage_gc &&
-            !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO))))
+            !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO)))) {
+               bch_err(c, "error updating replicas table: memory allocation failure");
                goto err;
+       }
 
        for (i = 0; i < ARRAY_SIZE(new_usage); i++)
                if (c->usage[i])
@@ -365,7 +367,7 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
                                struct bch_replicas_entry *new_entry)
 {
        struct bch_replicas_cpu new_r, new_gc;
-       int ret = -ENOMEM;
+       int ret = 0;
 
        verify_replicas_entry(new_entry);
 
@@ -412,14 +414,16 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
                swap(new_gc, c->replicas_gc);
        percpu_up_write(&c->mark_lock);
 out:
-       ret = 0;
-err:
        mutex_unlock(&c->sb_lock);
 
        kfree(new_r.entries);
        kfree(new_gc.entries);
 
        return ret;
+err:
+       bch_err(c, "error adding replicas entry: memory allocation failure");
+       ret = -ENOMEM;
+       goto out;
 }
 
 int bch2_mark_replicas(struct bch_fs *c,
@@ -564,6 +568,7 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
                                         GFP_NOIO);
        if (!c->replicas_gc.entries) {
                mutex_unlock(&c->sb_lock);
+               bch_err(c, "error allocating c->replicas_gc");
                return -ENOMEM;
        }
 
@@ -589,8 +594,10 @@ retry:
        nr              = READ_ONCE(c->replicas.nr);
        new.entry_size  = READ_ONCE(c->replicas.entry_size);
        new.entries     = kcalloc(nr, new.entry_size, GFP_KERNEL);
-       if (!new.entries)
+       if (!new.entries) {
+               bch_err(c, "error allocating c->replicas_gc");
                return -ENOMEM;
+       }
 
        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);