}
new = kmalloc(bkey_bytes(&cur.k->k), GFP_KERNEL);
- if (!new)
+ if (!new) {
+ bch_err(c, "%s: error allocating new key", __func__);
return -ENOMEM;
+ }
bkey_copy(new, cur.k);
}
new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
- if (!new)
+ if (!new) {
+ bch_err(c, "%s: error allocating new key", __func__);
return -ENOMEM;
+ }
bkey_reassemble(new, *k);
"superblock not marked as containing replicas (type %u)",
k.k->type)) {
ret = bch2_mark_bkey_replicas(c, k);
- if (ret)
- return ret;
+ if (ret) {
+ bch_err(c, "error marking bkey replicas: %i", ret);
+ goto err;
+ }
}
ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, &k);
bch2_mark_key(c, k, 0, k.k->size, NULL, 0, flags);
fsck_err:
+err:
+ if (ret)
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
}
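The fix applied in this hunk (and through the rest of the patch) has one shape: report the failure with context at the point it is detected, then let the pre-existing fsck_err label and the new err label converge on a single "%s: ret %i" report before returning. A compilable userspace sketch of that control flow is below; log_err() and the two helper checks are stand-ins for illustration only, not bcachefs APIs.

#include <stdio.h>
#include <errno.h>

/* Stand-in logger for illustration; bch_err() is the real helper in bcachefs. */
#define log_err(...) do { fprintf(stderr, __VA_ARGS__); fputc('\n', stderr); } while (0)

/* Hypothetical checks, only here to exercise both error paths. */
static int mark_replicas(int key)   { return key < 0 ? -EINVAL : 0; }
static int check_fix_ptrs(int key)  { return key == 0 ? -ENOENT : 0; }

static int gc_mark_key_sketch(int key)
{
        int ret;

        ret = mark_replicas(key);
        if (ret) {
                log_err("error marking bkey replicas: %i", ret);
                goto err;
        }

        ret = check_fix_ptrs(key);
        if (ret)
                goto fsck_err;

        /* ... mark the key ... */
fsck_err:
err:
        if (ret)
                log_err("%s: ret %i", __func__, ret);
        return ret;
}

int main(void)
{
        return gc_mark_key_sketch(-1) ? 1 : 0;
}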
ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
k, &max_stale, true);
- if (ret)
+ if (ret) {
+ bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret);
break;
+ }
if (b->c.level) {
bch2_bkey_buf_reassemble(&cur, c, k);
continue;
}
- if (ret)
+ if (ret) {
+ bch_err(c, "%s: error %i getting btree node",
+ __func__, ret);
break;
+ }
ret = bch2_gc_btree_init_recurse(c, child,
target_depth);
fsck_err:
six_unlock_read(&b->c.lock);
+ if (ret)
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
}
int ret = initial
? bch2_gc_btree_init(c, id)
: bch2_gc_btree(c, id, initial);
- if (ret)
+ if (ret) {
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
+ }
}
return 0;
#undef copy_stripe_field
#undef copy_field
fsck_err:
+ if (ret)
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
}
BUG_ON(c->gc_thread);
p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
- if (IS_ERR(p))
+ if (IS_ERR(p)) {
+ bch_err(c, "error creating gc thread: %li", PTR_ERR(p));
return PTR_ERR(p);
+ }
get_task_struct(p);
c->gc_thread = p;
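The gc-thread hunk logs PTR_ERR(p) before propagating it. For readers less used to the kernel's error-pointer convention, the sketch below approximates what <linux/err.h> provides: a pointer whose value falls in the top 4095 addresses encodes a negative errno. create_worker() is a made-up stand-in for kthread_create(), and the helpers are simplified, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Simplified err.h-style helpers: high pointer values encode -errno. */
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long PTR_ERR(const void *p)  { return (long)p; }
static inline int IS_ERR(const void *p)    { return (uintptr_t)p >= (uintptr_t)-4095; }

/* Made-up creator standing in for kthread_create(). */
static void *create_worker(int fail)
{
        static int dummy;
        return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy;
}

static long gc_thread_start_sketch(int fail)
{
        void *p = create_worker(fail);

        if (IS_ERR(p)) {
                fprintf(stderr, "error creating gc thread: %li\n", PTR_ERR(p));
                return PTR_ERR(p);
        }
        /* ... stash the task struct and wake it ... */
        return 0;
}

int main(void)
{
        return gc_thread_start_sketch(1) ? 1 : 0;
}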
};
new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
- if (!new_keys.d)
+ if (!new_keys.d) {
+ bch_err(c, "%s: error allocating new key array (size %zu)",
+ __func__, new_keys.size);
return -ENOMEM;
+ }
memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
kvfree(keys->d);
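The allocation that gains an error message here sits in the journal-keys resize path: build a new journal_keys with double the capacity, kvmalloc the new flat array, copy the live entries across, and free the old buffer, now reporting the requested size on failure. A rough userspace equivalent of that resize step follows; struct keys and its int entries are illustrative, not the bcachefs journal_keys layout.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct keys {
        int    *d;    /* flat array of entries */
        size_t  nr;   /* entries in use */
        size_t  size; /* allocated capacity */
};

/* Double the capacity of @keys, copying the live entries across. */
static int keys_grow_sketch(struct keys *keys)
{
        struct keys new_keys = {
                .nr   = keys->nr,
                .size = keys->size ? keys->size * 2 : 8,
        };

        new_keys.d = malloc(sizeof(new_keys.d[0]) * new_keys.size);
        if (!new_keys.d) {
                fprintf(stderr, "%s: error allocating new key array (size %zu)\n",
                        __func__, new_keys.size);
                return -ENOMEM;
        }

        if (keys->nr)
                memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
        free(keys->d);
        *keys = new_keys;
        return 0;
}

int main(void)
{
        struct keys keys = { 0 };

        return keys_grow_sketch(&keys) ? 1 : 0;
}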
kmalloc(sizeof(struct bkey), GFP_KERNEL);
int ret;
- if (!whiteout)
+ if (!whiteout) {
+ bch_err(c, "%s: error allocating new key", __func__);
return -ENOMEM;
+ }
bkey_init(&whiteout->k);
whiteout->k.p = pos;
&lostfound,
0, 0, S_IFDIR|0700, 0,
NULL, NULL));
- if (ret)
+ if (ret) {
+ bch_err(c, "error creating lost+found: %i", ret);
goto err;
+ }
if (enabled_qtypes(c)) {
ret = bch2_fs_quota_read(c);