struct bkey_s_c_xattr xattr;
struct posix_acl *acl = NULL;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
int ret;
mutex_lock(&inode->ei_update_lock);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
if (type == ACL_TYPE_ACCESS && acl) {
ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
unsigned i;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
bch2_mark_key(c, k, true, 0, NULL, 0,
if (k->k.p.offset >= ca->mi.nbuckets)
return 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
BTREE_ITER_INTENT);
BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
u64 journal_seq = 0;
int ret = 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
POS(ca->dev_idx, 0),
goto retry;
trans_restart();
- trace_trans_restart_btree_node_reused(c,
- iter->trans->ip);
+ trace_trans_restart_btree_node_reused(iter->trans->ip);
return ERR_PTR(-EINTR);
}
}
u8 max_stale;
int ret = 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
if (ret)
return ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, id, bkey_start_pos(&insert->k),
BTREE_ITER_SLOTS, k, ret) {
struct btree *merge[GC_MERGE_NODES];
u32 lock_seq[GC_MERGE_NODES];
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
/*
* XXX: We don't have a good way of positively matching on sibling nodes
struct btree_iter *iter;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
BTREE_MAX_DEPTH, b->c.level, 0);
if (unlikely(!ret)) {
trans_restart();
- trace_trans_restart_would_deadlock(iter->trans->c,
- iter->trans->ip);
+ trace_trans_restart_would_deadlock(iter->trans->ip);
return false;
}
return ret;
}
-int bch2_trans_realloc_iters(struct btree_trans *trans,
+static int bch2_trans_realloc_iters(struct btree_trans *trans,
-			     unsigned new_size)
+				    unsigned new_size)
{
void *new_iters, *new_updates;
if (trans->iters_live) {
trans_restart();
- trace_trans_restart_iters_realloced(trans->c, trans->ip);
+ trace_trans_restart_iters_realloced(trans->ip, trans->size);
return -EINTR;
}
return 0;
}
-void bch2_trans_preload_iters(struct btree_trans *trans)
-{
- bch2_trans_realloc_iters(trans, BTREE_ITER_MAX);
-}
-
static int btree_trans_iter_alloc(struct btree_trans *trans)
{
unsigned idx = __ffs64(~trans->iters_linked);
return &trans->iters[idx];
}
-void *bch2_trans_kmalloc(struct btree_trans *trans,
- size_t size)
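+/*
+ * Ensure @size bytes of memory are preallocated for bch2_trans_kmalloc():
+ * growing a buffer that was already in use returns -EINTR (transaction
+ * restart), since krealloc() may move it and invalidate pointers handed
+ * out earlier.
+ */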
+static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size)
{
- void *ret;
-
- if (trans->mem_top + size > trans->mem_bytes) {
+ if (size > trans->mem_bytes) {
size_t old_bytes = trans->mem_bytes;
- size_t new_bytes = roundup_pow_of_two(trans->mem_top + size);
+ size_t new_bytes = roundup_pow_of_two(size);
void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
if (!new_mem)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
trans->mem = new_mem;
trans->mem_bytes = new_bytes;
if (old_bytes) {
trans_restart();
- trace_trans_restart_mem_realloced(trans->c, trans->ip);
- return ERR_PTR(-EINTR);
+ trace_trans_restart_mem_realloced(trans->ip, new_bytes);
+ return -EINTR;
}
}
- ret = trans->mem + trans->mem_top;
+ return 0;
+}
+
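+/*
+ * Bump allocator for per-transaction scratch memory; the returned pointer
+ * is only valid for the duration of the transaction, and may instead be
+ * ERR_PTR(-EINTR) if the buffer had to be grown.
+ */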
+void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
+{
+ void *p;
+ int ret;
+
+ ret = bch2_trans_preload_mem(trans, trans->mem_top + size);
+ if (ret)
+ return ERR_PTR(ret);
+
+ p = trans->mem + trans->mem_top;
trans->mem_top += size;
- return ret;
+ return p;
}
inline void bch2_trans_unlink_iters(struct btree_trans *trans, u64 iters)
bch2_btree_iter_traverse_all(trans);
}
-void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c)
+void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
+ unsigned expected_nr_iters,
+ size_t expected_mem_bytes)
{
memset(trans, 0, offsetof(struct btree_trans, iters_onstack));
trans->size = ARRAY_SIZE(trans->iters_onstack);
trans->iters = trans->iters_onstack;
trans->updates = trans->updates_onstack;
+
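+	/*
+	 * Preallocation is best effort: errors are ignored here, since both
+	 * iterators and scratch memory fall back to allocating on demand
+	 * (which may then restart the transaction) if the hints turn out to
+	 * be too small:
+	 */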
+ if (expected_nr_iters > trans->size)
+ bch2_trans_realloc_iters(trans, expected_nr_iters);
+
+ if (expected_mem_bytes)
+ bch2_trans_preload_mem(trans, expected_mem_bytes);
}
int bch2_trans_exit(struct btree_trans *trans)
/* new multiple iterator interface: */
-int bch2_trans_realloc_iters(struct btree_trans *, unsigned);
-void bch2_trans_preload_iters(struct btree_trans *);
-
int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);
int bch2_trans_iter_free_on_commit(struct btree_trans *, struct btree_iter *);
}
void *bch2_trans_kmalloc(struct btree_trans *, size_t);
-void bch2_trans_init(struct btree_trans *, struct bch_fs *);
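+/*
+ * The trailing arguments are preallocation hints for the expected number of
+ * iterators and bytes of scratch memory; pass 0, 0 to size on demand:
+ */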
+void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
int bch2_trans_exit(struct btree_trans *);
#ifdef TRACE_TRANSACTION_RESTARTS
struct btree_trans trans; \
int _ret; \
\
- bch2_trans_init(&trans, (_c)); \
+ bch2_trans_init(&trans, (_c), 0, 0); \
\
do { \
bch2_trans_begin(&trans); \
* instead of locking/reserving all the way to the root:
*/
if (!bch2_btree_iter_upgrade(iter, U8_MAX)) {
- trace_trans_restart_iter_upgrade(c, iter->trans->ip);
+ trace_trans_restart_iter_upgrade(trans->ip);
ret = -EINTR;
goto out;
}
if (!bch2_trans_relock(trans)) {
trans_restart(" (iter relock after journal preres get blocked)");
- trace_trans_restart_journal_preres_get(c, trans->ip);
+ trace_trans_restart_journal_preres_get(trans->ip);
return -EINTR;
}
ret = bch2_trans_mark_update(trans, i,
&trans->fs_usage_deltas);
if (ret == -EINTR)
- trace_trans_restart_mark(c, trans->ip);
+ trace_trans_restart_mark(trans->ip);
if (ret)
return ret;
}
if (race_fault()) {
ret = -EINTR;
trans_restart(" (race)");
- trace_trans_restart_fault_inject(c, trans->ip);
+ trace_trans_restart_fault_inject(trans->ip);
goto out;
}
ret == -EINTR ||
(flags & BTREE_INSERT_NOUNLOCK)) {
trans_restart(" (split)");
- trace_trans_restart_btree_node_split(c, trans->ip);
+ trace_trans_restart_btree_node_split(trans->ip);
ret = -EINTR;
}
break;
return 0;
trans_restart(" (iter relock after marking replicas)");
- trace_trans_restart_mark_replicas(c, trans->ip);
+ trace_trans_restart_mark_replicas(trans->ip);
ret = -EINTR;
break;
case BTREE_INSERT_NEED_JOURNAL_RES:
return 0;
trans_restart(" (iter relock after journal res get blocked)");
- trace_trans_restart_journal_res_get(c, trans->ip);
+ trace_trans_restart_journal_res_get(trans->ip);
ret = -EINTR;
break;
default:
if (ret2) {
trans_restart(" (traverse)");
- trace_trans_restart_traverse(c, trans->ip);
+ trace_trans_restart_traverse(trans->ip);
return ret2;
}
return 0;
trans_restart(" (atomic)");
- trace_trans_restart_atomic(c, trans->ip);
+ trace_trans_restart_atomic(trans->ip);
}
return ret;
if (!bch2_btree_iter_upgrade(i->iter, 1)) {
trans_restart(" (failed upgrade, locks_want %u uptodate %u)",
old_locks_want, old_uptodate);
- trace_trans_restart_upgrade(c, trans->ip);
+ trace_trans_restart_upgrade(trans->ip);
ret = -EINTR;
goto err;
}
struct btree_iter *iter;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
BTREE_ITER_INTENT);
bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
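+	/* -EINTR means the transaction was restarted; retry from the top: */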
+ if (ret == -EINTR)
+ goto retry;
bch2_trans_exit(&trans);
return ret;
struct btree_iter *iter;
int ret = 0;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ /*
+ * XXX: whether we need mem/more iters depends on whether this btree id
+ * has triggers
+ */
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
iter = bch2_trans_get_iter(&trans, id, start, BTREE_ITER_INTENT);
if (!i->size)
return i->ret;
- bch2_trans_init(&trans, i->c);
+ bch2_trans_init(&trans, i->c, 0, 0);
iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH);
k = bch2_btree_iter_peek(iter);
if (!i->size || !bkey_cmp(POS_MAX, i->from))
return i->ret;
- bch2_trans_init(&trans, i->c);
+ bch2_trans_init(&trans, i->c, 0, 0);
for_each_btree_node(&trans, iter, i->id, i->from, 0, b) {
bch2_btree_node_to_text(&PBUF(i->buf), i->c, b);
if (!i->size)
return i->ret;
- bch2_trans_init(&trans, i->c);
+ bch2_trans_init(&trans, i->c, 0, 0);
iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH);
struct bkey_s_c k;
u64 inum = 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_hash_lookup(&trans, bch2_dirent_hash_desc,
hash_info, dir_inum, name, 0);
if (!dir_emit_dots(file, ctx))
return 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
POS(inode->v.i_ino, ctx->pos), 0, k, ret) {
if (!buf)
return -ENOMEM;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC,
POS(0, stripe_idx),
struct bkey_s_c k;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
BKEY_PADDED(k) tmp;
int ret = 0, dev, idx;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
bkey_start_pos(pos),
new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
BUG_ON(!new_key);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
if (ret)
return ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k, ret)
bch2_mark_key(c, k, true, 0, NULL, 0, 0);
size_t i, idx = 0;
int ret = 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0);
end.offset += size;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
BTREE_ITER_SLOTS, k, err) {
BUG_ON(k->k.p.inode != inode->v.i_ino);
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
iter = bch2_trans_get_iter(&trans,
BTREE_ID_EXTENTS,
ret = readpages_iter_init(&readpages_iter, ractl);
BUG_ON(ret);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
BTREE_ITER_SLOTS);
rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
bio_add_page_contig(&rbio->bio, page);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
BTREE_ITER_SLOTS);
struct bkey_s_c k;
int ret = 0;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, start,
BTREE_ITER_INTENT);
struct bkey_s_c k;
int ret = 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
if ((offset | len) & (block_bytes(c) - 1))
return -EINVAL;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
/*
* We need i_mutex to keep the page cache consistent with the extents
unsigned replicas = io_opts(c, inode).data_replicas;
int ret;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
inode_lock(&inode->v);
inode_dio_wait(&inode->v);
if (offset >= isize)
return -ENXIO;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
if (offset >= isize)
return -ENXIO;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9),
struct bch_inode_unpacked inode_u;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
if (!tmpfile)
mutex_lock(&dir->ei_update_lock);
- bch2_trans_init(&trans, c);
- bch2_trans_realloc_iters(&trans, 8);
+ bch2_trans_init(&trans, c, 8, 1024);
retry:
bch2_trans_begin(&trans);
int ret;
mutex_lock(&inode->ei_update_lock);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 4, 1024);
retry:
bch2_trans_begin(&trans);
int ret;
bch2_lock_inodes(dir, inode);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 4, 1024);
retry:
bch2_trans_begin(&trans);
return ret;
}
+ bch2_trans_init(&trans, c, 8, 2048);
+
bch2_lock_inodes(i.src_dir,
i.dst_dir,
i.src_inode,
i.dst_inode);
- bch2_trans_init(&trans, c);
-
if (S_ISDIR(i.src_inode->v.i_mode) &&
inode_attrs_changing(i.dst_dir, i.src_inode)) {
ret = -EXDEV;
if (ret)
goto err;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
kfree(acl);
if (start + len < start)
return -EINVAL;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(ei->v.i_ino, start >> 9), 0, k, ret)
u64 i_sectors;
int ret = 0;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
bch_verbose(c, "checking extents");
bch_verbose(c, "checking dirents");
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
hash_check_init(&h);
hash_check_init(&h);
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
POS(BCACHEFS_ROOT_INO, 0), 0);
u64 d_inum;
int ret = 0;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
bch_verbose(c, "checking directory structure");
u64 d_inum;
int ret;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
inc_link(c, links, range_start, range_end, BCACHEFS_ROOT_INO, false);
int ret = 0, ret2 = 0;
u64 nlinks_pos;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
POS(range_start, 0), 0);
struct bkey_s_c_inode inode;
int ret;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_inode)
if (ret)
return ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(inode_nr, 0),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
BUG_ON(bch2_keylist_empty(keys));
bch2_verify_keylist_sorted(keys);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
bkey_start_pos(&bch2_keylist_front(keys)->k),
flags &= ~BCH_READ_LAST_FRAGMENT;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
rbio->pos, BTREE_ITER_SLOTS);
struct bkey_s_c k;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
flags &= ~BCH_READ_LAST_FRAGMENT;
flags |= BCH_READ_MUST_CLONE;
if (rbio->pick.crc.compression_type)
return;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
BCH_READ_USER_MAPPED;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
BUG_ON(rbio->_state);
BUG_ON(flags & BCH_READ_NODECODE);
unsigned i, nr, new_nr;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < BTREE_ID_NR; i++) {
struct btree_iter *iter;
BKEY_PADDED(key) tmp;
int ret = 0;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
POS_MIN, BTREE_ITER_PREFETCH);
if (flags & BCH_FORCE_IF_METADATA_LOST)
return -EINVAL;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
closure_init_stack(&cl);
for (id = 0; id < BTREE_ID_NR; id++) {
struct keylist *keys = &op->insert_keys;
int ret = 0;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
bkey_start_pos(&bch2_keylist_front(keys)->k),
INIT_LIST_HEAD(&ctxt.reads);
init_waitqueue_head(&ctxt.wait);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
stats->data_type = BCH_DATA_USER;
stats->btree_id = BTREE_ID_EXTENTS;
enum data_cmd cmd;
int ret = 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
stats->data_type = BCH_DATA_BTREE;
struct bkey_s_c k;
int ret = 0;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_QUOTAS, POS(type, 0),
BTREE_ITER_PREFETCH, k, ret) {
return ret;
}
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
bkey_quota_init(&new_quota.k_i);
new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_QUOTAS, new_quota.k.p,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
bool split_compressed = false;
int ret;
- bch2_trans_init(&trans, c);
- bch2_trans_preload_iters(&trans);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
retry:
bch2_trans_begin(&trans);
if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
if (k.k->type == KEY_TYPE_extent) {
bkey_cookie_init(&k.k_i);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
BTREE_ITER_INTENT);
bkey_cookie_init(&k.k_i);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
BTREE_ITER_INTENT);
u64 i;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
delete_test_keys(c);
u64 i;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
delete_test_keys(c);
u64 i;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
delete_test_keys(c);
u64 i;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
delete_test_keys(c);
struct btree_iter *iter;
struct bkey_s_c k;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN, 0);
struct btree_iter *iter;
struct bkey_s_c k;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);
struct bkey_s_c k;
u64 i;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < nr; i++) {
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
int ret;
u64 i;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < nr; i++) {
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
bkey_cookie_init(&insert.k_i);
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
struct bkey_s_c k;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret)
;
struct bkey_s_c k;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_INTENT, k, ret) {
);
DECLARE_EVENT_CLASS(transaction_restart,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip),
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip),
TP_STRUCT__entry(
- __array(char, name, 16)
__field(unsigned long, ip )
),
TP_fast_assign(
- memcpy(__entry->name, c->name, 16);
__entry->ip = ip;
),
);
DEFINE_EVENT(transaction_restart, trans_restart_btree_node_reused,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_would_deadlock,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
-DEFINE_EVENT(transaction_restart, trans_restart_iters_realloced,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+TRACE_EVENT(trans_restart_iters_realloced,
+ TP_PROTO(unsigned long ip, unsigned nr),
+ TP_ARGS(ip, nr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, ip )
+ __field(unsigned, nr )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->nr = nr;
+ ),
+
+ TP_printk("%pS nr %u", (void *) __entry->ip, __entry->nr)
);
-DEFINE_EVENT(transaction_restart, trans_restart_mem_realloced,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+TRACE_EVENT(trans_restart_mem_realloced,
+ TP_PROTO(unsigned long ip, unsigned long bytes),
+ TP_ARGS(ip, bytes),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, ip )
+ __field(unsigned long, bytes )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->bytes = bytes;
+ ),
+
+ TP_printk("%pS bytes %lu", (void *) __entry->ip, __entry->bytes)
);
DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_fault_inject,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_btree_node_split,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_mark,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_upgrade,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_iter_upgrade,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_traverse,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_atomic,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip)
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
);
DECLARE_EVENT_CLASS(node_lock_fail,
struct bkey_s_c_xattr xattr;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
iter = bch2_hash_lookup(&trans, bch2_xattr_hash_desc,
&inode->ei_str_hash, inode->v.i_ino,
u64 inum = dentry->d_inode->i_ino;
int ret;
- bch2_trans_init(&trans, c);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
POS(inum, 0), 0, k, ret) {