}
new->k.p = iter->pos;
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &new->k_i));
+ bch2_trans_update(trans, iter, &new->k_i);
*new_acl = acl;
acl = NULL;
err:
a->k.p = iter->pos;
bch2_alloc_pack(a, new_u);
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+ bch2_trans_update(trans, iter, &a->k_i);
ret = bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL|
a->k.p = iter->pos;
bch2_alloc_pack(a, u);
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+ bch2_trans_update(trans, iter, &a->k_i);
/*
* XXX:
return iter->flags & BTREE_ITER_TYPE;
}
-struct deferred_update {
- struct journal_preres res;
- struct journal_entry_pin journal;
-
- spinlock_t lock;
- unsigned dirty:1;
-
- u8 allocated_u64s;
- enum btree_id btree_id;
-
- /* must be last: */
- struct bkey_i k;
-};
-
struct btree_insert_entry {
struct bkey_i *k;
-
- union {
struct btree_iter *iter;
- struct deferred_update *d;
- };
-
- bool deferred;
};
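(For readability: with the union and the deferred flag gone, the entry type after this patch is just a key plus the iterator it applies to — roughly:)

	struct btree_insert_entry {
		struct bkey_i		*k;
		struct btree_iter	*iter;
	};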
#define BTREE_ITER_MAX 64
void bch2_btree_journal_key(struct btree_trans *, struct btree_iter *,
struct bkey_i *);
-void bch2_deferred_update_free(struct bch_fs *,
- struct deferred_update *);
-struct deferred_update *
-bch2_deferred_update_alloc(struct bch_fs *, enum btree_id, unsigned);
-
-#define BTREE_INSERT_ENTRY(_iter, _k) \
- ((struct btree_insert_entry) { \
- .iter = (_iter), \
- .k = (_k), \
- })
-
-#define BTREE_INSERT_DEFERRED(_d, _k) \
- ((struct btree_insert_entry) { \
- .k = (_k), \
- .d = (_d), \
- .deferred = true, \
- })
-
enum {
__BTREE_INSERT_ATOMIC,
__BTREE_INSERT_NOUNLOCK,
u64 *, unsigned);
static inline void bch2_trans_update(struct btree_trans *trans,
- struct btree_insert_entry entry)
+ struct btree_iter *iter,
+ struct bkey_i *k)
{
EBUG_ON(trans->nr_updates >= trans->nr_iters + 4);
- trans->updates[trans->nr_updates++] = entry;
+ trans->updates[trans->nr_updates++] = (struct btree_insert_entry) {
+ .iter = iter, .k = k
+ };
}
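(For context, a minimal sketch of the resulting caller pattern — it mirrors the bch2_btree_insert() conversion further down; trans, iter, k and the commit arguments are assumed from that caller:)

	iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
				   BTREE_ITER_INTENT);
	bch2_trans_update(&trans, iter, k);
	ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);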
#define bch2_trans_do(_c, _journal_seq, _flags, _do) \
_ret; \
})
-#define __trans_next_update(_trans, _i, _filter) \
-({ \
- while ((_i) < (_trans)->updates + (_trans->nr_updates) && !(_filter))\
- (_i)++; \
- \
- (_i) < (_trans)->updates + (_trans->nr_updates); \
-})
-
-#define __trans_for_each_update(_trans, _i, _filter) \
+#define trans_for_each_update(_trans, _i) \
for ((_i) = (_trans)->updates; \
- __trans_next_update(_trans, _i, _filter); \
+ (_i) < (_trans)->updates + (_trans)->nr_updates; \
(_i)++)
-#define trans_for_each_update(trans, i) \
- __trans_for_each_update(trans, i, true)
-
-#define trans_for_each_update_iter(trans, i) \
- __trans_for_each_update(trans, i, !(i)->deferred)
-
#endif /* _BCACHEFS_BTREE_UPDATE_H */
? trans->updates + trans->updates_sorted[sorted_idx - 1]
: NULL;
- return !i->deferred &&
- prev &&
+ return prev &&
i->iter->l[0].b == prev->iter->l[0].b;
}
}
}
-static inline int btree_trans_cmp(struct btree_insert_entry l,
- struct btree_insert_entry r)
-{
- return cmp_int(l.deferred, r.deferred) ?:
- btree_iter_cmp(l.iter, r.iter);
-}
-
static inline void btree_trans_sort_updates(struct btree_trans *trans)
{
struct btree_insert_entry *l, *r;
for (pos = 0; pos < nr; pos++) {
r = trans->updates + trans->updates_sorted[pos];
- if (btree_trans_cmp(*l, *r) <= 0)
+ if (btree_iter_cmp(l->iter, r->iter) <= 0)
break;
}
trace_btree_insert_key(c, b, insert->k);
}
-/* Deferred btree updates: */
-
-static void deferred_update_flush(struct journal *j,
- struct journal_entry_pin *pin,
- u64 seq)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct deferred_update *d =
- container_of(pin, struct deferred_update, journal);
- struct journal_preres res = { 0 };
- u64 tmp[32];
- struct bkey_i *k = (void *) tmp;
- int ret;
-
- if (d->allocated_u64s > ARRAY_SIZE(tmp)) {
- k = kmalloc(d->allocated_u64s * sizeof(u64), GFP_NOFS);
-
- BUG_ON(!k); /* XXX */
- }
-
- spin_lock(&d->lock);
- if (d->dirty) {
- BUG_ON(jset_u64s(d->k.k.u64s) > d->res.u64s);
-
- swap(res, d->res);
-
- BUG_ON(d->k.k.u64s > d->allocated_u64s);
-
- bkey_copy(k, &d->k);
- d->dirty = false;
- spin_unlock(&d->lock);
-
- ret = bch2_btree_insert(c, d->btree_id, k, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_JOURNAL_RESERVED);
- bch2_fs_fatal_err_on(ret && !bch2_journal_error(j),
- c, "error flushing deferred btree update: %i", ret);
-
- spin_lock(&d->lock);
- }
-
- if (!d->dirty)
- bch2_journal_pin_drop(j, &d->journal);
- spin_unlock(&d->lock);
-
- bch2_journal_preres_put(j, &res);
- if (k != (void *) tmp)
- kfree(k);
-}
-
-static void btree_insert_key_deferred(struct btree_trans *trans,
- struct btree_insert_entry *insert)
-{
- struct bch_fs *c = trans->c;
- struct journal *j = &c->journal;
- struct deferred_update *d = insert->d;
- int difference;
-
- BUG_ON(trans->flags & BTREE_INSERT_JOURNAL_REPLAY);
- BUG_ON(insert->k->u64s > d->allocated_u64s);
-
- __btree_journal_key(trans, d->btree_id, insert->k);
-
- spin_lock(&d->lock);
- BUG_ON(jset_u64s(insert->k->u64s) >
- trans->journal_preres.u64s);
-
- difference = jset_u64s(insert->k->u64s) - d->res.u64s;
- if (difference > 0) {
- trans->journal_preres.u64s -= difference;
- d->res.u64s += difference;
- }
-
- bkey_copy(&d->k, insert->k);
- d->dirty = true;
-
- bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal,
- deferred_update_flush);
- spin_unlock(&d->lock);
-}
-
-void bch2_deferred_update_free(struct bch_fs *c,
- struct deferred_update *d)
-{
- deferred_update_flush(&c->journal, &d->journal, 0);
-
- BUG_ON(journal_pin_active(&d->journal));
-
- bch2_journal_pin_flush(&c->journal, &d->journal);
- kfree(d);
-}
-
-struct deferred_update *
-bch2_deferred_update_alloc(struct bch_fs *c,
- enum btree_id btree_id,
- unsigned u64s)
-{
- struct deferred_update *d;
-
- BUG_ON(u64s > U8_MAX);
-
- d = kmalloc(offsetof(struct deferred_update, k) +
- u64s * sizeof(u64), GFP_NOFS);
- BUG_ON(!d);
-
- memset(d, 0, offsetof(struct deferred_update, k));
-
- spin_lock_init(&d->lock);
- d->allocated_u64s = u64s;
- d->btree_id = btree_id;
-
- return d;
-}
-
/* Normal update interface: */
static inline void btree_insert_entry_checks(struct btree_trans *trans,
struct btree_insert_entry *i)
{
struct bch_fs *c = trans->c;
- enum btree_id btree_id = !i->deferred
- ? i->iter->btree_id
- : i->d->btree_id;
-
- if (!i->deferred) {
- BUG_ON(i->iter->level);
- BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
- EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
- bkey_cmp(i->k->k.p, i->iter->l[0].b->key.k.p) > 0);
- EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
- !(trans->flags & BTREE_INSERT_ATOMIC));
- }
+
+ BUG_ON(i->iter->level);
+ BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
+ EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ bkey_cmp(i->k->k.p, i->iter->l[0].b->key.k.p) > 0);
+ EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ !(trans->flags & BTREE_INSERT_ATOMIC));
BUG_ON(debug_check_bkeys(c) &&
!bkey_deleted(&i->k->k) &&
- bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), btree_id));
+ bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->iter->btree_id));
}
static int bch2_trans_journal_preres_get(struct btree_trans *trans)
int ret;
trans_for_each_update(trans, i)
- if (i->deferred)
+ if (0)
u64s += jset_u64s(i->k->k.u64s);
if (!u64s)
static inline void do_btree_insert_one(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
- if (likely(!insert->deferred))
- btree_insert_key_leaf(trans, insert);
- else
- btree_insert_key_deferred(trans, insert);
+ btree_insert_key_leaf(trans, insert);
}
static inline bool update_triggers_transactional(struct btree_trans *trans,
struct btree_insert_entry *i)
{
return likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
- !i->deferred &&
btree_node_type_needs_gc(i->iter->btree_id);
}
: 0;
int ret;
- trans_for_each_update_iter(trans, i)
+ trans_for_each_update(trans, i)
BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
/*
* note: running triggers will append more updates to the list of
* updates as we're walking it:
*/
- trans_for_each_update_iter(trans, i)
+ trans_for_each_update(trans, i)
if (update_has_triggers(trans, i) &&
update_triggers_transactional(trans, i)) {
ret = bch2_trans_mark_update(trans, i->iter, i->k);
if (ret)
goto out;
- trans_for_each_update_iter(trans, i) {
+ trans_for_each_update(trans, i) {
if (!btree_node_type_needs_gc(i->iter->btree_id))
continue;
i->k->k.version = MAX_VERSION;
}
- trans_for_each_update_iter(trans, i)
+ trans_for_each_update(trans, i)
if (update_has_triggers(trans, i) &&
!update_triggers_transactional(trans, i))
bch2_mark_update(trans, i, &fs_usage->u, mark_flags);
if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
unlikely(c->gc_pos.phase))
- trans_for_each_update_iter(trans, i)
+ trans_for_each_update(trans, i)
if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
bch2_mark_update(trans, i, NULL,
mark_flags|
case BTREE_INSERT_NEED_MARK_REPLICAS:
bch2_trans_unlock(trans);
- trans_for_each_update_iter(trans, i) {
+ trans_for_each_update(trans, i) {
ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(i->k));
if (ret)
return ret;
unsigned iter;
int ret;
- trans_for_each_update_iter(trans, i) {
+ trans_for_each_update(trans, i) {
if (!bch2_btree_iter_upgrade(i->iter, 1)) {
trace_trans_restart_upgrade(trans->ip);
ret = -EINTR;
trans->nounlock = false;
- trans_for_each_update_iter(trans, i)
+ trans_for_each_update(trans, i)
bch2_btree_iter_downgrade(i->iter);
err:
/* make sure we didn't drop or screw up locks: */
iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
BTREE_ITER_INTENT);
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
+ bch2_trans_update(&trans, iter, k);
ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
if (ret == -EINTR)
break;
}
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &delete));
+ bch2_trans_update(trans, iter, &delete);
ret = bch2_trans_commit(trans, NULL, journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL);
bkey_init(&k.k);
k.k.p = iter->pos;
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &k));
+ bch2_trans_update(trans, iter, &k);
return bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|flags);
bch_err(c, "disk usage increased more than %llu sectors reserved",
disk_res_sectors);
- trans_for_each_update_iter(trans, i) {
+ trans_for_each_update(trans, i) {
struct btree_iter *iter = i->iter;
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct btree_insert_entry *i;
int ret;
- trans_for_each_update_iter(trans, i)
+ trans_for_each_update(trans, i)
if (i->iter->btree_id == btree_id &&
(btree_node_type_is_extents(btree_id)
? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
bkey_init(&new_k->k);
new_k->k.p = iter->pos;
- trans_for_each_update_iter(trans, i)
+ trans_for_each_update(trans, i)
if (i->iter == iter) {
i->k = new_k;
return new_k;
}
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, new_k));
+ bch2_trans_update(trans, iter, new_k);
return new_k;
}
* new_dst at the src position:
*/
new_dst->k.p = src_iter->pos;
- bch2_trans_update(trans,
- BTREE_INSERT_ENTRY(src_iter,
- &new_dst->k_i));
+ bch2_trans_update(trans, src_iter, &new_dst->k_i);
return 0;
} else {
/* If we're overwriting, we can't insert new_dst
}
}
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(src_iter, &new_src->k_i));
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(dst_iter, &new_dst->k_i));
+ bch2_trans_update(trans, src_iter, &new_src->k_i);
+ bch2_trans_update(trans, dst_iter, &new_dst->k_i);
return 0;
}
stripe->k.p = iter->pos;
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &stripe->k_i));
+ bch2_trans_update(&trans, iter, &stripe->k_i);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_ATOMIC|
extent_stripe_ptr_add(e, s, ptr, idx);
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.k));
+ bch2_trans_update(&trans, iter, &tmp.k);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_ATOMIC|
spin_unlock(&c->ec_stripes_heap_lock);
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &new_key->k_i));
+ bch2_trans_update(trans, iter, &new_key->k_i);
return bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL|flags);
if (!may_allocate && allocating)
return -ENOSPC;
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(extent_iter, k));
+ bch2_trans_update(trans, extent_iter, k);
new_i_size = min(k->k.p.offset << 9, new_i_size);
/* XXX: inode->i_size locking */
if (i_sectors_delta ||
new_i_size > inode->ei_inode.bi_size) {
- if (c->opts.new_inode_updates) {
- bch2_trans_unlock(trans);
- mutex_lock(&inode->ei_update_lock);
-
- if (!bch2_trans_relock(trans)) {
- mutex_unlock(&inode->ei_update_lock);
- return -EINTR;
- }
-
- inode_locked = true;
-
- if (!inode->ei_inode_update)
- inode->ei_inode_update =
- bch2_deferred_update_alloc(c,
- BTREE_ID_INODES, 64);
-
- inode_u = inode->ei_inode;
- inode_u.bi_sectors += i_sectors_delta;
-
- /* XXX: this is slightly suspect */
- if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
- new_i_size > inode_u.bi_size) {
- inode_u.bi_size = new_i_size;
- extended = true;
- }
-
- bch2_inode_pack(&inode_p, &inode_u);
- bch2_trans_update(trans,
- BTREE_INSERT_DEFERRED(inode->ei_inode_update,
- &inode_p.inode.k_i));
- } else {
- inode_iter = bch2_trans_get_iter(trans,
- BTREE_ID_INODES,
- POS(k->k.p.inode, 0),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- if (IS_ERR(inode_iter))
- return PTR_ERR(inode_iter);
-
- ret = bch2_btree_iter_traverse(inode_iter);
- if (ret)
- goto err;
+ inode_iter = bch2_trans_get_iter(trans,
+ BTREE_ID_INODES,
+ POS(k->k.p.inode, 0),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ if (IS_ERR(inode_iter))
+ return PTR_ERR(inode_iter);
- inode_u = inode->ei_inode;
- inode_u.bi_sectors += i_sectors_delta;
+ ret = bch2_btree_iter_traverse(inode_iter);
+ if (ret)
+ goto err;
- /* XXX: this is slightly suspect */
- if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
- new_i_size > inode_u.bi_size) {
- inode_u.bi_size = new_i_size;
- extended = true;
- }
+ inode_u = inode->ei_inode;
+ inode_u.bi_sectors += i_sectors_delta;
- bch2_inode_pack(&inode_p, &inode_u);
- bch2_trans_update(trans,
- BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
+ /* XXX: this is slightly suspect */
+ if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+ new_i_size > inode_u.bi_size) {
+ inode_u.bi_size = new_i_size;
+ extended = true;
}
+
+ bch2_inode_pack(&inode_p, &inode_u);
+ bch2_trans_update(trans, inode_iter, &inode_p.inode.k_i);
}
ret = bch2_trans_commit(trans, disk_res,
bkey_start_pos(&delete.k));
}
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(dst, &copy.k));
- bch2_trans_update(&trans,
- BTREE_INSERT_ENTRY(del ?: src, &delete));
+ bch2_trans_update(&trans, dst, &copy.k);
+ bch2_trans_update(&trans, del ?: src, &delete);
if (copy.k.k.size == k.k->size) {
/*
inode_set_fn set,
void *p)
{
- struct bch_fs *c = trans->c;
struct btree_iter *iter = NULL;
struct bkey_inode_buf *inode_p;
int ret;
lockdep_assert_held(&inode->ei_update_lock);
- if (c->opts.new_inode_updates) {
- /* XXX: Don't do this with btree locks held */
- if (!inode->ei_inode_update)
- inode->ei_inode_update =
- bch2_deferred_update_alloc(c, BTREE_ID_INODES, 64);
- } else {
- iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
- POS(inode->v.i_ino, 0),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
-
- /* The btree node lock is our lock on the inode: */
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return ret;
- }
+ iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
+ POS(inode->v.i_ino, 0),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ /* The btree node lock is our lock on the inode: */
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
*inode_u = inode->ei_inode;
return PTR_ERR(inode_p);
bch2_inode_pack(inode_p, inode_u);
-
- if (!inode->ei_inode_update)
- bch2_trans_update(trans,
- BTREE_INSERT_ENTRY(iter, &inode_p->inode.k_i));
- else
- bch2_trans_update(trans,
- BTREE_INSERT_DEFERRED(inode->ei_inode_update,
- &inode_p->inode.k_i));
+ bch2_trans_update(trans, iter, &inode_p->inode.k_i);
return 0;
}
mutex_init(&inode->ei_update_lock);
pagecache_lock_init(&inode->ei_pagecache_lock);
mutex_init(&inode->ei_quota_lock);
- inode->ei_inode_update = NULL;
inode->ei_journal_seq = 0;
return &inode->v;
BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
- if (inode->ei_inode_update)
- bch2_deferred_update_free(c, inode->ei_inode_update);
- inode->ei_inode_update = NULL;
-
if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
KEY_TYPE_QUOTA_WARN);
struct inode v;
struct mutex ei_update_lock;
- struct deferred_update *ei_inode_update;
u64 ei_journal_seq;
u64 ei_quota_reserved;
unsigned long ei_last_dirtied;
if (fsck_err(c, "dirent with junk at end, was %s (%zu) now %s (%u)",
buf, strlen(buf), d->v.d_name, len)) {
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &d->k_i));
+ bch2_trans_update(trans, iter, &d->k_i);
ret = bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
bkey_reassemble(&n->k_i, d.s_c);
n->v.d_type = mode_to_type(target.bi_mode);
- bch2_trans_update(&trans,
- BTREE_INSERT_ENTRY(iter, &n->k_i));
+ bch2_trans_update(&trans, iter, &n->k_i);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
struct bkey_inode_buf p;
bch2_inode_pack(&p, &u);
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
+ bch2_trans_update(trans, iter, &p.inode.k_i);
ret = bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
inode_u->bi_generation = bkey_generation(k);
bch2_inode_pack(inode_p, inode_u);
- bch2_trans_update(trans,
- BTREE_INSERT_ENTRY(iter, &inode_p->inode.k_i));
+ bch2_trans_update(trans, iter, &inode_p->inode.k_i);
return 0;
}
}
delete.v.bi_generation = cpu_to_le32(bi_generation);
}
- bch2_trans_update(&trans,
- BTREE_INSERT_ENTRY(iter, &delete.k_i));
+ bch2_trans_update(&trans, iter, &delete.k_i);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_ATOMIC|
if (ret)
break;
- bch2_trans_update(&trans,
- BTREE_INSERT_ENTRY(iter, &split.k));
+ bch2_trans_update(&trans, iter, &split.k);
ret = bch2_trans_commit(&trans, &op->res, op_journal_seq(op),
BTREE_INSERT_NOFAIL|
if (!bch2_bkey_narrow_crcs(&new.k, new_crc))
goto out;
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new.k));
+ bch2_trans_update(&trans, iter, &new.k);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL|
*/
bch2_extent_normalize(c, bkey_i_to_s(&tmp.key));
- /* XXX not sketchy at all */
- iter->pos = bkey_start_pos(&tmp.key.k);
+ bch2_btree_iter_set_pos(iter, bkey_start_pos(&tmp.key.k));
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.key));
+ bch2_trans_update(&trans, iter, &tmp.key);
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_ATOMIC|
goto next;
}
- bch2_trans_update(&trans,
- BTREE_INSERT_ENTRY(iter, insert));
+ bch2_trans_update(&trans, iter, insert);
ret = bch2_trans_commit(&trans, &op->res,
op_journal_seq(op),
OPT_UINT(0, BCH_REPLICAS_MAX), \
NO_SB_OPT, 1, \
"n", "Data written to this device will be considered\n"\
- "to have already been replicated n times") \
- x(new_inode_updates, u8, \
- OPT_MOUNT, \
- OPT_BOOL(), \
- NO_SB_OPT, false, \
- NULL, "Enable new btree write-cache for inode updates")
-
+ "to have already been replicated n times")
struct bch_opts {
#define x(_name, _bits, ...) unsigned _name##_defined:1;
if (qdq->d_fieldmask & QC_INO_HARD)
new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new_quota.k_i));
+ bch2_trans_update(&trans, iter, &new_quota.k_i);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
bch2_cut_front(split_iter->pos, split);
bch2_cut_back(atomic_end, &split->k);
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(split_iter, split));
+ bch2_trans_update(&trans, split_iter, split);
bch2_btree_iter_set_pos(iter, split->k.p);
} while (bkey_cmp(iter->pos, k->k.p) < 0);
r_v->v.refcount = 0;
memcpy(r_v->v.start, e->v.start, bkey_val_bytes(&e->k));
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(reflink_iter, &r_v->k_i));
+ bch2_trans_update(trans, reflink_iter, &r_v->k_i);
r_p = bch2_trans_kmalloc(trans, sizeof(*r_p));
if (IS_ERR(r_p))
set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(extent_iter, &r_p->k_i));
+ bch2_trans_update(trans, extent_iter, &r_p->k_i);
err:
if (!IS_ERR(reflink_iter)) {
c->reflink_hint = reflink_iter->pos.offset;
}
insert->k.p = iter->pos;
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, insert));
+ bch2_trans_update(trans, iter, insert);
bch2_trans_iter_free_on_commit(trans, iter);
}
delete->k.p = iter->pos;
delete->k.type = ret ? KEY_TYPE_whiteout : KEY_TYPE_deleted;
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, delete));
+ bch2_trans_update(trans, iter, delete);
return 0;
}
ret = bch2_btree_iter_traverse(iter);
BUG_ON(ret);
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+ bch2_trans_update(&trans, iter, &k.k_i);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
ret = bch2_btree_iter_traverse(iter);
BUG_ON(ret);
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+ bch2_trans_update(&trans, iter, &k.k_i);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
bkey_cookie_init(&k.k_i);
k.k.p = iter->pos;
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+ bch2_trans_update(&trans, iter, &k.k_i);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
}
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
insert.k.p = iter->pos;
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
+ bch2_trans_update(&trans, iter, &insert.k_i);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
bkey_reassemble(&u.k_i, k);
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
+ bch2_trans_update(&trans, iter, &u.k_i);
ret = bch2_trans_commit(&trans, NULL, NULL, 0);
BUG_ON(ret);
}