static int bch2_trans_realloc_iters(struct btree_trans *trans,
unsigned new_size)
{
- void *new_iters, *new_updates, *new_sorted;
+ void *new_iters, *new_updates;
size_t iters_bytes;
size_t updates_bytes;
- size_t sorted_bytes;
new_size = roundup_pow_of_two(new_size);
iters_bytes = sizeof(struct btree_iter) * new_size;
updates_bytes = sizeof(struct btree_insert_entry) * new_size;
- sorted_bytes = sizeof(u8) * new_size;
- new_iters = kmalloc(iters_bytes +
- updates_bytes +
- sorted_bytes, GFP_NOFS);
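+ /* a single allocation now backs both the iters and updates arrays: */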
+ new_iters = kmalloc(iters_bytes + updates_bytes, GFP_NOFS);
if (new_iters)
goto success;
trans->used_mempool = true;
success:
new_updates = new_iters + iters_bytes;
- new_sorted = new_updates + updates_bytes;
memcpy(new_iters, trans->iters,
sizeof(struct btree_iter) * trans->nr_iters);
trans->iters = new_iters;
trans->updates = new_updates;
- trans->updates_sorted = new_sorted;
trans->size = new_size;
if (trans->iters_live) {
got_slot:
BUG_ON(trans->iters_linked & (1ULL << idx));
trans->iters_linked |= 1ULL << idx;
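+ /* clear flags so stale state doesn't survive reuse of this slot: */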
+ trans->iters[idx].flags = 0;
return &trans->iters[idx];
}
if (btree_node_locked(dst, i))
six_lock_increment(&dst->l[i].b->c.lock,
__btree_lock_want(dst, i));
+
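+ /* the copy isn't attached to any pending update; don't carry over commit flags: */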
+ dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+ dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
}
static inline struct bpos bpos_diff(struct bpos l, struct bpos r)
iter = best;
}
- iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
iter->flags &= ~(BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
iter->flags |= flags & (BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
BUG_ON(iter->btree_id != btree_id);
BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
+ BUG_ON(iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT);
BUG_ON(trans->iters_live & (1ULL << iter->idx));
trans->iters_live |= 1ULL << iter->idx;
* it's cheap to copy it again:
*/
trans->iters_touched &= ~(1ULL << iter->idx);
- iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
return iter;
}
struct btree_iter *iter;
trans_for_each_iter(trans, iter)
- iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+ iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
+ BTREE_ITER_SET_POS_AFTER_COMMIT);
bch2_trans_unlink_iters(trans);
trans->iters_touched &= trans->iters_live;
+ trans->need_reset = 0;
trans->nr_updates = 0;
if (flags & TRANS_RESET_MEM)
trans->size = ARRAY_SIZE(trans->iters_onstack);
trans->iters = trans->iters_onstack;
trans->updates = trans->updates_onstack;
- trans->updates_sorted = trans->updates_sorted_onstack;
trans->fs_usage_deltas = NULL;
if (expected_nr_iters > trans->size)
*/
#define BTREE_ITER_IS_EXTENTS (1 << 6)
#define BTREE_ITER_ERROR (1 << 7)
+#define BTREE_ITER_SET_POS_AFTER_COMMIT (1 << 8)
enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0,
* @nodes_intent_locked - bitmask indicating which locks are intent locks
*/
struct btree_iter {
- u8 idx;
-
struct btree_trans *trans;
struct bpos pos;
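+ /* extents: where to move the iterator after a successful commit: */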
+ struct bpos pos_after_commit;
+
+ u16 flags;
+ u8 idx;

- u8 flags;
enum btree_iter_uptodate uptodate:4;
enum btree_id btree_id:4;
unsigned level:4,
struct btree_insert_entry {
unsigned trigger_flags;
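+ /* true once transactional triggers have run for this update: */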
+ unsigned trans_triggers_run:1;
struct bkey_i *k;
struct btree_iter *iter;
};
unsigned used_mempool:1;
unsigned error:1;
unsigned nounlock:1;
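+ /* set when an update fails partway; reset before reusing the transaction: */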
+ unsigned need_reset:1;
unsigned mem_top;
unsigned mem_bytes;
struct btree_iter *iters;
struct btree_insert_entry *updates;
- u8 *updates_sorted;
/* update path: */
struct journal_res journal_res;
struct btree_iter iters_onstack[2];
struct btree_insert_entry updates_onstack[2];
- u8 updates_sorted_onstack[2];
};
#define BTREE_FLAG(flag) \
int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
struct btree *, struct bkey_i_btree_ptr *);
+int bch2_trans_update(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *, enum btree_trigger_flags);
int __bch2_trans_commit(struct btree_trans *);
/**
return __bch2_trans_commit(trans);
}
-static inline void bch2_trans_update(struct btree_trans *trans,
- struct btree_iter *iter, struct bkey_i *k,
- enum btree_trigger_flags flags)
-{
- EBUG_ON(trans->nr_updates >= trans->nr_iters);
-
- iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
-
- trans->updates[trans->nr_updates++] = (struct btree_insert_entry) {
- .trigger_flags = flags, .iter = iter, .k = k
- };
-}
-
#define __bch2_trans_do(_trans, _disk_res, _journal_seq, \
_flags, _reset_flags, _do) \
({ \
#include <linux/sort.h>
static inline bool same_leaf_as_prev(struct btree_trans *trans,
- unsigned idx)
+ struct btree_insert_entry *i)
{
- return idx &&
- trans->updates[trans->updates_sorted[idx]].iter->l[0].b ==
- trans->updates[trans->updates_sorted[idx - 1]].iter->l[0].b;
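+ /* updates are kept sorted, so updates touching the same leaf are adjacent: */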
+ return i != trans->updates &&
+ i[0].iter->l[0].b == i[-1].iter->l[0].b;
}
-#define trans_for_each_update_sorted(_trans, _i, _iter) \
- for (_iter = 0; \
- _iter < _trans->nr_updates && \
- (_i = _trans->updates + _trans->updates_sorted[_iter], 1); \
- _iter++)
inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
struct btree_iter *iter)
bch2_btree_init_next(c, b, iter);
}
-static inline void btree_trans_sort_updates(struct btree_trans *trans)
-{
- struct btree_insert_entry *l, *r;
- unsigned nr = 0, pos;
-
- trans_for_each_update(trans, l) {
- for (pos = 0; pos < nr; pos++) {
- r = trans->updates + trans->updates_sorted[pos];
-
- if (btree_iter_cmp(l->iter, r->iter) <= 0)
- break;
- }
-
- memmove(&trans->updates_sorted[pos + 1],
- &trans->updates_sorted[pos],
- (nr - pos) * sizeof(trans->updates_sorted[0]));
-
- trans->updates_sorted[pos] = l - trans->updates;
- nr++;
- }
-}
-
/* Inserting into a given leaf node (last stage of insert): */
/* Handle overwrites and do insert, for non extents: */
struct bch_fs *c = trans->c;
struct bch_fs_usage_online *fs_usage = NULL;
struct btree_insert_entry *i;
- unsigned iter, u64s = 0;
+ unsigned u64s = 0;
bool marking = false;
int ret;
prefetch(&trans->c->journal.flags);
- trans_for_each_update_sorted(trans, i, iter) {
+ trans_for_each_update(trans, i) {
/* Multiple inserts might go to same leaf: */
- if (!same_leaf_as_prev(trans, iter))
+ if (!same_leaf_as_prev(trans, i))
u64s = 0;
u64s += i->k->k.u64s;
{
struct btree_insert_entry *i;
struct btree_iter *iter;
- unsigned idx;
int ret;
trans_for_each_update(trans, i)
btree_insert_entry_checks(trans, i->iter, i->k);
bch2_btree_trans_verify_locks(trans);
- /*
- * No more updates can be added - sort updates so we can take write
- * locks in the correct order:
- */
- btree_trans_sort_updates(trans);
-
- trans_for_each_update_sorted(trans, i, idx)
- if (!same_leaf_as_prev(trans, idx))
+ trans_for_each_update(trans, i)
+ if (!same_leaf_as_prev(trans, i))
bch2_btree_node_lock_for_insert(trans->c,
i->iter->l[0].b, i->iter);
ret = bch2_trans_commit_write_locked(trans, stopped_at);
- trans_for_each_update_sorted(trans, i, idx)
- if (!same_leaf_as_prev(trans, idx))
+ trans_for_each_update(trans, i)
+ if (!same_leaf_as_prev(trans, i))
bch2_btree_node_unlock_write_inlined(i->iter->l[0].b,
i->iter);
if (trans->flags & BTREE_INSERT_NOUNLOCK)
trans->nounlock = true;
- trans_for_each_update_sorted(trans, i, idx)
- if (!same_leaf_as_prev(trans, idx))
+ trans_for_each_update(trans, i)
+ if (!same_leaf_as_prev(trans, i))
bch2_foreground_maybe_merge(trans->c, i->iter,
0, trans->flags);
int __bch2_trans_commit(struct btree_trans *trans)
{
struct btree_insert_entry *i = NULL;
+ struct btree_iter *iter;
+ bool trans_trigger_run;
unsigned u64s;
int ret = 0;
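+ /* a failed bch2_trans_update() leaves the transaction unusable until reset: */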
+ BUG_ON(trans->need_reset);
+
if (!trans->nr_updates)
goto out_noupdates;
}
/*
- * note: running triggers will append more updates to the list of
- * updates as we're walking it:
+ * Running triggers will append more updates to the list of updates as
+ * we're walking it - so we keep looping until a pass runs no new
+ * triggers:
*/
+ do {
+ trans_trigger_run = false;
+
+ trans_for_each_update(trans, i) {
+ if (iter_has_trans_triggers(i->iter) &&
+ !i->trans_triggers_run) {
+ i->trans_triggers_run = true;
+ trans_trigger_run = true;
+
+ ret = bch2_trans_mark_update(trans, i->iter, i->k,
+ i->trigger_flags);
+ if (unlikely(ret)) {
+ if (ret == -EINTR)
+ trace_trans_restart_mark(trans->ip);
+ goto out;
+ }
+ }
+ }
+ } while (trans_trigger_run);
+
trans_for_each_update(trans, i) {
/* we know trans->nounlock won't be set here: */
if (unlikely(!(i->iter->locks_want < 1
goto out;
}
- if (iter_has_trans_triggers(i->iter)) {
- ret = bch2_trans_mark_update(trans, i->iter, i->k,
- i->trigger_flags);
- if (unlikely(ret)) {
- if (ret == -EINTR)
- trace_trans_restart_mark(trans->ip);
- goto out;
- }
- }
-
u64s = jset_u64s(i->k->k.u64s);
if (0)
trans->journal_preres_u64s += u64s;
if (ret)
goto err;
+
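+ /*
+ * Extent updates want their iterator repositioned past the range just
+ * committed; with BTREE_INSERT_NOUNLOCK we may only move within the
+ * current leaf, since a full set_pos can require a retraverse:
+ */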
+ trans_for_each_iter(trans, iter)
+ if ((trans->iters_live & (1ULL << iter->idx)) &&
+ (iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT)) {
+ if (trans->flags & BTREE_INSERT_NOUNLOCK)
+ bch2_btree_iter_set_pos_same_leaf(iter, iter->pos_after_commit);
+ else
+ bch2_btree_iter_set_pos(iter, iter->pos_after_commit);
+ }
out:
bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
goto retry;
}
+int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_i *k, enum btree_trigger_flags flags)
+{
+ struct btree_insert_entry *i, n = (struct btree_insert_entry) {
+ .trigger_flags = flags, .iter = iter, .k = k
+ };
+
+ EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&k->k)));
+
+ iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
+
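+ /* extents: remember where the iterator should end up post commit: */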
+ if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+ iter->pos_after_commit = k->k.p;
+ iter->flags |= BTREE_ITER_SET_POS_AFTER_COMMIT;
+ }
+
+ /*
+ * Pending updates are kept sorted: first, find position of new update:
+ */
+ trans_for_each_update(trans, i)
+ if (btree_iter_cmp(iter, i->iter) <= 0)
+ break;
+
+ /*
+ * Now delete/trim any updates the new update overwrites:
+ */
+ if (i > trans->updates &&
+ i[-1].iter->btree_id == iter->btree_id &&
+ bkey_cmp(iter->pos, i[-1].k->k.p) < 0)
+ bch2_cut_back(n.iter->pos, i[-1].k);
+
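+ /* delete any existing updates the new key completely overwrites: */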
+ while (i < trans->updates + trans->nr_updates &&
+ iter->btree_id == i->iter->btree_id &&
+ bkey_cmp(n.k->k.p, i->k->k.p) >= 0)
+ array_remove_item(trans->updates, trans->nr_updates,
+ i - trans->updates);
+
+ if (i < trans->updates + trans->nr_updates &&
+ iter->btree_id == i->iter->btree_id &&
+ bkey_cmp(n.k->k.p, i->iter->pos) > 0) {
+ /*
+ * When the new extent overwrites the start of another update,
+ * trimming that update moves its start position - and an
+ * iterator's position has to match the start of its extent.
+ * But we don't want to move an iterator some other code is
+ * still using, so we may need to clone it first:
+ */
+ if (trans->iters_live & (1ULL << i->iter->idx)) {
+ i->iter = bch2_trans_copy_iter(trans, i->iter);
+ if (IS_ERR(i->iter)) {
+ trans->need_reset = true;
+ return PTR_ERR(i->iter);
+ }
+
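+ /*
+ * BTREE_ITER_KEEP_UNTIL_COMMIT pins the clone; we can then drop
+ * our reference, and it'll be released at commit (or reset):
+ */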
+ i->iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
+ bch2_trans_iter_put(trans, i->iter);
+ }
+
+ bch2_cut_front(n.k->k.p, i->k);
+ bch2_btree_iter_set_pos(i->iter, n.k->k.p);
+ }
+
+ EBUG_ON(trans->nr_updates >= trans->nr_iters);
+
+ array_insert_item(trans->updates, trans->nr_updates,
+ i - trans->updates, n);
+ return 0;
+}
+
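+/*
+ * bch2_trans_update() can now fail (cloning an iterator can return an
+ * error), so callers chain it into the commit, as in the fcollapse
+ * path below:
+ *
+ *	ret = bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
+ *	      bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
+ *	      bch2_trans_commit(&trans, &disk_res,
+ *				&inode->ei_journal_seq,
+ *				BTREE_INSERT_NOFAIL);
+ */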
static int __bch2_btree_insert(struct btree_trans *trans,
enum btree_id id, struct bkey_i *k)
{
return ret;
}
-static void *trans_update_key(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned u64s)
-{
- struct btree_insert_entry *i;
- struct bkey_i *new_k;
-
- new_k = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
- if (IS_ERR(new_k))
- return new_k;
-
- bkey_init(&new_k->k);
- new_k->k.p = iter->pos;
-
- trans_for_each_update(trans, i)
- if (i->iter == iter) {
- i->k = new_k;
- return new_k;
- }
-
- bch2_trans_update(trans, iter, new_k, 0);
- return new_k;
-}
-
static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
u.data_type = u.dirty_sectors || u.cached_sectors
? data_type : 0;
- a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX);
+ a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
bkey_alloc_init(&a->k_i);
a->k.p = iter->pos;
bch2_alloc_pack(a, u);
+ bch2_trans_update(trans, iter, &a->k_i, 0);
out:
bch2_trans_iter_put(trans, iter);
return ret;
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
- struct bkey_i *new_k;
struct bkey_s_c k;
- struct bkey_s_stripe s;
+ struct bkey_i_stripe *s;
int ret = 0;
ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
goto out;
}
- new_k = trans_update_key(trans, iter, k.k->u64s);
- ret = PTR_ERR_OR_ZERO(new_k);
+ s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(s);
if (ret)
goto out;
- bkey_reassemble(new_k, k);
- s = bkey_i_to_s_stripe(new_k);
+ bkey_reassemble(&s->k_i, k);
- stripe_blockcount_set(s.v, p.block,
- stripe_blockcount_get(s.v, p.block) +
+ stripe_blockcount_set(&s->v, p.block,
+ stripe_blockcount_get(&s->v, p.block) +
sectors);
- *nr_data = s.v->nr_blocks - s.v->nr_redundant;
- *nr_parity = s.v->nr_redundant;
- bch2_bkey_to_replicas(&r->e, s.s_c);
+ *nr_data = s->v.nr_blocks - s->v.nr_redundant;
+ *nr_parity = s->v.nr_redundant;
+ bch2_bkey_to_replicas(&r->e, bkey_i_to_s_c(&s->k_i));
+ bch2_trans_update(trans, iter, &s->k_i, 0);
out:
bch2_trans_iter_put(trans, iter);
return ret;
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
- struct bkey_i *new_k;
struct bkey_s_c k;
struct bkey_i_reflink_v *r_v;
s64 ret;
bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
- new_k = trans_update_key(trans, iter, k.k->u64s);
- ret = PTR_ERR_OR_ZERO(new_k);
+ r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(r_v);
if (ret)
goto err;
- bkey_reassemble(new_k, k);
- r_v = bkey_i_to_reflink_v(new_k);
+ bkey_reassemble(&r_v->k_i, k);
le64_add_cpu(&r_v->v.refcount,
!(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
r_v->k.type = KEY_TYPE_deleted;
set_bkey_val_u64s(&r_v->k, 0);
}
+
+ bch2_trans_update(trans, iter, &r_v->k_i, 0);
out:
ret = k.k->p.offset - idx;
err:
struct address_space *mapping = inode->v.i_mapping;
struct bkey_on_stack copy;
struct btree_trans trans;
- struct btree_iter *src, *dst, *del = NULL;
+ struct btree_iter *src, *dst;
loff_t shift, new_size;
u64 src_start;
int ret;
next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
- /*
- * If the new and old keys overlap (because we're moving an
- * extent that's bigger than the amount we're collapsing by),
- * we need to trim the delete key here so they don't overlap
- * because overlaps on insertions aren't handled before
- * triggers are run, so the overwrite will get double counted
- * by the triggers machinery:
- */
- if (insert &&
- bkey_cmp(bkey_start_pos(©.k->k), delete.k.p) < 0) {
- bch2_cut_back(bkey_start_pos(©.k->k), &delete);
- } else if (!insert &&
- bkey_cmp(copy.k->k.p,
- bkey_start_pos(&delete.k)) > 0) {
- bch2_cut_front(copy.k->k.p, &delete);
-
- del = bch2_trans_copy_iter(&trans, src);
- BUG_ON(IS_ERR_OR_NULL(del));
-
- bch2_btree_iter_set_pos(del,
- bkey_start_pos(&delete.k));
- }
-
if (copy.k->k.size == k.k->size) {
/*
* If we're moving the entire extent, we can skip
BUG_ON(ret);
}
- bch2_trans_update(&trans, dst, copy.k, trigger_flags);
- bch2_trans_update(&trans, del ?: src, &delete, trigger_flags);
-
- ret = bch2_trans_commit(&trans, &disk_res,
- &inode->ei_journal_seq,
- BTREE_INSERT_NOFAIL);
+ ret = bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
+ bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
+ bch2_trans_commit(&trans, &disk_res,
+ &inode->ei_journal_seq,
+ BTREE_INSERT_NOFAIL);
bch2_disk_reservation_put(c, &disk_res);
bkey_err:
- if (del)
- bch2_trans_iter_put(&trans, del);
- del = NULL;
-
if (!ret)
bch2_btree_iter_set_pos(src, next_pos);