static int bch2_trans_realloc_iters(struct btree_trans *trans,
unsigned new_size)
{
- void *new_iters, *new_updates;
+ void *new_iters, *new_updates, *new_sorted;
+ size_t iters_bytes;
+ size_t updates_bytes;
+ size_t sorted_bytes;
new_size = roundup_pow_of_two(new_size);
bch2_trans_unlock(trans);
- new_iters = kmalloc(sizeof(struct btree_iter) * new_size +
- sizeof(struct btree_insert_entry) * (new_size + 4),
- GFP_NOFS);
+ iters_bytes = sizeof(struct btree_iter) * new_size;
+ updates_bytes = sizeof(struct btree_insert_entry) * (new_size + 4);
+ sorted_bytes = sizeof(u8) * (new_size + 4);
+
+ new_iters = kmalloc(iters_bytes +
+ updates_bytes +
+ sorted_bytes, GFP_NOFS);
if (new_iters)
goto success;
trans->used_mempool = true;
success:
- new_updates = new_iters + sizeof(struct btree_iter) * new_size;
+ new_updates = new_iters + iters_bytes;
+ new_sorted = new_updates + updates_bytes;
memcpy(new_iters, trans->iters,
sizeof(struct btree_iter) * trans->nr_iters);
if (trans->iters != trans->iters_onstack)
kfree(trans->iters);
- trans->iters = new_iters;
- trans->updates = new_updates;
- trans->size = new_size;
+ trans->iters = new_iters;
+ trans->updates = new_updates;
+ trans->updates_sorted = new_sorted;
+ trans->size = new_size;
if (trans->iters_live) {
trace_trans_restart_iters_realloced(trans->ip, trans->size);
trans->size = ARRAY_SIZE(trans->iters_onstack);
trans->iters = trans->iters_onstack;
trans->updates = trans->updates_onstack;
+ trans->updates_sorted = trans->updates_sorted_onstack;
trans->fs_usage_deltas = NULL;
if (expected_nr_iters > trans->size)
return trans->error ? -EIO : 0;
}
+
+/* Tear down the per-fs fallback mempool backing btree_trans allocations. */
+void bch2_fs_btree_iter_exit(struct bch_fs *c)
+{
+	mempool_exit(&c->btree_iters_pool);
+}
+
+/*
+ * Set up the emergency mempool for btree_trans: a single allocation sized
+ * for BTREE_ITER_MAX iterators plus the updates and updates_sorted (u8)
+ * arrays — the same three-region layout bch2_trans_realloc_iters() carves
+ * out of one kmalloc.  Returns 0 or a negative errno from mempool init.
+ */
+int bch2_fs_btree_iter_init(struct bch_fs *c)
+{
+	unsigned nr = BTREE_ITER_MAX;
+
+	return mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
+			sizeof(struct btree_iter) * nr +
+			sizeof(struct btree_insert_entry) * (nr + 4) +
+			sizeof(u8) * (nr + 4));
+}
#include <linux/sort.h>
+/*
+ * True if the update at updates_sorted[sorted_idx] lands in the same leaf
+ * node as the previous entry in sorted order — i.e. that leaf was already
+ * handled (locked/unlocked/merged) for the previous entry.  The first entry
+ * and deferred entries (no ->iter leaf) never match.
+ */
+static inline bool same_leaf_as_prev(struct btree_trans *trans,
+				     unsigned sorted_idx)
+{
+	struct btree_insert_entry *i = trans->updates +
+		trans->updates_sorted[sorted_idx];
+	struct btree_insert_entry *prev = sorted_idx
+		? trans->updates + trans->updates_sorted[sorted_idx - 1]
+		: NULL;
+
+	return !i->deferred &&
+		prev &&
+		i->iter->l[0].b == prev->iter->l[0].b;
+}
+
+/*
+ * Iterate over the transaction's updates in sorted (btree position) order:
+ * _iter walks updates_sorted[], _i points at the corresponding update.
+ *
+ * Note: the init clause must use the macro parameter _iter, not a bare
+ * "iter" — the latter only compiles when the caller happens to name its
+ * counter "iter" (macro hygiene bug in the original).
+ */
+#define trans_for_each_update_sorted(_trans, _i, _iter)			\
+	for (_iter = 0;							\
+	     _iter < _trans->nr_updates &&				\
+	     (_i = _trans->updates + _trans->updates_sorted[_iter], 1);	\
+	     _iter++)
+
inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
struct btree_iter *iter)
{
bch2_btree_init_next(c, b, iter);
}
-static void btree_trans_lock_write(struct bch_fs *c, struct btree_trans *trans)
+/*
+ * Take (lock == true) or drop (lock == false) the write lock on each leaf
+ * node touched by the pending updates.  Walks the updates in sorted order
+ * and skips entries whose leaf matches the previous entry's, so each leaf
+ * is locked/unlocked exactly once; sorted order keeps lock acquisition
+ * consistent between transactions.
+ */
+static void btree_trans_lock_write(struct btree_trans *trans, bool lock)
{
+	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
+	unsigned iter;
-	trans_for_each_update_leaf(trans, i)
-		bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
-}
-
-static void btree_trans_unlock_write(struct btree_trans *trans)
-{
-	struct btree_insert_entry *i;
+	trans_for_each_update_sorted(trans, i, iter) {
+		/* this leaf was already handled for the previous entry */
+		if (same_leaf_as_prev(trans, iter))
+			continue;
-	trans_for_each_update_leaf(trans, i)
-		bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
+		if (lock)
+			bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
+		else
+			bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
+	}
}
static inline int btree_trans_cmp(struct btree_insert_entry l,
btree_iter_cmp(l.iter, r.iter);
}
+/*
+ * Fill trans->updates_sorted[] with indices into trans->updates, ordered by
+ * btree_trans_cmp() (iterator position), so write locks can be taken in a
+ * consistent order.  Insertion sort: nr_updates is small (bounded by
+ * nr_iters + 4 per the allocation above), so O(n^2) is deliberate here.
+ */
+static inline void btree_trans_sort_updates(struct btree_trans *trans)
+{
+	struct btree_insert_entry *l, *r;
+	unsigned nr = 0, pos;
+
+	trans_for_each_update(trans, l) {
+		/* find insertion point within the already-sorted prefix */
+		for (pos = 0; pos < nr; pos++) {
+			r = trans->updates + trans->updates_sorted[pos];
+
+			if (btree_trans_cmp(*l, *r) <= 0)
+				break;
+		}
+
+		/* shift the tail up one slot and record this entry's index */
+		memmove(&trans->updates_sorted[pos + 1],
+			&trans->updates_sorted[pos],
+			(nr - pos) * sizeof(trans->updates_sorted[0]));
+
+		trans->updates_sorted[pos] = l - trans->updates;
+		nr++;
+	}
+
+	/* every update must have been placed exactly once */
+	BUG_ON(nr != trans->nr_updates);
+}
+
/* Inserting into a given leaf node (last stage of insert): */
/* Handle overwrites and do insert, for non extents: */
struct btree_insert_entry **stopped_at)
{
struct btree_insert_entry *i;
- unsigned u64s = 0;
+ unsigned iter, u64s = 0;
int ret;
- trans_for_each_update_iter(trans, i) {
+ trans_for_each_update_sorted(trans, i, iter) {
/* Multiple inserts might go to same leaf: */
- if (!same_leaf_as_prev(trans, i))
+ if (!same_leaf_as_prev(trans, iter))
u64s = 0;
u64s += i->k->k.u64s;
struct bch_fs *c = trans->c;
struct bch_fs_usage_online *fs_usage = NULL;
struct btree_insert_entry *i;
- bool saw_non_marked;
unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
? BCH_BUCKET_MARK_BUCKET_INVALIDATE
: 0;
trans_for_each_update_iter(trans, i)
BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
+ /*
+ * note: running triggers will append more updates to the list of
+ * updates as we're walking it:
+ */
trans_for_each_update_iter(trans, i)
- i->marked = false;
-
- do {
- saw_non_marked = false;
-
- trans_for_each_update_iter(trans, i) {
- if (i->marked)
- continue;
-
- saw_non_marked = true;
- i->marked = true;
-
- if (update_has_triggers(trans, i) &&
- update_triggers_transactional(trans, i)) {
- ret = bch2_trans_mark_update(trans, i->iter, i->k);
- if (ret == -EINTR)
- trace_trans_restart_mark(trans->ip);
- if (ret)
- goto out_clear_replicas;
- }
+ if (update_has_triggers(trans, i) &&
+ update_triggers_transactional(trans, i)) {
+ ret = bch2_trans_mark_update(trans, i->iter, i->k);
+ if (ret == -EINTR)
+ trace_trans_restart_mark(trans->ip);
+ if (ret)
+ goto out_clear_replicas;
}
- } while (saw_non_marked);
trans_for_each_update(trans, i)
btree_insert_entry_checks(trans, i);
bch2_btree_trans_verify_locks(trans);
- btree_trans_lock_write(c, trans);
+ /*
+ * No more updates can be added - sort updates so we can take write
+ * locks in the correct order:
+ */
+ btree_trans_sort_updates(trans);
+
+ btree_trans_lock_write(trans, true);
if (race_fault()) {
ret = -EINTR;
goto out;
trans_for_each_update_iter(trans, i) {
- if (i->deferred ||
- !btree_node_type_needs_gc(i->iter->btree_id))
+ if (!btree_node_type_needs_gc(i->iter->btree_id))
continue;
if (!fs_usage) {
(trans->flags & BTREE_INSERT_JOURNAL_RESERVED) &&
trans->journal_res.ref);
- btree_trans_unlock_write(trans);
+ btree_trans_lock_write(trans, false);
if (fs_usage) {
bch2_fs_usage_scratch_put(c, fs_usage);
{
struct bch_fs *c = trans->c;
unsigned flags = trans->flags;
- struct btree_insert_entry *src, *dst;
-
- src = dst = trans->updates;
-
- while (src < trans->updates + trans->nr_updates) {
- if (!src->triggered) {
- *dst = *src;
- dst++;
- }
- src++;
- }
-
- trans->nr_updates = dst - trans->updates;
/*
* BTREE_INSERT_NOUNLOCK means don't unlock _after_ successful btree
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
+ unsigned iter;
int ret;
trans_for_each_update_iter(trans, i) {
if (trans->flags & BTREE_INSERT_NOUNLOCK)
trans->nounlock = true;
- trans_for_each_update_leaf(trans, i)
- bch2_foreground_maybe_merge(c, i->iter, 0, trans->flags);
+ trans_for_each_update_sorted(trans, i, iter)
+ if (!same_leaf_as_prev(trans, iter))
+ bch2_foreground_maybe_merge(c, i->iter,
+ 0, trans->flags);
trans->nounlock = false;
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i = NULL;
- unsigned orig_mem_top = trans->mem_top;
+ unsigned orig_nr_updates = trans->nr_updates;
+ unsigned orig_mem_top = trans->mem_top;
int ret = 0;
if (!trans->nr_updates)
err:
ret = bch2_trans_commit_error(trans, i, ret);
+ /* free updates and memory used by triggers, they'll be reexecuted: */
+ trans->nr_updates = orig_nr_updates;
+ trans->mem_top = orig_mem_top;
+
/* can't loop if it was passed in and we changed it: */
if (unlikely(trans->flags & BTREE_INSERT_NO_CLEAR_REPLICAS) && !ret)
ret = -EINTR;
- if (!ret) {
- /* free memory used by triggers, they'll be reexecuted: */
- trans->mem_top = orig_mem_top;
+ if (!ret)
goto retry;
- }
goto out;
}
-struct btree_insert_entry *bch2_trans_update(struct btree_trans *trans,
- struct btree_insert_entry entry)
-{
- struct btree_insert_entry *i;
-
- BUG_ON(trans->nr_updates >= trans->nr_iters + 4);
-
- for (i = trans->updates;
- i < trans->updates + trans->nr_updates;
- i++)
- if (btree_trans_cmp(entry, *i) < 0)
- break;
-
- memmove(&i[1], &i[0],
- (void *) &trans->updates[trans->nr_updates] - (void *) i);
- trans->nr_updates++;
- *i = entry;
- return i;
-}
-
/**
* bch2_btree_insert - insert keys into the extent btree
* @c: pointer to struct bch_fs