	memset(trans, 0, sizeof(*trans));
	trans->c = c;
	trans->fn = fn;
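+	/*
+	 * Track the owning task: its pid keys the sorted btree_trans_list
+	 * below, and debugfs prints its backtrace.
+	 */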
+	trans->task = current;
	trans->journal_replay_not_finished =
		!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
-		trans->pid = current->pid;
+		struct btree_trans *pos;
+
		mutex_lock(&c->btree_trans_lock);
-		list_add(&trans->list, &c->btree_trans_list);
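+		/*
+		 * Keep the list sorted by pid: the debugfs reader iterates in
+		 * pid order and uses the last pid printed as its cursor.
+		 */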
+		list_for_each_entry(pos, &c->btree_trans_list, list) {
+			if (trans->task->pid < pos->task->pid) {
+				list_add_tail(&trans->list, &pos->list);
+				goto list_add_done;
+			}
+		}
+		list_add_tail(&trans->list, &c->btree_trans_list);
+list_add_done:
		mutex_unlock(&c->btree_trans_lock);
	}
}
#ifdef CONFIG_BCACHEFS_DEBUG_TRANSACTIONS
-static bool trans_has_locks(struct btree_trans *trans)
+void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
{
	struct btree_path *path;
-
-	trans_for_each_path(trans, path)
-		if (path->nodes_locked)
-			return true;
-	return false;
-}
-#endif
-
-void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG_TRANSACTIONS
-	struct btree_trans *trans;
-	struct btree_path *path;
	struct btree *b;
	static char lock_types[] = { 'r', 'i', 'w' };
	unsigned l;

-	mutex_lock(&c->btree_trans_lock);
-	list_for_each_entry(trans, &c->btree_trans_list, list) {
-		if (!trans_has_locks(trans))
-			continue;
-
-		prt_printf(out, "%i %s\n", trans->pid, trans->fn);
+	prt_printf(out, "%i %s\n", trans->task->pid, trans->fn);

-		trans_for_each_path(trans, path) {
-			if (!path->nodes_locked)
-				continue;
+	trans_for_each_path(trans, path) {
+		if (!path->nodes_locked)
+			continue;

-			prt_printf(out, " path %u %c l=%u %s:",
-				   path->idx,
-				   path->cached ? 'c' : 'b',
-				   path->level,
-				   bch2_btree_ids[path->btree_id]);
-			bch2_bpos_to_text(out, path->pos);
-			prt_printf(out, "\n");
-
-			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
-				if (btree_node_locked(path, l)) {
-					prt_printf(out, " %s l=%u ",
-						   btree_node_intent_locked(path, l) ? "i" : "r", l);
-					bch2_btree_path_node_to_text(out,
-							(void *) path->l[l].b,
-							path->cached);
-					prt_printf(out, "\n");
-				}
+		prt_printf(out, " path %u %c l=%u %s:",
+			   path->idx,
+			   path->cached ? 'c' : 'b',
+			   path->level,
+			   bch2_btree_ids[path->btree_id]);
+		bch2_bpos_to_text(out, path->pos);
+		prt_printf(out, "\n");
+
+		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+			if (btree_node_locked(path, l)) {
+				prt_printf(out, " %s l=%u ",
+					   btree_node_intent_locked(path, l) ? "i" : "r", l);
+				bch2_btree_path_node_to_text(out,
+						(void *) path->l[l].b,
+						path->cached);
+				prt_printf(out, "\n");
			}
		}
+	}

-		b = READ_ONCE(trans->locking);
-		if (b) {
-			path = &trans->paths[trans->locking_path_idx];
-			prt_printf(out, " locking path %u %c l=%u %c %s:",
-				   trans->locking_path_idx,
-				   path->cached ? 'c' : 'b',
-				   trans->locking_level,
-				   lock_types[trans->locking_lock_type],
-				   bch2_btree_ids[trans->locking_btree_id]);
-			bch2_bpos_to_text(out, trans->locking_pos);
-
-			prt_printf(out, " node ");
-			bch2_btree_path_node_to_text(out,
-					(void *) b, path->cached);
-			prt_printf(out, "\n");
-		}
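+	/*
+	 * trans->locking is set while this transaction waits on a node lock;
+	 * it can change underneath us, hence the READ_ONCE().
+	 */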
+	b = READ_ONCE(trans->locking);
+	if (b) {
+		path = &trans->paths[trans->locking_path_idx];
+		prt_printf(out, " locking path %u %c l=%u %c %s:",
+			   trans->locking_path_idx,
+			   path->cached ? 'c' : 'b',
+			   trans->locking_level,
+			   lock_types[trans->locking_lock_type],
+			   bch2_btree_ids[trans->locking_btree_id]);
+		bch2_bpos_to_text(out, trans->locking_pos);
+
+		prt_printf(out, " node ");
+		bch2_btree_path_node_to_text(out,
+				(void *) b, path->cached);
+		prt_printf(out, "\n");
	}
-	mutex_unlock(&c->btree_trans_lock);
-#endif
}
+#endif

void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	.read = bch2_cached_btree_nodes_read,
};

+#ifdef CONFIG_BCACHEFS_DEBUG_TRANSACTIONS
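+/*
+ * Print a task's kernel stack in /proc/<pid>/stack style; exec_update_lock
+ * is held so the task can't exec (and change identity) while we walk it.
+ */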
+static int prt_backtrace(struct printbuf *out, struct task_struct *task)
+{
+	unsigned long entries[32];
+	unsigned i, nr_entries;
+	int ret;
+
+	ret = down_read_killable(&task->signal->exec_update_lock);
+	if (ret)
+		return ret;
+
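+	/* %pB prints each saved return address as symbol+offset: */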
+	nr_entries = stack_trace_save_tsk(task, entries, ARRAY_SIZE(entries), 0);
+	for (i = 0; i < nr_entries; i++) {
+		prt_printf(out, "[<0>] %pB", (void *)entries[i]);
+		prt_newline(out);
+	}
+
+	up_read(&task->signal->exec_update_lock);
+	return 0;
+}
+
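+/*
+ * Emit one entry per btree_trans, in pid order; i->iter holds the last pid
+ * printed so the next read() resumes after it.
+ */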
+static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
+					    size_t size, loff_t *ppos)
+{
+	struct dump_iter *i = file->private_data;
+	struct bch_fs *c = i->c;
+	struct btree_trans *trans;
+	int err;
+
+	i->ubuf = buf;
+	i->size = size;
+	i->ret = 0;
+
+	mutex_lock(&c->btree_trans_lock);
+	list_for_each_entry(trans, &c->btree_trans_list, list) {
+		if (trans->task->pid <= i->iter)
+			continue;
+
+		err = flush_buf(i);
+		if (err) {
+			mutex_unlock(&c->btree_trans_lock);
+			return err;
+		}
+
+		if (!i->size)
+			break;
+
+		bch2_btree_trans_to_text(&i->buf, trans);
+
+		prt_printf(&i->buf, "backtrace:");
+		prt_newline(&i->buf);
+		printbuf_indent_add(&i->buf, 2);
+		prt_backtrace(&i->buf, trans->task);
+		printbuf_indent_sub(&i->buf, 2);
+		prt_newline(&i->buf);
+
+		i->iter = trans->task->pid;
+	}
+	mutex_unlock(&c->btree_trans_lock);
+
+	if (i->buf.allocation_failure)
+		return -ENOMEM;
+
+	return i->ret;
+}
+
+static const struct file_operations btree_transactions_ops = {
+	.owner = THIS_MODULE,
+	.open = bch2_dump_open,
+	.release = bch2_dump_release,
+	.read = bch2_btree_transactions_read,
+};
+#endif /* CONFIG_BCACHEFS_DEBUG_TRANSACTIONS */
+
static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
				      size_t size, loff_t *ppos)
{
debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir,
c->btree_debug, &cached_btree_nodes_ops);
+#ifdef CONFIG_BCACHEFS_DEBUG_TRANSACTIONS
+ debugfs_create_file("btree_transactions", 0400, c->fs_debug_dir,
+ c->btree_debug, &btree_transactions_ops);
+#endif
+
debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
c->btree_debug, &journal_pins_ops);