/* XXX: bch_fs refcounting */
struct dump_iter {
- struct bpos from;
- struct bch_fs *c;
+ struct bch_fs *c;
enum btree_id id;
+ struct bpos from;
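+	/* iteration cursor: btree cache hash bucket, or journal seq */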
+ u64 iter;
struct printbuf buf;
file->private_data = i;
i->from = POS_MIN;
+ i->iter = 0;
i->c = container_of(bd, struct bch_fs, btree_debug[bd->id]);
i->id = bd->id;
i->buf = PRINTBUF;
.read = bch2_read_bfloat_failed,
};
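+/* print one cached btree node's in-memory state: key, flags, journal pins */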
+static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
+ struct btree *b)
+{
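+	/* pr_tab() advances to this column, aligning the value column */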
+ out->tabstops[0] = 32;
+
+	pr_buf(out, "%px btree=%s l=%u",
+ b,
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level);
+ pr_newline(out);
+
+ pr_indent_push(out, 2);
+
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+ pr_newline(out);
+
+	pr_buf(out, "flags:");
+ pr_tab(out);
+ bch2_flags_to_text(out, bch2_btree_node_flags, b->flags);
+ pr_newline(out);
+
+ pr_buf(out, "written:");
+ pr_tab(out);
+ pr_buf(out, "%u", b->written);
+ pr_newline(out);
+
+ pr_buf(out, "writes blocked:");
+ pr_tab(out);
+ pr_buf(out, "%u", !list_empty_careful(&b->write_blocked));
+ pr_newline(out);
+
+ pr_buf(out, "will make reachable:");
+ pr_tab(out);
+ pr_buf(out, "%lx", b->will_make_reachable);
+ pr_newline(out);
+
+ pr_buf(out, "journal pin %px:", &b->writes[0].journal);
+ pr_tab(out);
+ pr_buf(out, "%llu", b->writes[0].journal.seq);
+ pr_newline(out);
+
+ pr_buf(out, "journal pin %px:", &b->writes[1].journal);
+ pr_tab(out);
+ pr_buf(out, "%llu", b->writes[1].journal.seq);
+ pr_newline(out);
+
+ pr_indent_pop(out, 2);
+}
+
+static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ bool done = false;
+ int err;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ do {
+ struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct btree *b;
+
+ err = flush_buf(i);
+ if (err)
+ return err;
+
+ if (!i->size)
+ break;
+
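+		/*
+		 * We can't flush the printbuf to userspace while holding
+		 * rcu_read_lock(), and buf.atomic makes printbuf reallocation
+		 * nonblocking - failures show up as buf.allocation_failure:
+		 */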
+ rcu_read_lock();
+ i->buf.atomic++;
+ tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
+ &c->btree_cache.table);
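+		/* print one hash table bucket's worth of nodes per pass */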
+ if (i->iter < tbl->size) {
+ rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
+ bch2_cached_btree_node_to_text(&i->buf, c, b);
+			i->iter++;
+ } else {
+ done = true;
+ }
+ --i->buf.atomic;
+ rcu_read_unlock();
+ } while (!done);
+
+ if (i->buf.allocation_failure)
+ return -ENOMEM;
+
+ return i->ret;
+}
+
+static const struct file_operations cached_btree_nodes_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_cached_btree_nodes_read,
+};
+
+static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ bool done = false;
+ int err;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ do {
+ err = flush_buf(i);
+ if (err)
+ return err;
+
+ if (!i->size)
+ break;
+
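+		/* one journal sequence number's pin lists per pass */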
+ done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
+ i->iter++;
+ } while (!done);
+
+ if (i->buf.allocation_failure)
+ return -ENOMEM;
+
+ return i->ret;
+}
+
+static const struct file_operations journal_pins_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_journal_pins_read,
+};
+
void bch2_fs_debug_exit(struct bch_fs *c)
{
- if (!IS_ERR_OR_NULL(c->debug))
- debugfs_remove_recursive(c->debug);
+ if (!IS_ERR_OR_NULL(c->fs_debug_dir))
+ debugfs_remove_recursive(c->fs_debug_dir);
}
void bch2_fs_debug_init(struct bch_fs *c)
return;
snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
- c->debug = debugfs_create_dir(name, bch_debug);
- if (IS_ERR_OR_NULL(c->debug))
+ c->fs_debug_dir = debugfs_create_dir(name, bch_debug);
+ if (IS_ERR_OR_NULL(c->fs_debug_dir))
+ return;
+
+ debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir,
+ c->btree_debug, &cached_btree_nodes_ops);
+
+ debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
+ c->btree_debug, &journal_pins_ops);
+
+ c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir);
+ if (IS_ERR_OR_NULL(c->btree_debug_dir))
return;
for (bd = c->btree_debug;
bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
bd++) {
bd->id = bd - c->btree_debug;
- bd->btree = debugfs_create_file(bch2_btree_ids[bd->id],
- 0400, c->debug, bd,
- &btree_debug_ops);
+ debugfs_create_file(bch2_btree_ids[bd->id],
+ 0400, c->btree_debug_dir, bd,
+ &btree_debug_ops);
snprintf(name, sizeof(name), "%s-formats",
bch2_btree_ids[bd->id]);
- bd->btree_format = debugfs_create_file(name, 0400, c->debug, bd,
- &btree_format_debug_ops);
+ debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
+ &btree_format_debug_ops);
snprintf(name, sizeof(name), "%s-bfloat-failed",
bch2_btree_ids[bd->id]);
- bd->failed = debugfs_create_file(name, 0400, c->debug, bd,
- &bfloat_failed_debug_ops);
+ debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
+ &bfloat_failed_debug_ops);
}
}
spin_unlock(&j->lock);
}
-void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
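+/*
+ * bch2_journal_seq_pins_to_text(): print the pin lists for a single journal
+ * sequence number; @seq is clamped to the front of the pin FIFO. Returns
+ * true once *seq is past the newest pinned entry, i.e. iteration is done.
+ */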
+bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *pin;
- u64 i;
spin_lock(&j->lock);
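+	/* skip forward to the oldest entry still in the pin FIFO */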
+ *seq = max(*seq, j->pin.front);
+
+ if (*seq >= j->pin.back) {
+ spin_unlock(&j->lock);
+ return true;
+ }
+
out->atomic++;
- fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
- pr_buf(out, "%llu: count %u\n",
- i, atomic_read(&pin_list->count));
+ pin_list = journal_seq_pin(j, *seq);
- list_for_each_entry(pin, &pin_list->key_cache_list, list)
- pr_buf(out, "\t%px %ps\n",
- pin, pin->flush);
+ pr_buf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
+ pr_newline(out);
+ pr_indent_push(out, 2);
- list_for_each_entry(pin, &pin_list->list, list)
- pr_buf(out, "\t%px %ps\n",
- pin, pin->flush);
+ list_for_each_entry(pin, &pin_list->list, list) {
+ pr_buf(out, "\t%px %ps", pin, pin->flush);
+ pr_newline(out);
+ }
+
+ list_for_each_entry(pin, &pin_list->key_cache_list, list) {
+ pr_buf(out, "\t%px %ps", pin, pin->flush);
+ pr_newline(out);
+ }
- if (!list_empty(&pin_list->flushed))
- pr_buf(out, "flushed:\n");
+ if (!list_empty(&pin_list->flushed)) {
+ pr_buf(out, "flushed:");
+ pr_newline(out);
+ }
- list_for_each_entry(pin, &pin_list->flushed, list)
- pr_buf(out, "\t%px %ps\n",
- pin, pin->flush);
+ list_for_each_entry(pin, &pin_list->flushed, list) {
+ pr_buf(out, "\t%px %ps", pin, pin->flush);
+ pr_newline(out);
}
+ pr_indent_pop(out, 2);
+
--out->atomic;
spin_unlock(&j->lock);
+
+ return false;
+}
+
+void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
+{
+ u64 seq = 0;
+
+ while (!bch2_journal_seq_pins_to_text(out, j, &seq))
+ seq++;
}
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
-read_attribute(journal_pins);
read_attribute(btree_updates);
-read_attribute(dirty_btree_nodes);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(btree_transactions);
if (attr == &sysfs_journal_debug)
bch2_journal_debug_to_text(out, &c->journal);
- if (attr == &sysfs_journal_pins)
- bch2_journal_pins_to_text(out, &c->journal);
-
if (attr == &sysfs_btree_updates)
bch2_btree_updates_to_text(out, c);
- if (attr == &sysfs_dirty_btree_nodes)
- bch2_dirty_btree_nodes_to_text(out, c);
-
if (attr == &sysfs_btree_cache)
bch2_btree_cache_to_text(out, c);
struct attribute *bch2_fs_internal_files[] = {
&sysfs_journal_debug,
- &sysfs_journal_pins,
&sysfs_btree_updates,
- &sysfs_dirty_btree_nodes,
&sysfs_btree_cache,
&sysfs_btree_key_cache,
&sysfs_btree_transactions,