		if (!btree_node_may_write(b))
			return;

+		if (old & (1 << BTREE_NODE_never_write))
+			return;
+
		if (old & (1 << BTREE_NODE_write_in_flight)) {
			btree_node_wait_on_io(b);
			continue;
		}

		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

+	BUG_ON(b->written && !seq);
+
	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;
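
(For context: the two early returns added above live in the flags-update
loop at the top of __bch2_btree_node_write(). A rough sketch of that
loop, reconstructed from the surrounding code; the set of flag bits
updated in new is simplified here:)

	do {
		old = new = READ_ONCE(b->flags);

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if (!btree_node_may_write(b))
			return;

		/* new in this patch: unjournalled contents must never hit disk */
		if (old & (1 << BTREE_NODE_never_write))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight)) {
			btree_node_wait_on_io(b);
			continue;
		}

		new &= ~(1 << BTREE_NODE_dirty);
		new |=  (1 << BTREE_NODE_write_in_flight);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);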
	BTREE_NODE_fake,
	BTREE_NODE_old_extent_overwrite,
	BTREE_NODE_need_rewrite,
+	BTREE_NODE_never_write,
};

BTREE_FLAG(read_in_flight);
BTREE_FLAG(fake);
BTREE_FLAG(old_extent_overwrite);
BTREE_FLAG(need_rewrite);
+BTREE_FLAG(never_write);
static inline struct btree_write *btree_current_write(struct btree *b)
{
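
(BTREE_FLAG(never_write) generates the accessors the rest of the patch
relies on: btree_node_never_write(), set_btree_node_never_write() and
clear_btree_node_never_write(). A minimal sketch of the macro, assuming
it follows the usual test/set/clear pattern on b->flags:)

	#define BTREE_FLAG(flag)					\
	static inline bool btree_node_ ## flag(struct btree *b)	\
	{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }	\
									\
	static inline void set_btree_node_ ## flag(struct btree *b)	\
	{	set_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
	static inline void clear_btree_node_ ## flag(struct btree *b)	\
	{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }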
	list_del(&as->write_blocked_list);

-	if (!ret && as->b == b) {
+	/*
+	 * Node might have been freed, recheck under
+	 * btree_interior_update_lock:
+	 */
+	if (as->b == b) {
		struct bset *i = btree_bset_last(b);

		BUG_ON(!b->c.level);
		BUG_ON(!btree_node_dirty(b));

-		i->journal_seq = cpu_to_le64(
-			max(journal_seq,
-			    le64_to_cpu(i->journal_seq)));
-
-		bch2_btree_add_journal_pin(c, b, journal_seq);
+		if (!ret) {
+			i->journal_seq = cpu_to_le64(
+				max(journal_seq,
+				    le64_to_cpu(i->journal_seq)));
+
+			bch2_btree_add_journal_pin(c, b, journal_seq);
+		} else {
+			/*
+			 * If we didn't get a journal sequence number we
+			 * can't write this btree node, because recovery
+			 * won't know to ignore this write:
+			 */
+			set_btree_node_never_write(b);
+		}
	}
	mutex_unlock(&c->btree_interior_update_lock);
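
(Here ret is the error from journalling the interior update, i.e. the
case where, per the comment, we didn't get a journal sequence number: on
failure the node is latched with set_btree_node_never_write() under
btree_interior_update_lock, and the check added in the first hunk makes
every later write attempt bail out before issuing I/O. The flag is never
cleared, since an update that was never journalled must not reach disk.
For reference, btree_node_may_write(), which gates the write just before
the new check, looks roughly like this; reconstructed, so the field
names are assumptions:)

	static inline bool btree_node_may_write(struct btree *b)
	{
		return list_empty_careful(&b->write_blocked) &&
			(b->written || !b->will_make_reachable);
	}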