/*
 * Initialize a btree transaction object for use against filesystem @c.
 *
 * Zeroes the whole struct, then records the owning filesystem; the
 * expected_nr_iters / expected_mem_bytes hints are accepted but not used
 * in the visible body (presumably consumed by later preallocation logic
 * elsewhere — TODO confirm against the full file).
 *
 * NOTE(review): the `+ __acquires(...)` line below is a diff-hunk marker
 * from the surrounding patch — this span is part of a unified diff, not
 * plain C; the sparse annotation pairs with the __releases() on
 * bch2_trans_exit further down.
 */
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
unsigned expected_nr_iters,
size_t expected_mem_bytes)
+ __acquires(&c->btree_trans_barrier)
{
/* Start from a fully zeroed transaction, then link it to its fs. */
memset(trans, 0, sizeof(*trans));
trans->c = c;
}
int bch2_trans_exit(struct btree_trans *trans)
+ __releases(&c->btree_trans_barrier)
{
struct bch_fs *c = trans->c;
unsigned i;
for (i = 0; i < j->nr_ptrs; i++) {
- struct bch_dev *ca = c->devs[j->ptrs[i].dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
u64 offset;
div64_u64_rem(j->ptrs[i].offset, ca->mi.bucket_size, &offset);
test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)) {
w->noflush = true;
SET_JSET_NO_FLUSH(jset, true);
- jset->last_seq = w->last_seq = 0;
+ jset->last_seq = 0;
+ w->last_seq = 0;
j->nr_noflush_writes++;
} else {
bl->start[nr].start = cpu_to_le64(start);
bl->start[nr].end = cpu_to_le64(end);
out_write_sb:
- c->disk_sb.sb->features[0] |=
- 1ULL << BCH_FEATURE_journal_seq_blacklist_v3;
+ c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);
ret = bch2_write_super(c);
out:
BUG_ON(new_nr && !bl);
if (!new_nr)
- c->disk_sb.sb->features[0] &=
- ~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);
+ c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));
bch2_write_super(c);
}
rewrite_old_nodes_pred, c, stats);
if (!ret) {
mutex_lock(&c->sb_lock);
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_extents_above_btree_updates_done;
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_bformat_overflow_done;
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
c->disk_sb.sb->version_min = c->disk_sb.sb->version;
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
case BCH_JSET_ENTRY_dev_usage: {
struct jset_entry_dev_usage *u =
container_of(entry, struct jset_entry_dev_usage, entry);
- struct bch_dev *ca = bch_dev_bkey_exists(c, u->dev);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
sizeof(struct jset_entry_dev_usage_type);
struct jset_entry_clock *clock =
container_of(entry, struct jset_entry_clock, entry);
- atomic64_set(&c->io_clock[clock->rw].now, clock->time);
+ atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
}
}
mutex_lock(&c->sb_lock);
if (c->opts.version_upgrade) {
- c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
- c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
+ c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
+ c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
write_sb = true;
}
if (!test_bit(BCH_FS_ERROR, &c->flags)) {
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_alloc_info;
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
write_sb = true;
}
bch_notice(c, "initializing new filesystem");
mutex_lock(&c->sb_lock);
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_extents_above_btree_updates_done;
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_bformat_overflow_done;
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
if (c->opts.version_upgrade) {
- c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
- c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
+ c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
+ c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
bch2_write_super(c);
}
mutex_lock(&c->sb_lock);
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
- c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALWAYS;
+ c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);
ret = bch2_write_super(c);
mutex_unlock(&c->sb_lock);
* The u64s field counts from the start of data, ignoring the shared
* fields.
*/
- entry->u64s = u64s - 1;
+ entry->u64s = cpu_to_le16(u64s - 1);
*end = vstruct_next(*end);
return entry;
clock->entry.type = BCH_JSET_ENTRY_clock;
clock->rw = i;
- clock->time = atomic64_read(&c->io_clock[i].now);
+ clock->time = cpu_to_le64(atomic64_read(&c->io_clock[i].now));
}
}
SET_BCH_SB_CLEAN(c->disk_sb.sb, true);
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_alloc_info;
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_alloc_metadata;
- c->disk_sb.sb->features[0] &= ~(1ULL << BCH_FEATURE_extents_above_btree_updates);
- c->disk_sb.sb->features[0] &= ~(1ULL << BCH_FEATURE_btree_updates_journalled);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata);
+ c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates));
+ c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled));
u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;
/* Device add/removal: */
-int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
+static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
struct btree_trans trans;
size_t i;
return 0;
}
-void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
+static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
pr_buf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
bch2_bpos_to_text(out, c->gc_gens_pos);