g->_mark.cached_sectors = get_alloc_field(a, &d, idx++);
}
-static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g)
+static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g,
+ struct bucket_mark m)
{
- struct bucket_mark m = READ_ONCE(g->mark);
unsigned idx = 0;
void *d = a->v.data;
__BKEY_PADDED(k, 8) alloc_key;
#endif
struct bkey_i_alloc *a = bkey_alloc_init(&alloc_key.k);
+ struct bucket *g;
+ struct bucket_mark m;
int ret;
BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
a->k.p = POS(ca->dev_idx, b);
percpu_down_read(&c->usage_lock);
- __alloc_write_key(a, bucket(ca, b));
+ g = bucket(ca, b);
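+ /* Clear the dirty bit; bucket_cmpxchg() returns the old mark: */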
+ m = bucket_cmpxchg(g, m, m.dirty = false);
+
+ __alloc_write_key(a, g, m);
percpu_up_read(&c->usage_lock);
bch2_btree_iter_cond_resched(iter);
for_each_rw_member(ca, c, i) {
struct btree_iter iter;
- unsigned long bucket;
+ struct bucket_array *buckets;
+ size_t b;
bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
down_read(&ca->bucket_lock);
- for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
- ret = __bch2_alloc_write_key(c, ca, bucket,
- &iter, NULL, 0);
+ buckets = bucket_array(ca);
+
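+ /* Only write out alloc keys for buckets whose marks are dirty: */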
+ for (b = buckets->first_bucket;
+ b < buckets->nbuckets;
+ b++) {
+ if (!buckets->b[b].mark.dirty)
+ continue;
+
+ ret = __bch2_alloc_write_key(c, ca, b, &iter, NULL, 0);
if (ret)
break;
-
- clear_bit(bucket, ca->buckets_dirty);
}
up_read(&ca->bucket_lock);
bch2_btree_iter_unlock(&iter);
if (!is_available_bucket(mark))
return false;
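+ /* Buckets flagged in the nouse bitmap can never be invalidated: */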
+ if (ca->buckets_nouse &&
+ test_bit(bucket, ca->buckets_nouse))
+ return false;
+
gc_gen = bucket_gc_gen(ca, bucket);
if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
m = READ_ONCE(buckets->b[bu].mark);
if (!buckets->b[bu].gen_valid ||
+ test_bit(bu, ca->buckets_nouse) ||
!is_available_bucket(m) ||
m.cached_sectors)
continue;
bch2_invalidate_one_bucket(c, ca, bu, &journal_seq);
fifo_push(&ca->free[RESERVE_BTREE], bu);
- set_bit(bu, ca->buckets_dirty);
+ bucket_set_dirty(ca, bu);
}
}
* Or rcu_read_lock(), but only for ptr_stale():
*/
struct bucket_array __rcu *buckets[2];
- unsigned long *buckets_dirty;
+ unsigned long *buckets_nouse;
unsigned long *buckets_written;
/* most out of date gen in the btree */
u8 *oldest_gens;
k.k->type, ptr->gen)) {
g->_mark.gen = ptr->gen;
g->gen_valid = 1;
- set_bit(b, ca->buckets_dirty);
+ bucket_set_dirty(ca, b);
}
if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
k.k->type, ptr->gen, g->mark.gen)) {
g->_mark.gen = ptr->gen;
g->gen_valid = 1;
- set_bit(b, ca->buckets_dirty);
+ bucket_set_dirty(ca, b);
set_bit(BCH_FS_FIXED_GENS, &c->flags);
}
}
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
struct bucket_array *buckets = NULL, *old_buckets = NULL;
- unsigned long *buckets_dirty = NULL;
+ unsigned long *buckets_nouse = NULL;
unsigned long *buckets_written = NULL;
u8 *oldest_gens = NULL;
alloc_fifo free[RESERVE_NR];
GFP_KERNEL|__GFP_ZERO)) ||
!(oldest_gens = kvpmalloc(nbuckets * sizeof(u8),
GFP_KERNEL|__GFP_ZERO)) ||
- !(buckets_dirty = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+ !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
sizeof(unsigned long),
GFP_KERNEL|__GFP_ZERO)) ||
!(buckets_written = kvpmalloc(BITS_TO_LONGS(nbuckets) *
memcpy(oldest_gens,
ca->oldest_gens,
n * sizeof(u8));
- memcpy(buckets_dirty,
- ca->buckets_dirty,
+ memcpy(buckets_nouse,
+ ca->buckets_nouse,
BITS_TO_LONGS(n) * sizeof(unsigned long));
memcpy(buckets_written,
ca->buckets_written,
buckets = old_buckets;
swap(ca->oldest_gens, oldest_gens);
- swap(ca->buckets_dirty, buckets_dirty);
+ swap(ca->buckets_nouse, buckets_nouse);
swap(ca->buckets_written, buckets_written);
if (resize)
free_fifo(&free_inc);
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&free[i]);
- kvpfree(buckets_dirty,
+ kvpfree(buckets_nouse,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
kvpfree(buckets_written,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
free_fifo(&ca->free[i]);
kvpfree(ca->buckets_written,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
- kvpfree(ca->buckets_dirty,
+ kvpfree(ca->buckets_nouse,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
return __bucket(ca, b, false);
}
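+/* Flag that a bucket's alloc key needs to be rewritten: */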
+static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
+{
+ struct bucket *g;
+ struct bucket_mark m;
+
+ rcu_read_lock();
+ g = bucket(ca, b);
+ bucket_cmpxchg(g, m, m.dirty = true);
+ rcu_read_unlock();
+}
+
static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
size_t b, int rw)
{
{
return (!mark.owned_by_allocator &&
!mark.dirty_sectors &&
- !mark.stripe &&
- !mark.nouse);
+ !mark.stripe);
}
static inline bool bucket_needs_journal_commit(struct bucket_mark m,
u8 gen;
u8 data_type:3,
owned_by_allocator:1,
- nouse:1,
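+ /* alloc key needs rewriting: */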
+ dirty:1,
journal_seq_valid:1,
stripe:1;
u16 dirty_sectors;