if (a.k->p.offset >= ca->mi.nbuckets)
return;
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
__alloc_read_key(bucket(ca, a.k->p.offset), a.v);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
}
int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
a->k.p = POS(ca->dev_idx, b);
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
g = bucket(ca, b);
m = bucket_cmpxchg(g, m, m.dirty = false);
__alloc_write_key(a, g, m);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
bch2_btree_iter_cond_resched(iter);
{
struct bucket_mark m;
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
bch2_invalidate_bucket(c, ca, bucket, &m);
bucket_io_clock_reset(c, ca, bucket, READ);
bucket_io_clock_reset(c, ca, bucket, WRITE);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
if (m.journal_seq_valid) {
u64 journal_seq = atomic64_read(&c->journal.seq);
struct bucket_mark m;
down_read(&ca->bucket_lock);
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
buckets = bucket_array(ca);
if (fifo_full(&ca->free[RESERVE_BTREE]))
break;
}
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
up_read(&ca->bucket_lock);
}
return;
}
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&ob->lock);
bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
ob->valid = false;
spin_unlock(&ob->lock);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
ob->freelist = c->open_buckets_freelist;
open_bucket_for_each(c, &h->blocks, ob, i)
__clear_bit(ob->ptr.dev, devs.d);
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
rcu_read_lock();
if (h->parity.nr < h->redundancy) {
}
rcu_read_unlock();
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
return bch2_ec_stripe_new_alloc(c, h);
err:
rcu_read_unlock();
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
return -1;
}
if (*nr_effective >= nr_replicas)
return 0;
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
rcu_read_lock();
retry_blocking:
}
rcu_read_unlock();
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
return ret;
}
/*
* Buckets:
- * Per-bucket arrays are protected by c->usage_lock, bucket_lock and
+ * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
 * gc_lock (for device resize); holding any one of them is sufficient for
 * read access. rcu_read_lock() also suffices, but only for ptr_stale().
*/
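The comment above describes the read side of the protocol; the write side (device resize, later in this patch) takes all of these locks for write. As a minimal illustrative sketch, not part of the patch itself, a reader of the per-bucket array only needs one of the locks. The helper name example_bucket_gen() is hypothetical; bucket() and the mark.gen field appear elsewhere in this patch.

/* sketch: read one bucket's generation while holding only c->mark_lock */
static u8 example_bucket_gen(struct bch_fs *c, struct bch_dev *ca, size_t b)
{
	u8 gen;

	percpu_down_read(&c->mark_lock);
	gen = bucket(ca, b)->mark.gen;
	percpu_up_read(&c->mark_lock);

	return gen;
}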
struct bch_fs_usage __percpu *usage[2];
- struct percpu_rw_semaphore usage_lock;
+ struct percpu_rw_semaphore mark_lock;
/*
* When we invalidate buckets, we use both the priority and the amount
*/
if (c) {
lockdep_assert_held(&c->sb_lock);
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
} else {
preempt_disable();
}
}
if (c) {
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
} else {
preempt_enable();
}
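The pattern just above (and again in the journal code later in this patch) takes mark_lock for read when a filesystem instance is present and falls back to preempt_disable() otherwise. As a hedged sketch, the pair could be expressed as helpers; example_usage_lock()/example_usage_unlock() are hypothetical names, not functions from the patch.

/* sketch: the take-mark_lock-or-disable-preemption pattern as helpers */
static void example_usage_lock(struct bch_fs *c)
{
	if (c)
		percpu_down_read(&c->mark_lock);
	else
		preempt_disable();
}

static void example_usage_unlock(struct bch_fs *c)
{
	if (c)
		percpu_up_read(&c->mark_lock);
	else
		preempt_enable();
}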
size_t i, j, iter;
unsigned ci;
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
gc_pos_set(c, gc_pos_alloc(c, NULL));
spin_unlock(&ob->lock);
}
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
}
static void bch2_gc_free(struct bch_fs *c)
#define copy_fs_field(_f, _msg, ...) \
copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
- percpu_down_write(&c->usage_lock);
+ percpu_down_write(&c->mark_lock);
if (initial) {
bch2_gc_done_nocheck(c);
preempt_enable();
}
out:
- percpu_up_write(&c->usage_lock);
+ percpu_up_write(&c->mark_lock);
#undef copy_fs_field
#undef copy_dev_field
}
}
- percpu_down_write(&c->usage_lock);
+ percpu_down_write(&c->mark_lock);
for_each_member_device(ca, c, i) {
struct bucket_array *dst = __bucket_array(ca, 1);
dst->b[b]._mark.gen = src->b[b].mark.gen;
}
- percpu_up_write(&c->usage_lock);
+ percpu_up_write(&c->mark_lock);
return bch2_ec_mem_alloc(c, true);
}
__bch2_btree_set_root_inmem(c, b);
mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
true, 0,
bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
gc_pos_btree_root(b->btree_id));
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
mutex_unlock(&c->btree_interior_update_lock);
}
BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
true, 0,
bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
gc_pos_btree_node(b));
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
mutex_unlock(&c->btree_interior_update_lock);
bch2_btree_bset_insert_key(iter, b, node_iter, insert);
bch2_btree_node_lock_write(b, iter);
mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
true, 0,
bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
gc_pos_btree_root(b->btree_id));
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
mutex_unlock(&c->btree_interior_update_lock);
if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
s64 added = sum.data + sum.reserved;
s64 should_not_have_added;
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
/*
* Not allowed to reduce sectors_available except by getting a
{
struct bch_dev_usage *dev_usage;
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
bch2_fs_inconsistent_on(old.data_type && new.data_type &&
old.data_type != new.data_type, c,
struct bucket_array *buckets;
struct bucket *g;
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
fs_usage = this_cpu_ptr(c->usage[0]);
buckets = bucket_array(ca);
for_each_bucket(g, buckets)
if (g->mark.data_type)
bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
}
#define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr) \
void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, struct bucket_mark *old)
{
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
__bch2_invalidate_bucket(c, ca, b, old, false);
size_t b, bool owned_by_allocator,
struct gc_pos pos, unsigned flags)
{
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
if (!(flags & BCH_BUCKET_MARK_GC))
__bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
preempt_disable();
if (likely(c)) {
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
if (!(flags & BCH_BUCKET_MARK_GC))
__bch2_mark_metadata_bucket(c, ca, b, type, sectors,
{
int ret;
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
ret = bch2_mark_key_locked(c, k, inserting, sectors,
pos, stats, journal_seq, flags);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
return ret;
}
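The wrapper above illustrates the convention used throughout the patch: *_locked variants assert that the caller already holds mark_lock (via percpu_rwsem_assert_held()), and the unsuffixed wrapper takes the lock itself. A minimal sketch of a hypothetical helper pair following that convention; example_update_locked()/example_update() are not real functions.

/* sketch: hypothetical helper pair following the _locked convention */
static void example_update_locked(struct bch_fs *c)
{
	percpu_rwsem_assert_held(&c->mark_lock);
	/* ... update usage counters here ... */
}

static void example_update(struct bch_fs *c)
{
	percpu_down_read(&c->mark_lock);
	example_update_locked(c);
	percpu_up_read(&c->mark_lock);
}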
if (!btree_node_type_needs_gc(iter->btree_id))
return;
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
}
/* Disk reservations: */
void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
{
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
this_cpu_sub(c->usage[0]->online_reserved,
res->sectors);
bch2_fs_stats_verify(c);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
res->sectors = 0;
}
s64 sectors_available;
int ret;
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
preempt_disable();
stats = this_cpu_ptr(c->usage[0]);
if (get < sectors) {
preempt_enable();
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
goto recalculate;
}
} while ((v = atomic64_cmpxchg(&c->sectors_available,
bch2_disk_reservations_verify(c, flags);
bch2_fs_stats_verify(c);
preempt_enable();
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
return 0;
recalculate:
return -EINTR;
}
- percpu_down_write(&c->usage_lock);
+ percpu_down_write(&c->mark_lock);
sectors_available = bch2_recalc_sectors_available(c);
if (sectors <= sectors_available ||
}
bch2_fs_stats_verify(c);
- percpu_up_write(&c->usage_lock);
+ percpu_up_write(&c->mark_lock);
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
up_read(&c->gc_lock);
if (resize) {
down_write(&c->gc_lock);
down_write(&ca->bucket_lock);
- percpu_down_write(&c->usage_lock);
+ percpu_down_write(&c->mark_lock);
}
old_buckets = bucket_array(ca);
swap(ca->buckets_written, buckets_written);
if (resize)
- percpu_up_write(&c->usage_lock);
+ percpu_up_write(&c->mark_lock);
spin_lock(&c->freelist_lock);
for (i = 0; i < RESERVE_NR; i++) {
{
return rcu_dereference_check(ca->buckets[gc],
!ca->fs ||
- percpu_rwsem_is_held(&ca->fs->usage_lock) ||
+ percpu_rwsem_is_held(&ca->fs->mark_lock) ||
lockdep_is_held(&ca->fs->gc_lock) ||
lockdep_is_held(&ca->bucket_lock));
}
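The accessor above is where lockdep enforces the rule from the Buckets comment: any of mark_lock, gc_lock or bucket_lock justifies dereferencing the bucket array, and rcu_dereference_check() additionally accepts an RCU read-side critical section, which is enough only for generation checks such as ptr_stale(). A hedged sketch of the RCU-only case follows; example_ptr_is_stale() is a hypothetical name, and ptr_stale() is the helper referenced in the comment earlier in the patch.

/* sketch: RCU-only access, sufficient just for a stale-pointer check */
static bool example_ptr_is_stale(struct bch_dev *ca,
				 const struct bch_extent_ptr *ptr)
{
	bool stale;

	rcu_read_lock();
	stale = ptr_stale(ca, ptr);
	rcu_read_unlock();

	return stale;
}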
bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
if (likely(!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT)))) {
bio_inc_remaining(&orig->bio);
}
if (c) {
- percpu_down_read(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->journal.lock);
} else {
preempt_disable();
if (c) {
spin_unlock(&c->journal.lock);
- percpu_up_read(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
} else {
preempt_enable();
}
bch2_io_clock_exit(&c->io_clock[WRITE]);
bch2_io_clock_exit(&c->io_clock[READ]);
bch2_fs_compress_exit(c);
- percpu_free_rwsem(&c->usage_lock);
+ percpu_free_rwsem(&c->mark_lock);
free_percpu(c->usage[0]);
mempool_exit(&c->btree_iters_pool);
mempool_exit(&c->btree_bounce_pool);
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
!(c->usage[0] = alloc_percpu(struct bch_fs_usage)) ||
- percpu_init_rwsem(&c->usage_lock) ||
+ percpu_init_rwsem(&c->mark_lock) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||
mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,