return NULL;
bkey_btree_ptr_init(&b->key);
- bch2_btree_lock_init(&b->c);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
b->byte_order = ilog2(btree_bytes(c));
return NULL;
}
+ bch2_btree_lock_init(&b->c, 0);
+
bc->used++;
list_add(&b->list, &bc->freeable);
return b;
while (!list_empty(&bc->freed_nonpcpu)) {
b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
list_del(&b->list);
- six_lock_pcpu_free(&b->c.lock);
+ six_lock_exit(&b->c.lock);
kfree(b);
}
mutex_lock(&bc->lock);
}
- if (pcpu_read_locks)
- six_lock_pcpu_alloc(&b->c.lock);
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
BUG_ON(!six_trylock_intent(&b->c.lock));
BUG_ON(!six_trylock_write(&b->c.lock));
return NULL;
init:
INIT_LIST_HEAD(&ck->list);
- bch2_btree_lock_init(&ck->c);
- if (pcpu_readers)
- six_lock_pcpu_alloc(&ck->c.lock);
+ bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
ck->c.cached = true;
BUG_ON(!six_trylock_intent(&ck->c.lock));
}
mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
- } else {
- if (path->btree_id == BTREE_ID_subvolumes)
- six_lock_pcpu_alloc(&ck->c.lock);
}
ck->c.level = 0;
break;
list_del(&ck->list);
- six_lock_pcpu_free(&ck->c.lock);
+ six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
atomic_long_dec(&bc->nr_freed);
scanned++;
break;
list_del(&ck->list);
- six_lock_pcpu_free(&ck->c.lock);
+ six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
atomic_long_dec(&bc->nr_freed);
scanned++;
list_del(&ck->list);
kfree(ck->k);
- six_lock_pcpu_free(&ck->c.lock);
+ six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
}
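
For illustration (not part of this patch): the conversion pattern at the call sites above. The old code initialized a lock and then switched it to per-cpu reader counts in a second step, while the new code makes that choice once, at init time. A minimal before/after sketch using the same identifiers:

	/* before: two-step init, per-cpu mode bolted on afterwards */
	bch2_btree_lock_init(&ck->c);
	if (pcpu_readers)
		six_lock_pcpu_alloc(&ck->c.lock);

	/* after: one call, per-cpu mode selected by the init flag */
	bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
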
static struct lock_class_key bch2_btree_node_lock_key;
-void bch2_btree_lock_init(struct btree_bkey_cached_common *b)
+void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
+			  enum six_lock_init_flags flags)
{
-	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key);
+	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
	lockdep_set_novalidate_class(&b->lock);
}
#include "btree_iter.h"
#include "six.h"
-void bch2_btree_lock_init(struct btree_bkey_cached_common *);
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
-void six_lock_pcpu_free(struct six_lock *lock)
-{
-	BUG_ON(lock->readers && pcpu_read_count(lock));
-	BUG_ON(lock->state.read_lock);
-
-	free_percpu(lock->readers);
-	lock->readers = NULL;
-}
-EXPORT_SYMBOL_GPL(six_lock_pcpu_free);
-
-void six_lock_pcpu_alloc(struct six_lock *lock)
-{
-#ifdef __KERNEL__
-	if (!lock->readers)
-		lock->readers = alloc_percpu(unsigned);
-#endif
-}
-EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);
-
/*
 * Returns lock held counts, for both read and intent
 */
		atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
}
EXPORT_SYMBOL_GPL(six_lock_readers_add);
+
+void six_lock_exit(struct six_lock *lock)
+{
+	WARN_ON(lock->readers && pcpu_read_count(lock));
+	WARN_ON(lock->state.read_lock);
+
+	free_percpu(lock->readers);
+	lock->readers = NULL;
+}
+EXPORT_SYMBOL_GPL(six_lock_exit);
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+		     struct lock_class_key *key, enum six_lock_init_flags flags)
+{
+	atomic64_set(&lock->state.counter, 0);
+	raw_spin_lock_init(&lock->wait_lock);
+	INIT_LIST_HEAD(&lock->wait_list);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+
+	if (flags & SIX_LOCK_INIT_PCPU) {
+		/*
+		 * We don't return an error here on memory allocation failure
+		 * since percpu is an optimization, and locks will work with the
+		 * same semantics in non-percpu mode: callers can check for
+		 * failure if they wish by checking lock->readers, but generally
+		 * will not want to treat it as an error.
+		 */
+		lock->readers = alloc_percpu(unsigned);
+	}
+}
+EXPORT_SYMBOL_GPL(__six_lock_init);
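
If a caller wants to know whether per-cpu mode actually took effect (the alloc_percpu() above is best-effort), the check suggested in the comment looks roughly like this. Hypothetical sketch, not part of the patch; it uses the six_lock_init() wrapper declared in the header changes below:

	struct six_lock lock;

	six_lock_init(&lock, SIX_LOCK_INIT_PCPU);
	if (!lock.readers)
		pr_debug("six_lock: percpu readers unavailable, using atomic reader counts\n");
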
typedef int (*six_lock_should_sleep_fn)(struct six_lock *lock, void *);
-static __always_inline void __six_lock_init(struct six_lock *lock,
-					    const char *name,
-					    struct lock_class_key *key)
-{
-	atomic64_set(&lock->state.counter, 0);
-	raw_spin_lock_init(&lock->wait_lock);
-	INIT_LIST_HEAD(&lock->wait_list);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-}
+void six_lock_exit(struct six_lock *lock);
-#define six_lock_init(lock)						\
+enum six_lock_init_flags {
+	SIX_LOCK_INIT_PCPU	= 1U << 0,
+};
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+		     struct lock_class_key *key, enum six_lock_init_flags flags);
+
+#define six_lock_init(lock, flags)					\
 do {									\
 	static struct lock_class_key __key;				\
 									\
-	__six_lock_init((lock), #lock, &__key);				\
+	__six_lock_init((lock), #lock, &__key, flags);			\
 } while (0)
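
A usage sketch of the new interface (illustration only; my_obj and its helpers are made-up names): per-cpu reader counts are requested with a flag at init time, and six_lock_exit() is now required on teardown to release them.

	#include "six.h"

	struct my_obj {
		struct six_lock	lock;
	};

	static void my_obj_init(struct my_obj *o, bool pcpu)
	{
		/* SIX_LOCK_INIT_PCPU is best-effort; init itself cannot fail */
		six_lock_init(&o->lock, pcpu ? SIX_LOCK_INIT_PCPU : 0);
	}

	static void my_obj_exit(struct my_obj *o)
	{
		/* frees the per-cpu reader counts, if any were allocated */
		six_lock_exit(&o->lock);
	}
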
#define __SIX_LOCK(type) \
void six_lock_wakeup_all(struct six_lock *);
-void six_lock_pcpu_free(struct six_lock *);
-void six_lock_pcpu_alloc(struct six_lock *);
-
struct six_lock_count {
	unsigned n[3];
};