Fixes for various checkpatch errors.
The bulk of the changes wrap macros with complex values in parentheses; the
rest add missing blank lines after declarations, convert kmalloc() with a
multiply to kmalloc_array(), replace symbolic permissions (S_IRUGO and
friends) with octal, switch strlcpy() to strscpy() in tracepoints, and clean
up comments and whitespace.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
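For context (illustration only, not part of the patch): the bkey_ops changes
below are driven by checkpatch's "Macros with complex values should be
enclosed in parentheses" error. Wrapping the compound literal in an outer pair
of parentheses makes each macro expand to a single parenthesized expression,
so it reads predictably wherever it is used. A minimal sketch with a
hypothetical struct ops:

	struct ops {
		int (*validate)(int);
	};

	/* Before: the macro expands to a bare compound literal. */
	#define OPS_OLD	(struct ops) { .validate = NULL, }

	/* After: the outer parentheses turn the expansion into one
	 * expression, which is what checkpatch's complex-value rule asks for. */
	#define OPS_NEW	((struct ops) { .validate = NULL, })

	/* Used the same way bch2_bkey_ops[] is built from these macros
	 * (GNU C accepts compound literals in static initializers): */
	static const struct ops ops_table[] = {
		[0] = OPS_NEW,
	};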
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_alloc (struct bkey_ops) { \
+#define bch2_bkey_ops_alloc ((struct bkey_ops) { \
.key_invalid = bch2_alloc_v1_invalid, \
.val_to_text = bch2_alloc_to_text, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
-}
+})
-#define bch2_bkey_ops_alloc_v2 (struct bkey_ops) { \
+#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) { \
.key_invalid = bch2_alloc_v2_invalid, \
.val_to_text = bch2_alloc_to_text, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
-}
+})
-#define bch2_bkey_ops_alloc_v3 (struct bkey_ops) { \
+#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) { \
.key_invalid = bch2_alloc_v3_invalid, \
.val_to_text = bch2_alloc_to_text, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
-}
+})
-#define bch2_bkey_ops_alloc_v4 (struct bkey_ops) { \
+#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) { \
.key_invalid = bch2_alloc_v4_invalid, \
.val_to_text = bch2_alloc_to_text, \
.swab = bch2_alloc_v4_swab, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
-}
+})
static inline bool bkey_is_alloc(const struct bkey *k)
{
"When reading btree nodes, read all replicas and " \
"compare them")
-/* Parameters that should only be compiled in in debug mode: */
+/* Parameters that should only be compiled in debug mode: */
#define BCH_DEBUG_PARAMS_DEBUG() \
BCH_DEBUG_PARAM(expensive_debug_checks, \
"Enables various runtime debugging checks that " \
static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
__le64 ret;
+
memcpy(&ret, &sb->uuid, sizeof(ret));
return ret;
}
const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
-struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
- const struct bkey_packed *);
-
void bch2_bkey_packed_to_binary_text(struct printbuf *out,
const struct bkey_format *f,
const struct bkey_packed *k)
return 0;
}
-#define bch2_bkey_ops_deleted (struct bkey_ops) { \
+#define bch2_bkey_ops_deleted ((struct bkey_ops) { \
.key_invalid = deleted_key_invalid, \
-}
+})
-#define bch2_bkey_ops_whiteout (struct bkey_ops) { \
+#define bch2_bkey_ops_whiteout ((struct bkey_ops) { \
.key_invalid = deleted_key_invalid, \
-}
+})
static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
return 0;
}
-#define bch2_bkey_ops_error (struct bkey_ops) { \
+#define bch2_bkey_ops_error ((struct bkey_ops) { \
.key_invalid = empty_val_key_invalid, \
-}
+})
static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
return 0;
}
-#define bch2_bkey_ops_cookie (struct bkey_ops) { \
+#define bch2_bkey_ops_cookie ((struct bkey_ops) { \
.key_invalid = key_type_cookie_invalid, \
-}
+})
-#define bch2_bkey_ops_hash_whiteout (struct bkey_ops) { \
+#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
.key_invalid = empty_val_key_invalid, \
-}
+})
static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
datalen, min(datalen, 32U), d.v->data);
}
-#define bch2_bkey_ops_inline_data (struct bkey_ops) { \
+#define bch2_bkey_ops_inline_data ((struct bkey_ops) { \
.key_invalid = key_type_inline_data_invalid, \
.val_to_text = key_type_inline_data_to_text, \
-}
+})
static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
return true;
}
-#define bch2_bkey_ops_set (struct bkey_ops) { \
+#define bch2_bkey_ops_set ((struct bkey_ops) { \
.key_invalid = key_type_set_invalid, \
.key_merge = key_type_set_merge, \
-}
+})
const struct bkey_ops bch2_bkey_ops[] = {
#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
btree_id == BTREE_ID_inodes) {
if (!bkey_packed(k)) {
struct bkey_i *u = packed_to_bkey(k);
+
swap(u->k.p.inode, u->k.p.offset);
} else if (f->bits_per_field[BKEY_FIELD_INODE] &&
f->bits_per_field[BKEY_FIELD_OFFSET]) {
*
* When invalid, error string is returned via @err. @rw indicates whether key is
* being read or written; more aggressive checks can be enabled when rw == WRITE.
-*/
+ */
struct bkey_ops {
int (*key_invalid)(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err);
static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
{
- struct btree *b = kzalloc(sizeof(struct btree), gfp);
+ struct btree *b;
+
+ b = kzalloc(sizeof(struct btree), gfp);
if (!b)
return NULL;
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
{
struct btree_cache *bc = &c->btree_cache;
- struct btree *b = __btree_node_mem_alloc(c, GFP_KERNEL);
+ struct btree *b;
+
+ b = __btree_node_mem_alloc(c, GFP_KERNEL);
if (!b)
return NULL;
void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{
int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+
BUG_ON(ret);
/* Cause future lookups for this node to fail: */
struct bkey_i_btree_ptr_v2 *new;
int ret;
- new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+ new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
if (!new)
return -ENOMEM;
if (ret)
return ret;
- new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+ new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
if (!new)
return -ENOMEM;
if (rb->have_ioref) {
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
bch2_latency_acct(ca, rb->start_time, READ);
}
if (rb->have_ioref) {
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
bch2_latency_acct(ca, rb->start_time, READ);
}
if (p) {
struct bkey uk = bkey_unpack_key(l->b, p);
+
bch2_bkey_to_text(&buf2, &uk);
} else {
prt_printf(&buf2, "(none)");
if (k) {
struct bkey uk = bkey_unpack_key(l->b, k);
+
bch2_bkey_to_text(&buf3, &uk);
} else {
prt_printf(&buf3, "(none)");
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_KEY_CACHE_H
#define _BCACHEFS_BTREE_KEY_CACHE_H
{
switch (type) {
case BCH_CSUM_OPT_none:
- return BCH_CSUM_none;
+ return BCH_CSUM_none;
case BCH_CSUM_OPT_crc32c:
- return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
+ return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
case BCH_CSUM_OPT_crc64:
- return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
+ return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
case BCH_CSUM_OPT_xxhash:
- return BCH_CSUM_xxhash;
+ return BCH_CSUM_xxhash;
default:
- BUG();
+ BUG();
}
}
int bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_dirent (struct bkey_ops) { \
+#define bch2_bkey_ops_dirent ((struct bkey_ops) { \
.key_invalid = bch2_dirent_invalid, \
.val_to_text = bch2_dirent_to_text, \
-}
+})
struct qstr;
struct file;
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-#define bch2_bkey_ops_stripe (struct bkey_ops) { \
+#define bch2_bkey_ops_stripe ((struct bkey_ops) { \
.key_invalid = bch2_stripe_invalid, \
.val_to_text = bch2_stripe_to_text, \
.swab = bch2_ptr_swab, \
.trans_trigger = bch2_trans_mark_stripe, \
.atomic_trigger = bch2_mark_stripe, \
-}
+})
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
{
const char *bch2_err_str(int err)
{
const char *errstr;
+
err = abs(err);
BUG_ON(err >= BCH_ERR_MAX);
switch (k.k->type) {
case KEY_TYPE_btree_ptr: {
struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
+
return (struct bkey_ptrs_c) {
to_entry(&e.v->start[0]),
to_entry(extent_entry_last(e))
}
case KEY_TYPE_extent: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+
return (struct bkey_ptrs_c) {
e.v->start,
extent_entry_last(e)
}
case KEY_TYPE_stripe: {
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
return (struct bkey_ptrs_c) {
to_entry(&s.v->ptrs[0]),
to_entry(&s.v->ptrs[s.v->nr_blocks]),
}
case KEY_TYPE_btree_ptr_v2: {
struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);
+
return (struct bkey_ptrs_c) {
to_entry(&e.v->start[0]),
to_entry(extent_entry_last(e))
#define extent_for_each_entry_from(_e, _entry, _start) \
__bkey_extent_entry_for_each_from(_start, \
- extent_entry_last(_e),_entry)
+ extent_entry_last(_e), _entry)
#define extent_for_each_entry(_e, _entry) \
extent_for_each_entry_from(_e, _entry, (_e).v->start)
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
int, struct bkey_s);
-#define bch2_bkey_ops_btree_ptr (struct bkey_ops) { \
+#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) { \
.key_invalid = bch2_btree_ptr_invalid, \
.val_to_text = bch2_btree_ptr_to_text, \
.swab = bch2_ptr_swab, \
.trans_trigger = bch2_trans_mark_extent, \
.atomic_trigger = bch2_mark_extent, \
-}
+})
-#define bch2_bkey_ops_btree_ptr_v2 (struct bkey_ops) { \
+#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) { \
.key_invalid = bch2_btree_ptr_v2_invalid, \
.val_to_text = bch2_btree_ptr_v2_to_text, \
.swab = bch2_ptr_swab, \
.compat = bch2_btree_ptr_v2_compat, \
.trans_trigger = bch2_trans_mark_extent, \
.atomic_trigger = bch2_mark_extent, \
-}
+})
/* KEY_TYPE_extent: */
bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-#define bch2_bkey_ops_extent (struct bkey_ops) { \
+#define bch2_bkey_ops_extent ((struct bkey_ops) { \
.key_invalid = bch2_bkey_ptrs_invalid, \
.val_to_text = bch2_bkey_ptrs_to_text, \
.swab = bch2_ptr_swab, \
.key_merge = bch2_extent_merge, \
.trans_trigger = bch2_trans_mark_extent, \
.atomic_trigger = bch2_mark_extent, \
-}
+})
/* KEY_TYPE_reservation: */
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-#define bch2_bkey_ops_reservation (struct bkey_ops) { \
+#define bch2_bkey_ops_reservation ((struct bkey_ops) { \
.key_invalid = bch2_reservation_invalid, \
.val_to_text = bch2_reservation_to_text, \
.key_merge = bch2_reservation_merge, \
.trans_trigger = bch2_trans_mark_reservation, \
.atomic_trigger = bch2_mark_reservation, \
-}
+})
/* Extent checksum entries: */
(((p) - (fifo)->data)))
#define fifo_entry_idx(fifo, p) (((p) - &fifo_peek_front(fifo)) & (fifo)->mask)
-#define fifo_idx_entry(fifo, i) (fifo)->data[((fifo)->front + (i)) & (fifo)->mask]
+#define fifo_idx_entry(fifo, i) ((fifo)->data[((fifo)->front + (i)) & (fifo)->mask])
#define fifo_push_back_ref(f) \
(fifo_full((f)) ? NULL : &(f)->data[(f)->back++ & (f)->mask])
printbuf_exit(&buf);
return ret;
bad_hash:
- if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, "
- "hashed to %llu\n%s",
+ if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
int bch2_inode_v2_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_inode (struct bkey_ops) { \
+#define bch2_bkey_ops_inode ((struct bkey_ops) { \
.key_invalid = bch2_inode_invalid, \
.val_to_text = bch2_inode_to_text, \
.trans_trigger = bch2_trans_mark_inode, \
.atomic_trigger = bch2_mark_inode, \
-}
+})
-#define bch2_bkey_ops_inode_v2 (struct bkey_ops) { \
+#define bch2_bkey_ops_inode_v2 ((struct bkey_ops) { \
.key_invalid = bch2_inode_v2_invalid, \
.val_to_text = bch2_inode_to_text, \
.trans_trigger = bch2_trans_mark_inode, \
.atomic_trigger = bch2_mark_inode, \
-}
+})
static inline bool bkey_is_inode(const struct bkey *k)
{
int, struct printbuf *);
void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_inode_generation (struct bkey_ops) { \
+#define bch2_bkey_ops_inode_generation ((struct bkey_ops) { \
.key_invalid = bch2_inode_generation_invalid, \
.val_to_text = bch2_inode_generation_to_text, \
-}
+})
#if 0
typedef struct {
*
* Synchronous updates are specified by passing a closure (@flush_cl) to
* bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter
- * down to the journalling code. That closure will will wait on the journal
- * write to complete (via closure_wait()).
+ * down to the journalling code. That closure will wait on the journal write to
+ * complete (via closure_wait()).
*
* If the index update wasn't synchronous, the journal entry will be
* written out after 10 ms have elapsed, by default (the delay_ms field
int bch2_lru_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_lru (struct bkey_ops) { \
+#define bch2_bkey_ops_lru ((struct bkey_ops) { \
.key_invalid = bch2_lru_invalid, \
.val_to_text = bch2_lru_to_text, \
-}
+})
int bch2_lru_delete(struct btree_trans *, u64, u64, u64, struct bkey_s_c);
int bch2_lru_set(struct btree_trans *, u64, u64, u64 *);
int bch2_quota_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_quota (struct bkey_ops) { \
+#define bch2_bkey_ops_quota ((struct bkey_ops) { \
.key_invalid = bch2_quota_invalid, \
.val_to_text = bch2_quota_to_text, \
-}
+})
static inline struct bch_qid bch_qid(struct bch_inode_unpacked *u)
{
goto err;
bch_verbose(c, "reading snapshots done");
- bch2_inode_init(c, &root_inode, 0, 0,
- S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
+ bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
root_inode.bi_inum = BCACHEFS_ROOT_INO;
root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
bch2_inode_pack(c, &packed_inode, &root_inode);
struct bkey_s_c);
bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-#define bch2_bkey_ops_reflink_p (struct bkey_ops) { \
+#define bch2_bkey_ops_reflink_p ((struct bkey_ops) { \
.key_invalid = bch2_reflink_p_invalid, \
.val_to_text = bch2_reflink_p_to_text, \
.key_merge = bch2_reflink_p_merge, \
.trans_trigger = bch2_trans_mark_reflink_p, \
.atomic_trigger = bch2_mark_reflink_p, \
-}
+})
int bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c,
int, struct printbuf *);
int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_i *, unsigned);
-#define bch2_bkey_ops_reflink_v (struct bkey_ops) { \
+#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
.key_invalid = bch2_reflink_v_invalid, \
.val_to_text = bch2_reflink_v_to_text, \
.swab = bch2_ptr_swab, \
.trans_trigger = bch2_trans_mark_reflink_v, \
.atomic_trigger = bch2_mark_extent, \
-}
+})
int bch2_indirect_inline_data_invalid(const struct bch_fs *, struct bkey_s_c,
int, struct printbuf *);
struct bkey_s_c, struct bkey_i *,
unsigned);
-#define bch2_bkey_ops_indirect_inline_data (struct bkey_ops) { \
+#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) { \
.key_invalid = bch2_indirect_inline_data_invalid, \
.val_to_text = bch2_indirect_inline_data_to_text, \
.trans_trigger = bch2_trans_mark_indirect_inline_data, \
-}
+})
static inline const __le64 *bkey_refcount_c(struct bkey_s_c k)
{
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_REPLICAS_TYPES_H
#define _BCACHEFS_REPLICAS_TYPES_H
for (i = 0; i < 2; i++) {
int ret = snapshot_live(trans, child[i]);
+
if (ret < 0)
return ret;
int bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c,
int rw, struct printbuf *);
-#define bch2_bkey_ops_snapshot (struct bkey_ops) { \
+#define bch2_bkey_ops_snapshot ((struct bkey_ops) { \
.key_invalid = bch2_snapshot_invalid, \
.val_to_text = bch2_snapshot_to_text, \
-}
+})
int bch2_mark_snapshot(struct btree_trans *, struct bkey_s_c,
struct bkey_s_c, unsigned);
int rw, struct printbuf *);
void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_subvolume (struct bkey_ops) { \
+#define bch2_bkey_ops_subvolume ((struct bkey_ops) { \
.key_invalid = bch2_subvolume_invalid, \
.val_to_text = bch2_subvolume_to_text, \
-}
+})
int bch2_subvolume_get(struct btree_trans *, unsigned,
bool, int, struct bch_subvolume *);
static struct attribute sysfs_##_name = \
{ .name = #_name, .mode = _mode }
-#define write_attribute(n) __sysfs_attribute(n, S_IWUSR)
-#define read_attribute(n) __sysfs_attribute(n, S_IRUGO)
-#define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR)
+#define write_attribute(n) __sysfs_attribute(n, 0200)
+#define read_attribute(n) __sysfs_attribute(n, 0444)
+#define rw_attribute(n) __sysfs_attribute(n, 0644)
#define sysfs_printf(file, fmt, ...) \
do { \
#define x(_name) \
static struct attribute sysfs_time_stat_##_name = \
- { .name = #_name, .mode = S_IRUGO };
+ { .name = #_name, .mode = 0444 };
BCH_TIME_STATS()
#undef x
static struct attribute sysfs_state_rw = {
.name = "state",
- .mode = S_IRUGO
+ .mode = 0444,
};
static size_t bch2_btree_cache_size(struct bch_fs *c)
SHOW(bch2_fs_internal)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
return bch2_fs_to_text(out, &c->kobj, attr);
}
STORE(bch2_fs_internal)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->required = required;
),
TP_fast_assign(
struct btree *b = btree_path_node(path, level);
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->level = path->level;
TP_fast_assign(
struct six_lock_count c;
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->level = level;
TP_fast_assign(
__entry->dev = ca->dev;
- strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+ strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
__entry->user = user;
__entry->bucket = bucket;
__entry->free = free;
__entry->need_journal_commit = s->skipped_need_journal_commit;
__entry->nouse = s->skipped_nouse;
__entry->nonblocking = nonblocking;
- strlcpy(__entry->err, err, sizeof(__entry->err));
+ strscpy(__entry->err, err, sizeof(__entry->err));
),
TP_printk("%d,%d reserve %s user %u bucket %llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
__entry->open = open;
__entry->need_journal_commit = need_journal_commit;
__entry->discarded = discarded;
- strlcpy(__entry->err, err, sizeof(__entry->err));
+ strscpy(__entry->err, err, sizeof(__entry->err));
),
TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
),
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->flags = flags;
),
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
TRACE_BPOS_assign(pos, path->pos)
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->old_locks_want = old_locks_want;
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
),
TP_printk("%s", __entry->trans_fn)
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->bytes = bytes;
),
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
int bch2_xattr_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_xattr (struct bkey_ops) { \
+#define bch2_bkey_ops_xattr ((struct bkey_ops) { \
.key_invalid = bch2_xattr_invalid, \
.val_to_text = bch2_xattr_to_text, \
-}
+})
static inline unsigned xattr_val_u64s(unsigned name_len, unsigned val_len)
{