bkey_xattr_init(&xattr->k_i);
xattr->k.u64s = u64s;
xattr->v.x_type = acl_to_xattr_type(type);
- xattr->v.x_name_len = 0,
+ xattr->v.x_name_len = 0;
xattr->v.x_val_len = cpu_to_le16(acl_len);
acl_header = xattr_val(&xattr->v);
- * bch_bucket_alloc - allocate a single bucket from a specific device
+ * bch2_bucket_alloc - allocate a single bucket from a specific device
*
- * Returns index of bucket on success, 0 on failure
- * */
+ * Returns: an open_bucket on success, or an ERR_PTR() on failure
+ */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
struct bch_dev *ca,
enum alloc_reserve reserve,
*
* BTREE NODES:
*
- * Our unit of allocation is a bucket, and we we can't arbitrarily allocate and
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
* free smaller than a bucket - so, that's how big our btree nodes are.
*
* (If buckets are really big we'll only use part of the bucket for a btree node
* number.
*
* - WHITEOUT: for hash table btrees
-*/
+ */
#define BCH_BKEY_TYPES() \
x(deleted, 0) \
x(whiteout, 1) \
* User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
* flags)
*/
- __BCH_INODE_SYNC = 0,
- __BCH_INODE_IMMUTABLE = 1,
- __BCH_INODE_APPEND = 2,
- __BCH_INODE_NODUMP = 3,
- __BCH_INODE_NOATIME = 4,
-
- __BCH_INODE_I_SIZE_DIRTY= 5,
- __BCH_INODE_I_SECTORS_DIRTY= 6,
- __BCH_INODE_UNLINKED = 7,
- __BCH_INODE_BACKPTR_UNTRUSTED = 8,
+ __BCH_INODE_SYNC = 0,
+ __BCH_INODE_IMMUTABLE = 1,
+ __BCH_INODE_APPEND = 2,
+ __BCH_INODE_NODUMP = 3,
+ __BCH_INODE_NOATIME = 4,
+
+ __BCH_INODE_I_SIZE_DIRTY = 5,
+ __BCH_INODE_I_SECTORS_DIRTY = 6,
+ __BCH_INODE_UNLINKED = 7,
+ __BCH_INODE_BACKPTR_UNTRUSTED = 8,
/* bits 20+ reserved for packed fields below: */
};
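The __BCH_INODE_* values above are bit numbers rather than masks (note the comment reserving bits 20+ for packed fields). A minimal sketch of how a mask is typically derived and tested from such a bit number; the BCH_INODE_UNLINKED define mirrors the pattern used for these flags, while the helper and its u32 argument are illustrative assumptions, not code from the patch:

/* Illustrative only: a bit number from the enum above becomes a mask macro. */
#define BCH_INODE_UNLINKED		(1 << __BCH_INODE_UNLINKED)

/* Hypothetical helper: test the flag on an inode's bi_flags word. */
static inline bool example_inode_unlinked(u32 bi_flags)
{
	return (bi_flags & BCH_INODE_UNLINKED) != 0;
}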
if (bkey_packed(l)) {
__bkey_unpack_key_format_checked(b, &unpacked, l);
- l = (void*) &unpacked;
+ l = (void *) &unpacked;
} else if (bkey_packed(r)) {
__bkey_unpack_key_format_checked(b, &unpacked, r);
- r = (void*) &unpacked;
+ r = (void *) &unpacked;
}
return bpos_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
}
/*
- * we prefer to pass bpos by ref, but it's often enough terribly convenient to
- * pass it by by val... as much as I hate c++, const ref would be nice here:
+ * The compiler generates better code when we pass bpos by ref, but it's often
+ * enough terribly convenient to pass it by val... as much as I hate c++, const
+ * ref would be nice here:
*/
__pure __flatten
static inline int bkey_cmp_left_packed_byval(const struct btree *b,
t->size -= j - l;
for (j = l; j < t->size; j++)
- rw_aux_tree(b, t)[j].offset += shift;
+ rw_aux_tree(b, t)[j].offset += shift;
EBUG_ON(l < t->size &&
rw_aux_tree(b, t)[l].offset ==
bch2_btree_node_iter_sort(iter, b);
}
-noinline __flatten __attribute__((cold))
+noinline __flatten __cold
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
struct btree *b, struct bpos *search)
{
if (likely(c->opts.btree_node_mem_ptr_optimization &&
b &&
b->hash_val == btree_ptr_hash_val(k)))
- goto lock_node;
+ goto lock_node;
retry:
b = btree_cache_find(bc, k);
if (unlikely(!b)) {
/* XXX we're called from btree_gc which will be holding other btree
* nodes locked
- * */
+ */
__bch2_btree_node_wait_on_read(b);
__bch2_btree_node_wait_on_write(b);
" node %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
buf1.buf, buf2.buf))
- ret = set_node_min(c, cur, expected_start);
+ ret = set_node_min(c, cur, expected_start);
}
out:
fsck_err:
BUG_ON(trans->used_mempool);
#ifdef __KERNEL__
- p = this_cpu_xchg(c->btree_paths_bufs->path , NULL);
+ p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
#endif
if (!p)
p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
rcu_read_lock();
owner = READ_ONCE(b->lock.owner);
- pid = owner ? owner->pid : 0;;
+ pid = owner ? owner->pid : 0;
rcu_read_unlock();
prt_tab(out);
+// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_cache.h"
bool was_new = true;
ck = bkey_cached_alloc(trans, path);
- if (unlikely(IS_ERR(ck)))
+ if (IS_ERR(ck))
return ck;
if (unlikely(!ck)) {
return ret;
}
-noinline static int
+static noinline int
bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
unsigned flags)
{
* Since journal reclaim depends on us making progress here, and the
* allocator/copygc depend on journal reclaim making progress, we need
* to be using alloc reserves:
- * */
+ */
ret = bch2_btree_iter_traverse(&b_iter) ?:
bch2_trans_update(trans, &b_iter, ck->k,
BTREE_UPDATE_KEY_CACHE_RECLAIM|
void bch2_btree_key_cache_exit(void)
{
- if (bch2_key_cache)
- kmem_cache_destroy(bch2_key_cache);
+ kmem_cache_destroy(bch2_key_cache);
}
int __init bch2_btree_key_cache_init(void)
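kmem_cache_destroy() accepts a NULL pointer and simply returns, so the removed guard was redundant; the kfree() guards dropped further down rely on the same rule (destroy_workqueue(), by contrast, does not tolerate NULL, which is why those checks are kept). A minimal sketch of the teardown pattern, with a hypothetical cache name:

static struct kmem_cache *example_cache;	/* may still be NULL if init failed */

static void example_exit(void)
{
	/* No NULL check needed: kmem_cache_destroy(NULL) is a no-op. */
	kmem_cache_destroy(example_cache);
}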
struct btree_path *path;
if (unlikely(trans->restarted))
- return - ((int) trans->restarted);
+ return -((int) trans->restarted);
trans_for_each_path(trans, path)
if (path->should_be_locked &&
goto out;
ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
-out :
+out:
bch2_trans_iter_exit(trans, &iter);
return ret;
{
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bch_fs_usage __percpu *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
struct reflink_gc *ref;
size_t l, r, m;
return -ENOMEM;
}
- return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);;
+ return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}
size_t orig_len = len;
int ret, i;
- sg = kmalloc_array(sizeof(*sg), pages, GFP_KERNEL);
+ sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
if (!sg)
return -ENOMEM;
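kmalloc_array() is declared as (n, size, flags); the swapped call computed the same product, so this fix is about matching the documented signature rather than changing behaviour. The kcalloc() and kvmalloc_array() conversions further down follow the same convention while also replacing open-coded n * size multiplications with overflow-checked helpers. A small sketch of the intended usage, with hypothetical names:

struct example_item { u64 key; };	/* hypothetical element type */

static int example_alloc_items(struct example_item **out, size_t nr_items)
{
	/* Element count first, element size second; the multiplication is overflow-checked. */
	struct example_item *items = kmalloc_array(nr_items, sizeof(*items), GFP_KERNEL);

	if (!items)
		return -ENOMEM;

	*out = items;
	return 0;
}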
bch2_write_op_init(&m->op, c, io_opts);
m->op.pos = bkey_start_pos(k.k);
m->op.version = k.k->version;
- m->op.target = data_opts.target,
+ m->op.target = data_opts.target;
m->op.write_point = wp;
m->op.flags |= BCH_WRITE_PAGES_STABLE|
BCH_WRITE_PAGES_OWNED|
if (i->iter < tbl->size) {
rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
bch2_cached_btree_node_to_text(&i->buf, c, b);
- i->iter++;;
+ i->iter++;
} else {
done = true;
}
if (bkey_val_u64s(k.k) > dirent_val_u64s(len)) {
prt_printf(err, "value too big (%zu > %u)",
- bkey_val_u64s(k.k),dirent_val_u64s(len));
+ bkey_val_u64s(k.k), dirent_val_u64s(len));
return -EINVAL;
}
if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
lp.crc.uncompressed_size) {
/* can use left extent's crc entry */
- } else if (lp.crc.live_size <= rp.crc.offset ) {
+ } else if (lp.crc.live_size <= rp.crc.offset) {
/* can use right extent's crc entry */
} else {
/* check if checksums can be merged: */
if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
crc_l.uncompressed_size) {
/* can use left extent's crc entry */
- } else if (crc_l.live_size <= crc_r.offset ) {
+ } else if (crc_l.live_size <= crc_r.offset) {
/* can use right extent's crc entry */
crc_r.offset -= crc_l.live_size;
bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
ret = bch2_inode_write(trans, &src_dir_iter, src_dir_u) ?:
(src_dir.inum != dst_dir.inum
? bch2_inode_write(trans, &dst_dir_iter, dst_dir_u)
- : 0 ) ?:
+ : 0) ?:
bch2_inode_write(trans, &src_inode_iter, src_inode_u) ?:
(dst_inum.inum
? bch2_inode_write(trans, &dst_inode_iter, dst_inode_u)
- : 0 );
+ : 0);
err:
bch2_trans_iter_exit(trans, &dst_inode_iter);
bch2_trans_iter_exit(trans, &src_inode_iter);
truncate_pagecache_range(&inode->v, offset, end - 1);
- if (block_start < block_end ) {
+ if (block_start < block_end) {
s64 i_sectors_delta = 0;
ret = bch2_fpunch(c, inode_inum(inode),
inode = __bch2_create(idmap, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
(subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
- if (unlikely(IS_ERR(inode)))
+ if (IS_ERR(inode))
return bch2_err_class(PTR_ERR(inode));
inode_lock(&inode->v);
sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec);
c->vfs_sb = sb;
- strlcpy(sb->s_id, c->name, sizeof(sb->s_id));
+ strscpy(sb->s_id, c->name, sizeof(sb->s_id));
ret = super_setup_bdi(sb);
if (ret)
void bch2_vfs_exit(void)
{
unregister_filesystem(&bcache_fs_type);
- if (bch2_inode_cache)
- kmem_cache_destroy(bch2_inode_cache);
+ kmem_cache_destroy(bch2_inode_cache);
}
int __init bch2_vfs_init(void)
{
if (t->nr == t->size) {
size_t new_size = max_t(size_t, 128UL, t->size * 2);
- void *d = kvmalloc(new_size * sizeof(t->d[0]), GFP_KERNEL);
+ void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
+
if (!d) {
bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
new_size);
return -EINVAL;
}
- if (bch2_inode_unpack(k, &unpacked)){
+ if (bch2_inode_unpack(k, &unpacked)) {
prt_printf(err, "invalid variable length fields");
return -EINVAL;
}
return ret;
entry = container_of(journal_res_entry(j, &res),
- struct jset_entry_log, entry);;
+ struct jset_entry_log, entry);
memset(entry, 0, u64s * sizeof(u64));
entry->entry.type = BCH_JSET_ENTRY_log;
entry->entry.u64s = u64s - 1;
bch2_journal_block(&c->journal);
}
- bu = kzalloc(nr_want * sizeof(*bu), GFP_KERNEL);
- ob = kzalloc(nr_want * sizeof(*ob), GFP_KERNEL);
- new_buckets = kzalloc(nr * sizeof(u64), GFP_KERNEL);
- new_bucket_seq = kzalloc(nr * sizeof(u64), GFP_KERNEL);
+ bu = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
+ ob = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
+ new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
+ new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
if (!bu || !ob || !new_buckets || !new_bucket_seq) {
ret = -ENOMEM;
goto err_unblock;
rcu_read_lock();
s = READ_ONCE(j->reservations);
- prt_printf(out, "dirty journal entries:\t%llu/%llu\n",fifo_used(&j->pin), j->pin.size);
+ prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
prt_printf(out, "seq:\t\t\t%llu\n", journal_cur_seq(j));
prt_printf(out, "seq_ondisk:\t\t%llu\n", j->seq_ondisk);
prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j));
if ((j->space[journal_space_clean_ondisk].next_entry <
j->space[journal_space_clean_ondisk].total) &&
(clean - clean_ondisk <= total / 8) &&
- (clean_ondisk * 2 > clean ))
+ (clean_ondisk * 2 > clean))
set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
else
clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
if (!nr)
return 0;
- b = kmalloc_array(sizeof(u64), nr, GFP_KERNEL);
+ b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
if (!b)
return -ENOMEM;
if (!nr)
return 0;
- b = kmalloc_array(sizeof(*b), nr, GFP_KERNEL);
+ b = kmalloc_array(nr, sizeof(*b), GFP_KERNEL);
if (!b)
return -ENOMEM;
/*
* The iterator gets unlocked by __bch2_read_extent - need to
* save a copy of @k elsewhere:
- */
+ */
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
i++;
}
- return data_opts->rewrite_ptrs != 0;;
+ return data_opts->rewrite_ptrs != 0;
}
static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
.size = max_t(size_t, keys->size, 8) * 2,
};
- new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
+ new_keys.d = kvmalloc_array(new_keys.size, sizeof(new_keys.d[0]), GFP_KERNEL);
if (!new_keys.d) {
bch_err(c, "%s: error allocating new key array (size %zu)",
__func__, new_keys.size);
keys->size = roundup_pow_of_two(nr_keys);
- keys->d = kvmalloc(sizeof(keys->d[0]) * keys->size, GFP_KERNEL);
+ keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
if (!keys->d)
return -ENOMEM;
r = (ctx->v[0] ^ ctx->v[1]) ^ (ctx->v[2] ^ ctx->v[3]);
memset(ctx, 0, sizeof(*ctx));
- return (r);
+ return r;
}
u64 SipHash(const SIPHASH_KEY *key, int rc, int rf, const void *src, size_t len)
void bch2_free_super(struct bch_sb_handle *sb)
{
- if (sb->bio)
- kfree(sb->bio);
+ kfree(sb->bio);
if (!IS_ERR_OR_NULL(sb->bdev))
blkdev_put(sb->bdev, sb->holder);
kfree(sb->holder);
bio_init(bio, NULL, bio->bi_inline_vecs, nr_bvecs, 0);
- if (sb->bio)
- kfree(sb->bio);
+ kfree(sb->bio);
sb->bio = bio;
}
kfree(c->unused_inode_hints);
free_heap(&c->copygc_heap);
- if (c->io_complete_wq )
- destroy_workqueue(c->io_complete_wq );
+ if (c->io_complete_wq)
+ destroy_workqueue(c->io_complete_wq);
if (c->copygc_wq)
destroy_workqueue(c->copygc_wq);
if (c->btree_io_complete_wq)
goto err;
pr_uuid(&name, c->sb.user_uuid.b);
- strlcpy(c->name, name.buf, sizeof(c->name));
+ strscpy(c->name, name.buf, sizeof(c->name));
printbuf_exit(&name);
ret = name.allocation_failure ? -ENOMEM : 0;
}
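strscpy() reads at most the destination size from the source and reports truncation with -E2BIG, whereas strlcpy() returns strlen(src) and so walks the entire source string even when it does not fit (and can over-read an unterminated source). The call sites above ignore the return value; a caller that cares about truncation would check it roughly as follows (function and buffer names are illustrative):

static void example_set_label(const char *src_name)
{
	char label[32];

	/* strscpy() always NUL-terminates and returns -E2BIG on truncation. */
	if (strscpy(label, src_name, sizeof(label)) < 0)
		pr_warn("example: label truncated\n");
}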
ret = bch2_trans_mark_dev_sb(c, ca);
- if (ret) {
+ if (ret)
goto err;
- }
mutex_lock(&c->sb_lock);
mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];
}
}
-#include "eytzinger.h"
-
static int alignment_ok(const void *base, size_t align)
{
return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||