bcachefs: kill kvpmalloc()
author Kent Overstreet <kent.overstreet@linux.dev>
	Thu, 1 Feb 2024 11:35:46 +0000 (06:35 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
	Wed, 13 Mar 2024 22:39:12 +0000 (18:39 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
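For context (editor's sketch, not part of the original commit message): the helpers removed below — kvpmalloc()/kvpfree() and vpmalloc()/vpfree(), see the fs/bcachefs/util.h hunk — wrapped kmalloc()/__get_free_pages()/__vmalloc() and required callers to pass the allocation size back in at free time. Core-mm kvmalloc()/kvfree() provide the same kmalloc-with-vmalloc-fallback behaviour, and kvfree() frees either kind of pointer without needing the size, which is what every converted call site in this patch relies on. A minimal sketch of the before/after calling pattern, with hypothetical names not taken from the patch:

	#include <linux/slab.h>	/* kvmalloc(), kvfree() */

	/*
	 * Before: the size had to be carried to the free path so kvpfree()
	 * could choose between kfree() and the page/vmalloc free path:
	 *
	 *	buf = kvpmalloc(bytes, GFP_KERNEL);
	 *	...
	 *	kvpfree(buf, bytes);
	 *
	 * After: kvfree() handles both kmalloc()ed and vmalloc()ed pointers,
	 * so no size needs to be tracked.
	 */
	static void *example_alloc(size_t bytes)
	{
		return kvmalloc(bytes, GFP_KERNEL);	/* kmalloc first, vmalloc fallback */
	}

	static void example_free(void *buf)
	{
		kvfree(buf);
	}
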
14 files changed:
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_journal_iter.c
fs/bcachefs/buckets.c
fs/bcachefs/compress.c
fs/bcachefs/debug.c
fs/bcachefs/ec.c
fs/bcachefs/fifo.h
fs/bcachefs/journal.c
fs/bcachefs/journal_io.c
fs/bcachefs/super.c
fs/bcachefs/util.c
fs/bcachefs/util.h

diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index a8b393bc7567b47e7e7106fd611412a91ecd2c1a..72d24933dc193d5a6f41a0d9adea73d82ae192fe 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -60,7 +60,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 
        clear_btree_node_just_written(b);
 
-       kvpfree(b->data, btree_buf_bytes(b));
+       kvfree(b->data);
        b->data = NULL;
 #ifdef __KERNEL__
        kvfree(b->aux_data);
@@ -94,7 +94,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 {
        BUG_ON(b->data || b->aux_data);
 
-       b->data = kvpmalloc(btree_buf_bytes(b), gfp);
+       b->data = kvmalloc(btree_buf_bytes(b), gfp);
        if (!b->data)
                return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
@@ -107,7 +107,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
                b->aux_data = NULL;
 #endif
        if (!b->aux_data) {
-               kvpfree(b->data, btree_buf_bytes(b));
+               kvfree(b->data);
                b->data = NULL;
                return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
        }
@@ -408,7 +408,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
        if (c->verify_data)
                list_move(&c->verify_data->list, &bc->live);
 
-       kvpfree(c->verify_ondisk, c->opts.btree_node_size);
+       kvfree(c->verify_ondisk);
 
        for (i = 0; i < btree_id_nr_alive(c); i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 3005b39d3a1a7a6ac262fc54feb3b96debc1683a..f4872f1d6fc614d8bc802cc79e51219d711d97b0 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -1193,9 +1193,7 @@ static void bch2_gc_free(struct bch_fs *c)
        genradix_free(&c->gc_stripes);
 
        for_each_member_device(c, ca) {
-               kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
-                       sizeof(struct bucket_array) +
-                       ca->mi.nbuckets * sizeof(struct bucket));
+               kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
                ca->buckets_gc = NULL;
 
                free_percpu(ca->usage_gc);
@@ -1494,7 +1492,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 {
        for_each_member_device(c, ca) {
-               struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
+               struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
                                ca->mi.nbuckets * sizeof(struct bucket),
                                GFP_KERNEL|__GFP_ZERO);
                if (!buckets) {
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index caf3eecfc801e902bd8ec2b589bff05a5cb0d3bf..767cdbe7f586198fb7050356174b4a327731f809 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -103,7 +103,7 @@ static void btree_bounce_free(struct bch_fs *c, size_t size,
        if (used_mempool)
                mempool_free(p, &c->btree_bounce_pool);
        else
-               vpfree(p, size);
+               kvfree(p);
 }
 
 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
@@ -115,7 +115,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
        BUG_ON(size > c->opts.btree_node_size);
 
        *used_mempool = false;
-       p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+       p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
        if (!p) {
                *used_mempool = true;
                p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index b7ac93c8fdd8abf0fd3c919dcabcf5d5215b0290..3da65562fdb0423ab0cfcba0fdf3d5cff40b74fa 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -447,9 +447,7 @@ void bch2_journal_entries_free(struct bch_fs *c)
        struct genradix_iter iter;
 
        genradix_for_each(&c->journal_entries, iter, i)
-               if (*i)
-                       kvpfree(*i, offsetof(struct journal_replay, j) +
-                               vstruct_bytes(&(*i)->j));
+               kvfree(*i);
        genradix_free(&c->journal_entries);
 }
 
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 54f7826ac49874d46b08330678ea0b2565ecc491..7dca10ba70d253fe1e0619e738ea7826d1ea1ca1 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1335,7 +1335,7 @@ static void bucket_gens_free_rcu(struct rcu_head *rcu)
        struct bucket_gens *buckets =
                container_of(rcu, struct bucket_gens, rcu);
 
-       kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
+       kvfree(buckets);
 }
 
 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
@@ -1345,16 +1345,16 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
        bool resize = ca->bucket_gens != NULL;
        int ret;
 
-       if (!(bucket_gens       = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
-                                           GFP_KERNEL|__GFP_ZERO))) {
+       if (!(bucket_gens       = kvmalloc(sizeof(struct bucket_gens) + nbuckets,
+                                          GFP_KERNEL|__GFP_ZERO))) {
                ret = -BCH_ERR_ENOMEM_bucket_gens;
                goto err;
        }
 
        if ((c->opts.buckets_nouse &&
-            !(buckets_nouse    = kvpmalloc(BITS_TO_LONGS(nbuckets) *
-                                           sizeof(unsigned long),
-                                           GFP_KERNEL|__GFP_ZERO)))) {
+            !(buckets_nouse    = kvmalloc(BITS_TO_LONGS(nbuckets) *
+                                          sizeof(unsigned long),
+                                          GFP_KERNEL|__GFP_ZERO)))) {
                ret = -BCH_ERR_ENOMEM_buckets_nouse;
                goto err;
        }
@@ -1397,8 +1397,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 
        ret = 0;
 err:
-       kvpfree(buckets_nouse,
-               BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+       kvfree(buckets_nouse);
        if (bucket_gens)
                call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
 
@@ -1407,27 +1406,21 @@ err:
 
 void bch2_dev_buckets_free(struct bch_dev *ca)
 {
-       unsigned i;
-
-       kvpfree(ca->buckets_nouse,
-               BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
-       kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
-               sizeof(struct bucket_gens) + ca->mi.nbuckets);
+       kvfree(ca->buckets_nouse);
+       kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
 
-       for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
+       for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++)
                free_percpu(ca->usage[i]);
        kfree(ca->usage_base);
 }
 
 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 {
-       unsigned i;
-
        ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
        if (!ca->usage_base)
                return -BCH_ERR_ENOMEM_usage_init;
 
-       for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
+       for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) {
                ca->usage[i] = alloc_percpu(struct bch_dev_usage);
                if (!ca->usage[i])
                        return -BCH_ERR_ENOMEM_usage_init;
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 33df8cf86bd8f83bbf42d45944d0632da404fd71..1410365a889156450c78da9165bdb146872370ed 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -601,13 +601,13 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
                return 0;
 
        if (!mempool_initialized(&c->compression_bounce[READ]) &&
-           mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
-                                       1, c->opts.encoded_extent_max))
+           mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
+                                      1, c->opts.encoded_extent_max))
                return -BCH_ERR_ENOMEM_compression_bounce_read_init;
 
        if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
-           mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
-                                       1, c->opts.encoded_extent_max))
+           mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
+                                      1, c->opts.encoded_extent_max))
                return -BCH_ERR_ENOMEM_compression_bounce_write_init;
 
        for (i = compression_types;
@@ -622,15 +622,15 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
                if (mempool_initialized(&c->compress_workspace[i->type]))
                        continue;
 
-               if (mempool_init_kvpmalloc_pool(
+               if (mempool_init_kvmalloc_pool(
                                &c->compress_workspace[i->type],
                                1, i->compress_workspace))
                        return -BCH_ERR_ENOMEM_compression_workspace_init;
        }
 
        if (!mempool_initialized(&c->decompress_workspace) &&
-           mempool_init_kvpmalloc_pool(&c->decompress_workspace,
-                                       1, decompress_workspace_size))
+           mempool_init_kvmalloc_pool(&c->decompress_workspace,
+                                      1, decompress_workspace_size))
                return -BCH_ERR_ENOMEM_decompression_workspace_init;
 
        return 0;
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 7bdba8507fc93cdfdecc29de3e70e5589cf8177b..b1f147e6be4d5cdd0ab491932db9c625b763e29e 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -137,7 +137,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
        mutex_lock(&c->verify_lock);
 
        if (!c->verify_ondisk) {
-               c->verify_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
+               c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
                if (!c->verify_ondisk)
                        goto out;
        }
@@ -199,7 +199,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
                return;
        }
 
-       n_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
+       n_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
        if (!n_ondisk) {
                prt_printf(out, "memory allocation failure\n");
                goto out;
@@ -293,7 +293,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
 out:
        if (bio)
                bio_put(bio);
-       kvpfree(n_ondisk, btree_buf_bytes(b));
+       kvfree(n_ondisk);
        percpu_ref_put(&ca->io_ref);
 }
 
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index d503af2700247d8aa1257962c37df9b042ee55ec..b98e2c2b8bf06f59fa70cfe23873e51529a917b8 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -504,7 +504,7 @@ static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
                unsigned i;
 
                for (i = 0; i < s->v.nr_blocks; i++) {
-                       kvpfree(buf->data[i], buf->size << 9);
+                       kvfree(buf->data[i]);
                        buf->data[i] = NULL;
                }
        }
@@ -531,7 +531,7 @@ static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
        memset(buf->valid, 0xFF, sizeof(buf->valid));
 
        for (i = 0; i < v->nr_blocks; i++) {
-               buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
+               buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
                if (!buf->data[i])
                        goto err;
        }
diff --git a/fs/bcachefs/fifo.h b/fs/bcachefs/fifo.h
index 66b945be10c2309a9e758b228b146047b20674e2..d8153fe27037ef46d1b2b220430f78fae78f2e35 100644
--- a/fs/bcachefs/fifo.h
+++ b/fs/bcachefs/fifo.h
@@ -24,12 +24,12 @@ struct {                                                            \
        (fifo)->mask    = (fifo)->size                                  \
                ? roundup_pow_of_two((fifo)->size) - 1                  \
                : 0;                                                    \
-       (fifo)->data    = kvpmalloc(fifo_buf_size(fifo), (_gfp));       \
+       (fifo)->data    = kvmalloc(fifo_buf_size(fifo), (_gfp));        \
 })
 
 #define free_fifo(fifo)                                                        \
 do {                                                                   \
-       kvpfree((fifo)->data, fifo_buf_size(fifo));                     \
+       kvfree((fifo)->data);                                           \
        (fifo)->data = NULL;                                            \
 } while (0)
 
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index fe5f7a944ad3083cac3bc36e8c019280652b0fcd..f5be8e417c8a63ddef0e6a6c8f533b9d4218e772 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1343,7 +1343,7 @@ void bch2_fs_journal_exit(struct journal *j)
        darray_exit(&j->early_journal_entries);
 
        for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
-               kvpfree(j->buf[i].data, j->buf[i].buf_size);
+               kvfree(j->buf[i].data);
        free_fifo(&j->pin);
 }
 
@@ -1372,7 +1372,7 @@ int bch2_fs_journal_init(struct journal *j)
 
        for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
                j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
-               j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
+               j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
                if (!j->buf[i].data)
                        return -BCH_ERR_ENOMEM_journal_buf;
                j->buf[i].idx = i;
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index cd8921a2c0daed476edb5aa3b9627d4695080ea3..c4f19b996b45f01078ee09d86eeb16f8cac13d05 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -84,8 +84,7 @@ static void __journal_replay_free(struct bch_fs *c,
 
        BUG_ON(*p != i);
        *p = NULL;
-       kvpfree(i, offsetof(struct journal_replay, j) +
-               vstruct_bytes(&i->j));
+       kvfree(i);
 }
 
 static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
@@ -196,7 +195,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
                goto out;
        }
 replace:
-       i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
+       i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
        if (!i)
                return -BCH_ERR_ENOMEM_journal_entry_add;
 
@@ -965,11 +964,11 @@ static int journal_read_buf_realloc(struct journal_read_buf *b,
                return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
        new_size = roundup_pow_of_two(new_size);
-       n = kvpmalloc(new_size, GFP_KERNEL);
+       n = kvmalloc(new_size, GFP_KERNEL);
        if (!n)
                return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
-       kvpfree(b->data, b->size);
+       kvfree(b->data);
        b->data = n;
        b->size = new_size;
        return 0;
@@ -1195,7 +1194,7 @@ found:
                ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
 out:
        bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
-       kvpfree(buf.data, buf.size);
+       kvfree(buf.data);
        percpu_ref_put(&ca->io_ref);
        closure_return(cl);
        return;
@@ -1576,7 +1575,7 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
        if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
                return;
 
-       new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
+       new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
        if (!new_buf)
                return;
 
@@ -1587,7 +1586,7 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
        swap(buf->buf_size,     new_size);
        spin_unlock(&j->lock);
 
-       kvpfree(new_buf, new_size);
+       kvfree(new_buf);
 }
 
 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index b91efec11dfeefb9be82f904d2d004dcadeaeac6..b17f2e199322c793a53ae5c637f76d1cabb7dd8b 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -576,7 +576,7 @@ static void __bch2_fs_free(struct bch_fs *c)
                destroy_workqueue(c->btree_update_wq);
 
        bch2_free_super(&c->disk_sb);
-       kvpfree(c, sizeof(*c));
+       kvfree(c);
        module_put(THIS_MODULE);
 }
 
@@ -715,7 +715,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        unsigned i, iter_size;
        int ret = 0;
 
-       c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
+       c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
        if (!c) {
                c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
                goto out;
@@ -882,8 +882,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                        BIOSET_NEED_BVECS) ||
            !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
            !(c->online_reserved = alloc_percpu(u64)) ||
-           mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
-                                       c->opts.btree_node_size) ||
+           mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1,
+                                      c->opts.btree_node_size) ||
            mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
            !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
                                              sizeof(u64), GFP_KERNEL))) {
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 3a32faa86b5c4a2eee98de32951c18dc73052041..bd3687a726f72d80bca5e8fafd69e55130d59cb2 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -1007,28 +1007,6 @@ void sort_cmp_size(void *base, size_t num, size_t size,
        }
 }
 
-static void mempool_free_vp(void *element, void *pool_data)
-{
-       size_t size = (size_t) pool_data;
-
-       vpfree(element, size);
-}
-
-static void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data)
-{
-       size_t size = (size_t) pool_data;
-
-       return vpmalloc(size, gfp_mask);
-}
-
-int mempool_init_kvpmalloc_pool(mempool_t *pool, int min_nr, size_t size)
-{
-       return size < PAGE_SIZE
-               ? mempool_init_kmalloc_pool(pool, min_nr, size)
-               : mempool_init(pool, min_nr, mempool_alloc_vp,
-                              mempool_free_vp, (void *) size);
-}
-
 #if 0
 void eytzinger1_test(void)
 {
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index b414736d59a5b36d1344657eaeb6de6113ec5a09..7fed75c44cd59b8c51dda22cfa9db71d07fcf633 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -53,38 +53,6 @@ static inline size_t buf_pages(void *p, size_t len)
                            PAGE_SIZE);
 }
 
-static inline void vpfree(void *p, size_t size)
-{
-       if (is_vmalloc_addr(p))
-               vfree(p);
-       else
-               free_pages((unsigned long) p, get_order(size));
-}
-
-static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
-{
-       return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
-                                        get_order(size)) ?:
-               __vmalloc(size, gfp_mask);
-}
-
-static inline void kvpfree(void *p, size_t size)
-{
-       if (size < PAGE_SIZE)
-               kfree(p);
-       else
-               vpfree(p, size);
-}
-
-static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
-{
-       return size < PAGE_SIZE
-               ? kmalloc(size, gfp_mask)
-               : vpmalloc(size, gfp_mask);
-}
-
-int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
-
 #define HEAP(type)                                                     \
 struct {                                                               \
        size_t size, used;                                              \
@@ -97,13 +65,13 @@ struct {                                                            \
 ({                                                                     \
        (heap)->used = 0;                                               \
        (heap)->size = (_size);                                         \
-       (heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
+       (heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\
                                 (gfp));                                \
 })
 
 #define free_heap(heap)                                                        \
 do {                                                                   \
-       kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));  \
+       kvfree((heap)->data);                                           \
        (heap)->data = NULL;                                            \
 } while (0)