bcachefs: Use kvpmalloc mempools for compression bounce
Author: Kent Overstreet <kent.overstreet@gmail.com>
Date: Sun, 29 Mar 2020 16:33:41 +0000 (12:33 -0400)
Committer: Kent Overstreet <kent.overstreet@linux.dev>
Date: Sun, 22 Oct 2023 21:08:37 +0000 (17:08 -0400)
This fixes an issue where mounting would fail because of memory
fragmentation - previously the compression bounce buffers were using
get_free_pages().

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/compress.c

index 117afac3db1a2ca4d18a61e5969f0cb52586852a..89eb03a988f181140afeddee5601ad99c5ef124e 100644 (file)
@@ -17,7 +17,6 @@ struct bbuf {
                BB_NONE,
                BB_VMAP,
                BB_KMALLOC,
-               BB_VMALLOC,
                BB_MEMPOOL,
        }               type;
        int             rw;
@@ -33,17 +32,7 @@ static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
        if (b)
                return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };
 
-       b = mempool_alloc(&c->compression_bounce[rw], GFP_NOWAIT);
-       b = b ? page_address(b) : NULL;
-       if (b)
-               return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
-
-       b = vmalloc(size);
-       if (b)
-               return (struct bbuf) { .b = b, .type = BB_VMALLOC, .rw = rw };
-
        b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO);
-       b = b ? page_address(b) : NULL;
        if (b)
                return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
 
@@ -129,12 +118,8 @@ static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
        case BB_KMALLOC:
                kfree(buf.b);
                break;
-       case BB_VMALLOC:
-               vfree(buf.b);
-               break;
        case BB_MEMPOOL:
-               mempool_free(virt_to_page(buf.b),
-                            &c->compression_bounce[buf.rw]);
+               mempool_free(buf.b, &c->compression_bounce[buf.rw]);
                break;
        }
 }
@@ -561,15 +546,15 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 have_compressed:
 
        if (!mempool_initialized(&c->compression_bounce[READ])) {
-               ret = mempool_init_page_pool(&c->compression_bounce[READ],
-                                            1, order);
+               ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
+                                                 1, order);
                if (ret)
                        goto out;
        }
 
        if (!mempool_initialized(&c->compression_bounce[WRITE])) {
-               ret = mempool_init_page_pool(&c->compression_bounce[WRITE],
-                                            1, order);
+               ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
+                                                 1, order);
                if (ret)
                        goto out;
        }