bcachefs: Switch replicas.c allocations to GFP_KERNEL
authorKent Overstreet <kent.overstreet@gmail.com>
Fri, 22 Jan 2021 00:14:37 +0000 (19:14 -0500)
committerKent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:51 +0000 (17:08 -0400)
We're transitioning to memalloc_nofs_save/restore instead of GFP flags
with the rest of the kernel, and GFP_NOIO was excessively strict and
causing unnecessary allocation failures - these allocations are done
with btree locks dropped.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/replicas.c

index a0840e1c9f8820f1432c4425de204b4bb8cd4e1f..979e9c2b8c74dd56857b0ec3da70f0d740247272 100644 (file)
@@ -159,7 +159,7 @@ cpu_replicas_add_entry(struct bch_replicas_cpu *old,
        BUG_ON(!new_entry->data_type);
        verify_replicas_entry(new_entry);
 
-       new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
+       new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
        if (!new.entries)
                return new;
 
@@ -284,20 +284,20 @@ static int replicas_table_update(struct bch_fs *c,
 
        for (i = 0; i < ARRAY_SIZE(new_usage); i++)
                if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
-                                       sizeof(u64), GFP_NOIO)))
+                                       sizeof(u64), GFP_KERNEL)))
                        goto err;
 
        memset(new_usage, 0, sizeof(new_usage));
 
        for (i = 0; i < ARRAY_SIZE(new_usage); i++)
                if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
-                                       sizeof(u64), GFP_NOIO)))
+                                       sizeof(u64), GFP_KERNEL)))
                        goto err;
 
-       if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
-           !(new_scratch  = kmalloc(scratch_bytes, GFP_NOIO)) ||
+       if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
+           !(new_scratch  = kmalloc(scratch_bytes, GFP_KERNEL)) ||
            (c->usage_gc &&
-            !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO))))
+            !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
                goto err;
 
        for (i = 0; i < ARRAY_SIZE(new_usage); i++)
@@ -557,7 +557,7 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
 
        c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
                                         c->replicas_gc.entry_size,
-                                        GFP_NOIO);
+                                        GFP_KERNEL);
        if (!c->replicas_gc.entries) {
                mutex_unlock(&c->sb_lock);
                bch_err(c, "error allocating c->replicas_gc");
@@ -680,7 +680,7 @@ __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
                nr++;
        }
 
-       cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
+       cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
        if (!cpu_r->entries)
                return -ENOMEM;
 
@@ -712,7 +712,7 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
        entry_size += sizeof(struct bch_replicas_entry) -
                sizeof(struct bch_replicas_entry_v0);
 
-       cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
+       cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
        if (!cpu_r->entries)
                return -ENOMEM;