xfs: pivot online scrub away from kmem.[ch]
author: Darrick J. Wong <djwong@kernel.org>
Mon, 7 Nov 2022 01:03:16 +0000 (17:03 -0800)
committer: Darrick J. Wong <djwong@kernel.org>
Wed, 16 Nov 2022 23:25:02 +0000 (15:25 -0800)
Convert all the online scrub code to use the Linux slab allocator
functions directly instead of going through the kmem wrappers.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
fs/xfs/scrub/agheader_repair.c
fs/xfs/scrub/attr.c
fs/xfs/scrub/bitmap.c
fs/xfs/scrub/btree.c
fs/xfs/scrub/dabtree.c
fs/xfs/scrub/fscounters.c
fs/xfs/scrub/refcount.c
fs/xfs/scrub/scrub.c

index 82ceb60ea5fcdbb4da96bd96fb4c9a85aa3d1244..d75d82151eeba3e216b62f17f4f22e9a3a9190f6 100644 (file)
@@ -685,7 +685,7 @@ xrep_agfl_init_header(
                if (br->len)
                        break;
                list_del(&br->list);
-               kmem_free(br);
+               kfree(br);
        }
 
        /* Write new AGFL to disk. */
index 11b2593a2be792d7afa9206d9dda27b5f8701142..31529b9bf389199c53d393646d357f14914237ff 100644 (file)
@@ -49,7 +49,7 @@ xchk_setup_xattr_buf(
        if (ab) {
                if (sz <= ab->sz)
                        return 0;
-               kmem_free(ab);
+               kvfree(ab);
                sc->buf = NULL;
        }
 
index b89bf9de9b1c5551963d33847fc11e371aaaa23d..a255f09e9f0a6f627fae7f8a8dd99d3d578cc3a7 100644 (file)
@@ -10,6 +10,7 @@
 #include "xfs_trans_resv.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
+#include "scrub/scrub.h"
 #include "scrub/bitmap.h"
 
 /*
@@ -25,7 +26,7 @@ xbitmap_set(
 {
        struct xbitmap_range    *bmr;
 
-       bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
+       bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS);
        if (!bmr)
                return -ENOMEM;
 
@@ -47,7 +48,7 @@ xbitmap_destroy(
 
        for_each_xbitmap_extent(bmr, n, bitmap) {
                list_del(&bmr->list);
-               kmem_free(bmr);
+               kfree(bmr);
        }
 }
 
@@ -174,15 +175,15 @@ xbitmap_disunion(
                        /* Total overlap, just delete ex. */
                        lp = lp->next;
                        list_del(&br->list);
-                       kmem_free(br);
+                       kfree(br);
                        break;
                case 0:
                        /*
                         * Deleting from the middle: add the new right extent
                         * and then shrink the left extent.
                         */
-                       new_br = kmem_alloc(sizeof(struct xbitmap_range),
-                                       KM_MAYFAIL);
+                       new_br = kmalloc(sizeof(struct xbitmap_range),
+                                       XCHK_GFP_FLAGS);
                        if (!new_br) {
                                error = -ENOMEM;
                                goto out;
index 075ff3071122edc11b66b3f774ca778fd5a56630..0fd36d5b464670da1bcde1bb7b6995645e5f362e 100644 (file)
@@ -432,8 +432,7 @@ xchk_btree_check_owner(
        if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
                struct check_owner      *co;
 
-               co = kmem_alloc(sizeof(struct check_owner),
-                               KM_MAYFAIL);
+               co = kmalloc(sizeof(struct check_owner), XCHK_GFP_FLAGS);
                if (!co)
                        return -ENOMEM;
 
@@ -652,7 +651,7 @@ xchk_btree(
                xchk_btree_set_corrupt(sc, cur, 0);
                return 0;
        }
-       bs = kmem_zalloc(cur_sz, KM_NOFS | KM_MAYFAIL);
+       bs = kzalloc(cur_sz, XCHK_GFP_FLAGS);
        if (!bs)
                return -ENOMEM;
        bs->cur = cur;
@@ -743,9 +742,9 @@ out:
                        error = xchk_btree_check_block_owner(bs, co->level,
                                        co->daddr);
                list_del(&co->list);
-               kmem_free(co);
+               kfree(co);
        }
-       kmem_free(bs);
+       kfree(bs);
 
        return error;
 }
index 84fe3d33d69954fce8444365ec768abd64328a31..d17cee1770854e938da19fa73e9fa2a6b7e5c6a0 100644 (file)
@@ -486,7 +486,7 @@ xchk_da_btree(
                return 0;
 
        /* Set up initial da state. */
-       ds = kmem_zalloc(sizeof(struct xchk_da_btree), KM_NOFS | KM_MAYFAIL);
+       ds = kzalloc(sizeof(struct xchk_da_btree), XCHK_GFP_FLAGS);
        if (!ds)
                return -ENOMEM;
        ds->dargs.dp = sc->ip;
@@ -591,6 +591,6 @@ out:
 
 out_state:
        xfs_da_state_free(ds->state);
-       kmem_free(ds);
+       kfree(ds);
        return error;
 }
index 6a6f8fe7f87c07a691176d1b96a4eb4cad922019..3c56f5890da4f812f7b20932a4d5081d20ef05b5 100644 (file)
@@ -116,7 +116,7 @@ xchk_setup_fscounters(
        struct xchk_fscounters  *fsc;
        int                     error;
 
-       sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
+       sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
        if (!sc->buf)
                return -ENOMEM;
        fsc = sc->buf;
index a26ee0f24ef2a0c793e0ca651521ab36f613049a..d9c1b3cea4a52cec7c55cb72d6e61830487b275c 100644 (file)
@@ -127,8 +127,8 @@ xchk_refcountbt_rmap_check(
                 * is healthy each rmap_irec we see will be in agbno order
                 * so we don't need insertion sort here.
                 */
-               frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
-                               KM_MAYFAIL);
+               frag = kmalloc(sizeof(struct xchk_refcnt_frag),
+                               XCHK_GFP_FLAGS);
                if (!frag)
                        return -ENOMEM;
                memcpy(&frag->rm, rec, sizeof(frag->rm));
@@ -215,7 +215,7 @@ xchk_refcountbt_process_rmap_fragments(
                                continue;
                        }
                        list_del(&frag->list);
-                       kmem_free(frag);
+                       kfree(frag);
                        nr++;
                }
 
@@ -257,11 +257,11 @@ done:
        /* Delete fragments and work list. */
        list_for_each_entry_safe(frag, n, &worklist, list) {
                list_del(&frag->list);
-               kmem_free(frag);
+               kfree(frag);
        }
        list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
                list_del(&frag->list);
-               kmem_free(frag);
+               kfree(frag);
        }
 }
 
@@ -306,7 +306,7 @@ xchk_refcountbt_xref_rmap(
 out_free:
        list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
                list_del(&frag->list);
-               kmem_free(frag);
+               kfree(frag);
        }
 }
 
index 2e8e400f10a9a8b3a1e10eedc4df71c136454f81..07a7a75f987fcd1abed676b386126b9470d60afc 100644 (file)
@@ -174,7 +174,7 @@ xchk_teardown(
        if (sc->flags & XCHK_REAPING_DISABLED)
                xchk_start_reaping(sc);
        if (sc->buf) {
-               kmem_free(sc->buf);
+               kvfree(sc->buf);
                sc->buf = NULL;
        }
        return error;
@@ -467,7 +467,7 @@ xfs_scrub_metadata(
        xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB,
  "EXPERIMENTAL online scrub feature in use. Use at your own risk!");
 
-       sc = kmem_zalloc(sizeof(struct xfs_scrub), KM_NOFS | KM_MAYFAIL);
+       sc = kzalloc(sizeof(struct xfs_scrub), XCHK_GFP_FLAGS);
        if (!sc) {
                error = -ENOMEM;
                goto out;
@@ -557,7 +557,7 @@ out_nofix:
 out_teardown:
        error = xchk_teardown(sc, error);
 out_sc:
-       kmem_free(sc);
+       kfree(sc);
 out:
        trace_xchk_done(XFS_I(file_inode(file)), sm, error);
        if (error == -EFSCORRUPTED || error == -EFSBADCRC) {