xfs: replace kmem_alloc_large() with kvmalloc()
author Dave Chinner <dchinner@redhat.com>
Mon, 9 Aug 2021 17:10:01 +0000 (10:10 -0700)
committer Darrick J. Wong <djwong@kernel.org>
Mon, 9 Aug 2021 22:57:43 +0000 (15:57 -0700)
There is no reason for this wrapper to exist anymore. All the places
that use KM_NOFS allocations are within transaction contexts and
hence covered by memalloc_nofs_save/restore contexts. Hence we don't
need any special handling of vmalloc for large IOs anymore, so
special-casing this code isn't necessary.
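
For reference, this is the scoped-allocation pattern the message relies
on (a minimal sketch, not code from this patch; in XFS the save/restore
pair is entered by the transaction allocation path, e.g. xfs_trans_alloc()):

        unsigned int    nofs_flag;
        void            *ptr;

        /* Entering transaction context opens a NOFS allocation scope. */
        nofs_flag = memalloc_nofs_save();

        /*
         * Inside the scope, reclaim strips __GFP_FS from every allocation,
         * so a plain GFP_KERNEL kvmalloc() behaves as GFP_NOFS and cannot
         * re-enter the filesystem and deadlock.
         */
        ptr = kvmalloc(size, GFP_KERNEL);

        memalloc_nofs_restore(nofs_flag);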

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
fs/xfs/kmem.c
fs/xfs/kmem.h
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/scrub/attr.c
fs/xfs/scrub/attr.h
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_trace.h

fs/xfs/kmem.c
index 3f2979fd2f2bf464f73537c88057995d408e53b3..6f49bf39183c78e7d3cc82f21408a3b06f405453 100644
@@ -29,42 +29,3 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
                congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (1);
 }
-
-
-/*
- * __vmalloc() will allocate data pages and auxiliary structures (e.g.
- * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
- * we need to tell memory reclaim that we are in such a context via
- * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
- * and potentially deadlocking.
- */
-static void *
-__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
-{
-       unsigned nofs_flag = 0;
-       void    *ptr;
-       gfp_t   lflags = kmem_flags_convert(flags);
-
-       if (flags & KM_NOFS)
-               nofs_flag = memalloc_nofs_save();
-
-       ptr = __vmalloc(size, lflags);
-
-       if (flags & KM_NOFS)
-               memalloc_nofs_restore(nofs_flag);
-
-       return ptr;
-}
-
-void *
-kmem_alloc_large(size_t size, xfs_km_flags_t flags)
-{
-       void    *ptr;
-
-       trace_kmem_alloc_large(size, flags, _RET_IP_);
-
-       ptr = kmem_alloc(size, flags | KM_MAYFAIL);
-       if (ptr)
-               return ptr;
-       return __kmem_vmalloc(size, flags);
-}
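
The helper removed above open-coded what kvmalloc() already provides:
attempt a physically contiguous allocation first, then fall back to
vmalloc on failure. A before/after sketch of a typical call site
conversion (illustrative only, not a hunk from this patch):

        /* Before: the wrapper handled the vmalloc fallback itself. */
        ptr = kmem_alloc_large(size, KM_MAYFAIL);

        /* After: kvmalloc() performs the kmalloc-then-vmalloc fallback. */
        ptr = kvmalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);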
fs/xfs/kmem.h
index 9ff20047f8b86fedd765cb64020f7b8038d774bb..54da6d717a061c91f13036816b88dca3ef1ca3a6 100644
@@ -57,7 +57,6 @@ kmem_flags_convert(xfs_km_flags_t flags)
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
 static inline void  kmem_free(const void *ptr)
 {
        kvfree(ptr);
fs/xfs/libxfs/xfs_attr_leaf.c
index b277e0511cdd9de0568f987700920cd53e42d237..cdd06213ab44a2d5d68289997234ac893c3137e5 100644
@@ -489,7 +489,7 @@ xfs_attr_copy_value(
        }
 
        if (!args->value) {
-               args->value = kmem_alloc_large(valuelen, KM_NOLOCKDEP);
+               args->value = kvmalloc(valuelen, GFP_KERNEL | __GFP_NOLOCKDEP);
                if (!args->value)
                        return -ENOMEM;
        }
fs/xfs/scrub/attr.c
index 552af0cf848287355cf5c6615157969952167cb4..6c36af6dbd354bb3699ebebf0cc5884f7da6b052 100644
  * reallocating the buffer if necessary.  Buffer contents are not preserved
  * across a reallocation.
  */
-int
+static int
 xchk_setup_xattr_buf(
        struct xfs_scrub        *sc,
        size_t                  value_size,
-       xfs_km_flags_t          flags)
+       gfp_t                   flags)
 {
        size_t                  sz;
        struct xchk_xattr_buf   *ab = sc->buf;
@@ -57,7 +57,7 @@ xchk_setup_xattr_buf(
         * Don't zero the buffer upon allocation to avoid runtime overhead.
         * All users must be careful never to read uninitialized contents.
         */
-       ab = kmem_alloc_large(sizeof(*ab) + sz, flags);
+       ab = kvmalloc(sizeof(*ab) + sz, flags);
        if (!ab)
                return -ENOMEM;
 
@@ -79,7 +79,7 @@ xchk_setup_xattr(
         * without the inode lock held, which means we can sleep.
         */
        if (sc->flags & XCHK_TRY_HARDER) {
-               error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
+               error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, GFP_KERNEL);
                if (error)
                        return error;
        }
@@ -138,7 +138,8 @@ xchk_xattr_listent(
         * doesn't work, we overload the seen_enough variable to convey
         * the error message back to the main scrub function.
         */
-       error = xchk_setup_xattr_buf(sx->sc, valuelen, KM_MAYFAIL);
+       error = xchk_setup_xattr_buf(sx->sc, valuelen,
+                       GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (error == -ENOMEM)
                error = -EDEADLOCK;
        if (error) {
@@ -323,7 +324,8 @@ xchk_xattr_block(
                return 0;
 
        /* Allocate memory for block usage checking. */
-       error = xchk_setup_xattr_buf(ds->sc, 0, KM_MAYFAIL);
+       error = xchk_setup_xattr_buf(ds->sc, 0,
+                       GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (error == -ENOMEM)
                return -EDEADLOCK;
        if (error)
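
Taken together, the scrub conversions show the flag translation this
patch applies throughout, summarized here as a comment (not code from
the patch itself):

        /*
         * kmem_alloc_large(sz, 0)             -> kvmalloc(sz, GFP_KERNEL)
         * kmem_alloc_large(sz, KM_MAYFAIL)    -> kvmalloc(sz, GFP_KERNEL |
         *                                            __GFP_RETRY_MAYFAIL)
         * kmem_alloc_large(sz, ... | KM_ZERO) -> kvzalloc(sz, ...)
         * KM_NOLOCKDEP                        -> __GFP_NOLOCKDEP
         * KM_NOFS                             -> GFP_KERNEL, relying on the
         *                                        scoped NOFS transaction context
         */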
fs/xfs/scrub/attr.h
index 13a1d2e8424d9a3b4d52c1721eea610ad9bd6780..1719e1c4da59d856cedb566ed528379938f77cfd 100644
@@ -65,7 +65,4 @@ xchk_xattr_dstmap(
                        BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);
 }
 
-int xchk_setup_xattr_buf(struct xfs_scrub *sc, size_t value_size,
-               xfs_km_flags_t flags);
-
 #endif /* __XFS_SCRUB_ATTR_H__ */
fs/xfs/xfs_log.c
index fdc4d0636413b06fc2f788ee1f7611968ad0ce04..eb8341027cc7fbe012cc56430b446b8bd47d6211 100644
@@ -1487,8 +1487,8 @@ xlog_alloc_log(
                iclog->ic_prev = prev_iclog;
                prev_iclog = iclog;
 
-               iclog->ic_data = kmem_alloc_large(log->l_iclog_size,
-                                               KM_MAYFAIL | KM_ZERO);
+               iclog->ic_data = kvzalloc(log->l_iclog_size,
+                               GFP_KERNEL | __GFP_RETRY_MAYFAIL);
                if (!iclog->ic_data)
                        goto out_free_iclog;
 #ifdef DEBUG
fs/xfs/xfs_log_cil.c
index 4c44bc3786c0f07945aa9631a115bb66e670ca33..4e41130f206f05fa0ed1d1542a4978bda222444c 100644
@@ -185,7 +185,15 @@ xlog_cil_alloc_shadow_bufs(
                         */
                        kmem_free(lip->li_lv_shadow);
 
-                       lv = kmem_alloc_large(buf_size, KM_NOFS);
+                       /*
+                        * We are in transaction context, which means this
+                        * allocation will pick up GFP_NOFS from the
+                        * memalloc_nofs_save/restore context the transaction
+                        * holds. This means we can use GFP_KERNEL here so the
+                        * generic kvmalloc() code will run vmalloc on
+                        * contiguous page allocation failure as we require.
+                        */
+                       lv = kvmalloc(buf_size, GFP_KERNEL);
                        memset(lv, 0, xlog_cil_iovec_space(niovecs));
 
                        lv->lv_item = lip;
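
Note that this hunk keeps kvmalloc() plus a partial memset() rather than
switching to kvzalloc(): assuming xlog_cil_iovec_space() returns the size
of the buffer header (the log vector structure plus its iovec array),
only that header needs to start zeroed, while the data region is fully
overwritten when the log item is formatted. Roughly:

        /*
         * +-- header (log vec + iovec array) --+------- data -------+
         * |<- xlog_cil_iovec_space(niovecs) -->|<----- nbytes ----->|
         *        zeroed by the memset()          written later when the
         *                                        item is formatted
         */
        lv = kvmalloc(buf_size, GFP_KERNEL);
        memset(lv, 0, xlog_cil_iovec_space(niovecs));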
fs/xfs/xfs_log_recover.c
index 48bb5c31ffd041961c55c121b4532d17bb46e809..ea96b5e45364db58e3d51b3cd4d71e5f677ff235 100644
@@ -106,7 +106,7 @@ xlog_alloc_buffer(
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);
-       return kmem_alloc_large(BBTOB(nbblks), KM_MAYFAIL | KM_ZERO);
+       return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 }
 
 /*
fs/xfs/xfs_trace.h
index 2d31e64b46dc013778384a87055e42735cb72b5b..36d771d00f7fa25d3a241abbeb960a8e2090834a 100644
@@ -3774,7 +3774,6 @@ DEFINE_EVENT(xfs_kmem_class, name, \
        TP_PROTO(ssize_t size, int flags, unsigned long caller_ip), \
        TP_ARGS(size, flags, caller_ip))
 DEFINE_KMEM_EVENT(kmem_alloc);
-DEFINE_KMEM_EVENT(kmem_alloc_large);
 
 TRACE_EVENT(xfs_check_new_dalign,
        TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino),