xfs: minimize overhead of drain wakeups by using jump labels
author: Darrick J. Wong <djwong@kernel.org>
        Wed, 12 Apr 2023 01:59:59 +0000 (18:59 -0700)
committer: Darrick J. Wong <djwong@kernel.org>
        Wed, 12 Apr 2023 01:59:59 +0000 (18:59 -0700)
To reduce the runtime overhead even further when online fsck isn't
running, use a static branch key to decide if we call wake_up on the
drain.  For compilers that support jump labels, the call to wake_up is
replaced by a nop sled when nobody is waiting for intents to drain.

From my initial microbenchmarking, every transition of the static key
between the on and off states takes about 22000ns to complete; this is
paid entirely by the xfs_scrub process.  When the static key is off
(which it should be when fsck isn't running), the nop sled adds an
overhead of approximately 0.36ns to runtime code.  The post-atomic
lockless waiter check adds about 0.03ns, which is basically free.

For the few compilers that don't support jump labels, runtime code pays
the cost of calling wake_up on an empty waitqueue, which was observed to
be about 30ns.  However, most architectures that have sufficient memory
and CPU capacity to run XFS also support jump labels, so this is not
much of a worry.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
17 files changed:
fs/xfs/Kconfig
fs/xfs/scrub/agheader.c
fs/xfs/scrub/alloc.c
fs/xfs/scrub/bmap.c
fs/xfs/scrub/common.c
fs/xfs/scrub/common.h
fs/xfs/scrub/fscounters.c
fs/xfs/scrub/ialloc.c
fs/xfs/scrub/inode.c
fs/xfs/scrub/quota.c
fs/xfs/scrub/refcount.c
fs/xfs/scrub/rmap.c
fs/xfs/scrub/scrub.c
fs/xfs/scrub/scrub.h
fs/xfs/scrub/trace.h
fs/xfs/xfs_drain.c
fs/xfs/xfs_drain.h

index ab24e683b4402bda375564ed3d6f4dabfaafe7b5..05bc865142b8a9e01f11a08d1912117478d2dd89 100644 (file)
@@ -95,6 +95,7 @@ config XFS_RT
 
 config XFS_DRAIN_INTENTS
        bool
+       select JUMP_LABEL if HAVE_ARCH_JUMP_LABEL
 
 config XFS_ONLINE_SCRUB
        bool "XFS online metadata check support"
index c91819da1f5f5368028ddd3010c368ca8fbcbc03..87cb13a6e84a068e0887bc0161553933d721ccd2 100644 (file)
 #include "scrub/scrub.h"
 #include "scrub/common.h"
 
+int
+xchk_setup_agheader(
+       struct xfs_scrub        *sc)
+{
+       if (xchk_need_intent_drain(sc))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+       return xchk_setup_fs(sc);
+}
+
 /* Superblock */
 
 /* Cross-reference with the other btrees. */
index 39e79b9536bc3aa73b25f4ab3347159c18e17c48..de313df2b15b58d97ed86718be79805a5e712cfc 100644 (file)
@@ -24,6 +24,9 @@ int
 xchk_setup_ag_allocbt(
        struct xfs_scrub        *sc)
 {
+       if (xchk_need_intent_drain(sc))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
        return xchk_setup_ag_btree(sc, false);
 }
 
index f6d8cb938a02b8308726ee391706485ea5547208..a5078d63808ff2a7c0214391181b7fbaab776d32 100644 (file)
@@ -31,6 +31,9 @@ xchk_setup_inode_bmap(
 {
        int                     error;
 
+       if (xchk_need_intent_drain(sc))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
        error = xchk_get_inode(sc);
        if (error)
                goto out;
index 2a496d1699a3912fb5427dae9c63e0fef083b7d8..87649facbbdef5be91ae1b55cd16d5ff3d6cfbda 100644 (file)
@@ -487,6 +487,8 @@ xchk_perag_drain_and_lock(
                        sa->agi_bp = NULL;
                }
 
+               if (!(sc->flags & XCHK_FSGATES_DRAIN))
+                       return -EDEADLOCK;
                error = xfs_perag_intent_drain(sa->pag);
                if (error == -ERESTARTSYS)
                        error = -EINTR;
@@ -1005,3 +1007,25 @@ xchk_start_reaping(
        }
        sc->flags &= ~XCHK_REAPING_DISABLED;
 }
+
+/*
+ * Enable filesystem hooks (i.e. runtime code patching) before starting a scrub
+ * operation.  Callers must not hold any locks that intersect with the CPU
+ * hotplug lock (e.g. writeback locks) because code patching must halt the CPUs
+ * to change kernel code.
+ */
+void
+xchk_fsgates_enable(
+       struct xfs_scrub        *sc,
+       unsigned int            scrub_fsgates)
+{
+       ASSERT(!(scrub_fsgates & ~XCHK_FSGATES_ALL));
+       ASSERT(!(sc->flags & scrub_fsgates));
+
+       trace_xchk_fsgates_enable(sc, scrub_fsgates);
+
+       if (scrub_fsgates & XCHK_FSGATES_DRAIN)
+               xfs_drain_wait_enable();
+
+       sc->flags |= scrub_fsgates;
+}
index 273a4331da050f0b8b7794466cf287a86f56fb4a..4714e8a43094cfdf6f2c885fc27769a2bf82f282 100644 (file)
@@ -72,6 +72,7 @@ bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
                           struct xfs_btree_cur **curpp);
 
 /* Setup functions */
+int xchk_setup_agheader(struct xfs_scrub *sc);
 int xchk_setup_fs(struct xfs_scrub *sc);
 int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
 int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
@@ -151,4 +152,18 @@ int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
 void xchk_stop_reaping(struct xfs_scrub *sc);
 void xchk_start_reaping(struct xfs_scrub *sc);
 
+/*
+ * Setting up a hook to wait for intents to drain is costly -- we have to take
+ * the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
+ * up, and again to tear it down.  These costs add up quickly, so we only want
+ * to enable the drain waiter if the drain actually detected a conflict with
+ * running intent chains.
+ */
+static inline bool xchk_need_intent_drain(struct xfs_scrub *sc)
+{
+       return sc->flags & XCHK_TRY_HARDER;
+}
+
+void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);
+
 #endif /* __XFS_SCRUB_COMMON_H__ */
index a38006c71bff760e2a7ee8ba0762c2dc87a1bd95..faa315be7978d4f11715c8c90ea76595f68104a9 100644 (file)
@@ -130,6 +130,13 @@ xchk_setup_fscounters(
        struct xchk_fscounters  *fsc;
        int                     error;
 
+       /*
+        * If the AGF doesn't track btreeblks, we have to lock the AGF to count
+        * btree block usage by walking the actual btrees.
+        */
+       if (!xfs_has_lazysbcount(sc->mp))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
        sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
        if (!sc->buf)
                return -ENOMEM;
index b14270bd1c62e23c49bf8c72aada0d542a19e759..9563769a88815b60dbea68e563f2094021e8bd00 100644 (file)
@@ -32,6 +32,8 @@ int
 xchk_setup_ag_iallocbt(
        struct xfs_scrub        *sc)
 {
+       if (xchk_need_intent_drain(sc))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
        return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
 }
 
index dc66a1465f1bf0937a169f578a1ee03bc7e553a6..bbf9432c02c2c83dbd3f4f2f2d897ab41569b998 100644 (file)
@@ -32,6 +32,9 @@ xchk_setup_inode(
 {
        int                     error;
 
+       if (xchk_need_intent_drain(sc))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
        /*
         * Try to get the inode.  If the verifiers fail, we try again
         * in raw mode.
index b019c70c065a985c5143eaf47bd6bce4f181fcd7..e6caa358cbdab07272ae65151e7c3661ca508dad 100644 (file)
@@ -53,6 +53,9 @@ xchk_setup_quota(
        if (!xfs_this_quota_on(sc->mp, dqtype))
                return -ENOENT;
 
+       if (xchk_need_intent_drain(sc))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
        error = xchk_setup_fs(sc);
        if (error)
                return error;
index 756066f3dea2e28fec1f89c41b4a2ace84160d28..6f649cc01310a37b8fcc7ffbaf9341d7110563ac 100644 (file)
@@ -27,6 +27,8 @@ int
 xchk_setup_ag_refcountbt(
        struct xfs_scrub        *sc)
 {
+       if (xchk_need_intent_drain(sc))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
        return xchk_setup_ag_btree(sc, false);
 }
 
index 4dc79e1a675d67269a3c79ba30729936e540d889..c6e47ef4c79ba348b832cd9b09b87d278cf306c4 100644 (file)
@@ -24,6 +24,9 @@ int
 xchk_setup_ag_rmapbt(
        struct xfs_scrub        *sc)
 {
+       if (xchk_need_intent_drain(sc))
+               xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
        return xchk_setup_ag_btree(sc, false);
 }
 
index 9364fe7d07b4bf113bdb89445bf0b23b85c47122..bd5d4357cd64c51d292fb75b5cd069d8dc2902b0 100644 (file)
@@ -145,6 +145,21 @@ xchk_probe(
 
 /* Scrub setup and teardown */
 
+static inline void
+xchk_fsgates_disable(
+       struct xfs_scrub        *sc)
+{
+       if (!(sc->flags & XCHK_FSGATES_ALL))
+               return;
+
+       trace_xchk_fsgates_disable(sc, sc->flags & XCHK_FSGATES_ALL);
+
+       if (sc->flags & XCHK_FSGATES_DRAIN)
+               xfs_drain_wait_disable();
+
+       sc->flags &= ~XCHK_FSGATES_ALL;
+}
+
 /* Free all the resources and finish the transactions. */
 STATIC int
 xchk_teardown(
@@ -177,6 +192,8 @@ xchk_teardown(
                kvfree(sc->buf);
                sc->buf = NULL;
        }
+
+       xchk_fsgates_disable(sc);
        return error;
 }
 
@@ -191,25 +208,25 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
        },
        [XFS_SCRUB_TYPE_SB] = {         /* superblock */
                .type   = ST_PERAG,
-               .setup  = xchk_setup_fs,
+               .setup  = xchk_setup_agheader,
                .scrub  = xchk_superblock,
                .repair = xrep_superblock,
        },
        [XFS_SCRUB_TYPE_AGF] = {        /* agf */
                .type   = ST_PERAG,
-               .setup  = xchk_setup_fs,
+               .setup  = xchk_setup_agheader,
                .scrub  = xchk_agf,
                .repair = xrep_agf,
        },
        [XFS_SCRUB_TYPE_AGFL]= {        /* agfl */
                .type   = ST_PERAG,
-               .setup  = xchk_setup_fs,
+               .setup  = xchk_setup_agheader,
                .scrub  = xchk_agfl,
                .repair = xrep_agfl,
        },
        [XFS_SCRUB_TYPE_AGI] = {        /* agi */
                .type   = ST_PERAG,
-               .setup  = xchk_setup_fs,
+               .setup  = xchk_setup_agheader,
                .scrub  = xchk_agi,
                .repair = xrep_agi,
        },
index 72a5a8a64a8710fc6769a06b2589e48cac76f95c..4fdb6017f8206dd4ee0a24f5599e48c8d21a8856 100644 (file)
@@ -96,9 +96,18 @@ struct xfs_scrub {
 
 /* XCHK state flags grow up from zero, XREP state flags grow down from 2^31 */
 #define XCHK_TRY_HARDER                (1 << 0)  /* can't get resources, try again */
-#define XCHK_REAPING_DISABLED  (1 << 2)  /* background block reaping paused */
+#define XCHK_REAPING_DISABLED  (1 << 1)  /* background block reaping paused */
+#define XCHK_FSGATES_DRAIN     (1 << 2)  /* defer ops draining enabled */
 #define XREP_ALREADY_FIXED     (1 << 31) /* checking our repair work */
 
+/*
+ * The XCHK_FSGATES* flags reflect functionality in the main filesystem that
+ * is enabled only for this particular online fsck.  When not in use, the
+ * features are gated off via dynamic code patching, which is why the state
+ * must be enabled during scrub setup and can only be torn down afterwards.
+ */
+#define XCHK_FSGATES_ALL       (XCHK_FSGATES_DRAIN)
+
 /* Metadata scrubbers */
 int xchk_tester(struct xfs_scrub *sc);
 int xchk_superblock(struct xfs_scrub *sc);
index ad25ae88fce18f3127685060ed49408acb3d91a4..304c55192c908c65fd9ee35aa62a4a4c6d7fbf2f 100644 (file)
@@ -96,6 +96,12 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FSCOUNTERS);
        { XFS_SCRUB_OFLAG_WARNING,              "warning" }, \
        { XFS_SCRUB_OFLAG_NO_REPAIR_NEEDED,     "norepair" }
 
+#define XFS_SCRUB_STATE_STRINGS \
+       { XCHK_TRY_HARDER,                      "try_harder" }, \
+       { XCHK_REAPING_DISABLED,                "reaping_disabled" }, \
+       { XCHK_FSGATES_DRAIN,                   "fsgates_drain" }, \
+       { XREP_ALREADY_FIXED,                   "already_fixed" }
+
 DECLARE_EVENT_CLASS(xchk_class,
        TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm,
                 int error),
@@ -142,6 +148,33 @@ DEFINE_SCRUB_EVENT(xchk_deadlock_retry);
 DEFINE_SCRUB_EVENT(xrep_attempt);
 DEFINE_SCRUB_EVENT(xrep_done);
 
+DECLARE_EVENT_CLASS(xchk_fsgate_class,
+       TP_PROTO(struct xfs_scrub *sc, unsigned int fsgate_flags),
+       TP_ARGS(sc, fsgate_flags),
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(unsigned int, type)
+               __field(unsigned int, fsgate_flags)
+       ),
+       TP_fast_assign(
+               __entry->dev = sc->mp->m_super->s_dev;
+               __entry->type = sc->sm->sm_type;
+               __entry->fsgate_flags = fsgate_flags;
+       ),
+       TP_printk("dev %d:%d type %s fsgates '%s'",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+                 __print_flags(__entry->fsgate_flags, "|", XFS_SCRUB_STATE_STRINGS))
+)
+
+#define DEFINE_SCRUB_FSHOOK_EVENT(name) \
+DEFINE_EVENT(xchk_fsgate_class, name, \
+       TP_PROTO(struct xfs_scrub *sc, unsigned int fsgates_flags), \
+       TP_ARGS(sc, fsgates_flags))
+
+DEFINE_SCRUB_FSHOOK_EVENT(xchk_fsgates_enable);
+DEFINE_SCRUB_FSHOOK_EVENT(xchk_fsgates_disable);
+
 TRACE_EVENT(xchk_op_error,
        TP_PROTO(struct xfs_scrub *sc, xfs_agnumber_t agno,
                 xfs_agblock_t bno, int error, void *ret_ip),
index b431abdf0af141e1e3cc2d3211f3319b9e773a0c..005a66be44a259e6d2b8e6a2898d45beeeb4b705 100644 (file)
 #include "xfs_ag.h"
 #include "xfs_trace.h"
 
+/*
+ * Use a static key here to reduce the overhead of xfs_drain_rele.  If the
+ * compiler supports jump labels, the static branch will be replaced by a nop
+ * sled when there are no xfs_drain_wait callers.  Online fsck is currently
+ * the only caller, so this is a reasonable tradeoff.
+ *
+ * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
+ * parts of the kernel allocate memory with that lock held, which means that
+ * XFS callers cannot hold any locks that might be used by memory reclaim or
+ * writeback when calling the static_branch_{inc,dec} functions.
+ */
+static DEFINE_STATIC_KEY_FALSE(xfs_drain_waiter_gate);
+
+void
+xfs_drain_wait_disable(void)
+{
+       static_branch_dec(&xfs_drain_waiter_gate);
+}
+
+void
+xfs_drain_wait_enable(void)
+{
+       static_branch_inc(&xfs_drain_waiter_gate);
+}
+
 void
 xfs_defer_drain_init(
        struct xfs_defer_drain  *dr)
@@ -46,6 +71,7 @@ static inline bool has_waiters(struct wait_queue_head *wq_head)
 static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr)
 {
        if (atomic_dec_and_test(&dr->dr_count) &&
+           static_branch_unlikely(&xfs_drain_waiter_gate) &&
            has_waiters(&dr->dr_waiters))
                wake_up(&dr->dr_waiters);
 }
index 9b16df3cc7dc32ba4dd681871cb13e9f476d1ff9..50a5772a8296c0735f608ac399f303b3f580c9a4 100644 (file)
@@ -25,6 +25,9 @@ struct xfs_defer_drain {
 void xfs_defer_drain_init(struct xfs_defer_drain *dr);
 void xfs_defer_drain_free(struct xfs_defer_drain *dr);
 
+void xfs_drain_wait_disable(void);
+void xfs_drain_wait_enable(void);
+
 /*
  * Deferred Work Intent Drains
  * ===========================