struct xfs_scrub        *sc)
 {
        /*
-        * Readonly filesystems do not perform inactivation, so there's no
-        * need to restart the worker.
+        * Readonly filesystems do not perform inactivation or speculative
+        * preallocation, so there's no need to restart the workers.
         */
-       if (!(sc->mp->m_flags & XFS_MOUNT_RDONLY))
+       if (!(sc->mp->m_flags & XFS_MOUNT_RDONLY)) {
                xfs_inodegc_start(sc->mp);
-       xfs_blockgc_start(sc->mp);
+               xfs_blockgc_start(sc->mp);
+       }
        sc->flags &= ~XCHK_REAPING_DISABLED;
 }
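
This hunk appears to be xchk_start_reaping(), which scrub uses to re-arm background work after it drops its transaction. It pairs with a stop helper that quiesces both workers first; a minimal sketch of that counterpart, assuming it simply shuts both workers down unconditionally (illustrative only, not the exact upstream body):

/* Sketch: quiesce background reaping before scrub takes a transaction. */
void
xchk_stop_reaping(
	struct xfs_scrub	*sc)
{
	sc->flags |= XCHK_REAPING_DISABLED;
	xfs_blockgc_stop(sc->mp);	/* safe even if blockgc never started */
	xfs_inodegc_stop(sc->mp);
}
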
 
 xfs_blockgc_queue(
        struct xfs_perag        *pag)
 {
+       struct xfs_mount        *mp = pag->pag_mount;
+
+       if (!xfs_is_blockgc_enabled(mp))
+               return;
+
        rcu_read_lock();
        if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
                queue_delayed_work(pag->pag_mount->m_blockgc_wq,
        struct xfs_perag        *pag;
        xfs_agnumber_t          agno;
 
-       for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+       if (!xfs_clear_blockgc_enabled(mp))
+               return;
+
+       for_each_perag(mp, agno, pag)
                cancel_delayed_work_sync(&pag->pag_blockgc_work);
+       trace_xfs_blockgc_stop(mp, __return_address);
 }
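
Two properties fall out of the new guard: xfs_clear_blockgc_enabled() evidently returns whether the bit was previously set, so repeated stops are cheap no-ops, and the bit is cleared before the cancel loop, so a racing xfs_blockgc_queue() call (including the worker's own re-arm) sees it clear and backs off. A caller-side sketch of the idempotence:

	xfs_blockgc_stop(mp);	/* clears BLOCKGC_ENABLED, cancels and drains per-AG work */
	xfs_blockgc_stop(mp);	/* bit already clear: returns immediately */
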
 
 /* Enable post-EOF and CoW block auto-reclamation. */
        struct xfs_perag        *pag;
        xfs_agnumber_t          agno;
 
+       if (xfs_set_blockgc_enabled(mp))
+               return;
+
+       trace_xfs_blockgc_start(mp, __return_address);
        for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
                xfs_blockgc_queue(pag);
 }
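
The start side is symmetric: xfs_set_blockgc_enabled() bails if the bit was already set, and only AGs still carrying the XFS_ICI_BLOCKGC_TAG tag get their delayed work requeued. A hedged sketch of how a caller brackets a quiesced region with the pair, mirroring how scrub and the freeze paths in this patch use it (no new API implied):

	/* Quiesce speculative-preallocation trimming around a critical section. */
	xfs_blockgc_stop(mp);		/* idempotent; waits for any running worker */
	/* ... work that must not race with blockgc ... */
	xfs_blockgc_start(mp);		/* requeues gc only for AGs with tagged inodes */
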
        struct xfs_mount        *mp = pag->pag_mount;
        int                     error;
 
-       if (!sb_start_write_trylock(mp->m_super))
-               return;
+       trace_xfs_blockgc_worker(mp, __return_address);
+
        error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
        if (error)
                xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
                                pag->pag_agno, error);
-       sb_end_write(mp->m_super);
        xfs_blockgc_queue(pag);
 }
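
The sb_start_write_trylock()/sb_end_write() pair can go away because the worker no longer has to fend off freeze on its own: the pre-freeze sync now stops blockgc explicitly (see the xfs_fs_sync_fs() hunk further down), so by the time the filesystem is frozen the delayed work has been cancelled and drained. The overall shape is a self-rearming delayed work gated by one enabled bit; a standalone model of that pattern in plain C11 (not kernel code; all names here are made up for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool gc_enabled;

/* Models xfs_blockgc_queue(): re-arming is refused once gc is disabled. */
static void gc_queue(void)
{
	if (!atomic_load(&gc_enabled))
		return;
	puts("work (re)queued");
}

/* Models the worker: do one pass, then try to re-arm itself. */
static void gc_worker(void)
{
	puts("scanning for stale preallocations");
	gc_queue();
}

/* Models xfs_blockgc_stop(): flip the bit first, then drain the work. */
static void gc_stop(void)
{
	if (!atomic_exchange(&gc_enabled, false))
		return;				/* already stopped */
	/* the kernel does cancel_delayed_work_sync() for every AG here */
}

int main(void)
{
	atomic_store(&gc_enabled, true);
	gc_queue();
	gc_worker();	/* re-arms itself while enabled */
	gc_stop();	/* from now on gc_queue() is a no-op */
	gc_queue();
	return 0;
}
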
 
 
 
        /* Enable background inode inactivation workers. */
        xfs_inodegc_start(mp);
+       xfs_blockgc_start(mp);
 
        /*
         * Get and sanity-check the root inode.
 
  * processed.
  */
 #define XFS_OPSTATE_INODEGC_ENABLED    0
+/*
+ * If set, background speculative prealloc gc worker threads will be scheduled
+ * to process queued blockgc work.  If not, inodes retain their preallocations
+ * until explicitly deleted.
+ */
+#define XFS_OPSTATE_BLOCKGC_ENABLED    1
 
 #define __XFS_IS_OPSTATE(name, NAME) \
 static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
 }
 
 __XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
+__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
 
 #define XFS_OPSTATE_STRINGS \
-       { (1UL << XFS_OPSTATE_INODEGC_ENABLED),         "inodegc" }
+       { (1UL << XFS_OPSTATE_INODEGC_ENABLED),         "inodegc" }, \
+       { (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),         "blockgc" }
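
The blockgc flag reuses the opstate helper machinery introduced for inodegc. Judging from the xfs_is_/xfs_set_/xfs_clear_ call sites earlier in the patch, the __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED) instantiation expands to roughly the following trio; this is a sketch assuming the usual test_bit() family on mp->m_opstate, not a quote of the header:

static inline bool xfs_is_blockgc_enabled(struct xfs_mount *mp)
{
	return test_bit(XFS_OPSTATE_BLOCKGC_ENABLED, &mp->m_opstate);
}
static inline bool xfs_clear_blockgc_enabled(struct xfs_mount *mp)
{
	/* returns the old value, which is what makes xfs_blockgc_stop() idempotent */
	return test_and_clear_bit(XFS_OPSTATE_BLOCKGC_ENABLED, &mp->m_opstate);
}
static inline bool xfs_set_blockgc_enabled(struct xfs_mount *mp)
{
	/* returns the old value, which is what makes xfs_blockgc_start() idempotent */
	return test_and_set_bit(XFS_OPSTATE_BLOCKGC_ENABLED, &mp->m_opstate);
}

The new XFS_OPSTATE_STRINGS entry makes the bit decode as "blockgc" wherever the opstate mask is printed in tracepoints.
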
 
 /*
  * Max and min values for mount-option defined I/O
 
         * down inodegc because once SB_FREEZE_FS is set it's too late to
         * prevent inactivation races with freeze. The fs doesn't get called
         * again by the freezing process until after SB_FREEZE_FS has been set,
-        * so it's now or never.
+        * so it's now or never.  Same logic applies to speculative allocation
+        * garbage collection.
         *
         * We don't care if this is a normal syncfs call that does this or
         * freeze that does this - we can run this multiple times without issue
         * and we won't race with a restart because a restart can only occur
         * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
         */
-       if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT)
+       if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
                xfs_inodegc_stop(mp);
+               xfs_blockgc_stop(mp);
+       }
 
        return 0;
 }
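
Taken together with the freeze and unfreeze hunks below, the stop/start calls track the freeze state machine. A sketch of the sequence, assuming the usual VFS ordering of freeze levels (function names on the right are inferred from the surrounding context):

/*
 * freeze_super()
 *   SB_FREEZE_WRITE      - new writers blocked
 *   SB_FREEZE_PAGEFAULT  - page faults blocked
 *   sync_filesystem()    -> xfs_fs_sync_fs(): xfs_inodegc_stop(), xfs_blockgc_stop()
 *   SB_FREEZE_FS         -> xfs_fs_freeze(); on failure, restart both workers there
 * thaw_super()           -> xfs_fs_unfreeze(): restart both workers (rw mounts only)
 */
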
         * set a GFP_NOFS context here to avoid recursion deadlocks.
         */
        flags = memalloc_nofs_save();
-       xfs_blockgc_stop(mp);
        xfs_save_resvblks(mp);
        ret = xfs_log_quiesce(mp);
        memalloc_nofs_restore(flags);
         * here, so we can restart safely without racing with a stop in
         * xfs_fs_sync_fs().
         */
-       if (ret && !(mp->m_flags & XFS_MOUNT_RDONLY))
+       if (ret && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               xfs_blockgc_start(mp);
                xfs_inodegc_start(mp);
+       }
 
        return ret;
 }
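
The restart in the error branch exists because a failed ->freeze_fs never gets a matching ->unfreeze_fs: the VFS thaws writers and bails out, so this is the only chance to re-arm the workers. A short case analysis, assuming that VFS behaviour:

/*
 * ret == 0: workers stay stopped; xfs_fs_unfreeze() restarts them at thaw time.
 * ret != 0: no unfreeze callback will follow, so restart blockgc/inodegc here.
 *           This cannot race with the stop in xfs_fs_sync_fs(), which only
 *           runs at SB_FREEZE_PAGEFAULT; we are already at SB_FREEZE_FS.
 */
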
 
        xfs_restore_resvblks(mp);
        xfs_log_work_queue(mp);
-       xfs_blockgc_start(mp);
 
        /*
         * Don't reactivate the inodegc worker on a readonly filesystem because
-        * inodes are sent directly to reclaim.
+        * inodes are sent directly to reclaim.  Don't reactivate the blockgc
+        * worker because there are no speculative preallocations on a readonly
+        * filesystem.
         */
-       if (!(mp->m_flags & XFS_MOUNT_RDONLY))
+       if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               xfs_blockgc_start(mp);
                xfs_inodegc_start(mp);
+       }
 
        return 0;
 }
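
As a recap of who starts and stops the workers after this patch (paraphrased from the hunks above; function names are inferred from the surrounding context, not quoted):

/*
 * xfs_mountfs()                    start inodegc + blockgc
 * xfs_fs_sync_fs()   (pre-freeze)  stop  inodegc + blockgc
 * xfs_fs_freeze()    (on failure)  restart both, rw mounts only
 * xfs_fs_unfreeze()                restart both, rw mounts only
 * xchk_start_reaping()             restart both, rw mounts only
 */
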
 
 DEFINE_FS_EVENT(xfs_inodegc_queue);
 DEFINE_FS_EVENT(xfs_inodegc_throttle);
 DEFINE_FS_EVENT(xfs_fs_sync_fs);
+DEFINE_FS_EVENT(xfs_blockgc_start);
+DEFINE_FS_EVENT(xfs_blockgc_stop);
+DEFINE_FS_EVENT(xfs_blockgc_worker);
 
 DECLARE_EVENT_CLASS(xfs_ag_class,
        TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),