xfs: convert CIL busy extents to per-cpu
author Dave Chinner <dchinner@redhat.com>
Thu, 7 Jul 2022 08:52:59 +0000 (18:52 +1000)
committer Dave Chinner <david@fromorbit.com>
Thu, 7 Jul 2022 08:52:59 +0000 (18:52 +1000)
To get them out from under the CIL lock.

This is an unordered list, so we can simply punt it to per-cpu lists
during transaction commits and reaggregate it back into a single
list during the CIL push work.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
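
For illustration only, here is a minimal userspace sketch of the punt/reaggregate pattern this patch implements. It is not the kernel code: a fixed-size array of singly linked lists stands in for the per-cpu data, and the names NR_CPUS, pcp_busy, punt_busy_extent and aggregate_busy_extents are made up for the sketch.

/*
 * Sketch of per-cpu punting at commit time and reaggregation at push
 * time. Each "CPU" only ever touches its own list entry, so no lock is
 * needed on the commit path.
 */
#include <stdio.h>

#define NR_CPUS	4

struct busy_extent {
	struct busy_extent	*next;
	unsigned long		start;
};

/* One unordered list per CPU. */
static struct busy_extent *pcp_busy[NR_CPUS];

/* Commit path: punt a busy extent onto the committing CPU's local list. */
static void punt_busy_extent(int cpu, struct busy_extent *be)
{
	be->next = pcp_busy[cpu];
	pcp_busy[cpu] = be;
}

/* Push path: reaggregate every per-cpu list into one context-wide list. */
static struct busy_extent *aggregate_busy_extents(void)
{
	struct busy_extent	*all = NULL;
	int			cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (pcp_busy[cpu]) {
			struct busy_extent *be = pcp_busy[cpu];

			pcp_busy[cpu] = be->next;
			be->next = all;
			all = be;
		}
	}
	return all;
}

int main(void)
{
	struct busy_extent	a = { .start = 8 }, b = { .start = 16 };
	struct busy_extent	*be;

	punt_busy_extent(0, &a);	/* commit on "CPU 0" */
	punt_busy_extent(2, &b);	/* commit on "CPU 2" */

	for (be = aggregate_busy_extents(); be; be = be->next)
		printf("busy extent at %lu\n", be->start);
	return 0;
}

Because the busy extent list is unordered, the splice can happen in any per-cpu order; only the aggregation point (the CIL push) needs to see the complete set.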
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index e38e10082da26378dbfd2da058903c14f8ac6f9a..f02a75d5a03eb5179a29fc28867e84685ab27f39 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -128,6 +128,11 @@ xlog_cil_push_pcp_aggregate(
                ctx->ticket->t_curr_res += cilpcp->space_reserved;
                cilpcp->space_reserved = 0;
 
+               if (!list_empty(&cilpcp->busy_extents)) {
+                       list_splice_init(&cilpcp->busy_extents,
+                                       &ctx->busy_extents);
+               }
+
                /*
                 * We're in the middle of switching cil contexts.  Reset the
                 * counter we use to detect when the current context is nearing
@@ -634,6 +639,9 @@ xlog_cil_insert_items(
        } else {
                cilpcp->space_used += len;
        }
+       /* attach the transaction to the CIL if it has any busy extents */
+       if (!list_empty(&tp->t_busy))
+               list_splice_init(&tp->t_busy, &cilpcp->busy_extents);
        put_cpu_ptr(cilpcp);
 
        /*
@@ -656,9 +664,6 @@ xlog_cil_insert_items(
                        list_move_tail(&lip->li_cil, &cil->xc_cil);
        }
 
-       /* attach the transaction to the CIL if it has any busy extents */
-       if (!list_empty(&tp->t_busy))
-               list_splice_init(&tp->t_busy, &ctx->busy_extents);
        spin_unlock(&cil->xc_cil_lock);
 
        /*
@@ -1756,6 +1761,8 @@ xlog_cil_pcp_dead(
                ctx->ticket->t_curr_res += cilpcp->space_reserved;
        cilpcp->space_reserved = 0;
 
+       if (!list_empty(&cilpcp->busy_extents))
+               list_splice_init(&cilpcp->busy_extents, &ctx->busy_extents);
        atomic_add(cilpcp->space_used, &ctx->space_used);
        cilpcp->space_used = 0;
        up_write(&cil->xc_ctx_lock);
@@ -1766,10 +1773,12 @@ xlog_cil_pcp_dead(
  */
 int
 xlog_cil_init(
-       struct xlog     *log)
+       struct xlog             *log)
 {
-       struct xfs_cil  *cil;
-       struct xfs_cil_ctx *ctx;
+       struct xfs_cil          *cil;
+       struct xfs_cil_ctx      *ctx;
+       struct xlog_cil_pcp     *cilpcp;
+       int                     cpu;
 
        cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
        if (!cil)
@@ -1789,6 +1798,11 @@ xlog_cil_init(
        if (!cil->xc_pcp)
                goto out_destroy_wq;
 
+       for_each_possible_cpu(cpu) {
+               cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+               INIT_LIST_HEAD(&cilpcp->busy_extents);
+       }
+
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
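
The xlog_cil_pcp_dead() hunk covers CPU hot-unplug: a departing CPU's list is spliced back into the current context under xc_ctx_lock, so no busy extents strand on an offline CPU. Reusing the illustrative names from the sketch above (again, not the kernel API), the drain step amounts to:

/* Hot-unplug path (sketch): fold a departing CPU's list into 'all'. */
static void drain_dead_cpu(struct busy_extent **all, int cpu)
{
	while (pcp_busy[cpu]) {
		struct busy_extent *be = pcp_busy[cpu];

		pcp_busy[cpu] = be->next;
		be->next = *all;
		*all = be;
	}
}

Note that the xlog_cil_init() hunk initializes the busy_extents list head for every possible CPU, not just the online ones, so both the push and dead-CPU paths can splice from any per-cpu entry unconditionally.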