srcu: Only accelerate on enqueue time
authorFrederic Weisbecker <frederic@kernel.org>
Tue, 3 Oct 2023 23:29:00 +0000 (01:29 +0200)
committerFrederic Weisbecker <frederic@kernel.org>
Fri, 13 Oct 2023 12:00:54 +0000 (14:00 +0200)
Acceleration in SRCU happens at enqueue time for each new callback. This
operation is expected not to fail, so any similar attempt made from other
places should not find any remaining callbacks to accelerate.
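
For context, the enqueue-time acceleration referred to here is the one done
in srcu_gp_start_if_needed(), the common path behind call_srcu() and
synchronize_srcu(). The following is a hedged, abridged sketch of that path
(paraphrased from srcutree.c, not a verbatim quote):

	spin_lock_irqsave_sdp_contention(sdp, &flags);
	if (rhp)
		/* Enqueue the new callback into the RCU_NEXT_TAIL segment. */
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	/* Move callbacks whose grace period has already elapsed to the done segment. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
	/* Accelerate immediately, against the grace period about to be requested. */
	s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	/* ... followed by funnel-locking to actually start the grace period "s". */

Because the caller then immediately requests the grace period matching "s",
the acceleration performed at this point cannot become stale.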

Moreover, accelerations performed beyond enqueue time are error-prone
because rcu_seq_snap() may then return the snapshot for a new grace
period that is not going to be started.
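
As a hedged illustration of that failure mode, rcu_seq_snap() is defined in
kernel/rcu/rcu.h roughly as below (paraphrased; exact comments and context
differ). The low RCU_SEQ_STATE_MASK bits of the sequence hold the
grace-period phase, and the function returns the sequence value at which a
full grace period beyond the current one will have completed:

	static inline unsigned long rcu_seq_snap(unsigned long *sp)
	{
		unsigned long s;

		/* Round up to the end of the next full grace period. */
		s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
		smp_mb(); /* Order the snapshot read against later accesses. */
		return s;
	}

Accelerating callbacks against that value is only safe if a grace period
reaching it is actually going to be requested. At enqueue time the caller
does exactly that, but in srcu_gp_start() or srcu_invoke_callbacks() there
is no such guarantee, so the snapshot may name a grace period that never
gets started.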

Remove these dangerous and needless accelerations and instead introduce
assertions that report unaccelerated callbacks leaking beyond enqueue
time.

Co-developed-by: Yong He <alexyonghe@tencent.com>
Signed-off-by: Yong He <alexyonghe@tencent.com>
Co-developed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Co-developed-by: Neeraj Upadhyay <Neeraj.Upadhyay@amd.com>
Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@amd.com>
Reviewed-by: Like Xu <likexu@tencent.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
kernel/rcu/srcutree.c

index 9fab9ac369963327ad293a0cf3dd26f11e17a2eb..560e99ec53335c9b4b0c7688cebff33a1fac808c 100644 (file)
@@ -784,8 +784,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
        spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-       (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-                                      rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
+       WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
        spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
        WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
        WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
@@ -1721,6 +1720,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
        ssp = sdp->ssp;
        rcu_cblist_init(&ready_cbs);
        spin_lock_irq_rcu_node(sdp);
+       WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
        if (sdp->srcu_cblist_invoking ||
@@ -1750,8 +1750,6 @@ static void srcu_invoke_callbacks(struct work_struct *work)
         */
        spin_lock_irq_rcu_node(sdp);
        rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
-       (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-                                      rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
        sdp->srcu_cblist_invoking = false;
        more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
        spin_unlock_irq_rcu_node(sdp);