rcu: Make synchronize_rcu_expedited() fast path update .expedited_sequence
author     Paul E. McKenney <paulmck@kernel.org>   Fri, 5 Aug 2022 00:43:53 +0000 (17:43 -0700)
committer  Paul E. McKenney <paulmck@kernel.org>   Wed, 31 Aug 2022 12:09:21 +0000 (05:09 -0700)
This commit causes the early boot single-CPU synchronize_rcu_expedited()
fastpath to update the rcu_state structure's ->expedited_sequence
counter.  This will allow the full-state polled grace-period APIs to
detect all expedited grace periods without the need to track the special
combined polling-only counter.  This is another step towards removing
the ->rgos_polled field from the rcu_gp_oldstate structure, thereby
reducing its size by one third.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
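
For reference, below is a minimal user-space model of the
rcu_seq_snap()/rcu_seq_done() cookie arithmetic from kernel/rcu/rcu.h,
simplified in that READ_ONCE(), the memory barrier in rcu_seq_snap(),
and the wrap-safe ULONG_CMP_GE() comparison are omitted (early boot is
single-threaded, so this sketch can afford to drop them).  It shows why
adding 1 << RCU_SEQ_CTR_SHIFT to ->expedited_sequence is enough for a
previously snapped cookie to test as done:

    #include <stdbool.h>
    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT  2
    #define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

    /* Return a cookie for a grace period that begins after this call. */
    static unsigned long rcu_seq_snap(const unsigned long *sp)
    {
            return (*sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
    }

    /* Has the grace period named by cookie @s completed? */
    static bool rcu_seq_done(const unsigned long *sp, unsigned long s)
    {
            return *sp >= s;  /* The kernel uses ULONG_CMP_GE() here. */
    }

    int main(void)
    {
            unsigned long expedited_sequence = 0;  /* models rcu_state.expedited_sequence */
            unsigned long cookie = rcu_seq_snap(&expedited_sequence);

            printf("before fastpath: %d\n",
                   rcu_seq_done(&expedited_sequence, cookie));  /* prints 0 */

            /* The counter update added by this commit: */
            expedited_sequence += 1UL << RCU_SEQ_CTR_SHIFT;

            printf("after fastpath:  %d\n",
                   rcu_seq_done(&expedited_sequence, cookie));  /* prints 1 */
            return 0;
    }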
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 1a51f9301ebff7a3b192942cb9856e0762f3c88e..54e05d13d15122ab4b6040840a01fa90b1294c12 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -906,6 +906,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
 void synchronize_rcu_expedited(void)
 {
        bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+       unsigned long flags;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;
@@ -924,6 +925,11 @@ void synchronize_rcu_expedited(void)
                // them, which allows reuse of ->gp_seq_polled_exp_snap.
                rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
                rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
+
+               local_irq_save(flags);
+               WARN_ON_ONCE(num_online_cpus() > 1);
+               rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
+               local_irq_restore(flags);
                return;  // Context allows vacuous grace periods.
        }
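
On the consumer side, a hedged usage sketch of the full-state polled
grace-period API that this counter update serves: the
get_state_synchronize_rcu_full() and poll_state_synchronize_rcu_full()
functions are introduced elsewhere in this series, and the surrounding
logic here is illustrative only, not taken from this commit:

    struct rcu_gp_oldstate rgos;

    get_state_synchronize_rcu_full(&rgos);   /* Snapshot full GP state. */
    /* ... later, possibly after an expedited grace period ... */
    if (!poll_state_synchronize_rcu_full(&rgos))
            synchronize_rcu();                /* No GP yet; wait for one. */
    /*
     * With this commit, even the early boot expedited fastpath advances
     * ->expedited_sequence, so such pollers cannot miss that grace period.
     */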