rcu: Remove rcu_data.exp_deferred_qs and convert to rcu_data.cpu_no_qs.b.exp
author     Frederic Weisbecker <frederic@kernel.org>
           Thu, 16 Sep 2021 12:10:48 +0000 (14:10 +0200)
committer  Paul E. McKenney <paulmck@kernel.org>
           Wed, 8 Dec 2021 00:22:21 +0000 (16:22 -0800)
Having two fields for the same purpose, with subtle differences between
the RCU flavours, is confusing, especially when both fields always exist
on both flavours.

Fortunately, it is now safe for preemptible RCU to rely on the rcu_data
structure's ->cpu_no_qs.b.exp field, just like non-preemptible RCU.
This commit therefore removes the ad-hoc ->exp_deferred_qs field.
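
For reference, the field that preemptible RCU now shares is part of a
pre-existing union in the rcu_data structure.  A sketch abridged from
kernel/rcu/tree.h as of this series (comments paraphrased):

	union rcu_noqs {
		struct {
			u8 norm;	/* Normal GP still needs a QS from this CPU. */
			u8 exp;		/* Expedited GP still needs a QS from this CPU. */
		} b;			/* Individual bits. */
		u16 s;			/* Aggregate OR of the bits, to check both at once. */
	};

	struct rcu_data {
		/* ... */
		union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
		/* ... */
	};

Since ->cpu_no_qs.b.exp is present regardless of CONFIG_PREEMPT_RCU,
the separate ->exp_deferred_qs boolean carried no extra information.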

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 305cf6aeb408686dcd5eeacd1fa6e33187f6f2b0..ea46ed40f6bc118b09e29964519f029eecbc81f2 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -157,7 +157,6 @@ struct rcu_data {
        bool            core_needs_qs;  /* Core waits for quiescent state. */
        bool            beenonline;     /* CPU online at least once. */
        bool            gpwrap;         /* Possible ->gp_seq wrap. */
-       bool            exp_deferred_qs; /* This CPU awaiting a deferred QS? */
        bool            cpu_started;    /* RCU watching this onlining CPU. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 6c6eb32203852e9b6deb3a5568efed9d468168f3..fc2ee326a6f73b0d2b5f6af808fba1a6475b1c8f 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -255,7 +255,6 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
  */
 static void rcu_report_exp_rdp(struct rcu_data *rdp)
 {
-       WRITE_ONCE(rdp->exp_deferred_qs, false);
        WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
 }
@@ -656,7 +655,7 @@ static void rcu_exp_handler(void *unused)
                    rcu_dynticks_curr_cpu_in_eqs()) {
                        rcu_report_exp_rdp(rdp);
                } else {
-                       rdp->exp_deferred_qs = true;
+                       WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
                        set_tsk_need_resched(t);
                        set_preempt_need_resched();
                }
@@ -678,7 +677,7 @@ static void rcu_exp_handler(void *unused)
        if (depth > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
-                       rdp->exp_deferred_qs = true;
+                       WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
                        t->rcu_read_unlock_special.b.exp_hint = true;
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6d58b75d2782e84f774e3174e44d0fc548803455..e1a9fb96e0b917135670d21701513d05b4804388 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -260,10 +260,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
         * no need to check for a subsequent expedited GP.  (Though we are
         * still in a quiescent state in any case.)
         */
-       if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
+       if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
                rcu_report_exp_rdp(rdp);
        else
-               WARN_ON_ONCE(rdp->exp_deferred_qs);
+               WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
 }
 
 /*
@@ -354,7 +354,7 @@ void rcu_note_context_switch(bool preempt)
         * means that we continue to block the current grace period.
         */
        rcu_qs();
-       if (rdp->exp_deferred_qs)
+       if (rdp->cpu_no_qs.b.exp)
                rcu_report_exp_rdp(rdp);
        rcu_tasks_qs(current, preempt);
        trace_rcu_utilization(TPS("End context switch"));
@@ -481,7 +481,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
         */
        special = t->rcu_read_unlock_special;
        rdp = this_cpu_ptr(&rcu_data);
-       if (!special.s && !rdp->exp_deferred_qs) {
+       if (!special.s && !rdp->cpu_no_qs.b.exp) {
                local_irq_restore(flags);
                return;
        }
@@ -501,7 +501,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
         * tasks are handled when removing the task from the
         * blocked-tasks list below.
         */
-       if (rdp->exp_deferred_qs)
+       if (rdp->cpu_no_qs.b.exp)
                rcu_report_exp_rdp(rdp);
 
        /* Clean up if blocked during RCU read-side critical section. */
@@ -584,7 +584,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
  */
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
-       return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
+       return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
                READ_ONCE(t->rcu_read_unlock_special.s)) &&
               rcu_preempt_depth() == 0;
 }
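
For context, the predicate above feeds the (unchanged) helper in
kernel/rcu/tree_plugin.h, which looks roughly like this in trees of
this era:

	/* Report a deferred quiescent state if needed and safe to do so. */
	static void rcu_preempt_deferred_qs(struct task_struct *t)
	{
		unsigned long flags;

		if (!rcu_preempt_need_deferred_qs(t))
			return;
		local_irq_save(flags);
		rcu_preempt_deferred_qs_irqrestore(t, flags);
	}

After this patch, that decision keys off the same ->cpu_no_qs.b.exp
flag on both RCU flavours.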