rcu: Rename rcu_data's ->deferred_qs to ->exp_deferred_qs
author Paul E. McKenney <paulmck@linux.ibm.com>
Wed, 27 Mar 2019 22:51:25 +0000 (15:51 -0700)
committer Paul E. McKenney <paulmck@linux.ibm.com>
Tue, 28 May 2019 15:48:19 +0000 (08:48 -0700)
The rcu_data structure's ->deferred_qs field is used to indicate that the
current CPU is blocking an expedited grace period (perhaps a future one).
Given that it is used only for expedited grace periods, its current name
is misleading, so this commit renames it to ->exp_deferred_qs.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
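
[Editorial note: the sketch below is a minimal user-space model, not kernel code, of how the renamed flag is used in the hunks that follow: rcu_exp_handler() sets ->exp_deferred_qs when the expedited-GP IPI arrives while the CPU cannot report a quiescent state immediately, and rcu_report_exp_rdp() clears it once the deferred quiescent state is finally reported. Apart from the exp_deferred_qs name and the two kernel functions it models, every identifier here is illustrative only.]

/*
 * User-space model of the deferred expedited quiescent-state flag.
 * This is a sketch of the semantics, not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct rcu_data_model {
        bool exp_deferred_qs;   /* CPU still owes an expedited QS. */
};

/* Models rcu_exp_handler(): defer the QS if inside a read-side section. */
static void exp_handler_model(struct rcu_data_model *rdp, bool in_reader)
{
        if (in_reader)
                rdp->exp_deferred_qs = true;    /* report later, at unlock */
        else
                rdp->exp_deferred_qs = false;   /* report immediately */
}

/* Models rcu_report_exp_rdp(): clear the flag once the QS is reported. */
static void report_exp_rdp_model(struct rcu_data_model *rdp)
{
        rdp->exp_deferred_qs = false;
}

int main(void)
{
        struct rcu_data_model rdp = { .exp_deferred_qs = false };

        exp_handler_model(&rdp, true);          /* IPI lands inside a reader */
        printf("deferred? %d\n", rdp.exp_deferred_qs);  /* prints 1 */
        report_exp_rdp_model(&rdp);             /* reader exits, QS reported */
        printf("deferred? %d\n", rdp.exp_deferred_qs);  /* prints 0 */
        return 0;
}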
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h

kernel/rcu/tree.h
index 21d740f0b8dc8a99698611276e3ee898aebdf57c..7acaf3a62d39839d3c3089c7c39c432253ec4df9 100644
@@ -154,7 +154,7 @@ struct rcu_data {
        bool            core_needs_qs;  /* Core waits for quiesc state. */
        bool            beenonline;     /* CPU online at least once. */
        bool            gpwrap;         /* Possible ->gp_seq wrap. */
-       bool            deferred_qs;    /* This CPU awaiting a deferred QS? */
+       bool            exp_deferred_qs; /* This CPU awaiting a deferred QS? */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
        unsigned long   ticks_this_gp;  /* The number of scheduling-clock */
kernel/rcu/tree_exp.h
index de1b4acf6979fb43252e527a50729fa19b9f40f5..e0c928d04be5f8fa50b9343678c3e2e16aa47188 100644
@@ -250,7 +250,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
  */
 static void rcu_report_exp_rdp(struct rcu_data *rdp)
 {
-       WRITE_ONCE(rdp->deferred_qs, false);
+       WRITE_ONCE(rdp->exp_deferred_qs, false);
        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
 }
 
@@ -616,7 +616,7 @@ static void rcu_exp_handler(void *unused)
                    rcu_dynticks_curr_cpu_in_eqs()) {
                        rcu_report_exp_rdp(rdp);
                } else {
-                       rdp->deferred_qs = true;
+                       rdp->exp_deferred_qs = true;
                        set_tsk_need_resched(t);
                        set_preempt_need_resched();
                }
@@ -638,7 +638,7 @@ static void rcu_exp_handler(void *unused)
        if (t->rcu_read_lock_nesting > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
-                       rdp->deferred_qs = true;
+                       rdp->exp_deferred_qs = true;
                        t->rcu_read_unlock_special.b.exp_hint = true;
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -661,7 +661,7 @@ static void rcu_exp_handler(void *unused)
         *
         * Otherwise, force a context switch after the CPU enables everything.
         */
-       rdp->deferred_qs = true;
+       rdp->exp_deferred_qs = true;
        if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
            WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
                rcu_preempt_deferred_qs(t);
kernel/rcu/tree_plugin.h
index 58c7853f19e73e7ac713b1ccf8e7630e5121e617..1aeb4ae187ce9f80531c325ef1f8c34d0f7b0ebf 100644
@@ -237,10 +237,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
         * no need to check for a subsequent expedited GP.  (Though we are
         * still in a quiescent state in any case.)
         */
-       if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
+       if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
                rcu_report_exp_rdp(rdp);
        else
-               WARN_ON_ONCE(rdp->deferred_qs);
+               WARN_ON_ONCE(rdp->exp_deferred_qs);
 }
 
 /*
@@ -337,7 +337,7 @@ void rcu_note_context_switch(bool preempt)
         * means that we continue to block the current grace period.
         */
        rcu_qs();
-       if (rdp->deferred_qs)
+       if (rdp->exp_deferred_qs)
                rcu_report_exp_rdp(rdp);
        trace_rcu_utilization(TPS("End context switch"));
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -451,7 +451,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
         */
        special = t->rcu_read_unlock_special;
        rdp = this_cpu_ptr(&rcu_data);
-       if (!special.s && !rdp->deferred_qs) {
+       if (!special.s && !rdp->exp_deferred_qs) {
                local_irq_restore(flags);
                return;
        }
@@ -459,7 +459,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
        if (special.b.need_qs) {
                rcu_qs();
                t->rcu_read_unlock_special.b.need_qs = false;
-               if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) {
+               if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
                        local_irq_restore(flags);
                        return;
                }
@@ -471,7 +471,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
         * tasks are handled when removing the task from the
         * blocked-tasks list below.
         */
-       if (rdp->deferred_qs) {
+       if (rdp->exp_deferred_qs) {
                rcu_report_exp_rdp(rdp);
                if (!t->rcu_read_unlock_special.s) {
                        local_irq_restore(flags);
@@ -560,7 +560,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
  */
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
-       return (__this_cpu_read(rcu_data.deferred_qs) ||
+       return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
                READ_ONCE(t->rcu_read_unlock_special.s)) &&
               t->rcu_read_lock_nesting <= 0;
 }