rcu: Drop "wake" parameter from rcu_report_exp_rdp()
author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 2 Jul 2018 16:17:57 +0000 (09:17 -0700)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Thu, 30 Aug 2018 23:02:43 +0000 (16:02 -0700)
The rcu_report_exp_rdp() function is always invoked with its "wake"
argument set to "true", so this commit drops this parameter.  The only
potential call site that would use "false" is in the code driving the
expedited grace period, and that code uses rcu_report_exp_cpu_mult()
instead, which therefore retains its "wake" parameter.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h

index 158c58d47b07e03d60034158b4edc54f57359d38..e1927147a4a56d425eb3e002b7370fcc21db279f 100644 (file)
@@ -165,8 +165,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
-static void rcu_report_exp_rdp(struct rcu_state *rsp,
-                              struct rcu_data *rdp, bool wake);
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 
 /* rcuc/rcub kthread realtime priority */
@@ -239,8 +238,7 @@ void rcu_sched_qs(void)
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
                return;
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
-       rcu_report_exp_rdp(&rcu_sched_state,
-                          this_cpu_ptr(&rcu_sched_data), true);
+       rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
 }
 
 void rcu_softirq_qs(void)
@@ -3758,8 +3756,7 @@ void rcu_report_dead(unsigned int cpu)
 
        /* QS for any half-done expedited RCU-sched GP. */
        preempt_disable();
-       rcu_report_exp_rdp(&rcu_sched_state,
-                          this_cpu_ptr(rcu_sched_state.rda), true);
+       rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(rcu_sched_state.rda));
        preempt_enable();
        rcu_preempt_deferred_qs(current);
        for_each_rcu_flavor(rsp)
index f9d5bbd8adce8bce6a9451adc58d82835bff3b5c..0f8f225c1b46b6ac229cd0bfa9d84b43ac184f4c 100644 (file)
@@ -259,11 +259,10 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 /*
  * Report expedited quiescent state for specified rcu_data (CPU).
  */
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
-                              bool wake)
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
        WRITE_ONCE(rdp->deferred_qs, false);
-       rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
+       rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
 }
 
 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
@@ -352,7 +351,7 @@ static void sync_sched_exp_handler(void *data)
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(&rcu_sched_state,
-                                  this_cpu_ptr(&rcu_sched_data), true);
+                                  this_cpu_ptr(&rcu_sched_data));
                return;
        }
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
@@ -750,7 +749,7 @@ static void sync_rcu_exp_handler(void *info)
        if (!t->rcu_read_lock_nesting) {
                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
                    rcu_dynticks_curr_cpu_in_eqs()) {
-                       rcu_report_exp_rdp(rsp, rdp, true);
+                       rcu_report_exp_rdp(rsp, rdp);
                } else {
                        rdp->deferred_qs = true;
                        resched_cpu(rdp->cpu);
index 1ff742a3c8d1d309b0a2e2cda7c7b5003d870143..9f0d054e6c20bf6766a41a8006421d32e79861cf 100644 (file)
@@ -285,7 +285,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
         * still in a quiescent state in any case.)
         */
        if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
-               rcu_report_exp_rdp(rdp->rsp, rdp, true);
+               rcu_report_exp_rdp(rdp->rsp, rdp);
        else
                WARN_ON_ONCE(rdp->deferred_qs);
 }
@@ -383,7 +383,7 @@ static void rcu_preempt_note_context_switch(bool preempt)
         */
        rcu_preempt_qs();
        if (rdp->deferred_qs)
-               rcu_report_exp_rdp(rcu_state_p, rdp, true);
+               rcu_report_exp_rdp(rcu_state_p, rdp);
 }
 
 /*
@@ -508,7 +508,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
         * blocked-tasks list below.
         */
        if (rdp->deferred_qs) {
-               rcu_report_exp_rdp(rcu_state_p, rdp, true);
+               rcu_report_exp_rdp(rcu_state_p, rdp);
                if (!t->rcu_read_unlock_special.s) {
                        local_irq_restore(flags);
                        return;