}
check_cb_ovld(rdp);
-	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
-		return; // Enqueued onto ->nocb_bypass, so just leave.
-	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
-	rcu_segcblist_enqueue(&rdp->cblist, head);
-	if (__is_kvfree_rcu_offset((unsigned long)func))
-		trace_rcu_kvfree_callback(rcu_state.name, head,
-					  (unsigned long)func,
-					  rcu_segcblist_n_cbs(&rdp->cblist));
-	else
-		trace_rcu_callback(rcu_state.name, head,
-				   rcu_segcblist_n_cbs(&rdp->cblist));
-
-	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
-
-	/* Go handle any RCU core processing required. */
-	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
-		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
-	} else {
-		__call_rcu_core(rdp, head, flags);
-		local_irq_restore(flags);
-	}
+	if (unlikely(rcu_rdp_is_offloaded(rdp)))
+		call_rcu_nocb(rdp, head, func, flags, lazy);
+	else
+		call_rcu_core(rdp, head, func, flags);
+	local_irq_restore(flags);
}
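For reference, the enqueue and tracing work deleted above does not disappear: the new call_rcu_nocb()/call_rcu_core() helpers introduced by this patch take it over. A minimal sketch of how a call_rcu_core()-style helper could absorb the removed lines (the body below is an assumption for illustration, not one of this patch's hunks):

/* Sketch only: assumed layout of the moved enqueue/trace logic. */
static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
			  rcu_callback_t func, unsigned long flags)
{
	/* Enqueue onto the per-CPU segmented callback list. */
	rcu_segcblist_enqueue(&rdp->cblist, head);

	/* kvfree_rcu() passes an offset instead of a function pointer. */
	if (__is_kvfree_rcu_offset((unsigned long)func))
		trace_rcu_kvfree_callback(rcu_state.name, head,
					  (unsigned long)func,
					  rcu_segcblist_n_cbs(&rdp->cblist));
	else
		trace_rcu_callback(rcu_state.name, head,
				   rcu_segcblist_n_cbs(&rdp->cblist));

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));

	/* Go handle any RCU core processing required. */
	__call_rcu_core(rdp, head, flags);
}

Note that local_irq_restore() stays in the caller after this change, so both the offloaded and non-offloaded paths return with IRQs still disabled and are unwound in one place.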
#ifdef CONFIG_RCU_LAZY
}
/*
- * Update RCU priority boot kthread affinity for CPU-hotplug changes.
+ * Update kthreads affinity during CPU-hotplug changes.
+ *
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set, use -1 if there is
+ * no outgoing CPU. If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ *
+ * Any future concurrent calls are serialized via ->kthread_mutex.
*/
-static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
{
-	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+	cpumask_var_t cm;
+	unsigned long mask;
+	struct rcu_data *rdp;
+	struct rcu_node *rnp;
+	struct task_struct *task_boost, *task_exp;

-	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+	rdp = per_cpu_ptr(&rcu_data, cpu);
+	rnp = rdp->mynode;
+
+	task_boost = rcu_boost_task(rnp);
+	task_exp = rcu_exp_par_gp_task(rnp);
+
+	/*
+	 * If CPU is the boot one, those tasks are created later from early
+	 * initcall since kthreadd must be created first.
+	 */
+	if (!task_boost && !task_exp)
+		return;
+
+	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
+		return;
+
+	mutex_lock(&rnp->kthread_mutex);
+	mask = rcu_rnp_online_cpus(rnp);
+	for_each_leaf_node_possible_cpu(rnp, cpu)
+		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
+		    cpu != outgoingcpu)
+			cpumask_set_cpu(cpu, cm);
+	cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
+	if (cpumask_empty(cm)) {
+		cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
+		if (outgoingcpu >= 0)
+			cpumask_clear_cpu(outgoingcpu, cm);
+	}
+
+	if (task_exp)
+		set_cpus_allowed_ptr(task_exp, cm);
+
+	if (task_boost)
+		set_cpus_allowed_ptr(task_boost, cm);
+
+	mutex_unlock(&rnp->kthread_mutex);
+
+	free_cpumask_var(cm);
}
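For context, this helper is driven from the CPU-hotplug callbacks: the online path passes -1 (no CPU is departing) while the offline path passes the outgoing CPU so that it is excluded from the affinity mask, as described in the comment above. A simplified sketch of such callers, assuming the usual rcutree_online_cpu()/rcutree_offline_cpu() hotplug hooks (illustrative only, not part of this hunk):

/* Sketch only: assumed hotplug callers of rcutree_affinity_setting(). */
int rcutree_online_cpu(unsigned int cpu)
{
	/* ... other onlining work elided ... */
	rcutree_affinity_setting(cpu, -1);	/* no outgoing CPU */
	return 0;
}

int rcutree_offline_cpu(unsigned int cpu)
{
	/* ... other offlining work elided ... */
	rcutree_affinity_setting(cpu, cpu);	/* steer kthreads off @cpu */
	return 0;
}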
/*