From: Boqun Feng
Date: Tue, 27 Feb 2024 01:37:25 +0000 (-0800)
Subject: Merge branches 'rcu-doc.2024.02.14a', 'rcu-nocb.2024.02.14a', 'rcu-exp.2024.02.14a...
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=3add00be5fe5810d7aa5ec3af8b6a245ef33144b;p=linux.git

Merge branches 'rcu-doc.2024.02.14a', 'rcu-nocb.2024.02.14a', 'rcu-exp.2024.02.14a',
'rcu-tasks.2024.02.26a' and 'rcu-misc.2024.02.14a' into rcu.2024.02.26a
---

3add00be5fe5810d7aa5ec3af8b6a245ef33144b
diff --cc kernel/rcu/tree.c
index b2bccfd37c383,cc0e169e299a6,9591c22408a1e,ba9137f39d143,41c50a6c607ef..d9642dd06c253
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@@@@@ -2729,27 -2748,12 -2729,27 -2729,27 -2729,27 +2748,12 @@@@@@ __call_rcu_common(struct rcu_head *head
     	}
     
     	check_cb_ovld(rdp);
- --- 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
- --- 		return; // Enqueued onto ->nocb_bypass, so just leave.
- --- 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
- --- 	rcu_segcblist_enqueue(&rdp->cblist, head);
- --- 	if (__is_kvfree_rcu_offset((unsigned long)func))
- --- 		trace_rcu_kvfree_callback(rcu_state.name, head,
- --- 					  (unsigned long)func,
- --- 					  rcu_segcblist_n_cbs(&rdp->cblist));
- --- 	else
- --- 		trace_rcu_callback(rcu_state.name, head,
- --- 				   rcu_segcblist_n_cbs(&rdp->cblist));
- -   	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
- --  	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
- --  
- --- 	/* Go handle any RCU core processing required. */
- --- 	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
- --- 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
- --- 	} else {
- --- 		__call_rcu_core(rdp, head, flags);
- --- 		local_irq_restore(flags);
- --- 	}
+ +++ 	if (unlikely(rcu_rdp_is_offloaded(rdp)))
+ +++ 		call_rcu_nocb(rdp, head, func, flags, lazy);
+ +++ 	else
+ +++ 		call_rcu_core(rdp, head, func, flags);
+ +++ 	local_irq_restore(flags);
     }
     
     #ifdef CONFIG_RCU_LAZY
@@@@@@ -4450,13 -4454,13 -4510,64 -4450,13 -4455,13 +4519,64 @@@@@@ int rcutree_prepare_cpu(unsigned int cp
     }
     
     /*
-- -- * Update RCU priority boot kthread affinity for CPU-hotplug changes.
++ ++ * Update kthreads affinity during CPU-hotplug changes.
++ ++ *
++ ++ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
++ ++ * served by the rcu_node in question. The CPU hotplug lock is still
++ ++ * held, so the value of rnp->qsmaskinit will be stable.
++ ++ *
++ ++ * We don't include outgoingcpu in the affinity set, use -1 if there is
++ ++ * no outgoing CPU. If there are no CPUs left in the affinity set,
++ ++ * this function allows the kthread to execute on any CPU.
++ ++ *
++ ++ * Any future concurrent calls are serialized via ->kthread_mutex.
      */
-- --static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
++ ++static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
     {
-- -- 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
++ ++ 	cpumask_var_t cm;
++ ++ 	unsigned long mask;
++ ++ 	struct rcu_data *rdp;
++ ++ 	struct rcu_node *rnp;
++ ++ 	struct task_struct *task_boost, *task_exp;
+ ++  
 -    	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
++ ++ 	rdp = per_cpu_ptr(&rcu_data, cpu);
++ ++ 	rnp = rdp->mynode;
++ ++ 
++ ++ 	task_boost = rcu_boost_task(rnp);
++ ++ 	task_exp = rcu_exp_par_gp_task(rnp);
++ ++ 
++ ++ 	/*
++ ++ 	 * If CPU is the boot one, those tasks are created later from early
++ ++ 	 * initcall since kthreadd must be created first.
++ ++ 	 */
++ ++ 	if (!task_boost && !task_exp)
++ ++ 		return;
++ ++ 
++ ++ 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
++ ++ 		return;
++ ++ 
++ ++ 	mutex_lock(&rnp->kthread_mutex);
++ ++ 	mask = rcu_rnp_online_cpus(rnp);
++ ++ 	for_each_leaf_node_possible_cpu(rnp, cpu)
++ ++ 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
++ ++ 		    cpu != outgoingcpu)
++ ++ 			cpumask_set_cpu(cpu, cm);
++ ++ 	cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
++ ++ 	if (cpumask_empty(cm)) {
++ ++ 		cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
++ ++ 		if (outgoingcpu >= 0)
++ ++ 			cpumask_clear_cpu(outgoingcpu, cm);
++ ++ 	}
++ ++ 
++ ++ 	if (task_exp)
++ ++ 		set_cpus_allowed_ptr(task_exp, cm);
++ ++ 
++ ++ 	if (task_boost)
++ ++ 		set_cpus_allowed_ptr(task_boost, cm);
+ +   
- -   	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
++ ++ 	mutex_unlock(&rnp->kthread_mutex);
++ +  
 -    	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
++ ++ 	free_cpumask_var(cm);
     }
     
     /*
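
A note on the first hunk: the merge resolution collapses __call_rcu_common()
down to a single offloaded-vs-core dispatch, moving the bypass/enqueue/tracing
details into the call_rcu_nocb() and call_rcu_core() helpers and hoisting
local_irq_restore() to the one shared exit. A minimal userspace sketch of that
post-merge control-flow shape follows; only the offloaded test and the two
helper names come from the diff, everything else (the struct, the bodies, the
output) is a hypothetical stand-in, not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct rdp_model { bool offloaded; };	/* stand-in for struct rcu_data */

/* Stand-ins for call_rcu_nocb()/call_rcu_core(); the real helpers
 * enqueue the callback and wake or kick the appropriate machinery. */
static void nocb_path(struct rdp_model *rdp) { (void)rdp; puts("no-CBs path"); }
static void core_path(struct rdp_model *rdp) { (void)rdp; puts("core path"); }

static void dispatch(struct rdp_model *rdp)
{
	/* Post-merge shape: one branch, then one common exit point
	 * (where the kernel does local_irq_restore(flags)). */
	if (rdp->offloaded)
		nocb_path(rdp);
	else
		core_path(rdp);
}

int main(void)
{
	struct rdp_model rdp = { .offloaded = false };
	dispatch(&rdp);		/* prints "core path" */
	return 0;
}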
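
A note on the second hunk: the comment block above rcutree_affinity_setting()
describes the mask computation: take the CPUs served by the rcu_node, drop the
outgoing CPU, intersect with the HK_TYPE_RCU housekeeping set, and fall back to
all housekeeping CPUs (minus the outgoing one) if that intersection is empty.
Below is a self-contained model of just that arithmetic, using plain 64-bit
masks instead of cpumask_t; all names are hypothetical and this sketches the
logic only, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

/* online: CPUs served by the rcu_node; housekeeping: the HK_TYPE_RCU set;
 * outgoing: CPU going offline, or -1 if none. */
static uint64_t affinity_model(uint64_t online, uint64_t housekeeping,
			       int outgoing)
{
	uint64_t cm = online;

	if (outgoing >= 0)
		cm &= ~(1ULL << outgoing);	/* exclude the outgoing CPU */
	cm &= housekeeping;			/* restrict to housekeeping CPUs */
	if (cm == 0) {
		/* Nothing left: allow any housekeeping CPU instead. */
		cm = housekeeping;
		if (outgoing >= 0)
			cm &= ~(1ULL << outgoing);
	}
	return cm;
}

int main(void)
{
	/* CPUs 0-3 served by the node, CPUs 4-5 housekeeping, CPU 2 going
	 * offline: the intersection is empty, so the fallback applies and
	 * the kthreads may run on any housekeeping CPU (prints 0x30). */
	printf("%#llx\n", (unsigned long long)affinity_model(0xf, 0x30, 2));
	return 0;
}

The kernel version in the diff performs the same computation on a cpumask_var_t
while holding ->kthread_mutex, so concurrent hotplug-time calls cannot race on
the kthreads' affinity, and then applies the result to the boost and expedited
grace-period kthreads via set_cpus_allowed_ptr().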