rcu: Move RCU CPU stall-warning code out of tree_plugin.h
author	Paul E. McKenney <paulmck@linux.ibm.com>
	Sat, 12 Jan 2019 00:34:47 +0000 (16:34 -0800)
committer	Paul E. McKenney <paulmck@linux.ibm.com>
	Tue, 26 Mar 2019 21:40:13 +0000 (14:40 -0700)
The RCU CPU stall-warning code for normal grace periods is currently
scattered across two files, due to earlier Tiny RCU support for RCU
CPU stall warnings and for old Kconfig options that have long since
been retired.  Given that it is hard for the lead RCU maintainer to
find relevant stall-warning code, it would be good to consolidate it.
This commit continues this process by moving stall-warning code from
kernel/rcu/tree_plugin.h to a new kernel/rcu/tree_stall.h file.
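
For reference, the functions moved here can remain static because, by the
existing kernel/rcu convention, these helper .h files are textually
#include'd into kernel/rcu/tree.c rather than compiled as separate
translation units.  The sketch below shows that pre-existing arrangement
(include order approximate, not part of this patch):

	/* Tail of kernel/rcu/tree.c: pull helper "headers" into this TU. */
	#include "tree_stall.h"
	#include "tree_exp.h"
	#include "tree_plugin.h"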

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 97dba50f6fb24f01a150ad74935f53c2db542edd..7fa3bc4d481bf3071f0119f54007ee199356cc09 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -642,79 +642,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
        rcu_preempt_deferred_qs_irqrestore(t, flags);
 }
 
-/*
- * Dump detailed information for all tasks blocking the current RCU
- * grace period on the specified rcu_node structure.
- */
-static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
-{
-       unsigned long flags;
-       struct task_struct *t;
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       t = list_entry(rnp->gp_tasks->prev,
-                      struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
-               /*
-                * We could be printing a lot while holding a spinlock.
-                * Avoid triggering hard lockup.
-                */
-               touch_nmi_watchdog();
-               sched_show_task(t);
-       }
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-}
-
-/*
- * Dump detailed information for all tasks blocking the current RCU
- * grace period.
- */
-static void rcu_print_detail_task_stall(void)
-{
-       struct rcu_node *rnp = rcu_get_root();
-
-       rcu_print_detail_task_stall_rnp(rnp);
-       rcu_for_each_leaf_node(rnp)
-               rcu_print_detail_task_stall_rnp(rnp);
-}
-
-static void rcu_print_task_stall_begin(struct rcu_node *rnp)
-{
-       pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
-              rnp->level, rnp->grplo, rnp->grphi);
-}
-
-static void rcu_print_task_stall_end(void)
-{
-       pr_cont("\n");
-}
-
-/*
- * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each.
- */
-static int rcu_print_task_stall(struct rcu_node *rnp)
-{
-       struct task_struct *t;
-       int ndetected = 0;
-
-       if (!rcu_preempt_blocked_readers_cgp(rnp))
-               return 0;
-       rcu_print_task_stall_begin(rnp);
-       t = list_entry(rnp->gp_tasks->prev,
-                      struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
-               pr_cont(" P%d", t->pid);
-               ndetected++;
-       }
-       rcu_print_task_stall_end();
-       return ndetected;
-}
-
 /*
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each that is blocking the current
@@ -979,23 +906,6 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 }
 static void rcu_preempt_deferred_qs(struct task_struct *t) { }
 
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections.
- */
-static void rcu_print_detail_task_stall(void)
-{
-}
-
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections.
- */
-static int rcu_print_task_stall(struct rcu_node *rnp)
-{
-       return 0;
-}
-
 /*
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections that are
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 682189f4d08327ad7c43d17e8be3cca9d7e76502..6f5f94944f49e978aeae106e5ec5562df297a0a5 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -61,3 +61,98 @@ static int __init check_cpu_stall_init(void)
        return 0;
 }
 early_initcall(check_cpu_stall_init);
+
+#ifdef CONFIG_PREEMPT
+
+/*
+ * Dump detailed information for all tasks blocking the current RCU
+ * grace period on the specified rcu_node structure.
+ */
+static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
+{
+       unsigned long flags;
+       struct task_struct *t;
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       if (!rcu_preempt_blocked_readers_cgp(rnp)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       t = list_entry(rnp->gp_tasks->prev,
+                      struct task_struct, rcu_node_entry);
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+               /*
+                * We could be printing a lot while holding a spinlock.
+                * Avoid triggering hard lockup.
+                */
+               touch_nmi_watchdog();
+               sched_show_task(t);
+       }
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
+
+/*
+ * Dump detailed information for all tasks blocking the current RCU
+ * grace period.
+ */
+static void rcu_print_detail_task_stall(void)
+{
+       struct rcu_node *rnp = rcu_get_root();
+
+       rcu_print_detail_task_stall_rnp(rnp);
+       rcu_for_each_leaf_node(rnp)
+               rcu_print_detail_task_stall_rnp(rnp);
+}
+
+static void rcu_print_task_stall_begin(struct rcu_node *rnp)
+{
+       pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
+              rnp->level, rnp->grplo, rnp->grphi);
+}
+
+static void rcu_print_task_stall_end(void)
+{
+       pr_cont("\n");
+}
+
+/*
+ * Scan the current list of tasks blocked within RCU read-side critical
+ * sections, printing out the tid of each.
+ */
+static int rcu_print_task_stall(struct rcu_node *rnp)
+{
+       struct task_struct *t;
+       int ndetected = 0;
+
+       if (!rcu_preempt_blocked_readers_cgp(rnp))
+               return 0;
+       rcu_print_task_stall_begin(rnp);
+       t = list_entry(rnp->gp_tasks->prev,
+                      struct task_struct, rcu_node_entry);
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+               pr_cont(" P%d", t->pid);
+               ndetected++;
+       }
+       rcu_print_task_stall_end();
+       return ndetected;
+}
+
+#else /* #ifdef CONFIG_PREEMPT */
+
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections.
+ */
+static void rcu_print_detail_task_stall(void)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections.
+ */
+static int rcu_print_task_stall(struct rcu_node *rnp)
+{
+       return 0;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */
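
For reference, when a preemptible-RCU stall is detected, the pr_err()/pr_cont()
calls in rcu_print_task_stall() above emit a single console line of roughly the
following form (rcu_node level, CPU range, and PIDs are illustrative):

	Tasks blocked on level-0 rcu_node (CPUs 0-3): P17 P42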