workqueue: Introduce show_one_worker_pool and show_one_workqueue.
author    Imran Khan <imran.f.khan@oracle.com>
          Wed, 20 Oct 2021 03:09:00 +0000 (14:09 +1100)
committer Tejun Heo <tj@kernel.org>
          Wed, 20 Oct 2021 16:19:03 +0000 (06:19 -1000)
Currently show_workqueue_state shows the state of all workqueues and of
all worker pools. In certain cases we may need to dump the state of only
a specific workqueue or worker pool. For example, in destroy_workqueue
we only need to show the state of the workqueue that is being destroyed.

So rename show_workqueue_state to show_all_workqueues (to signify that
it dumps the state of all busy workqueues) and divide it into more
granular functions (show_one_workqueue and show_one_worker_pool) that
show the state of an individual workqueue or worker pool and can be
used in cases such as the one mentioned above.

Also, as mentioned earlier, make destroy_workqueue dump only the data
pertaining to the workqueue being destroyed, and convert users of the
earlier interface (show_workqueue_state) to the new interface
(show_all_workqueues).
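
As an illustration (not part of the patch), the dump interface after
this change looks roughly as follows; the workqueue pointer here is
hypothetical:

	struct workqueue_struct *wq = ...;	/* some workqueue of interest */

	show_one_workqueue(wq);		/* dump just this workqueue, if busy */
	show_all_workqueues();		/* dump all busy workqueues and pools */

	/*
	 * show_one_worker_pool() stays static inside kernel/workqueue.c;
	 * show_all_workqueues() uses it to dump each busy worker pool.
	 */

Keeping show_one_worker_pool static avoids growing the public
workqueue API beyond what external callers currently need.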

Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
drivers/tty/sysrq.c
include/linux/workqueue.h
kernel/power/process.c
kernel/workqueue.c

diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index c911196ac8935fe697e2c09efc175c8e644bc70a..8d0f07509ca769d43ac11aaa1201db11d9ea1568 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -296,7 +296,7 @@ static const struct sysrq_key_op sysrq_showregs_op = {
 static void sysrq_handle_showstate(int key)
 {
        show_state();
-       show_workqueue_state();
+       show_all_workqueues();
 }
 static const struct sysrq_key_op sysrq_showstate_op = {
        .handler        = sysrq_handle_showstate,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 74d3c1efd9bb5c8d8d6507f5f9a0ff607d893789..7fee9b6cfedeffb8e425c9221a87e22d0d6da58c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -469,7 +469,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
 extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
 
 /**
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 37401c99b7d7de338fcca8d08f78a984454465b0..b7e7798637b8e6afee13d5642a58cbf88ef40411 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -94,7 +94,7 @@ static int try_to_freeze_tasks(bool user_only)
                       todo - wq_busy, wq_busy);
 
                if (wq_busy)
-                       show_workqueue_state();
+                       show_all_workqueues();
 
                if (!wakeup || pm_debug_messages_on) {
                        read_lock(&tasklist_lock);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 76988f39ed5ac8afbe9fe32a8fb5655832fc670a..1a7df882f55e2c2bb2c0392170f2141da84e7971 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                        raw_spin_unlock_irq(&pwq->pool->lock);
                        mutex_unlock(&wq->mutex);
                        mutex_unlock(&wq_pool_mutex);
-                       show_workqueue_state();
+                       show_one_workqueue(wq);
                        return;
                }
                raw_spin_unlock_irq(&pwq->pool->lock);
@@ -4797,97 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq)
 }
 
 /**
- * show_workqueue_state - dump workqueue state
- *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
  */
-void show_workqueue_state(void)
+void show_one_workqueue(struct workqueue_struct *wq)
 {
-       struct workqueue_struct *wq;
-       struct worker_pool *pool;
+       struct pool_workqueue *pwq;
+       bool idle = true;
        unsigned long flags;
-       int pi;
-
-       rcu_read_lock();
 
-       pr_info("Showing busy workqueues and worker pools:\n");
-
-       list_for_each_entry_rcu(wq, &workqueues, list) {
-               struct pool_workqueue *pwq;
-               bool idle = true;
-
-               for_each_pwq(pwq, wq) {
-                       if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-                               idle = false;
-                               break;
-                       }
+       for_each_pwq(pwq, wq) {
+               if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                       idle = false;
+                       break;
                }
-               if (idle)
-                       continue;
+       }
+       if (idle) /* Nothing to print for idle workqueue */
+               return;
 
-               pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+       pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
-               for_each_pwq(pwq, wq) {
-                       raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                       if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-                               /*
-                                * Defer printing to avoid deadlocks in console
-                                * drivers that queue work while holding locks
-                                * also taken in their write paths.
-                                */
-                               printk_deferred_enter();
-                               show_pwq(pwq);
-                               printk_deferred_exit();
-                       }
-                       raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+       for_each_pwq(pwq, wq) {
+               raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+               if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
                        /*
-                        * We could be printing a lot from atomic context, e.g.
-                        * sysrq-t -> show_workqueue_state(). Avoid triggering
-                        * hard lockup.
+                        * Defer printing to avoid deadlocks in console
+                        * drivers that queue work while holding locks
+                        * also taken in their write paths.
                         */
-                       touch_nmi_watchdog();
-               }
-       }
-
-       for_each_pool(pool, pi) {
-               struct worker *worker;
-               bool first = true;
-
-               raw_spin_lock_irqsave(&pool->lock, flags);
-               if (pool->nr_workers == pool->nr_idle)
-                       goto next_pool;
-               /*
-                * Defer printing to avoid deadlocks in console drivers that
-                * queue work while holding locks also taken in their write
-                * paths.
-                */
-               printk_deferred_enter();
-               pr_info("pool %d:", pool->id);
-               pr_cont_pool_info(pool);
-               pr_cont(" hung=%us workers=%d",
-                       jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-                       pool->nr_workers);
-               if (pool->manager)
-                       pr_cont(" manager: %d",
-                               task_pid_nr(pool->manager->task));
-               list_for_each_entry(worker, &pool->idle_list, entry) {
-                       pr_cont(" %s%d", first ? "idle: " : "",
-                               task_pid_nr(worker->task));
-                       first = false;
+                       printk_deferred_enter();
+                       show_pwq(pwq);
+                       printk_deferred_exit();
                }
-               pr_cont("\n");
-               printk_deferred_exit();
-       next_pool:
-               raw_spin_unlock_irqrestore(&pool->lock, flags);
+               raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                /*
                 * We could be printing a lot from atomic context, e.g.
-                * sysrq-t -> show_workqueue_state(). Avoid triggering
+                * sysrq-t -> show_all_workqueues(). Avoid triggering
                 * hard lockup.
                 */
                touch_nmi_watchdog();
        }
 
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+       struct worker *worker;
+       bool first = true;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&pool->lock, flags);
+       if (pool->nr_workers == pool->nr_idle)
+               goto next_pool;
+       /*
+        * Defer printing to avoid deadlocks in console drivers that
+        * queue work while holding locks also taken in their write
+        * paths.
+        */
+       printk_deferred_enter();
+       pr_info("pool %d:", pool->id);
+       pr_cont_pool_info(pool);
+       pr_cont(" hung=%us workers=%d",
+               jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+               pool->nr_workers);
+       if (pool->manager)
+               pr_cont(" manager: %d",
+                       task_pid_nr(pool->manager->task));
+       list_for_each_entry(worker, &pool->idle_list, entry) {
+               pr_cont(" %s%d", first ? "idle: " : "",
+                       task_pid_nr(worker->task));
+               first = false;
+       }
+       pr_cont("\n");
+       printk_deferred_exit();
+next_pool:
+       raw_spin_unlock_irqrestore(&pool->lock, flags);
+       /*
+        * We could be printing a lot from atomic context, e.g.
+        * sysrq-t -> show_all_workqueues(). Avoid triggering
+        * hard lockup.
+        */
+       touch_nmi_watchdog();
+
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
+ *
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
+ */
+void show_all_workqueues(void)
+{
+       struct workqueue_struct *wq;
+       struct worker_pool *pool;
+       int pi;
+
+       rcu_read_lock();
+
+       pr_info("Showing busy workqueues and worker pools:\n");
+
+       list_for_each_entry_rcu(wq, &workqueues, list)
+               show_one_workqueue(wq);
+
+       for_each_pool(pool, pi)
+               show_one_worker_pool(pool);
+
        rcu_read_unlock();
 }
 
@@ -5876,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
        rcu_read_unlock();
 
        if (lockup_detected)
-               show_workqueue_state();
+               show_all_workqueues();
 
        wq_watchdog_reset_touched();
        mod_timer(&wq_watchdog_timer, jiffies + thresh);