workqueue: add cmdline parameter `workqueue.unbound_cpus` to further constrain wq_unbound_cpumask
authortiozhang <tiozhang@didiglobal.com>
Thu, 29 Jun 2023 03:50:50 +0000 (11:50 +0800)
committerTejun Heo <tj@kernel.org>
Mon, 10 Jul 2023 20:42:51 +0000 (10:42 -1000)
The motivation for this is to improve boot times on devices where we want
to prevent workqueue work items from running on certain CPUs — e.g., CPUs
that are busy handling interrupts.

Signed-off-by: tiozhang <tiozhang@didiglobal.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Documentation/admin-guide/kernel-parameters.txt
kernel/workqueue.c

index a1457995fd41cfb7db6cfa0cbb816fb9bbcd7fc0..d1edee0fd5ec340d9b63616478d35ea0ac73c750 100644 (file)
                        disables both lockup detectors. Default is 10
                        seconds.
 
+       workqueue.unbound_cpus=
+                       [KNL,SMP] Specify the set of CPUs that unbound
+                       workqueues are allowed to run on.
+                       Format: <cpu-list>
+                       By default, all online CPUs are available for
+                       unbound workqueues.
+
        workqueue.watchdog_thresh=
                        If CONFIG_WQ_WATCHDOG is configured, workqueue can
                        warn stall conditions and dump internal state to
index f8891552fdd6dc2ab15a495b0a4ed8c7e03f4feb..83f8993af57cf23cc0d9f33598c0095b1a3ae863 100644 (file)
@@ -368,6 +368,9 @@ static bool workqueue_freezing;             /* PL: have wqs started freezing? */
 /* PL&A: allowable cpus for unbound wqs and work items */
 static cpumask_var_t wq_unbound_cpumask;
 
+/* to further constrain wq_unbound_cpumask via the cmdline parameter */
+static struct cpumask wq_cmdline_cpumask __initdata;
+
 /* CPU where unbound work was last round robin scheduled from this CPU */
 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 
@@ -6455,6 +6458,9 @@ void __init workqueue_init_early(void)
        cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
        cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
 
+       if (!cpumask_empty(&wq_cmdline_cpumask))
+               cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask);
+
        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
        /* initialize CPU pools */
@@ -6577,3 +6583,14 @@ void __warn_flushing_systemwide_wq(void)
        dump_stack();
 }
 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
+
+static int __init workqueue_unbound_cpus_setup(char *str)
+{
+       if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
+               cpumask_clear(&wq_cmdline_cpumask);
+               pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
+       }
+
+       return 1;
+}
+__setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);