Subject: padata: dispatch works on different nodes
Author: Gang Li <gang.li@linux.dev>
Date: Thu, 22 Feb 2024 22:04:17 +0800
Committer: Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 6 Mar 2024 21:04:17 +0000 (13:04 -0800)

When a group of tasks that access different nodes are scheduled on the
same node, they may encounter bandwidth bottlenecks and access latency.

Thus, numa_aware flag is introduced here, allowing tasks to be distributed
across different nodes to fully utilize the advantage of multi-node
systems.

Link: https://lkml.kernel.org/r/20240222140422.393911-5-gang.li@linux.dev
Signed-off-by: Gang Li <ligang.bdlg@bytedance.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/padata.h
kernel/padata.c
mm/mm_init.c

index 495b16b6b4d729360b2e85dac8e76a31ddce0dc6..8f418711351bcc1943fe79e309f1335a5d344513 100644 (file)
@@ -137,6 +137,7 @@ struct padata_shell {
  *             appropriate for one worker thread to do at once.
  * @max_threads: Max threads to use for the job, actual number may be less
  *               depending on task size and minimum chunk size.
+ * @numa_aware: Distribute jobs to different nodes with CPU in a round robin fashion.
  */
 struct padata_mt_job {
        void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
@@ -146,6 +147,7 @@ struct padata_mt_job {
        unsigned long           align;
        unsigned long           min_chunk;
        int                     max_threads;
+       bool                    numa_aware;
 };
 
 /**
index 179fb1518070c21f028e201ab32ae3dd53e23357..e3f639ff16707a98e5c5a98d23432cd3cc28f684 100644 (file)
@@ -485,7 +485,8 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
        struct padata_work my_work, *pw;
        struct padata_mt_job_state ps;
        LIST_HEAD(works);
-       int nworks;
+       int nworks, nid;
+       static atomic_t last_used_nid __initdata;
 
        if (job->size == 0)
                return;
@@ -517,7 +518,16 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
        ps.chunk_size = roundup(ps.chunk_size, job->align);
 
        list_for_each_entry(pw, &works, pw_list)
-               queue_work(system_unbound_wq, &pw->pw_work);
+               if (job->numa_aware) {
+                       int old_node = atomic_read(&last_used_nid);
+
+                       do {
+                               nid = next_node_in(old_node, node_states[N_CPU]);
+                       } while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
+                       queue_work_node(nid, system_unbound_wq, &pw->pw_work);
+               } else {
+                       queue_work(system_unbound_wq, &pw->pw_work);
+               }
 
        /* Use the current thread, which saves starting a workqueue worker. */
        padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
index 2c19f5515e36c47eb3bb23f02d9c7c2c1c646393..549e76af8f82a8ed2fea2e1439e96f06f7f25b19 100644 (file)
@@ -2231,6 +2231,7 @@ static int __init deferred_init_memmap(void *data)
                        .align       = PAGES_PER_SECTION,
                        .min_chunk   = PAGES_PER_SECTION,
                        .max_threads = max_threads,
+                       .numa_aware  = false,
                };
 
                padata_do_multithreaded(&job);