powerpc/smp: Rename init_thread_group_l1_cache_map() to make it generic
author:    Gautham R. Shenoy <ego@linux.vnet.ibm.com>
           Thu, 10 Dec 2020 10:38:57 +0000 (16:08 +0530)
committer: Michael Ellerman <mpe@ellerman.id.au>
           Thu, 10 Dec 2020 13:10:25 +0000 (00:10 +1100)
init_thread_group_l1_cache_map() initializes the per-cpu cpumask
thread_group_l1_cache_map with the core-siblings that share the L1
cache with the given CPU. Make this function generic with respect to
the cache property (L1 or L2) so that it updates the appropriate mask.
This is a preparatory patch for the next patch, which introduces
discovery of thread-groups that share the L2 cache.

No functional change.

Signed-off-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1607596739-32439-4-git-send-email-ego@linux.vnet.ibm.com
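
To make the intent concrete, here is a minimal sketch of how the
generalized helper could pick a mask once a second cache property
exists. THREAD_GROUP_SHARE_L2 and thread_group_l2_cache_map are
assumed names for what the follow-up patch would add; this patch
itself still accepts only THREAD_GROUP_SHARE_L1.

	cpumask_var_t *mask;

	/*
	 * Sketch only: select the per-cpu mask from the cache property.
	 * THREAD_GROUP_SHARE_L2 / thread_group_l2_cache_map are assumed
	 * names from the expected follow-up, not part of this patch.
	 */
	switch (cache_property) {
	case THREAD_GROUP_SHARE_L1:
		mask = &per_cpu(thread_group_l1_cache_map, cpu);
		break;
	case THREAD_GROUP_SHARE_L2:
		mask = &per_cpu(thread_group_l2_cache_map, cpu);
		break;
	default:
		return -EINVAL;
	}

	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));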
arch/powerpc/kernel/smp.c

index f3290d57fea68e94e75c4f5660f93b33cbe30b07..9078b5b5d6e40db276aeda4da14b4ead5a5b09a5 100644
@@ -866,15 +866,18 @@ out:
        return tg;
 }
 
-static int init_thread_group_l1_cache_map(int cpu)
+static int __init init_thread_group_cache_map(int cpu, int cache_property)
 
 {
        int first_thread = cpu_first_thread_sibling(cpu);
        int i, cpu_group_start = -1, err = 0;
        struct thread_groups *tg = NULL;
+       cpumask_var_t *mask;
 
-       tg = get_thread_groups(cpu, THREAD_GROUP_SHARE_L1,
-                              &err);
+       if (cache_property != THREAD_GROUP_SHARE_L1)
+               return -EINVAL;
+
+       tg = get_thread_groups(cpu, cache_property, &err);
        if (!tg)
                return err;
 
@@ -885,8 +888,8 @@ static int init_thread_group_l1_cache_map(int cpu)
                return -ENODATA;
        }
 
-       zalloc_cpumask_var_node(&per_cpu(thread_group_l1_cache_map, cpu),
-                               GFP_KERNEL, cpu_to_node(cpu));
+       mask = &per_cpu(thread_group_l1_cache_map, cpu);
+       zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
 
        for (i = first_thread; i < first_thread + threads_per_core; i++) {
                int i_group_start = get_cpu_thread_group_start(i, tg);
@@ -897,7 +900,7 @@ static int init_thread_group_l1_cache_map(int cpu)
                }
 
                if (i_group_start == cpu_group_start)
-                       cpumask_set_cpu(i, per_cpu(thread_group_l1_cache_map, cpu));
+                       cpumask_set_cpu(i, *mask);
        }
 
        return 0;
@@ -976,7 +979,7 @@ static int init_big_cores(void)
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               int err = init_thread_group_l1_cache_map(cpu);
+               int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
 
                if (err)
                        return err;
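
On the caller side, init_big_cores() keeps requesting the L1 property,
and a later caller could request another property through the same
helper, roughly as sketched below (THREAD_GROUP_SHARE_L2 is again an
assumed name, not introduced by this patch):

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);

		if (err)
			return err;

		/* Assumed follow-up: initialize the L2 thread-group mask too. */
		err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2);
		if (err)
			return err;
	}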