x86/cpu/topology: Cure the abuse of cpuinfo for persisting logical ids
authorThomas Gleixner <tglx@linutronix.de>
Mon, 14 Aug 2023 08:18:46 +0000 (10:18 +0200)
committerThomas Gleixner <tglx@linutronix.de>
Tue, 10 Oct 2023 12:38:19 +0000 (14:38 +0200)
Per CPU cpuinfo is used to persist the logical package and die IDs. That's
really not the right place simply because cpuinfo is subject to be
reinitialized when a CPU goes through an offline/online cycle.

This works by chance today, but that's far from correct and neither obvious
nor documented.

Add a per CPU data structure which persists those logical IDs. This allows
the CPUID evaluation code to be cleaned up.

This is a temporary workaround until the larger topology management is in
place, which will make all of these logical ID management mechanics obsolete.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Zhang Rui <rui.zhang@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230814085113.292947071@linutronix.de
arch/x86/kernel/smpboot.c

index 04a1136406b43fd837ef67fbc1d1c6629a2e7b84..54541a1a07f8de34ed3ada9d2f3920e5924b781b 100644 (file)
@@ -124,7 +124,20 @@ struct mwait_cpu_dead {
  */
 static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
 
-/* Logical package management. We might want to allocate that dynamically */
+/* Logical package management. */
+struct logical_maps {
+       u32     phys_pkg_id;
+       u32     phys_die_id;
+       u32     logical_pkg_id;
+       u32     logical_die_id;
+};
+
+/* Temporary workaround until the full topology mechanics is in place */
+static DEFINE_PER_CPU_READ_MOSTLY(struct logical_maps, logical_maps) = {
+       .phys_pkg_id    = U32_MAX,
+       .phys_die_id    = U32_MAX,
+};
+
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
 static unsigned int logical_packages __read_mostly;
@@ -337,10 +350,8 @@ int topology_phys_to_logical_pkg(unsigned int phys_pkg)
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-               if (c->initialized && c->topo.pkg_id == phys_pkg)
-                       return c->topo.logical_pkg_id;
+               if (per_cpu(logical_maps.phys_pkg_id, cpu) == phys_pkg)
+                       return per_cpu(logical_maps.logical_pkg_id, cpu);
        }
        return -1;
 }
@@ -358,11 +369,9 @@ static int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cp
        int cpu, proc_id = cpu_data(cur_cpu).topo.pkg_id;
 
        for_each_possible_cpu(cpu) {
-               struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-               if (c->initialized && c->topo.die_id == die_id &&
-                   c->topo.pkg_id == proc_id)
-                       return c->topo.logical_die_id;
+               if (per_cpu(logical_maps.phys_pkg_id, cpu) == proc_id &&
+                   per_cpu(logical_maps.phys_die_id, cpu) == die_id)
+                       return per_cpu(logical_maps.logical_die_id, cpu);
        }
        return -1;
 }
@@ -387,6 +396,8 @@ int topology_update_package_map(unsigned int pkg, unsigned int cpu)
                        cpu, pkg, new);
        }
 found:
+       per_cpu(logical_maps.phys_pkg_id, cpu) = pkg;
+       per_cpu(logical_maps.logical_pkg_id, cpu) = new;
        cpu_data(cpu).topo.logical_pkg_id = new;
        return 0;
 }
@@ -410,6 +421,8 @@ int topology_update_die_map(unsigned int die, unsigned int cpu)
                        cpu, die, new);
        }
 found:
+       per_cpu(logical_maps.phys_die_id, cpu) = die;
+       per_cpu(logical_maps.logical_die_id, cpu) = new;
        cpu_data(cpu).topo.logical_die_id = new;
        return 0;
 }