From: Ingo Molnar
Date: Wed, 10 Apr 2024 04:59:47 +0000 (+0200)
Subject: x86/cpu: Improve readability of per-CPU cpumask initialization code
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=dbbe13a6f66b228a2867844ad4dd108576134775;p=linux.git

x86/cpu: Improve readability of per-CPU cpumask initialization code

In smp_prepare_cpus_common() and x2apic_prepare_cpu():

 - use 'cpu' instead of 'i'
 - use 'node' instead of 'n'
 - use vertical alignment to improve readability
 - better structure basic blocks
 - reduce col80 checkpatch damage

Signed-off-by: Ingo Molnar
Cc: linux-kernel@vger.kernel.org
---

diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index afbb885ce2904..7db83212effb0 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -178,14 +178,16 @@ static int x2apic_prepare_cpu(unsigned int cpu)
 	u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
 	u32 cluster = apic_cluster(phys_apicid);
 	u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf));
+	int node = cpu_to_node(cpu);
 
 	x86_cpu_to_logical_apicid[cpu] = logical_apicid;
 
-	if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0)
+	if (alloc_clustermask(cpu, cluster, node) < 0)
 		return -ENOMEM;
-	if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL,
-				     cpu_to_node(cpu)))
+
+	if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
 		return -ENOMEM;
+
 	return 0;
 }
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 536dad1440369..a58109583c476 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1033,21 +1033,22 @@ static __init void disable_smp(void)
 
 void __init smp_prepare_cpus_common(void)
 {
-	unsigned int i, n;
+	unsigned int cpu, node;
 
 	/* Mark all except the boot CPU as hotpluggable */
-	for_each_possible_cpu(i) {
-		if (i)
-			per_cpu(cpu_info.cpu_index, i) = nr_cpu_ids;
+	for_each_possible_cpu(cpu) {
+		if (cpu)
+			per_cpu(cpu_info.cpu_index, cpu) = nr_cpu_ids;
 	}
 
-	for_each_possible_cpu(i) {
-		n = cpu_to_node(i);
-		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, i), GFP_KERNEL, n);
-		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, i), GFP_KERNEL, n);
-		zalloc_cpumask_var_node(&per_cpu(cpu_die_map, i), GFP_KERNEL, n);
-		zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL, n);
-		zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL, n);
+	for_each_possible_cpu(cpu) {
+		node = cpu_to_node(cpu);
+
+		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node);
+		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node);
+		zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node);
+		zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node);
+		zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node);
 	}
 
 	set_cpu_sibling_map(0);
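
As context for the x2apic_cluster.c hunk, below is a minimal standalone sketch (userspace C, not kernel code) of the logical APIC ID expression the patch leaves unchanged: (cluster << 16) | (1 << (phys_apicid & 0xf)). It assumes, as in the kernel, that apic_cluster() maps a physical x2APIC ID to its 16-CPU cluster via phys_apicid >> 4; the helper names x2apic_cluster_of() and x2apic_logical_id() are illustrative only.

	/*
	 * Sketch of the x2APIC cluster-mode logical ID derivation used in
	 * x2apic_prepare_cpu(): bits 31:16 carry the cluster number, bits
	 * 15:0 carry a one-hot bit for the CPU's slot within its cluster.
	 */
	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative stand-in for the kernel's apic_cluster(): id >> 4 */
	static uint32_t x2apic_cluster_of(uint32_t phys_apicid)
	{
		return phys_apicid >> 4;
	}

	/* Same expression as in the patched function */
	static uint32_t x2apic_logical_id(uint32_t phys_apicid)
	{
		uint32_t cluster = x2apic_cluster_of(phys_apicid);

		return (cluster << 16) | (1u << (phys_apicid & 0xf));
	}

	int main(void)
	{
		/* Example: physical ID 0x23 -> cluster 0x2, bit 3 in the cluster */
		uint32_t id = 0x23;

		printf("phys 0x%x -> logical 0x%x\n", id, x2apic_logical_id(id));
		return 0;
	}

For physical APIC ID 0x23 this prints logical ID 0x20008: cluster 2 in the upper half-word, CPU bit 3 set in the lower half-word.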