sparc64: Fix number of online CPUs
authorSam Ravnborg <sam@ravnborg.org>
Sat, 30 Mar 2024 09:57:45 +0000 (10:57 +0100)
committerAndreas Larsson <andreas@gaisler.com>
Mon, 22 Apr 2024 13:33:07 +0000 (15:33 +0200)
Nick Bowler reported:
    When using newer kernels on my Ultra 60 with dual 450MHz UltraSPARC-II
    CPUs, I noticed that only CPU 0 comes up, while older kernels (including
    4.7) are working fine with both CPUs.

      I bisected the failure to this commit:

      9b2f753ec23710aa32c0d837d2499db92fe9115b is the first bad commit
      commit 9b2f753ec23710aa32c0d837d2499db92fe9115b
      Author: Atish Patra <atish.patra@oracle.com>
      Date:   Thu Sep 15 14:54:40 2016 -0600

      sparc64: Fix cpu_possible_mask if nr_cpus is set

    This is a small change that reverts very easily on top of 5.18: there is
    just one trivial conflict.  Once reverted, both CPUs work again.

    Maybe this is related to the fact that the CPUs on this system are
    numbered CPU0 and CPU2 (there is no CPU1)?

The current code that adjusts cpu_possible based on nr_cpu_ids does not
take into account that CPUs may not come one after another.
Move the check to the function that sets up the cpu_possible mask
so there is no need to adjust it later.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Fixes: 9b2f753ec237 ("sparc64: Fix cpu_possible_mask if nr_cpus is set")
Reported-by: Nick Bowler <nbowler@draconx.ca>
Tested-by: Nick Bowler <nbowler@draconx.ca>
Link: https://lore.kernel.org/sparclinux/20201009161924.c8f031c079dd852941307870@gmx.de/
Link: https://lore.kernel.org/all/CADyTPEwt=ZNams+1bpMB1F9w_vUdPsGCt92DBQxxq_VtaLoTdw@mail.gmail.com/
Cc: stable@vger.kernel.org # v4.8+
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Atish Patra <atish.patra@oracle.com>
Cc: Bob Picco <bob.picco@oracle.com>
Cc: Vijay Kumar <vijay.ac.kumar@oracle.com>
Cc: David S. Miller <davem@davemloft.net>
Reviewed-by: Andreas Larsson <andreas@gaisler.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20240330-sparc64-warnings-v1-9-37201023ee2f@ravnborg.org
Signed-off-by: Andreas Larsson <andreas@gaisler.com>
arch/sparc/include/asm/smp_64.h
arch/sparc/kernel/prom_64.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_64.c

index 505b6700805dd6fc22e49357e300c37cba4584b1..0964fede0b2cc67daa60bb705e275f2604595f11 100644 (file)
@@ -47,7 +47,6 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-void smp_fill_in_cpu_possible_map(void);
 void smp_fill_in_sib_core_maps(void);
 void __noreturn cpu_play_dead(void);
 
@@ -77,7 +76,6 @@ void __cpu_die(unsigned int cpu);
 #define smp_fill_in_sib_core_maps() do { } while (0)
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
-#define smp_fill_in_cpu_possible_map() do { } while (0)
 #define smp_init_cpu_poke() do { } while (0)
 #define scheduler_poke() do { } while (0)
 
index 998aa693d491258b3e1346716b8832e776679607..ba82884cb92aa38b2403a1144277c49a99a81cb0 100644 (file)
@@ -483,7 +483,9 @@ static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
        ncpus_probed++;
 #ifdef CONFIG_SMP
        set_cpu_present(cpuid, true);
-       set_cpu_possible(cpuid, true);
+
+       if (num_possible_cpus() < nr_cpu_ids)
+               set_cpu_possible(cpuid, true);
 #endif
        return NULL;
 }
index 1d519f18d2b2d1119ae3351485aadce22bacc25c..63615f5c99b4770fc9766851d10e99deb4b23612 100644 (file)
@@ -671,7 +671,6 @@ void __init setup_arch(char **cmdline_p)
 
        paging_init();
        init_sparc64_elf_hwcap();
-       smp_fill_in_cpu_possible_map();
        /*
         * Once the OF device tree and MDESC have been setup and nr_cpus has
         * been parsed, we know the list of possible cpus.  Therefore we can
index a0cc9bb41a921cb3acd74c50a40f663bc042f2e3..e40c395db2026cd8afd2ac6d6536014c78f80d00 100644 (file)
@@ -1216,20 +1216,6 @@ void __init smp_setup_processor_id(void)
                xcall_deliver_impl = hypervisor_xcall_deliver;
 }
 
-void __init smp_fill_in_cpu_possible_map(void)
-{
-       int possible_cpus = num_possible_cpus();
-       int i;
-
-       if (possible_cpus > nr_cpu_ids)
-               possible_cpus = nr_cpu_ids;
-
-       for (i = 0; i < possible_cpus; i++)
-               set_cpu_possible(i, true);
-       for (; i < NR_CPUS; i++)
-               set_cpu_possible(i, false);
-}
-
 void smp_fill_in_sib_core_maps(void)
 {
        unsigned int i;