MIPS: smp: Remove tick_broadcast_count
authorPeter Xu <peterx@redhat.com>
Mon, 16 Dec 2019 21:31:24 +0000 (16:31 -0500)
committerIngo Molnar <mingo@kernel.org>
Fri, 6 Mar 2020 12:42:28 +0000 (13:42 +0100)
Now that smp_call_function_single_async() provides the protection of
returning -EBUSY when the csd object is still pending, the
tick_broadcast_count counter is no longer needed.

Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20191216213125.9536-3-peterx@redhat.com
arch/mips/kernel/smp.c

index f510c00bda8820077b40effd377f2799b68828b9..0def6242b3eab7639808f7bc2b13c136531a4771 100644 (file)
@@ -696,29 +696,22 @@ EXPORT_SYMBOL(flush_tlb_one);
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 
-static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
 static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
 
 void tick_broadcast(const struct cpumask *mask)
 {
-       atomic_t *count;
        call_single_data_t *csd;
        int cpu;
 
        for_each_cpu(cpu, mask) {
-               count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);
-
-               if (atomic_inc_return(count) == 1)
-                       smp_call_function_single_async(cpu, csd);
+               smp_call_function_single_async(cpu, csd);
        }
 }
 
 static void tick_broadcast_callee(void *info)
 {
-       int cpu = smp_processor_id();
        tick_receive_broadcast();
-       atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
 }
 
 static int __init tick_broadcast_init(void)