 extern void unmask_irq(struct irq_desc *desc);
 extern void unmask_threaded_irq(struct irq_desc *desc);
 
+extern unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask);
+
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
 #else
 
        return desc->istate & IRQS_NMI;
 }
 
-static unsigned int kstat_irqs(unsigned int irq)
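+/*
+ * Sum the per-CPU interrupt counts of @desc for the CPUs in @cpumask.
+ * Callers must ensure that desc->kstat_irqs is valid.
+ */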
+unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned int sum = 0;
        int cpu;
 
-       if (!desc || !desc->kstat_irqs)
-               return 0;
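+       /*
+        * Interrupts which are neither per-CPU nor NMIs maintain an
+        * aggregated count in desc->tot_count, which avoids the per-CPU walk.
+        */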
        if (!irq_settings_is_per_cpu_devid(desc) &&
            !irq_settings_is_per_cpu(desc) &&
            !irq_is_nmi(desc))
                return data_race(desc->tot_count);
 
-       for_each_possible_cpu(cpu)
+       for_each_cpu(cpu, cpumask)
                sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));
        return sum;
 }
 
+static unsigned int kstat_irqs(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (!desc || !desc->kstat_irqs)
+               return 0;
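+       /* Sum over all possible CPUs to include counts from offline CPUs. */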
+       return kstat_irqs_desc(desc, cpu_possible_mask);
+}
+
 #ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
 
 void kstat_snapshot_irqs(void)
 
        if (!desc || irq_settings_is_hidden(desc))
                goto outsparse;
 
-       if (desc->kstat_irqs) {
-               for_each_online_cpu(j)
-                       any_count |= data_race(per_cpu(desc->kstat_irqs->cnt, j));
-       }
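+       /* /proc/interrupts only shows online CPUs, so sum only those. */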
+       if (desc->kstat_irqs)
+               any_count = kstat_irqs_desc(desc, cpu_online_mask);
 
        if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
                goto outsparse;