genirq: Avoid summation loops for /proc/interrupts
authorBitao Hu <yaoma@linux.alibaba.com>
Thu, 11 Apr 2024 07:41:32 +0000 (15:41 +0800)
committerThomas Gleixner <tglx@linutronix.de>
Fri, 12 Apr 2024 15:08:05 +0000 (17:08 +0200)
show_interrupts() unconditionally accumulates the per CPU interrupt
statistics to determine whether an interrupt was ever raised.

This can be avoided for all interrupts which are not strictly per CPU
and not of type NMI because those interrupts already provide an
accumulated counter. The required logic is already implemented in
kstat_irqs().

Split the inner access logic out of kstat_irqs() and use it for
kstat_irqs() and show_interrupts() to avoid the accumulation loop
when possible.

Originally-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Bitao Hu <yaoma@linux.alibaba.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Liu Song <liusong@linux.alibaba.com>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Link: https://lore.kernel.org/r/20240411074134.30922-4-yaoma@linux.alibaba.com
kernel/irq/internals.h
kernel/irq/irqdesc.c
kernel/irq/proc.c

index 1d92532c2aaecefb80c59d6f8cdc33f990ff31a0..6c43ef3e7308b9a0eadde48e075fec21617c4446 100644 (file)
@@ -98,6 +98,8 @@ extern void mask_irq(struct irq_desc *desc);
 extern void unmask_irq(struct irq_desc *desc);
 extern void unmask_threaded_irq(struct irq_desc *desc);
 
+extern unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask);
+
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
 #else
index f348faffa7b4241fd354ded9c4b0dedcb8407a0f..3820931962102927d6a20f57680de95e8f3605d4 100644 (file)
@@ -976,24 +976,30 @@ static bool irq_is_nmi(struct irq_desc *desc)
        return desc->istate & IRQS_NMI;
 }
 
-static unsigned int kstat_irqs(unsigned int irq)
+unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned int sum = 0;
        int cpu;
 
-       if (!desc || !desc->kstat_irqs)
-               return 0;
        if (!irq_settings_is_per_cpu_devid(desc) &&
            !irq_settings_is_per_cpu(desc) &&
            !irq_is_nmi(desc))
                return data_race(desc->tot_count);
 
-       for_each_possible_cpu(cpu)
+       for_each_cpu(cpu, cpumask)
                sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));
        return sum;
 }
 
+static unsigned int kstat_irqs(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (!desc || !desc->kstat_irqs)
+               return 0;
+       return kstat_irqs_desc(desc, cpu_possible_mask);
+}
+
 #ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
 
 void kstat_snapshot_irqs(void)
index 6954e0a020475b863d9ba623dc515b674c42e3ce..5c320c3f10a7462e7165ee3060b3c7efbb1b5af4 100644 (file)
@@ -488,10 +488,8 @@ int show_interrupts(struct seq_file *p, void *v)
        if (!desc || irq_settings_is_hidden(desc))
                goto outsparse;
 
-       if (desc->kstat_irqs) {
-               for_each_online_cpu(j)
-                       any_count |= data_race(per_cpu(desc->kstat_irqs->cnt, j));
-       }
+       if (desc->kstat_irqs)
+               any_count = kstat_irqs_desc(desc, cpu_online_mask);
 
        if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
                goto outsparse;