x86/aperfmperf: Replace aperfmperf_get_khz()
author	Thomas Gleixner <tglx@linutronix.de>
	Fri, 15 Apr 2022 19:20:02 +0000 (21:20 +0200)
committer	Thomas Gleixner <tglx@linutronix.de>
	Wed, 27 Apr 2022 18:22:19 +0000 (20:22 +0200)
The frequency invariance infrastructure provides the APERF/MPERF samples
already. Utilize them for the cpu frequency display in /proc/cpuinfo.
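
For reference: APERF advances with actual CPU cycles while MPERF advances
at the constant base rate, so the effective frequency over a sample window
is the APERF/MPERF delta ratio scaled by the base frequency, i.e. the
div64_u64(cpu_khz * acnt, mcnt) calculation in the diff below. A minimal
standalone sketch of that arithmetic (plain C with illustrative values,
not kernel API):

#include <stdint.h>
#include <stdio.h>

/*
 * Model of the kernel's div64_u64(cpu_khz * acnt, mcnt) calculation.
 * acnt/mcnt are the APERF/MPERF deltas over the sample window.
 */
static unsigned int effective_khz(uint64_t acnt, uint64_t mcnt, uint64_t cpu_khz)
{
	if (!mcnt)	/* no MPERF progress -> nothing to report */
		return 0;
	return (unsigned int)(cpu_khz * acnt / mcnt);
}

int main(void)
{
	/*
	 * 2.4 GHz base clock; APERF advanced 1.5x as fast as MPERF
	 * (turbo), so the effective frequency is 3600000 kHz.
	 */
	printf("%u kHz\n", effective_khz(6000000, 4000000, 2400000));
	return 0;
}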

A sample is considered valid for 20ms. For idle or isolated NOHZ full
CPUs the function therefore returns 0, which matches the previous
behaviour.
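
The 20ms window translates to jiffies as HZ / 50 (e.g. 5 ticks at
CONFIG_HZ=250), which is the MAX_SAMPLE_AGE check added below. A tiny
sketch of the staleness test (illustrative HZ value, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define HZ 250						/* illustrative CONFIG_HZ choice */
#define MAX_SAMPLE_AGE ((unsigned long)HZ / 50)	/* 20ms in ticks: 250/50 = 5 */

/* A sample is usable only if the tick updated it within the last 20ms. */
static bool sample_valid(unsigned long now, unsigned long last_update)
{
	return (now - last_update) <= MAX_SAMPLE_AGE;
}

int main(void)
{
	printf("%d\n", sample_valid(1005, 1000));	/* 1: exactly 20ms old, still valid */
	printf("%d\n", sample_valid(1006, 1000));	/* 0: stale, CPU was idle or isolated */
	return 0;
}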

This gets rid of the mass IPIs and the 20ms stabilization delay that
Eric observed when reading /proc/cpuinfo.

Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/r/20220415161206.875029458@linutronix.de
arch/x86/kernel/cpu/aperfmperf.c
fs/proc/cpuinfo.c
include/linux/cpufreq.h

diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 963c0697a92be023430807706f35e76274a987f3..e9d2da7effc9e0c1882be8ca6188c5025f8bc14a 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -101,49 +101,6 @@ static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
        return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
 }
 
-unsigned int aperfmperf_get_khz(int cpu)
-{
-       if (!cpu_khz)
-               return 0;
-
-       if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
-               return 0;
-
-       if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
-               return 0;
-
-       if (rcu_is_idle_cpu(cpu))
-               return 0; /* Idle CPUs are completely uninteresting. */
-
-       aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
-       return per_cpu(samples.khz, cpu);
-}
-
-void arch_freq_prepare_all(void)
-{
-       ktime_t now = ktime_get();
-       bool wait = false;
-       int cpu;
-
-       if (!cpu_khz)
-               return;
-
-       if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
-               return;
-
-       for_each_online_cpu(cpu) {
-               if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
-                       continue;
-               if (rcu_is_idle_cpu(cpu))
-                       continue; /* Idle CPUs are completely uninteresting. */
-               if (!aperfmperf_snapshot_cpu(cpu, now, false))
-                       wait = true;
-       }
-
-       if (wait)
-               msleep(APERFMPERF_REFRESH_DELAY_MS);
-}
-
 unsigned int arch_freq_get_on_cpu(int cpu)
 {
        struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);
@@ -530,6 +487,40 @@ void arch_scale_freq_tick(void)
        scale_freq_tick(acnt, mcnt);
 }
 
+/*
+ * Discard samples older than the defined maximum sample age of 20ms. There
+ * is no point in sending IPIs in such a case. If the scheduler tick was
+ * not running then the CPU is either idle or isolated.
+ */
+#define MAX_SAMPLE_AGE ((unsigned long)HZ / 50)
+
+unsigned int aperfmperf_get_khz(int cpu)
+{
+       struct aperfmperf *s = per_cpu_ptr(&cpu_samples, cpu);
+       unsigned long last;
+       unsigned int seq;
+       u64 acnt, mcnt;
+
+       if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
+               return 0;
+
+       do {
+               seq = raw_read_seqcount_begin(&s->seq);
+               last = s->last_update;
+               acnt = s->acnt;
+               mcnt = s->mcnt;
+       } while (read_seqcount_retry(&s->seq, seq));
+
+       /*
+        * Bail on invalid count and when the last update was too long ago,
+        * which covers idle and NOHZ full CPUs.
+        */
+       if (!mcnt || (jiffies - last) > MAX_SAMPLE_AGE)
+               return 0;
+
+       return div64_u64((cpu_khz * acnt), mcnt);
+}
+
 static int __init bp_init_aperfmperf(void)
 {
        if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c
index 419760fd77bdd82b1fa1d1565bbcc8d13ae25967..f38bda5b83ec4418091fa52ea7e9758de56a6efe 100644
--- a/fs/proc/cpuinfo.c
+++ b/fs/proc/cpuinfo.c
@@ -5,14 +5,10 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
-__weak void arch_freq_prepare_all(void)
-{
-}
-
 extern const struct seq_operations cpuinfo_op;
+
 static int cpuinfo_open(struct inode *inode, struct file *file)
 {
-       arch_freq_prepare_all();
        return seq_open(file, &cpuinfo_op);
 }
 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 35c7d6db4139e46df2d8da6b8a0e147c012fc71d..d5595d57f4e53680a7e8b2a6e1bf6435a7c14501 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1199,7 +1199,6 @@ static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                        struct cpufreq_governor *old_gov) { }
 #endif
 
-extern void arch_freq_prepare_all(void);
 extern unsigned int arch_freq_get_on_cpu(int cpu);
 
 #ifndef arch_set_freq_scale