powerpc/idle: Store PURR snapshot in a per-cpu global variable
authorGautham R. Shenoy <ego@linux.vnet.ibm.com>
Tue, 7 Apr 2020 08:47:40 +0000 (14:17 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Thu, 30 Apr 2020 02:35:26 +0000 (12:35 +1000)
Currently, when a CPU goes idle, we take a snapshot of the PURR via
pseries_idle_prolog() which is used at the CPU idle exit to compute
the idle PURR cycles via the function pseries_idle_epilog().  Thus,
the value of the idle PURR cycles read either before pseries_idle_prolog()
or after pseries_idle_epilog() is always correct.

However, if we were to read the idle PURR cycles from an interrupt
context between pseries_idle_prolog() and pseries_idle_epilog() (this
will be done in a future patch), then, the value of the idle PURR thus
read will not include the cycles spent in the most recent idle period.
Thus, in that interrupt context, we will need access to the snapshot
of the PURR before going idle, in order to compute the idle PURR
cycles for the latest idle duration.

In this patch, we save the snapshot of PURR in pseries_idle_prolog()
in a per-cpu variable, instead of on the stack, so that it can be
accessed from an interrupt context.

Signed-off-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1586249263-14048-3-git-send-email-ego@linux.vnet.ibm.com
arch/powerpc/include/asm/idle.h
arch/powerpc/platforms/pseries/setup.c
drivers/cpuidle/cpuidle-pseries.c

index 32064a4c0dd78d472b580dad9d16e42b546e2f06..b90d75aa1f9e7e7663248a4694ad233351ff1089 100644 (file)
@@ -5,10 +5,27 @@
 #include <asm/paca.h>
 
 #ifdef CONFIG_PPC_PSERIES
-static inline void pseries_idle_prolog(unsigned long *in_purr)
+DECLARE_PER_CPU(u64, idle_entry_purr_snap);
+
+static inline void snapshot_purr_idle_entry(void)
+{
+       *this_cpu_ptr(&idle_entry_purr_snap) = mfspr(SPRN_PURR);
+}
+
+static inline void update_idle_purr_accounting(void)
+{
+       u64 wait_cycles;
+       u64 in_purr = *this_cpu_ptr(&idle_entry_purr_snap);
+
+       wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
+       wait_cycles += mfspr(SPRN_PURR) - in_purr;
+       get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
+}
+
+static inline void pseries_idle_prolog(void)
 {
        ppc64_runlatch_off();
-       *in_purr = mfspr(SPRN_PURR);
+       snapshot_purr_idle_entry();
        /*
         * Indicate to the HV that we are idle. Now would be
         * a good time to find other work to dispatch.
@@ -16,16 +33,12 @@ static inline void pseries_idle_prolog(unsigned long *in_purr)
        get_lppaca()->idle = 1;
 }
 
-static inline void pseries_idle_epilog(unsigned long in_purr)
+static inline void pseries_idle_epilog(void)
 {
-       u64 wait_cycles;
-
-       wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
-       wait_cycles += mfspr(SPRN_PURR) - in_purr;
-       get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
+       update_idle_purr_accounting();
        get_lppaca()->idle = 0;
-
        ppc64_runlatch_on();
 }
+
 #endif /* CONFIG_PPC_PSERIES */
 #endif
index 2f53e6b031a78d3abadaa5e89afe90922937bc80..4905c965e111d54730bc5bebc2e7aa1bdfb350e3 100644 (file)
@@ -318,10 +318,9 @@ static int alloc_dispatch_log_kmem_cache(void)
 }
 machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);
 
+DEFINE_PER_CPU(u64, idle_entry_purr_snap);
 static void pseries_lpar_idle(void)
 {
-       unsigned long in_purr;
-
        /*
         * Default handler to go into low thread priority and possibly
         * low power mode by ceding processor to hypervisor
@@ -331,7 +330,7 @@ static void pseries_lpar_idle(void)
                return;
 
        /* Indicate to hypervisor that we are idle. */
-       pseries_idle_prolog(&in_purr);
+       pseries_idle_prolog();
 
        /*
         * Yield the processor to the hypervisor.  We return if
@@ -342,7 +341,7 @@ static void pseries_lpar_idle(void)
         */
        cede_processor();
 
-       pseries_idle_epilog(in_purr);
+       pseries_idle_epilog();
 }
 
 /*
index 46d5e05fcf970db0dfcc17be1fbf9cf01f296cbd..6513ef2af66a81564dfb18310bbe84c0e225fea1 100644 (file)
@@ -36,12 +36,11 @@ static int snooze_loop(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
 {
-       unsigned long in_purr;
        u64 snooze_exit_time;
 
        set_thread_flag(TIF_POLLING_NRFLAG);
 
-       pseries_idle_prolog(&in_purr);
+       pseries_idle_prolog();
        local_irq_enable();
        snooze_exit_time = get_tb() + snooze_timeout;
 
@@ -65,7 +64,7 @@ static int snooze_loop(struct cpuidle_device *dev,
 
        local_irq_disable();
 
-       pseries_idle_epilog(in_purr);
+       pseries_idle_epilog();
 
        return index;
 }
@@ -91,9 +90,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv,
                                int index)
 {
-       unsigned long in_purr;
 
-       pseries_idle_prolog(&in_purr);
+       pseries_idle_prolog();
        get_lppaca()->donate_dedicated_cpu = 1;
 
        HMT_medium();
@@ -102,7 +100,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
        local_irq_disable();
        get_lppaca()->donate_dedicated_cpu = 0;
 
-       pseries_idle_epilog(in_purr);
+       pseries_idle_epilog();
 
        return index;
 }
@@ -111,9 +109,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
 {
-       unsigned long in_purr;
 
-       pseries_idle_prolog(&in_purr);
+       pseries_idle_prolog();
 
        /*
         * Yield the processor to the hypervisor.  We return if
@@ -125,7 +122,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
        check_and_cede_processor();
 
        local_irq_disable();
-       pseries_idle_epilog(in_purr);
+       pseries_idle_epilog();
 
        return index;
 }