cpuidle: pseries: Mark ->enter() functions as __cpuidle
author Michael Ellerman <mpe@ellerman.id.au>
Thu, 6 Apr 2023 14:45:34 +0000 (00:45 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 20 Apr 2023 03:21:49 +0000 (13:21 +1000)
Code in the idle path is not allowed to be instrumented because RCU is
disabled; see commit 0e985e9d2286 ("cpuidle: Add comments about
noinstr/__cpuidle usage").

Mark the cpuidle ->enter() callbacks as __cpuidle and switch them to the
raw_local_irq_*() routines, so that no instrumented code is called from
the idle path.
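
For illustration only (not part of the patch), the callbacks end up with
the following shape; the function name below is a placeholder for the real
callbacks in the diff (snooze_loop(), dedicated_cede_loop(),
shared_cede_loop()):

static __cpuidle
int example_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv,
		  int index)
{
	/*
	 * __cpuidle places the function in the .cpuidle.text section,
	 * which is handled like noinstr code: no tracing or other
	 * instrumentation may run from here while RCU is disabled.
	 */
	raw_local_irq_enable();	/* raw_ variant skips the irqflags tracing hooks */

	/* low-power wait goes here */

	raw_local_irq_disable();
	return index;
}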

Reported-by: Sachin Sant <sachinp@linux.ibm.com>
Link: https://lore.kernel.org/all/4C073F6A-C812-4C4A-BB7A-ECD10B75FB88@linux.ibm.com/
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Tested-by: Sachin Sant <sachinp@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230406144535.3786008-3-mpe@ellerman.id.au
drivers/cpuidle/cpuidle-pseries.c

index 1bad4d2b7be33da3152f825e344febb0457eef74..a7d33f3ee01e71f2da37f3e977368e2b2ed26663 100644
@@ -33,16 +33,16 @@ static struct cpuidle_state *cpuidle_state_table __read_mostly;
 static u64 snooze_timeout __read_mostly;
 static bool snooze_timeout_en __read_mostly;
 
-static int snooze_loop(struct cpuidle_device *dev,
-                       struct cpuidle_driver *drv,
-                       int index)
+static __cpuidle
+int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+               int index)
 {
        u64 snooze_exit_time;
 
        set_thread_flag(TIF_POLLING_NRFLAG);
 
        pseries_idle_prolog();
-       local_irq_enable();
+       raw_local_irq_enable();
        snooze_exit_time = get_tb() + snooze_timeout;
        dev->poll_time_limit = false;
 
@@ -65,14 +65,14 @@ static int snooze_loop(struct cpuidle_device *dev,
        HMT_medium();
        clear_thread_flag(TIF_POLLING_NRFLAG);
 
-       local_irq_disable();
+       raw_local_irq_disable();
 
        pseries_idle_epilog();
 
        return index;
 }
 
-static void check_and_cede_processor(void)
+static __cpuidle void check_and_cede_processor(void)
 {
        /*
         * Ensure our interrupt state is properly tracked,
@@ -216,9 +216,9 @@ static int __init parse_cede_parameters(void)
 #define NR_DEDICATED_STATES    2 /* snooze, CEDE */
 static u8 cede_latency_hint[NR_DEDICATED_STATES];
 
-static int dedicated_cede_loop(struct cpuidle_device *dev,
-                               struct cpuidle_driver *drv,
-                               int index)
+static __cpuidle
+int dedicated_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+                       int index)
 {
        u8 old_latency_hint;
 
@@ -230,7 +230,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
        HMT_medium();
        check_and_cede_processor();
 
-       local_irq_disable();
+       raw_local_irq_disable();
        get_lppaca()->donate_dedicated_cpu = 0;
        get_lppaca()->cede_latency_hint = old_latency_hint;
 
@@ -239,9 +239,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
        return index;
 }
 
-static int shared_cede_loop(struct cpuidle_device *dev,
-                       struct cpuidle_driver *drv,
-                       int index)
+static __cpuidle
+int shared_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+                    int index)
 {
 
        pseries_idle_prolog();
@@ -255,7 +255,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
         */
        check_and_cede_processor();
 
-       local_irq_disable();
+       raw_local_irq_disable();
        pseries_idle_epilog();
 
        return index;
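
For context (this part of the file is unchanged and therefore not shown in
the diff), these functions are wired in as the ->enter() hooks of the
driver's cpuidle_state tables, roughly along the following lines; the
latency and residency values here are illustrative:

static struct cpuidle_state dedicated_states[NR_DEDICATED_STATES] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.exit_latency = 10,
		.target_residency = 100,
		.enter = dedicated_cede_loop },
};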