arm64: idle: don't instrument idle code with KCOV
author Mark Rutland <mark.rutland@arm.com>
Mon, 7 Jun 2021 09:46:24 +0000 (10:46 +0100)
committer Will Deacon <will@kernel.org>
Mon, 7 Jun 2021 10:35:56 +0000 (11:35 +0100)
The low-level idle code in arch_cpu_idle() and its callees runs at a
time when portions of the kernel environment aren't available.
For example, RCU may not be watching, and lockdep state may be
out-of-sync with the hardware. Due to this, it is not sound to
instrument this code.

We generally avoid instrumentation by marking the entry functions as
`noinstr`, but currently this doesn't inhibit KCOV instrumentation.
Prevent this by factoring these functions into a new idle.c so that we
can disable KCOV for the entire compilation unit, as is done for the
core idle code in kernel/sched/idle.c.

We'd like to keep instrumentation for the rest of process.c and for the
existing code in cpuidle.c, so a new compilation unit is preferable. The
arch_cpu_idle_dead() function in process.c is a CPU hotplug function
that is safe to instrument, so it is left as-is in process.c.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-21-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/kernel/Makefile
arch/arm64/kernel/idle.c [new file with mode: 0644]
arch/arm64/kernel/process.c

diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 787c3c83edd7a4bc84c8eb70c99bcb00b7230515..de434204d72342bff16b0bd3043065ebf5abeef2 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -18,6 +18,7 @@ CFLAGS_syscall.o      += -fno-stack-protector
 # available or are out-of-sync with HW state. Since `noinstr` doesn't always
 # inhibit KCOV instrumentation, disable it for the entire compilation unit.
 KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_idle.o := n
 
 # Object file lists.
 obj-y                  := debug-monitors.o entry.o irq.o fpsimd.o              \
@@ -27,7 +28,7 @@ obj-y                 := debug-monitors.o entry.o irq.o fpsimd.o              \
                           return_address.o cpuinfo.o cpu_errata.o              \
                           cpufeature.o alternative.o cacheinfo.o               \
                           smp.o smp_spin_table.o topology.o smccc-call.o       \
-                          syscall.o proton-pack.o idreg-override.o
+                          syscall.o proton-pack.o idreg-override.o idle.o
 
 targets                        += efi-entry.o
 
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
new file mode 100644
index 0000000..45c7920
--- /dev/null
+++ b/arch/arm64/kernel/idle.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level idle sequences
+ */
+
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+
+#include <asm/arch_gicv3.h>
+#include <asm/barrier.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+static void noinstr __cpu_do_idle(void)
+{
+       dsb(sy);
+       wfi();
+}
+
+static void noinstr __cpu_do_idle_irqprio(void)
+{
+       unsigned long pmr;
+       unsigned long daif_bits;
+
+       daif_bits = read_sysreg(daif);
+       write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
+
+       /*
+        * Unmask PMR before going idle to make sure interrupts can
+        * be raised.
+        */
+       pmr = gic_read_pmr();
+       gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
+       __cpu_do_idle();
+
+       gic_write_pmr(pmr);
+       write_sysreg(daif_bits, daif);
+}
+
+/*
+ *     cpu_do_idle()
+ *
+ *     Idle the processor (wait for interrupt).
+ *
+ *     If the CPU supports priority masking we must do additional work to
+ *     ensure that interrupts are not masked at the PMR (because the core will
+ *     not wake up if we block the wake up signal in the interrupt controller).
+ */
+void noinstr cpu_do_idle(void)
+{
+       if (system_uses_irq_prio_masking())
+               __cpu_do_idle_irqprio();
+       else
+               __cpu_do_idle();
+}
+
+/*
+ * This is our default idle handler.
+ */
+void noinstr arch_cpu_idle(void)
+{
+       /*
+        * This should do all the clock switching and wait for interrupt
+        * tricks
+        */
+       cpu_do_idle();
+       raw_local_irq_enable();
+}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 2e733770915520dbee8b54393237e30147308b76..72c5d80f03fa845c02727782c3921c150a302cca 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -73,63 +73,6 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 
 void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 
-static void noinstr __cpu_do_idle(void)
-{
-       dsb(sy);
-       wfi();
-}
-
-static void noinstr __cpu_do_idle_irqprio(void)
-{
-       unsigned long pmr;
-       unsigned long daif_bits;
-
-       daif_bits = read_sysreg(daif);
-       write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
-
-       /*
-        * Unmask PMR before going idle to make sure interrupts can
-        * be raised.
-        */
-       pmr = gic_read_pmr();
-       gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
-       __cpu_do_idle();
-
-       gic_write_pmr(pmr);
-       write_sysreg(daif_bits, daif);
-}
-
-/*
- *     cpu_do_idle()
- *
- *     Idle the processor (wait for interrupt).
- *
- *     If the CPU supports priority masking we must do additional work to
- *     ensure that interrupts are not masked at the PMR (because the core will
- *     not wake up if we block the wake up signal in the interrupt controller).
- */
-void noinstr cpu_do_idle(void)
-{
-       if (system_uses_irq_prio_masking())
-               __cpu_do_idle_irqprio();
-       else
-               __cpu_do_idle();
-}
-
-/*
- * This is our default idle handler.
- */
-void noinstr arch_cpu_idle(void)
-{
-       /*
-        * This should do all the clock switching and wait for interrupt
-        * tricks
-        */
-       cpu_do_idle();
-       raw_local_irq_enable();
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 void arch_cpu_idle_dead(void)
 {