arm64: fpsimd: Bring cond_yield asm macro in line with new rules
author     Ard Biesheuvel <ardb@kernel.org>
           Thu, 11 Jan 2024 11:24:48 +0000 (12:24 +0100)
committer  Will Deacon <will@kernel.org>
           Fri, 12 Jan 2024 12:48:27 +0000 (12:48 +0000)
We no longer disable softirqs or preemption when doing kernel mode SIMD,
and so for fully preemptible kernels, there is no longer a need to do any
explicit yielding (and for non-preemptible kernels, yielding is not
needed either).

That leaves voluntary preemption, where only explicit yield calls may
result in a reschedule. To retain the existing behavior for such a
configuration, the macro must account for the new situation: the preempt
count will now be zero rather than one, and yielding to pending
softirqs is unnecessary.
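
The reason a single zero check now suffices is the way arm64 folds
TIF_NEED_RESCHED into thread_info::preempt_count. A minimal C sketch of
that layout (little-endian case; names mirror
arch/arm64/include/asm/thread_info.h, but this is an illustration rather
than the kernel definition):

    #include <stdint.h>

    /* The 64-bit word that cond_yield loads via TSK_TI_PREEMPT. */
    union preempt_count_sketch {
            uint64_t preempt_count;
            struct {
                    uint32_t count;        /* preempt disable depth, including
                                              the softirq count bits */
                    uint32_t need_resched; /* stored negated: 0 iff
                                              TIF_NEED_RESCHED is set */
            } preempt;
    };

A single 64-bit load of preempt_count therefore reads as zero exactly
when the count is zero and a reschedule is pending, so one CBZ covers
both conditions, and a nonzero softirq count keeps the macro from
yielding while serving a softirq.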

Fixes: aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20240111112447.577640-2-ardb+git@google.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/assembler.h
arch/arm64/kernel/asm-offsets.c

index 7b1975bf4b90e7a999de178140bc92aeb63f1c02..513787e4332993e18ec82db1a47f7814ca553d4c 100644 (file)
@@ -760,32 +760,25 @@ alternative_endif
 .endm
 
        /*
-        * Check whether preempt/bh-disabled asm code should yield as soon as
-        * it is able. This is the case if we are currently running in task
-        * context, and either a softirq is pending, or the TIF_NEED_RESCHED
-        * flag is set and re-enabling preemption a single time would result in
-        * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
-        * stored negated in the top word of the thread_info::preempt_count
+        * Check whether asm code should yield as soon as it is able. This is
+        * the case if we are currently running in task context, and the
+        * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
+        * is stored negated in the top word of the thread_info::preempt_count
         * field)
         */
-       .macro          cond_yield, lbl:req, tmp:req, tmp2:req
+       .macro          cond_yield, lbl:req, tmp:req, tmp2
+#ifdef CONFIG_PREEMPT_VOLUNTARY
        get_current_task \tmp
        ldr             \tmp, [\tmp, #TSK_TI_PREEMPT]
        /*
         * If we are serving a softirq, there is no point in yielding: the
         * softirq will not be preempted no matter what we do, so we should
-        * run to completion as quickly as we can.
+        * run to completion as quickly as we can. The preempt_count field will
+        * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
+        * catch this case too.
         */
-       tbnz            \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
-#ifdef CONFIG_PREEMPTION
-       sub             \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
        cbz             \tmp, \lbl
 #endif
-       adr_l           \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-       get_this_cpu_offset     \tmp2
-       ldr             w\tmp, [\tmp, \tmp2]
-       cbnz            w\tmp, \lbl     // yield on pending softirq in task context
-.Lnoyield_\@:
        .endm
 
 /*
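
For context (not part of this patch), cond_yield is invoked from
long-running loops such as the accelerated hash routines under
arch/arm64/crypto. A hypothetical sketch of the calling pattern, with
made-up labels and scratch registers:

    0:      /* ... process one block of input ... */
            subs    x2, x2, #1              // blocks remaining?
            b.eq    2f                      // none: finish up
            cond_yield      1f, x8, x9      // reschedule due? go save state
            b       0b                      // otherwise keep crunching
    1:      /* ... stash partial state; return to the C caller, which can
               reschedule and then re-enter the routine ... */
    2:      ret

Note that the patch drops :req from the tmp2 argument: the macro no
longer needs a second scratch register, but existing callers that still
pass one keep assembling unchanged.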
index 5ff1942b04fcfd94e334b6b204f7f3885647c68d..5a7dbbe0ce639a8b7a74012c957b3c7335e8f160 100644 (file)
@@ -117,8 +117,6 @@ int main(void)
   DEFINE(DMA_FROM_DEVICE,      DMA_FROM_DEVICE);
   BLANK();
   DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
-  DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
-  DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
   DEFINE(CPU_BOOT_TASK,                offsetof(struct secondary_data, task));
   BLANK();
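
The two deleted constants were consumed only by the old macro body; with
the softirq checks gone, no assembly references them. For background,
asm-offsets constants reach assembly via the kbuild DEFINE() helper (see
include/linux/kbuild.h), which embeds each value as a marker string in
the compiler's assembly output for kbuild to post-process into generated
#defines; a sketch of the mechanism:

    /*
     * Emits "->SYM <value>" into the generated asm; a kbuild script turns
     * each marker into "#define SYM <value>" in asm-offsets.h, which is
     * what lets .S files use names like TSK_TI_PREEMPT.
     */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))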