arm64: mte: optimize GCR_EL1 modification on kernel entry/exit
author: Peter Collingbourne <pcc@google.com>
Wed, 14 Jul 2021 01:36:38 +0000 (18:36 -0700)
committer: Catalin Marinas <catalin.marinas@arm.com>
Wed, 28 Jul 2021 17:40:12 +0000 (18:40 +0100)
Accessing GCR_EL1 and issuing an ISB can be expensive on some
microarchitectures. Although we must write to GCR_EL1, we can
restructure the code to avoid reading from it because the new value
can be derived entirely from the exclusion mask, which is already in
a GPR. Do so.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/I560a190a74176ca4cc5191dad08f77f6b1577c75
Link: https://lore.kernel.org/r/20210714013638.3995315-1-pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/entry.S

index 4b779f19848604aa2b369a0580ffb74fc3422b7a..8c8581e86a1afb9a371f8921a29c76051745d759 100644 (file)
@@ -168,15 +168,11 @@ alternative_else_nop_endif
 #endif
        .endm
 
-       .macro mte_set_gcr, tmp, tmp2
+       .macro mte_set_gcr, mte_ctrl, tmp
 #ifdef CONFIG_ARM64_MTE
-       /*
-        * Calculate and set the exclude mask preserving
-        * the RRND (bit[16]) setting.
-        */
-       mrs_s   \tmp2, SYS_GCR_EL1
-       bfxil   \tmp2, \tmp, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
-       msr_s   SYS_GCR_EL1, \tmp2
+       ubfx    \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
+       orr     \tmp, \tmp, #SYS_GCR_EL1_RRND
+       msr_s   SYS_GCR_EL1, \tmp
 #endif
        .endm