x86/srso: Improve i-cache locality for alias mitigation
Author: Josh Poimboeuf <jpoimboe@kernel.org>
Tue, 5 Sep 2023 05:04:55 +0000 (22:04 -0700)
Committer: Borislav Petkov (AMD) <bp@alien8.de>
Fri, 20 Oct 2023 10:04:18 +0000 (12:04 +0200)
Move srso_alias_return_thunk() to the same section as
srso_alias_safe_ret() so they can share a cache line.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/eadaf5530b46a7ae8b936522da45ae555d2b3393.1693889988.git.jpoimboe@kernel.org
arch/x86/lib/retpoline.S

index cd86aeb5fdd3eac07cf3f7872d3e22b771967f54..9ab634f0b5d27657f0a6be71135a2fe30f780ba7 100644
@@ -177,15 +177,14 @@ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
        int3
 SYM_FUNC_END(srso_alias_safe_ret)
 
-       .section .text..__x86.return_thunk
-
-SYM_CODE_START(srso_alias_return_thunk)
+SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
        call srso_alias_safe_ret
        ud2
 SYM_CODE_END(srso_alias_return_thunk)
 
+       .section .text..__x86.return_thunk
 /*
  * Some generic notes on the untraining sequences:
  *