From: Borislav Petkov (AMD)
Date: Mon, 15 May 2023 14:07:26 +0000 (+0200)
Subject: x86/retbleed: Add __x86_return_thunk alignment checks
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=f220125b999b2c9694149c6bda2798d8096f47ed;p=linux.git

x86/retbleed: Add __x86_return_thunk alignment checks

Add a linker assertion and compute the 0xcc padding dynamically so that
__x86_return_thunk is always cacheline-aligned. Leave the SYM_START()
macro in as the untraining doesn't need ENDBR annotations anyway.

Suggested-by: Andrew Cooper
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Andrew Cooper
Link: https://lore.kernel.org/r/20230515140726.28689-1-bp@alien8.de
---

diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 25f155205770c..03c885d3640fb 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -508,4 +508,8 @@ INIT_PER_CPU(irq_stack_backing_store);
 	   "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
+#ifdef CONFIG_RETHUNK
+. = ASSERT((__x86_return_thunk & 0x3f) == 0, "__x86_return_thunk not cacheline-aligned");
+#endif
+
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index b3b1e376dce86..3fd066d42ec05 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -143,7 +143,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
  * from re-poisioning the BTB prediction.
  */
 	.align 64
-	.skip 63, 0xcc
+	.skip 64 - (__x86_return_thunk - zen_untrain_ret), 0xcc
 SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	/*
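
The new ASSERT() in vmlinux.lds.S rests on the fact that an address sits
on a 64-byte cacheline boundary exactly when its low six bits are zero.
A minimal userspace sketch of that mask test (the cacheline_aligned()
helper below is hypothetical, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Same check the linker script performs: 64-byte (cacheline)
	 * alignment means the low 6 bits of the address are clear. */
	static bool cacheline_aligned(uint64_t addr)
	{
		return (addr & 0x3f) == 0;
	}

	int main(void)
	{
		printf("%d\n", cacheline_aligned(0xffffffff82000040ULL)); /* 1 */
		printf("%d\n", cacheline_aligned(0xffffffff82000041ULL)); /* 0 */
		return 0;
	}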
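
The reworked .skip expression can be checked with the same arithmetic the
assembler does: after .align 64 the location counter is on a cacheline
boundary, so padding 64 - (__x86_return_thunk - zen_untrain_ret) bytes of
0xcc puts __x86_return_thunk exactly on the next boundary, whatever the
size of the untrain sequence. A worked sketch, assuming (as with the
F6 C3 CC encoding trick in this file) that the thunk sits 1 byte past
zen_untrain_ret; the addresses are illustrative only:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t base = 0x1000;	/* location counter after .align 64 */
		uint64_t off  = 1;	/* assumed __x86_return_thunk - zen_untrain_ret */

		uint64_t pad     = 64 - off;		/* bytes of 0xcc the .skip emits */
		uint64_t untrain = base + pad;		/* zen_untrain_ret */
		uint64_t thunk   = untrain + off;	/* __x86_return_thunk */

		/* prints: pad=63 thunk=0x1040 aligned=1 */
		printf("pad=%lu thunk=%#lx aligned=%d\n",
		       (unsigned long)pad, (unsigned long)thunk,
		       (thunk & 0x3f) == 0);
		return 0;
	}

The old hardcoded ".skip 63, 0xcc" gave the same result only while the
sequence up to the thunk stayed 1 byte long; the computed form keeps the
linker assertion from firing if that code ever changes size.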