Requires a compiler with -mfunction-return=thunk-extern
support for full protection. The kernel may run slower.
-config CPU_UNRET_ENTRY
+config MITIGATION_UNRET_ENTRY
bool "Enable UNRET on kernel entry"
depends on CPU_SUP_AMD && RETHUNK && X86_64
default y
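
For context (not part of this patch): the compiler support for -mfunction-return=thunk-extern mentioned in the help text above is detected at configure time with a cc-option probe. A minimal Kconfig sketch, assuming the mainline symbol name CC_HAS_RETURN_THUNK, which the return-thunk option (and therefore this UNRET entry mitigation) depends on:

config CC_HAS_RETURN_THUNK
	def_bool $(cc-option,-mfunction-return=thunk-extern)

If the compiler lacks the flag, the symbol stays off and the return-thunk based mitigation cannot be built, which is what the "for full protection" wording in the help text refers to.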
# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
#endif
-#ifdef CONFIG_CPU_UNRET_ENTRY
+#ifdef CONFIG_MITIGATION_UNRET_ENTRY
# define DISABLE_UNRET 0
#else
# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
*/
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
- (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
+ (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
ANNOTATE_RETPOLINE_SAFE
nop
#endif
.Lskip_rsb_\@:
.endm
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
#define CALL_UNTRAIN_RET "call entry_untrain_ret"
#else
#define CALL_UNTRAIN_RET ""
static inline void __x86_return_thunk(void) {}
#endif
-#ifdef CONFIG_CPU_UNRET_ENTRY
+#ifdef CONFIG_MITIGATION_UNRET_ENTRY
extern void retbleed_return_thunk(void);
#else
static inline void retbleed_return_thunk(void) {}
void init_spectral_chicken(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_CPU_UNRET_ENTRY
+#ifdef CONFIG_MITIGATION_UNRET_ENTRY
u64 value;
/*
return;
case RETBLEED_CMD_UNRET:
- if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+ if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
} else {
- pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
+ pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
goto do_cmd_auto;
}
break;
case RETBLEED_CMD_AUTO:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
+ if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
boot_cpu_has(X86_FEATURE_IBPB))
"fixed_percpu_data is not at start of per-cpu area");
#endif
-#ifdef CONFIG_CPU_UNRET_ENTRY
+#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif
#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
#endif /* CONFIG_CPU_SRSO */
-#ifdef CONFIG_CPU_UNRET_ENTRY
+#ifdef CONFIG_MITIGATION_UNRET_ENTRY
/*
* Some generic notes on the untraining sequences:
SYM_FUNC_END(retbleed_untrain_ret)
#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
-#else /* !CONFIG_CPU_UNRET_ENTRY */
+#else /* !CONFIG_MITIGATION_UNRET_ENTRY */
#define JMP_RETBLEED_UNTRAIN_RET "ud2"
-#endif /* CONFIG_CPU_UNRET_ENTRY */
+#endif /* CONFIG_MITIGATION_UNRET_ENTRY */
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
SYM_FUNC_START(entry_untrain_ret)
ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)
-#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */
+#endif /* CONFIG_MITIGATION_UNRET_ENTRY || CONFIG_CPU_SRSO */
#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
*/
.macro VALIDATE_UNRET_BEGIN
#if defined(CONFIG_NOINSTR_VALIDATION) && \
- (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
+ (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
.Lhere_\@:
.pushsection .discard.validate_unret
.long .Lhere_\@ - .
vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y)
vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
- $(if $(or $(CONFIG_CPU_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret)
+ $(if $(or $(CONFIG_MITIGATION_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret)
objtool-args = $(vmlinux-objtool-args-y) --link
# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
#endif
-#ifdef CONFIG_CPU_UNRET_ENTRY
+#ifdef CONFIG_MITIGATION_UNRET_ENTRY
# define DISABLE_UNRET 0
#else
# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))