arm64: Always load shadow stack pointer directly from the task struct
author Ard Biesheuvel <ardb@kernel.org>
Mon, 9 Jan 2023 17:47:59 +0000 (18:47 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Fri, 20 Jan 2023 14:26:18 +0000 (14:26 +0000)
All occurrences of the scs_load macro load the value of the shadow call
stack pointer from the task which is current at that point. So instead
of taking a task struct register argument in the scs_load macro to
specify the task struct to load from, let's always reference the current
task directly. This should make it much harder to exploit any
instruction sequences reloading the shadow call stack pointer register
from memory.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/scs.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S

index ff7da1268a52ab79216e7986590963fdd961f2a7..13df982a080805e6a747c09345f5cd183b7289ec 100644 (file)
 #ifdef CONFIG_SHADOW_CALL_STACK
        scs_sp  .req    x18
 
-       .macro scs_load tsk
-       ldr     scs_sp, [\tsk, #TSK_TI_SCS_SP]
+       .macro scs_load_current
+       get_current_task scs_sp
+       ldr     scs_sp, [scs_sp, #TSK_TI_SCS_SP]
        .endm
 
        .macro scs_save tsk
        str     scs_sp, [\tsk, #TSK_TI_SCS_SP]
        .endm
 #else
-       .macro scs_load tsk
+       .macro scs_load_current
        .endm
 
        .macro scs_save tsk
index 11cb99c4d298784d091e33e5003b533800a48872..546f7773238ea45d5f247a45e097788d67b219ce 100644 (file)
@@ -275,7 +275,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
 alternative_else_nop_endif
 1:
 
-       scs_load tsk
+       scs_load_current
        .else
        add     x21, sp, #PT_REGS_SIZE
        get_current_task tsk
@@ -848,7 +848,7 @@ SYM_FUNC_START(cpu_switch_to)
        msr     sp_el0, x1
        ptrauth_keys_install_kernel x1, x8, x9, x10
        scs_save x0
-       scs_load x1
+       scs_load_current
        ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
index 952e17bd1c0b4f91a6c719afffc302e36c9e6f52..b9c1a506798ea315874efd8eee94ff2b674dcffe 100644 (file)
@@ -404,7 +404,7 @@ SYM_FUNC_END(create_kernel_mapping)
        stp     xzr, xzr, [sp, #S_STACKFRAME]
        add     x29, sp, #S_STACKFRAME
 
-       scs_load \tsk
+       scs_load_current
 
        adr_l   \tmp1, __per_cpu_offset
        ldr     w\tmp2, [\tsk, #TSK_TI_CPU]