powerpc: add support for syscall stack randomization
author	Xiu Jianfeng <xiujianfeng@huawei.com>
Fri, 1 Jul 2022 08:24:35 +0000 (16:24 +0800)
committer	Michael Ellerman <mpe@ellerman.id.au>
Thu, 28 Jul 2022 06:22:15 +0000 (16:22 +1000)
Add support for applying a random offset to the stack while handling
syscalls. Use mftb() rather than get_random_int() as the entropy source,
for better performance on the syscall path.

Because the alloca() used for the offset would otherwise force unconditional
stack canaries on syscall entry, also disable the stack protector for this
code so the entry path is not slowed down by needless checks. As there is no
general way to control stack protector coverage with a function attribute,
it must be disabled at the compilation unit level.
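To see why the stack protector would otherwise fire on every syscall, consider
a minimal example (hypothetical file and function names, outside the kernel
tree): any function that calls alloca() is treated as canary-worthy, so
-fstack-protector-strong instruments it unconditionally.

    /* canary_demo.c - hypothetical illustration, not kernel code. */
    #include <alloca.h>

    void consume(char *buf, unsigned int n);  /* external user; keeps buf live */

    void eats_stack(unsigned int n)
    {
            /* The alloca() alone is enough for the compiler to emit a canary
             * store on entry and a __stack_chk_fail check before returning. */
            char *buf = alloca(n & 0x3ff);

            consume(buf, n & 0x3ff);
    }

    /*
     * Compare the generated assembly:
     *   gcc -S -fstack-protector-strong canary_demo.c    (canary present)
     *   gcc -S -fno-stack-protector     canary_demo.c    (no canary)
     * which is the per-object override the Makefile hunk below applies to
     * syscall.o.
     */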

Signed-off-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220701082435.126596-3-xiujianfeng@huawei.com
arch/powerpc/Kconfig
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/syscall.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 04499f22d06b39d71eeeed4bf27e028d963f03c6..2daeaab21240e97c494b3e13a01caa14a10c5bec 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -196,6 +196,7 @@ config PPC
        select HAVE_ARCH_KASAN                  if PPC_BOOK3E_64
        select HAVE_ARCH_KASAN_VMALLOC          if HAVE_ARCH_KASAN
        select HAVE_ARCH_KFENCE                 if PPC_BOOK3S_32 || PPC_8xx || 40x
+       select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if COMPAT
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0963d39464c8a5c5792b2d0a3c06786be60bb06f..06d2d1f78f71b4f7c32e4b63d2d1eb922f8956a2 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -54,6 +54,13 @@ CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
 CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
 endif
 
+ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+# Remove stack protector to avoid triggering unneeded stack canary
+# checks due to randomize_kstack_offset.
+CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
+CFLAGS_syscall.o += -fno-stack-protector
+endif
+
 obj-y                          := cputable.o syscalls.o \
                                   irq.o align.o signal_$(BITS).o pmc.o vdso.o \
                                   process.o systbl.o idle.o \
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index 4d5689eeaf2557f7c143aa4bb741510b30b87627..81ace9e8b72b685b93d5fa788aa5b77ab150c1ad 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -2,6 +2,7 @@
 
 #include <linux/compat.h>
 #include <linux/context_tracking.h>
+#include <linux/randomize_kstack.h>
 
 #include <asm/interrupt.h>
 #include <asm/kup.h>
@@ -18,10 +19,12 @@ notrace long system_call_exception(long r3, long r4, long r5,
                                   long r6, long r7, long r8,
                                   unsigned long r0, struct pt_regs *regs)
 {
+       long ret;
        syscall_fn f;
 
        kuap_lock();
 
+       add_random_kstack_offset();
        regs->orig_gpr3 = r3;
 
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
@@ -169,5 +172,19 @@ notrace long system_call_exception(long r3, long r4, long r5,
                f = (void *)sys_call_table[r0];
        }
 
-       return f(r3, r4, r5, r6, r7, r8);
+       ret = f(r3, r4, r5, r6, r7, r8);
+
+       /*
+        * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+        * so the maximum stack offset is 1k bytes (10 bits).
+        *
+        * The actual entropy will be further reduced by the compiler when
+        * applying stack alignment constraints: the powerpc architecture
+        * may have two kinds of stack alignment (16-bytes and 8-bytes).
+        *
+        * So the resulting 6 or 7 bits of entropy is seen in SP[9:4] or SP[9:3].
+        */
+       choose_random_kstack_offset(mftb());
+
+       return ret;
 }
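
As a back-of-the-envelope illustration of the comment in the hunk above (a
sketch, not kernel code): KSTACK_OFFSET_MAX() keeps at most 10 bits of the
mftb() value, and the compiler then aligns the alloca() size to the stack
alignment, which is what collapses those 10 bits to the 6 or 7 that actually
move the stack pointer.

    #include <stdio.h>

    int main(void)
    {
            unsigned long rand = 0x2a5f3;      /* stand-in for an mftb() reading */
            unsigned long off  = rand & 0x3FF; /* KSTACK_OFFSET_MAX(): <= 1023 bytes */

            /* Only these bits survive the compiler's stack-alignment rounding: */
            printf("16-byte alignment: %#lx varies in SP[9:4] (6 bits)\n", off & ~0xFUL);
            printf(" 8-byte alignment: %#lx varies in SP[9:3] (7 bits)\n", off & ~0x7UL);
            return 0;
    }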