seccomp: Migrate to use SYSCALL_WORK flag
author     Gabriel Krisman Bertazi <krisman@collabora.com>
           Mon, 16 Nov 2020 17:42:00 +0000 (12:42 -0500)
committer  Thomas Gleixner <tglx@linutronix.de>
           Mon, 16 Nov 2020 20:53:15 +0000 (21:53 +0100)
On architectures using the generic syscall entry code, the architecture-independent
syscall work is moved to flags in thread_info::syscall_work. This removes
architecture dependencies and frees up TIF bits.

Define SYSCALL_WORK_SECCOMP, use it in the generic entry code, and convert the
code which uses the TIF specific helper functions to the new *_syscall_work()
helpers, which resolve either to the new SYSCALL_WORK mechanism for users of
the generic entry code or to the TIF based functions for the other
architectures.
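
For context, the *_syscall_work() helpers referenced above come from an earlier
patch in this series; a simplified sketch of their shape (illustrative only,
not part of this patch) is:

#ifdef CONFIG_GENERIC_ENTRY
/* Generic entry: operate on the new thread_info::syscall_work word. */
# define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
# define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
# define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
# define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#else
/* Other architectures: fall back to the existing TIF_* thread flags. */
# define set_syscall_work(fl) \
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
# define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
# define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
# define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif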

Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20201116174206.2639648-5-krisman@collabora.com
include/asm-generic/syscall.h
include/linux/entry-common.h
include/linux/seccomp.h
include/linux/thread_info.h
kernel/entry/common.c
kernel/fork.c
kernel/seccomp.c

diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index f3135e7343877b238c8fef99482653e006ea734a..524d8e68ff5ec5d49849da086d36d09da271c2c2 100644
@@ -135,7 +135,7 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
  * Returns the AUDIT_ARCH_* based on the system call convention in use.
  *
  * It's only valid to call this when @task is stopped on entry to a system
- * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
+ * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %SYSCALL_WORK_SECCOMP.
  *
  * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
  * provide an implementation of this.
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 3fe8f868f15ef496a8109db42267df76b3c029c8..fa3cdb102dbf0f0ec439f7c06f5c453c15a40d06 100644
 # define _TIF_SYSCALL_TRACEPOINT       (0)
 #endif
 
-#ifndef _TIF_SECCOMP
-# define _TIF_SECCOMP                  (0)
-#endif
-
 #ifndef _TIF_SYSCALL_AUDIT
 # define _TIF_SYSCALL_AUDIT            (0)
 #endif
@@ -49,7 +45,7 @@
 #endif
 
 #define SYSCALL_ENTER_WORK                                             \
-       (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP |       \
+       (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT  |                     \
         _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU |                   \
         ARCH_SYSCALL_ENTER_WORK)
 
@@ -64,7 +60,7 @@
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |                      \
         _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK)
 
-#define SYSCALL_WORK_ENTER     (0)
+#define SYSCALL_WORK_ENTER     (SYSCALL_WORK_SECCOMP)
 #define SYSCALL_WORK_EXIT      (0)
 
 /*
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 02aef2844c38ebb736b47ca689c5637a265e8e78..47763f3999f799031a3af4647c1fda8a53aa0aef 100644
@@ -42,7 +42,7 @@ struct seccomp {
 extern int __secure_computing(const struct seccomp_data *sd);
 static inline int secure_computing(void)
 {
-       if (unlikely(test_thread_flag(TIF_SECCOMP)))
+       if (unlikely(test_syscall_work(SECCOMP)))
                return  __secure_computing(NULL);
        return 0;
 }
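
secure_computing() stays the entry point for architectures that do not use the
generic entry code; on those, test_syscall_work(SECCOMP) falls back to the TIF
flag. An illustrative arch-side call site (not part of this patch) looks
roughly like:

	/* In an architecture's syscall trace/entry path: */
	if (secure_computing() == -1)
		return -1;	/* seccomp rejected the syscall */
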
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 0e9fb15d6b42e1ebd5b78e6489882ddcbadd33a6..a308ba4ef07b6deb7479e7ee492918fb2fe50199 100644
@@ -35,6 +35,12 @@ enum {
        GOOD_STACK,
 };
 
+enum syscall_work_bit {
+       SYSCALL_WORK_BIT_SECCOMP,
+};
+
+#define SYSCALL_WORK_SECCOMP           BIT(SYSCALL_WORK_BIT_SECCOMP)
+
 #include <asm/thread_info.h>
 
 #ifdef __KERNEL__
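
Illustration only (the work word itself was added by an earlier patch in this
series, and the exact layout is architecture specific): on CONFIG_GENERIC_ENTRY
architectures thread_info carries the new field, and BIT() turns the enum
position into the mask the entry code tests, so SYSCALL_WORK_SECCOMP is
currently 1UL << 0.

	struct thread_info {
		unsigned long	flags;		/* TIF_* flags */
		unsigned long	syscall_work;	/* SYSCALL_WORK_* flags */
		/* further arch specific members */
	};
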
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index e7a11e38daba2c70bc8cd8b9e2392c0b5e638aa4..5747a6eb2c48a2441b4093fb56a1f1cd98b9fac4 100644
@@ -54,7 +54,7 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
        }
 
        /* Do seccomp after ptrace, to catch any tracer changes. */
-       if (ti_work & _TIF_SECCOMP) {
+       if (work & SYSCALL_WORK_SECCOMP) {
                ret = __secure_computing(NULL);
                if (ret == -1L)
                        return ret;
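
The 'work' value tested here is the snapshot of thread_info::syscall_work taken
by the common entry code, wired up by a preceding patch in this series. A
simplified sketch of that caller (during the transition both the TIF based
ti_work word and the new syscall_work word are consulted):

	static __always_inline long
	__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
	{
		unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
		unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

		if (work & SYSCALL_WORK_ENTER || ti_work & SYSCALL_ENTER_WORK)
			syscall = syscall_trace_enter(regs, syscall, ti_work, work);

		return syscall;
	}
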
diff --git a/kernel/fork.c b/kernel/fork.c
index 32083db7a2a23e93bf2ed9786c92769a0e9c7136..bc5b1090f415c19e985931f784e4aec621fa7c85 100644
@@ -1625,7 +1625,7 @@ static void copy_seccomp(struct task_struct *p)
         * to manually enable the seccomp thread flag here.
         */
        if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
-               set_tsk_thread_flag(p, TIF_SECCOMP);
+               set_task_syscall_work(p, SECCOMP);
 #endif
 }
 
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 8ad7a293255a02de60665b68726576c3b07cdd9e..f67e92d11ad7c4c8060709dfb1c3ffa38951a7c6 100644
@@ -356,14 +356,14 @@ static inline void seccomp_assign_mode(struct task_struct *task,
 
        task->seccomp.mode = seccomp_mode;
        /*
-        * Make sure TIF_SECCOMP cannot be set before the mode (and
+        * Make sure SYSCALL_WORK_SECCOMP cannot be set before the mode (and
         * filter) is set.
         */
        smp_mb__before_atomic();
        /* Assume default seccomp processes want spec flaw mitigation. */
        if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
                arch_seccomp_spec_mitigate(task);
-       set_tsk_thread_flag(task, TIF_SECCOMP);
+       set_task_syscall_work(task, SECCOMP);
 }
 
 #ifdef CONFIG_SECCOMP_FILTER
@@ -929,7 +929,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
 
        /*
         * Make sure that any changes to mode from another thread have
-        * been seen after TIF_SECCOMP was seen.
+        * been seen after SYSCALL_WORK_SECCOMP was seen.
         */
        rmb();
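
Taken together, the two seccomp.c hunks keep the existing publish/consume
ordering, just expressed with the new flag. Schematically (illustrative, not
literal kernel code):

/*
 *   seccomp_assign_mode()  (writer)        __seccomp_filter()  (reader)
 *   ------------------------------------   ----------------------------------
 *   task->seccomp.mode = seccomp_mode;     test_syscall_work(SECCOMP) is true
 *   smp_mb__before_atomic();               rmb();
 *   set_task_syscall_work(task, SECCOMP);  reads of seccomp.mode and filter
 *
 * Once the reader observes the SYSCALL_WORK_SECCOMP bit, the barrier pairing
 * guarantees it also observes the mode and filter that were stored before the
 * bit was set.
 */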