#endif
 
 /*
- * TIF flags handled in syscall_enter_from_user_mode()
+ * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
  */
-#ifndef ARCH_SYSCALL_ENTER_WORK
-# define ARCH_SYSCALL_ENTER_WORK       (0)
+#ifndef ARCH_SYSCALL_WORK_ENTER
+# define ARCH_SYSCALL_WORK_ENTER       (0)
 #endif
 
-#define SYSCALL_ENTER_WORK ARCH_SYSCALL_ENTER_WORK
-
 /*
- * TIF flags handled in syscall_exit_to_user_mode()
+ * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
  */
-#ifndef ARCH_SYSCALL_EXIT_WORK
-# define ARCH_SYSCALL_EXIT_WORK                (0)
+#ifndef ARCH_SYSCALL_WORK_EXIT
+# define ARCH_SYSCALL_WORK_EXIT                (0)
 #endif
 
-#define SYSCALL_EXIT_WORK ARCH_SYSCALL_EXIT_WORK
-
 #define SYSCALL_WORK_ENTER     (SYSCALL_WORK_SECCOMP |                 \
                                 SYSCALL_WORK_SYSCALL_TRACEPOINT |      \
                                 SYSCALL_WORK_SYSCALL_TRACE |           \
                                 SYSCALL_WORK_SYSCALL_EMU |             \
-                                SYSCALL_WORK_SYSCALL_AUDIT)
+                                SYSCALL_WORK_SYSCALL_AUDIT |           \
+                                ARCH_SYSCALL_WORK_ENTER)
 #define SYSCALL_WORK_EXIT      (SYSCALL_WORK_SYSCALL_TRACEPOINT |      \
                                 SYSCALL_WORK_SYSCALL_TRACE |           \
-                                SYSCALL_WORK_SYSCALL_AUDIT)
+                                SYSCALL_WORK_SYSCALL_AUDIT |           \
+                                ARCH_SYSCALL_WORK_EXIT)
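
With ARCH_SYSCALL_WORK_ENTER/EXIT folded into the common masks above, an
architecture that needs extra syscall work only has to define those macros
(typically in its asm/entry-common.h, which linux/entry-common.h pulls in
before this block) and the bits are OR-ed in automatically. A minimal sketch,
assuming a hypothetical SYSCALL_WORK_ARCH_FOO bit allocated in enum
syscall_work_bit:

	/* asm/entry-common.h - illustrative sketch, not part of this patch */
	#define ARCH_SYSCALL_WORK_ENTER		SYSCALL_WORK_ARCH_FOO
	#define ARCH_SYSCALL_WORK_EXIT		SYSCALL_WORK_ARCH_FOO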
 
 /*
  * TIF flags handled in exit_to_user_mode_loop()
  *
  * It handles the following work items:
  *
- *  1) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
- *     __secure_computing(), trace_sys_enter()
+ *  1) syscall_work flag dependent invocations of
+ *     arch_syscall_enter_tracehook(), __secure_computing(), trace_sys_enter()
  *  2) Invocation of audit_syscall_entry()
  */
 long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
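
For context, this declaration is part of the generic entry API: an
architecture's syscall handler calls it (or the combined
syscall_enter_from_user_mode()) before dispatching the syscall and
syscall_exit_to_user_mode() afterwards. A rough sketch of such a handler,
loosely modeled on x86's do_syscall_64(); the function name, register use and
dispatch are illustrative, and instrumentation markers are omitted:

	/* arch syscall handler - illustrative sketch only */
	__visible noinstr void do_syscall(struct pt_regs *regs, unsigned long nr)
	{
		nr = syscall_enter_from_user_mode(regs, nr);	/* SYSCALL_WORK_ENTER handling */

		if (likely(nr < NR_syscalls))			/* -1 from the work path fails this check */
			regs->ax = sys_call_table[nr](regs);	/* hypothetical dispatch */

		syscall_exit_to_user_mode(regs);		/* SYSCALL_WORK_EXIT handling */
	}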
 
 }
 
 static long syscall_trace_enter(struct pt_regs *regs, long syscall,
-                               unsigned long ti_work, unsigned long work)
+                               unsigned long work)
 {
        long ret = 0;
 
 __syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
 {
        unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
-       unsigned long ti_work;
 
-       ti_work = READ_ONCE(current_thread_info()->flags);
-       if (work & SYSCALL_WORK_ENTER || ti_work & SYSCALL_ENTER_WORK)
-               syscall = syscall_trace_enter(regs, syscall, ti_work, work);
+       if (work & SYSCALL_WORK_ENTER)
+               syscall = syscall_trace_enter(regs, syscall, work);
 
        return syscall;
 }
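
The SYSCALL_WORK_ENTER test above only fires once a bit is set in
thread_info->syscall_work, which is done via the set_syscall_work() /
set_task_syscall_work() helpers rather than the TIF machinery. The actual
users are converted in the other patches of this series; the example below is
only an illustration of how a ptrace-style feature arms the syscall work:

	/* illustrative only - mirrors what the ptrace conversion patch does */
	/* requires <linux/thread_info.h> and <linux/sched.h> */
	static void example_enable_syscall_tracing(struct task_struct *child)
	{
		/* arms SYSCALL_WORK_SYSCALL_TRACE for @child */
		set_task_syscall_work(child, SYSCALL_TRACE);
	}

	static void example_disable_syscall_tracing(struct task_struct *child)
	{
		clear_task_syscall_work(child, SYSCALL_TRACE);
	}
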
 }
 #endif
 
-static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work,
-                             unsigned long work)
+static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
 {
        bool step;
 
 static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
 {
        unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
-       u32 cached_flags = READ_ONCE(current_thread_info()->flags);
        unsigned long nr = syscall_get_nr(current, regs);
 
        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
         * enabled, we want to run them exactly once per syscall exit with
         * interrupts enabled.
         */
-       if (unlikely(work & SYSCALL_WORK_EXIT || cached_flags & SYSCALL_EXIT_WORK))
-               syscall_exit_work(regs, cached_flags, work);
+       if (unlikely(work & SYSCALL_WORK_EXIT))
+               syscall_exit_work(regs, work);
 }
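
The exit path now mirrors the entry path: syscall_exit_work() receives only
the syscall_work snapshot and dispatches on it. A condensed sketch of that
dispatch (illustrative, not the verbatim function body):

	/* condensed sketch of syscall_exit_work() after this change */
	static void syscall_exit_work_sketch(struct pt_regs *regs, unsigned long work)
	{
		bool step;

		audit_syscall_exit(regs);

		if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
			trace_sys_exit(regs, syscall_get_return_value(current, regs));

		step = report_single_step(work);
		if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
			arch_syscall_exit_tracehook(regs, step);
	}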
 
 __visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)