}
 
 static long syscall_trace_enter(struct pt_regs *regs, long syscall,
-                               unsigned long ti_work)
+                               unsigned long ti_work, unsigned long work)
 {
        long ret = 0;
 
 static __always_inline long
 __syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
 {
+       unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
        unsigned long ti_work;
 
        ti_work = READ_ONCE(current_thread_info()->flags);
-       if (ti_work & SYSCALL_ENTER_WORK)
-               syscall = syscall_trace_enter(regs, syscall, ti_work);
+       if (work & SYSCALL_WORK_ENTER || ti_work & SYSCALL_ENTER_WORK)
+               syscall = syscall_trace_enter(regs, syscall, ti_work, work);
 
        return syscall;
 }
 }
 #endif
 
-static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
+static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work,
+                             unsigned long work)
 {
        bool step;
 
  */
 static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
 {
+       unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
        u32 cached_flags = READ_ONCE(current_thread_info()->flags);
        unsigned long nr = syscall_get_nr(current, regs);
 
         * enabled, we want to run them exactly once per syscall exit with
         * interrupts enabled.
         */
-       if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
-               syscall_exit_work(regs, cached_flags);
+       if (unlikely(work & SYSCALL_WORK_EXIT || cached_flags & SYSCALL_EXIT_WORK))
+               syscall_exit_work(regs, cached_flags, work);
 }
 
 __visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
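
The exit side mirrors the entry side, with the combined check wrapped in unlikely() since pending exit work is the uncommon case and the fast path should stay straight-line. A standalone sketch of that shape follows; as above, every name and value is a stand-in, and unlikely() is approximated with __builtin_expect so the snippet builds outside the kernel.

/*
 * Standalone sketch, not kernel code: the exit-side combined check.
 * Flag values, struct and function names below are stand-ins.
 */
#include <stdio.h>

#define SYSCALL_WORK_EXIT	0x1UL	/* stand-in: new-style exit work bits */
#define SYSCALL_EXIT_WORK	0x2UL	/* stand-in: legacy TIF-based exit mask */

/* Poor man's unlikely() so the sketch compiles outside the kernel. */
#define unlikely(x)	__builtin_expect(!!(x), 0)

struct thread_info_sketch {
	unsigned long flags;
	unsigned long syscall_work;
};

static void syscall_exit_work_sketch(unsigned long ti_work, unsigned long work)
{
	/* Placeholder for audit/tracepoint/single-step style exit work. */
	printf("exit work: ti_work=%#lx work=%#lx\n", ti_work, work);
}

static void syscall_exit_prepare_sketch(struct thread_info_sketch *ti)
{
	unsigned long work = ti->syscall_work;
	unsigned long cached_flags = ti->flags;

	/* Same shape as the patched exit check: either flag word diverts. */
	if (unlikely(work & SYSCALL_WORK_EXIT || cached_flags & SYSCALL_EXIT_WORK))
		syscall_exit_work_sketch(cached_flags, work);
}

int main(void)
{
	struct thread_info_sketch ti = { .flags = SYSCALL_EXIT_WORK, .syscall_work = 0 };

	syscall_exit_prepare_sketch(&ti);
	return 0;
}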