signal: Guarantee that SIGNAL_GROUP_EXIT is set on process exit
authorEric W. Biederman <ebiederm@xmission.com>
Tue, 21 Jun 2022 19:38:52 +0000 (14:38 -0500)
committerEric W. Biederman <ebiederm@xmission.com>
Wed, 20 Jul 2022 15:23:51 +0000 (10:23 -0500)
Track how many threads have not started exiting and when the last
thread starts exiting set SIGNAL_GROUP_EXIT.

This guarantees that SIGNAL_GROUP_EXIT will get set when a process
exits.  In practice this achieves nothing as glibc's implementation of
_exit calls sys_group_exit then sys_exit.  While glibc's implementation
of pthread_exit calls exit (which cleans up and calls _exit) if it is
the last thread, and sys_exit if it is not the last thread.

This means the only way the kernel might observe a process that does
not call exit_group is if the language runtime does not use glibc.

With more cleanups I hope to move the decrement of quick_threads
earlier.

Link: https://lkml.kernel.org/r/87bkukd4tc.fsf_-_@email.froward.int.ebiederm.org
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
include/linux/sched/signal.h
kernel/exit.c
kernel/fork.c

index cafbe03eed017224b790714dd9c2dfaa3ae16256..20099268fa257f40d8d45e7fa795cb8492b9ca34 100644 (file)
@@ -94,6 +94,7 @@ struct signal_struct {
        refcount_t              sigcnt;
        atomic_t                live;
        int                     nr_threads;
+       int                     quick_threads;
        struct list_head        thread_head;
 
        wait_queue_head_t       wait_chldexit;  /* for wait4() */
index a3929e5e6d6132885b9ccd535c09481132dcc107..d8ecbaa514f7cee069f89598a44f9af41667eac1 100644 (file)
@@ -733,11 +733,29 @@ static void check_stack_usage(void)
 static inline void check_stack_usage(void) {}
 #endif
 
+static void synchronize_group_exit(struct task_struct *tsk, long code)
+{
+       struct sighand_struct *sighand = tsk->sighand;
+       struct signal_struct *signal = tsk->signal;
+
+       spin_lock_irq(&sighand->siglock);
+       signal->quick_threads--;
+       if ((signal->quick_threads == 0) &&
+           !(signal->flags & SIGNAL_GROUP_EXIT)) {
+               signal->flags = SIGNAL_GROUP_EXIT;
+               signal->group_exit_code = code;
+               signal->group_stop_count = 0;
+       }
+       spin_unlock_irq(&sighand->siglock);
+}
+
 void __noreturn do_exit(long code)
 {
        struct task_struct *tsk = current;
        int group_dead;
 
+       synchronize_group_exit(tsk, code);
+
        WARN_ON(tsk->plug);
 
        kcov_task_exit(tsk);
index 9d44f2d46c6964d5cf7e29e06ad377b03fe25dc2..67813b25a56720e105fd45a21a74edba4eeab58d 100644 (file)
@@ -1692,6 +1692,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
                return -ENOMEM;
 
        sig->nr_threads = 1;
+       sig->quick_threads = 1;
        atomic_set(&sig->live, 1);
        refcount_set(&sig->sigcnt, 1);
 
@@ -2444,6 +2445,7 @@ static __latent_entropy struct task_struct *copy_process(
                        __this_cpu_inc(process_counts);
                } else {
                        current->signal->nr_threads++;
+                       current->signal->quick_threads++;
                        atomic_inc(&current->signal->live);
                        refcount_inc(&current->signal->sigcnt);
                        task_join_group_stop(p);