sched: Simplify syscalls
author: Peter Zijlstra <peterz@infradead.org>
Fri, 9 Jun 2023 14:54:54 +0000 (16:54 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 13 Sep 2023 13:01:19 +0000 (15:01 +0200)
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c

index fa57a560c52aab0f893d904f453c8a2c0c8f72d2..67c32c43a94b1081b79c9cae2c0b2815fc66fe83 100644 (file)
@@ -7506,6 +7506,21 @@ static struct task_struct *find_process_by_pid(pid_t pid)
        return pid ? find_task_by_vpid(pid) : current;
 }
 
+static struct task_struct *find_get_task(pid_t pid)
+{
+       struct task_struct *p;
+       guard(rcu)();
+
+       p = find_process_by_pid(pid);
+       if (likely(p))
+               get_task_struct(p);
+
+       return p;
+}
+
+DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
+            find_get_task(pid), pid_t pid)
+
 /*
  * sched_setparam() passes in -1 for its policy, to let the functions
  * it calls know not to change it.
@@ -7543,14 +7558,11 @@ static void __setscheduler_params(struct task_struct *p,
 static bool check_same_owner(struct task_struct *p)
 {
        const struct cred *cred = current_cred(), *pcred;
-       bool match;
+       guard(rcu)();
 
-       rcu_read_lock();
        pcred = __task_cred(p);
-       match = (uid_eq(cred->euid, pcred->euid) ||
-                uid_eq(cred->euid, pcred->uid));
-       rcu_read_unlock();
-       return match;
+       return (uid_eq(cred->euid, pcred->euid) ||
+               uid_eq(cred->euid, pcred->uid));
 }
 
 /*
@@ -7962,27 +7974,17 @@ static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
        struct sched_param lparam;
-       struct task_struct *p;
-       int retval;
 
        if (!param || pid < 0)
                return -EINVAL;
        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
                return -EFAULT;
 
-       rcu_read_lock();
-       retval = -ESRCH;
-       p = find_process_by_pid(pid);
-       if (likely(p))
-               get_task_struct(p);
-       rcu_read_unlock();
-
-       if (likely(p)) {
-               retval = sched_setscheduler(p, policy, &lparam);
-               put_task_struct(p);
-       }
+       CLASS(find_get_task, p)(pid);
+       if (!p)
+               return -ESRCH;
 
-       return retval;
+       return sched_setscheduler(p, policy, &lparam);
 }
 
 /*
@@ -8078,7 +8080,6 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
                               unsigned int, flags)
 {
        struct sched_attr attr;
-       struct task_struct *p;
        int retval;
 
        if (!uattr || pid < 0 || flags)
@@ -8093,21 +8094,14 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
        if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
                attr.sched_policy = SETPARAM_POLICY;
 
-       rcu_read_lock();
-       retval = -ESRCH;
-       p = find_process_by_pid(pid);
-       if (likely(p))
-               get_task_struct(p);
-       rcu_read_unlock();
+       CLASS(find_get_task, p)(pid);
+       if (!p)
+               return -ESRCH;
 
-       if (likely(p)) {
-               if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
-                       get_params(p, &attr);
-               retval = sched_setattr(p, &attr);
-               put_task_struct(p);
-       }
+       if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
+               get_params(p, &attr);
 
-       return retval;
+       return sched_setattr(p, &attr);
 }
 
 /**
@@ -8125,16 +8119,17 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
        if (pid < 0)
                return -EINVAL;
 
-       retval = -ESRCH;
-       rcu_read_lock();
+       guard(rcu)();
        p = find_process_by_pid(pid);
-       if (p) {
-               retval = security_task_getscheduler(p);
-               if (!retval)
-                       retval = p->policy
-                               | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
+       if (!p)
+               return -ESRCH;
+
+       retval = security_task_getscheduler(p);
+       if (!retval) {
+               retval = p->policy;
+               if (p->sched_reset_on_fork)
+                       retval |= SCHED_RESET_ON_FORK;
        }
-       rcu_read_unlock();
        return retval;
 }
 
@@ -8155,30 +8150,23 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        if (!param || pid < 0)
                return -EINVAL;
 
-       rcu_read_lock();
-       p = find_process_by_pid(pid);
-       retval = -ESRCH;
-       if (!p)
-               goto out_unlock;
+       scoped_guard (rcu) {
+               p = find_process_by_pid(pid);
+               if (!p)
+                       return -ESRCH;
 
-       retval = security_task_getscheduler(p);
-       if (retval)
-               goto out_unlock;
+               retval = security_task_getscheduler(p);
+               if (retval)
+                       return retval;
 
-       if (task_has_rt_policy(p))
-               lp.sched_priority = p->rt_priority;
-       rcu_read_unlock();
+               if (task_has_rt_policy(p))
+                       lp.sched_priority = p->rt_priority;
+       }
 
        /*
         * This one might sleep, we cannot do it with a spinlock held ...
         */
-       retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
-
-       return retval;
-
-out_unlock:
-       rcu_read_unlock();
-       return retval;
+       return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
 }
 
 /*
@@ -8238,39 +8226,33 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
            usize < SCHED_ATTR_SIZE_VER0 || flags)
                return -EINVAL;
 
-       rcu_read_lock();
-       p = find_process_by_pid(pid);
-       retval = -ESRCH;
-       if (!p)
-               goto out_unlock;
+       scoped_guard (rcu) {
+               p = find_process_by_pid(pid);
+               if (!p)
+                       return -ESRCH;
 
-       retval = security_task_getscheduler(p);
-       if (retval)
-               goto out_unlock;
+               retval = security_task_getscheduler(p);
+               if (retval)
+                       return retval;
 
-       kattr.sched_policy = p->policy;
-       if (p->sched_reset_on_fork)
-               kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-       get_params(p, &kattr);
-       kattr.sched_flags &= SCHED_FLAG_ALL;
+               kattr.sched_policy = p->policy;
+               if (p->sched_reset_on_fork)
+                       kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+               get_params(p, &kattr);
+               kattr.sched_flags &= SCHED_FLAG_ALL;
 
 #ifdef CONFIG_UCLAMP_TASK
-       /*
-        * This could race with another potential updater, but this is fine
-        * because it'll correctly read the old or the new value. We don't need
-        * to guarantee who wins the race as long as it doesn't return garbage.
-        */
-       kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
-       kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
+               /*
+                * This could race with another potential updater, but this is fine
+                * because it'll correctly read the old or the new value. We don't need
+                * to guarantee who wins the race as long as it doesn't return garbage.
+                */
+               kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
+               kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
 #endif
-
-       rcu_read_unlock();
+       }
 
        return sched_attr_copy_to_user(uattr, &kattr, usize);
-
-out_unlock:
-       rcu_read_unlock();
-       return retval;
 }
 
 #ifdef CONFIG_SMP