sched: Simplify sched_move_task()
author     Peter Zijlstra <peterz@infradead.org>
           Fri, 9 Jun 2023 18:41:09 +0000 (20:41 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Wed, 13 Sep 2023 13:01:38 +0000 (15:01 +0200)
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c

index d298176367f775b734affa61e34c890ef737069d..a3f4fb8a684190c6fa8c607803d82fd5cb026b46 100644
@@ -10437,17 +10437,18 @@ void sched_move_task(struct task_struct *tsk)
        int queued, running, queue_flags =
                DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
        struct task_group *group;
-       struct rq_flags rf;
        struct rq *rq;
 
-       rq = task_rq_lock(tsk, &rf);
+       CLASS(task_rq_lock, rq_guard)(tsk);
+       rq = rq_guard.rq;
+
        /*
         * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
         * group changes.
         */
        group = sched_get_task_group(tsk);
        if (group == tsk->sched_task_group)
-               goto unlock;
+               return;
 
        update_rq_clock(rq);
 
@@ -10472,9 +10473,6 @@ void sched_move_task(struct task_struct *tsk)
                 */
                resched_curr(rq);
        }
-
-unlock:
-       task_rq_unlock(rq, tsk, &rf);
 }
 
 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
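
For context: the CLASS(task_rq_lock, rq_guard)(tsk) line relies on a guard class for task_rq_lock(). The sketch below shows roughly how such a class is declared with the DEFINE_LOCK_GUARD_1() helper from <linux/cleanup.h>; the actual definition lives in kernel/sched/sched.h (added earlier in this series) and is not part of this diff, so treat the details here as illustrative rather than authoritative.

  /*
   * Sketch of the guard class behind CLASS(task_rq_lock, ...).
   * DEFINE_LOCK_GUARD_1() generates a small struct holding the lock
   * argument plus the extra members listed at the end, a constructor
   * that runs the _lock expression (taking the lock), and a destructor
   * that runs the _unlock expression when the guard variable goes out
   * of scope.
   */
  #include <linux/cleanup.h>

  DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
                      _T->rq = task_rq_lock(_T->lock, &_T->rf),  /* constructor: lock the task's rq  */
                      task_rq_unlock(_T->rq, _T->lock, &_T->rf), /* destructor: unlock on scope exit */
                      struct rq *rq; struct rq_flags rf)         /* state carried inside the guard   */

With something like this in place, declaring CLASS(task_rq_lock, rq_guard)(tsk) acquires the runqueue lock and exposes the locked rq as rq_guard.rq; the destructor is attached via the compiler's cleanup attribute, so every exit from sched_move_task(), including the early return that replaces "goto unlock", drops the lock without an explicit task_rq_unlock() call.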