sched: Simplify set_user_nice()
author Peter Zijlstra <peterz@infradead.org>
Fri, 9 Jun 2023 18:52:55 +0000 (20:52 +0200)
committer Ingo Molnar <mingo@kernel.org>
Wed, 13 Sep 2023 13:01:14 +0000 (15:01 +0200)
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
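
For context, the guard used in the hunks below is built on the compiler's cleanup attribute (see include/linux/cleanup.h): the guard's constructor takes the lock when the guard variable enters scope, and its destructor releases it on every exit path, which is why the early return in set_user_nice() no longer needs the out_unlock label. The following is a minimal userspace sketch of the same idea, not the kernel's actual macros; pthread_mutex_t stands in for the runqueue lock, and the lock_guard/demo names are illustrative only.

/*
 * Minimal userspace sketch of the guard pattern: the constructor takes
 * the lock, the cleanup attribute runs the destructor on every exit
 * path, so early returns need no unlock label.
 */
#include <pthread.h>
#include <stdio.h>

struct lock_guard {
	pthread_mutex_t *lock;
};

static inline struct lock_guard lock_guard_ctor(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (struct lock_guard){ .lock = lock };
}

static inline void lock_guard_dtor(struct lock_guard *g)
{
	pthread_mutex_unlock(g->lock);
}

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo(int early)
{
	/* Rough analogue of: CLASS(task_rq_lock, rq_guard)(p); */
	struct lock_guard g __attribute__((cleanup(lock_guard_dtor))) =
		lock_guard_ctor(&demo_lock);

	if (early)
		return;		/* destructor still unlocks demo_lock */

	printf("did the work under the lock\n");
}				/* destructor unlocks here as well */

int main(void)
{
	demo(1);
	demo(0);
	return 0;
}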
kernel/sched/core.c
kernel/sched/sched.h

index 2299a5cfbfb9ed250695edb75d977edbe61eaf43..fa57a560c52aab0f893d904f453c8a2c0c8f72d2 100644 (file)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7187,9 +7187,8 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
 void set_user_nice(struct task_struct *p, long nice)
 {
        bool queued, running;
-       int old_prio;
-       struct rq_flags rf;
        struct rq *rq;
+       int old_prio;
 
        if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
                return;
@@ -7197,7 +7196,9 @@ void set_user_nice(struct task_struct *p, long nice)
         * We have to be careful, if called from sys_setpriority(),
         * the task might be in the middle of scheduling on another CPU.
         */
-       rq = task_rq_lock(p, &rf);
+       CLASS(task_rq_lock, rq_guard)(p);
+       rq = rq_guard.rq;
+
        update_rq_clock(rq);
 
        /*
@@ -7208,8 +7209,9 @@ void set_user_nice(struct task_struct *p, long nice)
         */
        if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
                p->static_prio = NICE_TO_PRIO(nice);
-               goto out_unlock;
+               return;
        }
+
        queued = task_on_rq_queued(p);
        running = task_current(rq, p);
        if (queued)
@@ -7232,9 +7234,6 @@ void set_user_nice(struct task_struct *p, long nice)
         * lowered its priority, then reschedule its CPU:
         */
        p->sched_class->prio_changed(rq, p, old_prio);
-
-out_unlock:
-       task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);
 
index 04846272409cc00f20f24d8cc6456554d67aba0a..68768f47ccb7a1f27a766fcf57e6a879ce3aaa8e 100644 (file)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1658,6 +1658,11 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
        raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
+DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
+                   _T->rq = task_rq_lock(_T->lock, &_T->rf),
+                   task_rq_unlock(_T->rq, _T->lock, &_T->rf),
+                   struct rq *rq; struct rq_flags rf)
+
 static inline void
 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
        __acquires(rq->lock)