sched/fair: Rename SG_OVERLOAD to SG_OVERLOADED
author		Ingo Molnar <mingo@kernel.org>
		Thu, 28 Mar 2024 10:44:16 +0000 (11:44 +0100)
committer	Ingo Molnar <mingo@kernel.org>
		Thu, 28 Mar 2024 10:44:44 +0000 (11:44 +0100)
Follow the rename of the root_domain::overloaded flag.

Note that this also matches the SG_OVERUTILIZED flag better.
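
For context, a condensed sketch of how these group-status bits compose
(simplified from the update_sg_lb_stats()/update_sd_lb_stats() paths
touched below; 'i', 'nr_running' and 'rd' stand in for the real local
variables at each call site):

	int sg_status = 0;

	/* Per-CPU checks OR their findings into one status word ... */
	if (nr_running > 1)
		sg_status |= SG_OVERLOADED;	/* >1 runnable task on a CPU */
	if (cpu_overutilized(i))
		sg_status |= SG_OVERUTILIZED;	/* CPU is over-utilized */

	/* ... which is then tested bit by bit at the root domain: */
	set_rd_overloaded(rd, sg_status & SG_OVERLOADED);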

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Qais Yousef <qyousef@layalina.io>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/ZgVHq65XKsOZpfgK@gmail.com
kernel/sched/fair.c
kernel/sched/sched.h

index bf10665b6f4fb96f773b378c00424ef65c5346f0..839a97a4ba2a64a454b9569993febbf6750be1d8 100644
@@ -9961,7 +9961,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                sgs->sum_nr_running += nr_running;
 
                if (nr_running > 1)
-                       *sg_status |= SG_OVERLOAD;
+                       *sg_status |= SG_OVERLOADED;
 
                if (cpu_overutilized(i))
                        *sg_status |= SG_OVERUTILIZED;
@@ -9986,7 +9986,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                        /* Check for a misfit task on the cpu */
                        if (sgs->group_misfit_task_load < rq->misfit_task_load) {
                                sgs->group_misfit_task_load = rq->misfit_task_load;
-                               *sg_status |= SG_OVERLOAD;
+                               *sg_status |= SG_OVERLOADED;
                        }
                } else if (env->idle && sched_reduced_capacity(rq, env->sd)) {
                        /* Check for a task running on a CPU with reduced capacity */
@@ -10657,7 +10657,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 
        if (!env->sd->parent) {
                /* update overload indicator if we are at root domain */
-               set_rd_overloaded(env->dst_rq->rd, sg_status & SG_OVERLOAD);
+               set_rd_overloaded(env->dst_rq->rd, sg_status & SG_OVERLOADED);
 
                /* Update over-utilization (tipping point, U >= 0) indicator */
                set_rd_overutilized_status(env->dst_rq->rd,
index c7e7ae17c34041d6b206593fa9721dd1eb379c3f..07c6669b8250b9f68c74ffa975386a028c7304e1 100644
@@ -851,7 +851,7 @@ struct perf_domain {
 };
 
 /* Scheduling group status flags */
-#define SG_OVERLOAD            0x1 /* More than one runnable task on a CPU. */
+#define SG_OVERLOADED          0x1 /* More than one runnable task on a CPU. */
 #define SG_OVERUTILIZED                0x2 /* One or more CPUs are over-utilized. */
 
 /*
@@ -2541,7 +2541,7 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 
 #ifdef CONFIG_SMP
        if (prev_nr < 2 && rq->nr_running >= 2) {
-               set_rd_overloaded(rq->rd, SG_OVERLOAD);
+               set_rd_overloaded(rq->rd, SG_OVERLOADED);
        }
 #endif
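
For reference, a minimal sketch of the consumer this feeds. It assumes
the set_rd_overloaded() helper introduced earlier in this series wraps
root_domain::overloaded behind READ_ONCE()/WRITE_ONCE() with a
read-before-write check; treat this as a sketch, not the verbatim
helper:

	static inline void set_rd_overloaded(struct root_domain *rd, int status)
	{
		/* Skip the store when unchanged: avoids dirtying a shared cacheline. */
		if (READ_ONCE(rd->overloaded) != status)
			WRITE_ONCE(rd->overloaded, status);
	}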