sched/fair: Remove rq->load
author Dietmar Eggemann <dietmar.eggemann@arm.com>
Wed, 24 Apr 2019 08:45:56 +0000 (09:45 +0100)
committer Ingo Molnar <mingo@kernel.org>
Mon, 3 Jun 2019 09:49:37 +0000 (11:49 +0200)
The CFS class is the only scheduling class that maintains and uses the
CPU-wide load (rq->load(.weight)). The last use of this load, in CFS's
set_next_entity(), can be replaced by the load of the CFS class itself
(rq->cfs.load(.weight)).
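
For illustration only (not part of the patch): a minimal user-space
sketch, using simplified stand-ins for struct load_weight and the
update_load_add()/update_load_sub() helpers. Because
account_entity_enqueue()/account_entity_dequeue() applied the same
weight update to rq->load and to the root cfs_rq's load for every
parentless entity, the two aggregates always hold the same value,
which is why set_next_entity() can read rq->cfs.load.weight instead:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's struct load_weight and the
 * update_load_add()/update_load_sub() helpers. */
struct load_weight { unsigned long weight; };

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
}

static void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
}

int main(void)
{
        /* rq_load models the removed rq->load; cfs_load models the
         * root cfs_rq's load (rq->cfs.load). */
        struct load_weight rq_load = { 0 }, cfs_load = { 0 };
        /* Weights of three parentless entities (nice 0, 5 and -20). */
        unsigned long weights[] = { 1024, 335, 88761 };
        int i;

        /* Enqueue: both aggregates received identical updates for
         * every parentless entity, so they can never diverge ... */
        for (i = 0; i < 3; i++) {
                update_load_add(&cfs_load, weights[i]);
                update_load_add(&rq_load, weights[i]);
        }
        assert(rq_load.weight == cfs_load.weight);

        /* ... and the slice_max condition in set_next_entity() gives
         * the same answer either way, e.g. for a nice-0 entity: */
        printf("load %lu, track slice_max: %d\n",
               cfs_load.weight, cfs_load.weight >= 2 * 1024);

        /* Dequeue keeps them equal as well. */
        update_load_sub(&cfs_load, weights[1]);
        update_load_sub(&rq_load, weights[1]);
        assert(rq_load.weight == cfs_load.weight);

        return 0;
}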

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190424084556.604-1-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 678bfb9bd87f7c40cbc7533d5659c4fdc8add6ef..150043e1d716f1d0e91913bcd89b839437eb04d0 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -656,8 +656,6 @@ do {                                                                        \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 
        P(nr_running);
-       SEQ_printf(m, "  .%-30s: %lu\n", "load",
-                  rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8691a8fffe402fccdb37f3dfdc92ab43cdcf66cc..08b1cb06f9685e454fdf2b7dc3eb3fbf8953d5ba 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2686,8 +2686,6 @@ static void
 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        update_load_add(&cfs_rq->load, se->load.weight);
-       if (!parent_entity(se))
-               update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
        if (entity_is_task(se)) {
                struct rq *rq = rq_of(cfs_rq);
@@ -2703,8 +2701,6 @@ static void
 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        update_load_sub(&cfs_rq->load, se->load.weight);
-       if (!parent_entity(se))
-               update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
        if (entity_is_task(se)) {
                account_numa_dequeue(rq_of(cfs_rq), task_of(se));
@@ -4100,7 +4096,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         * least twice that of our own weight (i.e. dont track it
         * when there are only lesser-weight tasks around):
         */
-       if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+       if (schedstat_enabled() &&
+           rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
                schedstat_set(se->statistics.slice_max,
                        max((u64)schedstat_val(se->statistics.slice_max),
                            se->sum_exec_runtime - se->prev_sum_exec_runtime));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b52ed1ada0be8bea55bf983028fc896d72b6b473..c308410675edd1b9c8388fe97bc6e21a4d3b7257 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -830,8 +830,6 @@ struct rq {
        atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
-       /* capture load from *all* tasks on this CPU: */
-       struct load_weight      load;
        unsigned long           nr_load_updates;
        u64                     nr_switches;