        struct rq *rq = cpu_rq(cpu);
        struct task_struct *curr = rq->curr;
        struct rq_flags rf;
+       unsigned long thermal_pressure;
 
        arch_scale_freq_tick();
        sched_clock_tick();
@@ ... @@ void scheduler_tick(void)
        rq_lock(rq, &rf);
 
        update_rq_clock(rq);
+       thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
+       update_thermal_load_avg(rq_clock_task(rq), rq, thermal_pressure);
        curr->sched_class->task_tick(rq, curr, 0);
        calc_global_load_tick(rq);
        psi_task_tick(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
@@ ... @@ static inline bool others_have_blocked(struct rq *rq)
        if (READ_ONCE(rq->avg_dl.util_avg))
                return true;
 
+       if (thermal_load_avg(rq))
+               return true;
+
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        if (READ_ONCE(rq->avg_irq.util_avg))
                return true;
@@ ... @@ static bool __update_blocked_others(struct rq *rq, bool *done)
 {
        const struct sched_class *curr_class;
        u64 now = rq_clock_pelt(rq);
+       unsigned long thermal_pressure;
        bool decayed;
 
        /*
@@ ... @@ static bool __update_blocked_others(struct rq *rq, bool *done)
         */
        curr_class = rq->curr->sched_class;
 
+       thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
+
        decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
                  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
+                 update_thermal_load_avg(rq_clock_task(rq), rq, thermal_pressure) |
                  update_irq_load_avg(rq, 0);
 
        if (others_have_blocked(rq))