}
        }
 
-       /*
-        * If this is a voluntary sleep, dequeue will have taken care
-        * of the outgoing TSK_ONCPU alongside TSK_RUNNING already. We
-        * only need to deal with it during preemption.
-        */
-       if (sleep)
-               return;
-
        if (prev->pid) {
-               psi_flags_change(prev, TSK_ONCPU, 0);
+               int clear = TSK_ONCPU, set = 0;
+
+               /*
+                * When we're going to sleep, psi_dequeue() lets us handle
+                * TSK_RUNNING and TSK_IOWAIT here, where we can combine it
+                * with TSK_ONCPU and save walking common ancestors twice.
+                */
+               if (sleep) {
+                       clear |= TSK_RUNNING;
+                       if (prev->in_iowait)
+                               set |= TSK_IOWAIT;
+               }
+
+               psi_flags_change(prev, clear, set);
 
                iter = NULL;
                while ((group = iterate_groups(prev, &iter)) && group != common)
-                       psi_group_change(group, cpu, TSK_ONCPU, 0, true);
+                       psi_group_change(group, cpu, clear, set, true);
+
+               /*
+                * TSK_ONCPU is handled up to the common ancestor. If we're tasked
+                * with dequeuing too, finish that for the rest of the hierarchy.
+                */
+               if (sleep) {
+                       clear &= ~TSK_ONCPU;
+                       for (; group; group = iterate_groups(prev, &iter))
+                               psi_group_change(group, cpu, clear, set, true);
+               }
        }
 }
 
 
 
 static inline void psi_dequeue(struct task_struct *p, bool sleep)
 {
-       int clear = TSK_RUNNING, set = 0;
+       int clear = TSK_RUNNING;

        if (static_branch_likely(&psi_disabled))
                return;

-       if (!sleep) {
-               if (p->in_memstall)
-                       clear |= TSK_MEMSTALL;
-       } else {
-               /*
-                * When a task sleeps, schedule() dequeues it before
-                * switching to the next one. Merge the clearing of
-                * TSK_RUNNING and TSK_ONCPU to save an unnecessary
-                * psi_task_change() call in psi_sched_switch().
-                */
-               clear |= TSK_ONCPU;
+       /*
+        * A voluntary sleep is a dequeue immediately followed by a task
+        * switch. To avoid walking common ancestors twice, psi_task_switch()
+        * handles clearing TSK_RUNNING (and setting TSK_IOWAIT) for us
+        * together with TSK_ONCPU. Nothing to do here in that case.
+        */
+       if (sleep)
+               return;

-               if (p->in_iowait)
-                       set |= TSK_IOWAIT;
-       }
+       if (p->in_memstall)
+               clear |= TSK_MEMSTALL;

-       psi_task_change(p, clear, set);
+       psi_task_change(p, clear, 0);
 }
 
 static inline void psi_ttwu_dequeue(struct task_struct *p)