sched: Simplify migration_cpu_stop()
author		Valentin Schneider <valentin.schneider@arm.com>
		Thu, 25 Feb 2021 09:22:30 +0000 (10:22 +0100)
committer	Ingo Molnar <mingo@kernel.org>
		Sat, 6 Mar 2021 11:40:21 +0000 (12:40 +0100)
When ->stop_pending is set, only the stopper can uninstall
p->migration_pending. This allows simplifying a few ifs, because:

  (pending != NULL) => (pending == p->migration_pending)

Also, the fatty comment above affine_move_task() probably needs a bit
of gardening.
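
To make the implication concrete, the toy standalone C model below mirrors
the new pending handling (the struct, the helper and the assert() are
illustrative stand-ins, not the kernel's types or API): once only the
stopper may uninstall the pointer, a non-NULL pending handed to it must
still equal p->migration_pending, so the equality test can be dropped.

  #include <assert.h>
  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  /* Illustrative stand-ins for the kernel structures involved. */
  struct set_affinity_pending { int refs; };

  struct task_struct {
          struct set_affinity_pending *migration_pending;
  };

  /*
   * Toy model of the simplified stopper path: while ->stop_pending is set,
   * only the stopper may uninstall the pointer, so a non-NULL @pending
   * implies pending == p->migration_pending and no equality check is needed.
   */
  static bool stopper_clears_pending(struct task_struct *p,
                                     struct set_affinity_pending *pending)
  {
          /* (pending != NULL) => (pending == p->migration_pending) */
          assert(!pending || pending == p->migration_pending);

          if (pending) {
                  p->migration_pending = NULL;  /* unconditional, no equality test */
                  return true;                  /* caller then signals completion */
          }
          return false;
  }

  int main(void)
  {
          struct set_affinity_pending req = { .refs = 1 };
          struct task_struct p = { .migration_pending = &req };

          if (stopper_clears_pending(&p, &req))
                  printf("pending cleared: %d\n", p.migration_pending == NULL);

          return 0;
  }

The WARN_ON_ONCE() added by the patch plays the role of the assert() here:
it documents and checks the invariant instead of silently tolerating a
mismatched pointer.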

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 98191218d891d8986f28fcc51c6724640bac7796..f9dfb349146e900869a2c1816408664f285434c5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1926,6 +1926,12 @@ static int migration_cpu_stop(void *data)
        raw_spin_lock(&p->pi_lock);
        rq_lock(rq, &rf);
 
+       /*
+        * If we were passed a pending, then ->stop_pending was set, thus
+        * p->migration_pending must have remained stable.
+        */
+       WARN_ON_ONCE(pending && pending != p->migration_pending);
+
        /*
         * If task_rq(p) != rq, it cannot be migrated here, because we're
         * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
@@ -1936,8 +1942,7 @@ static int migration_cpu_stop(void *data)
                        goto out;
 
                if (pending) {
-                       if (p->migration_pending == pending)
-                               p->migration_pending = NULL;
+                       p->migration_pending = NULL;
                        complete = true;
                }
 
@@ -1976,8 +1981,7 @@ static int migration_cpu_stop(void *data)
                 * somewhere allowed, we're done.
                 */
                if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
-                       if (p->migration_pending == pending)
-                               p->migration_pending = NULL;
+                       p->migration_pending = NULL;
                        complete = true;
                        goto out;
                }
@@ -2165,16 +2169,21 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  *
  * (1) In the cases covered above. There is one more where the completion is
  * signaled within affine_move_task() itself: when a subsequent affinity request
- * cancels the need for an active migration. Consider:
+ * occurs after the stopper bailed out due to the targeted task still being
+ * Migrate-Disable. Consider:
  *
  *     Initial conditions: P0->cpus_mask = [0, 1]
  *
- *     P0@CPU0            P1                             P2
- *
- *     migrate_disable();
- *     <preempted>
+ *     CPU0              P1                            P2
+ *     <P0>
+ *       migrate_disable();
+ *       <preempted>
  *                        set_cpus_allowed_ptr(P0, [1]);
  *                          <blocks>
+ *     <migration/0>
+ *       migration_cpu_stop()
+ *         is_migration_disabled()
+ *           <bails>
  *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
  *                                                         <signal completion>
  *                          <awakes>
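
As a companion to the updated diagram, here is a toy, single-threaded C
walk-through of that timeline (all names and the flattened sequencing are
illustrative stand-ins, not the kernel's code): migration/0 bails because
P0 is still migrate-disabled, and the completion is instead signalled by
P2's later set_cpus_allowed_ptr() call, which finds P0 already on an
allowed CPU.

  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative stand-in state for task P0 in the scenario above. */
  struct toy_task {
          bool migration_disabled;      /* set by P0's migrate_disable() */
          bool pending_installed;       /* installed by P1's affinity request */
          bool completion_signalled;    /* what P1 blocks on */
  };

  /* migration/0: gives up while the target is still migrate-disabled. */
  static void stopper_step(struct toy_task *p)
  {
          if (p->migration_disabled)
                  return;               /* <bails>, pending stays installed */
          p->pending_installed = false;
          p->completion_signalled = true;
  }

  /* P2: a later request whose mask already covers the task's current CPU. */
  static void affinity_request_step(struct toy_task *p, bool on_allowed_cpu)
  {
          if (on_allowed_cpu && p->pending_installed) {
                  p->pending_installed = false;
                  p->completion_signalled = true; /* <signal completion>, P1 awakes */
          }
  }

  int main(void)
  {
          struct toy_task p0 = {
                  .migration_disabled = true,
                  .pending_installed  = true,
          };

          stopper_step(&p0);                    /* migration/0 bails out */
          affinity_request_step(&p0, true);     /* P2's [0, 1] request completes P1's */

          printf("completion signalled: %d\n", p0.completion_signalled);
          return 0;
  }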