sched: tasks: Use sequence counter with associated spinlock
author	Ahmed S. Darwish <a.darwish@linutronix.de>
	Mon, 20 Jul 2020 15:55:19 +0000 (17:55 +0200)
committer	Peter Zijlstra <peterz@infradead.org>
	Wed, 29 Jul 2020 14:14:26 +0000 (16:14 +0200)
A sequence counter write side critical section must be protected by some
form of locking to serialize writers. A plain seqcount_t does not record
which lock must be held when entering a write side critical section.

Use the new seqcount_spinlock_t data type, which allows a spinlock to be
associated with the sequence counter. This enables lockdep to verify that
the spinlock used for writer serialization is held when the write side
critical section is entered.

If lockdep is disabled, this lock association is compiled out and adds
neither storage nor runtime overhead.
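
For illustration only (not part of this patch), a minimal sketch of how a
seqcount_spinlock_t is declared, initialized and used; the structure and
function names below are made up for the example:

	#include <linux/spinlock.h>
	#include <linux/seqlock.h>

	struct foo {
		spinlock_t		lock;
		/* Associated with ->lock; lockdep checks it on writes. */
		seqcount_spinlock_t	seq;
		u64			a, b;
	};

	static void foo_init(struct foo *f)
	{
		spin_lock_init(&f->lock);
		seqcount_spinlock_init(&f->seq, &f->lock);
	}

	static void foo_update(struct foo *f, u64 a, u64 b)
	{
		spin_lock(&f->lock);		/* serialize writers */
		write_seqcount_begin(&f->seq);	/* lockdep: is ->lock held? */
		f->a = a;
		f->b = b;
		write_seqcount_end(&f->seq);
		spin_unlock(&f->lock);
	}

	static u64 foo_read_sum(struct foo *f)
	{
		unsigned int seq;
		u64 sum;

		do {	/* lockless read side, retry if a writer interfered */
			seq = read_seqcount_begin(&f->seq);
			sum = f->a + f->b;
		} while (read_seqcount_retry(&f->seq, seq));

		return sum;
	}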

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200720155530.1173732-14-a.darwish@linutronix.de
include/linux/sched.h
init/init_task.c
kernel/fork.c

index 8d1de021b3150f48bd6818b401a0355d707e2ec8..9a9d8263962da9f9725b2634e96f564cb71aacf3 100644 (file)
@@ -1050,7 +1050,7 @@ struct task_struct {
        /* Protected by ->alloc_lock: */
        nodemask_t                      mems_allowed;
        /* Seqence number to catch updates: */
-       seqcount_t                      mems_allowed_seq;
+       seqcount_spinlock_t             mems_allowed_seq;
        int                             cpuset_mem_spread_rotor;
        int                             cpuset_slab_spread_rotor;
 #endif
index 15089d15010ab9bce6e1b34ddc8e9133011e774a..94fe3ba1bb600cf40d07ea7af04f327fb24a034b 100644 (file)
@@ -154,7 +154,8 @@ struct task_struct init_task
        .trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
 #endif
 #ifdef CONFIG_CPUSETS
-       .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
+       .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
+                                                &init_task.alloc_lock),
 #endif
 #ifdef CONFIG_RT_MUTEXES
        .pi_waiters     = RB_ROOT_CACHED,
index 70d9d0a4de2aa17e0d003d642b52edbee285855d..fc72f09a61b2b7d5c2bda8f46f01a77c93cebde3 100644 (file)
@@ -2032,7 +2032,7 @@ static __latent_entropy struct task_struct *copy_process(
 #ifdef CONFIG_CPUSETS
        p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
        p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
-       seqcount_init(&p->mems_allowed_seq);
+       seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
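
For context (not part of this diff, and simplified: local variable
declarations and IRQ handling are omitted), the write side of
mems_allowed_seq is already entered under the task's alloc_lock via
task_lock(), which is exactly the association declared above:

	/* Writer side, roughly as in cpuset_change_task_nodemask(): */
	task_lock(tsk);					/* takes tsk->alloc_lock */
	write_seqcount_begin(&tsk->mems_allowed_seq);	/* lockdep-verified */
	tsk->mems_allowed = *newmems;
	write_seqcount_end(&tsk->mems_allowed_seq);
	task_unlock(tsk);

	/* Reader side, roughly as in read_mems_allowed_begin()/retry(): */
	do {
		seq = read_seqcount_begin(&current->mems_allowed_seq);
		nodes = current->mems_allowed;
	} while (read_seqcount_retry(&current->mems_allowed_seq, seq));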