timekeeping: Use seqcount_latch_t
Author:     Ahmed S. Darwish <a.darwish@linutronix.de>
AuthorDate: Thu, 27 Aug 2020 11:40:41 +0000 (13:40 +0200)
Commit:     Peter Zijlstra <peterz@infradead.org>
CommitDate: Thu, 10 Sep 2020 09:19:29 +0000 (11:19 +0200)
Latch sequence counters are a multiversion concurrency control mechanism
where the seqcount_t counter even/odd value is used to switch between
two data storage copies. This allows the seqcount_t read path to safely
interrupt its write side critical section (e.g. from NMIs).
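
A minimal sketch of the pattern (illustrative struct and function
names; the seqcount APIs are the ones discussed below):

  struct latch_example {
          seqcount_latch_t        seq;
          u64                     data[2];
  };

  /* Write side: must be serialized externally, e.g. by a raw spinlock. */
  static void latch_update(struct latch_example *l, u64 val)
  {
          raw_write_seqcount_latch(&l->seq);      /* readers move to data[1] */
          l->data[0] = val;
          raw_write_seqcount_latch(&l->seq);      /* readers move back to data[0] */
          l->data[1] = val;
  }

  /* Read side: safe even from an NMI that interrupted the writer. */
  static u64 latch_read(struct latch_example *l)
  {
          unsigned int seq;
          u64 val;

          do {
                  seq = raw_read_seqcount_latch(&l->seq);
                  val = l->data[seq & 0x01];
          } while (read_seqcount_latch_retry(&l->seq, seq));

          return val;
  }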

Initially, latch sequence counters were implemented as a single write
function, raw_write_seqcount_latch(), on top of plain seqcount_t. The
read path was expected to use the plain seqcount_t raw_read_seqcount()
accessor.

A specialized read function, raw_read_seqcount_latch(), was added later
and became the standard way for latch read paths. Having dedicated read
and write APIs meant that latch sequence counters were effectively a
data type of their own -- one that merely overloaded plain seqcount_t.
The seqcount_latch_t data type was therefore introduced in seqlock.h.

Use that new data type instead of seqcount_raw_spinlock_t. This ensures
that only latch-safe APIs can be used with the sequence counter.
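
For context, the write side in timekeeping.c already follows the latch
protocol. A paraphrased sketch of update_fast_timekeeper() (the function
referenced by the comment above struct tk_fast):

  static void update_fast_timekeeper(const struct tk_read_base *tkr,
                                     struct tk_fast *tkf)
  {
          struct tk_read_base *base = tkf->base;

          /* Force readers off to base[1] */
          raw_write_seqcount_latch(&tkf->seq);

          /* Update base[0] */
          memcpy(base, tkr, sizeof(*base));

          /* Force readers back to base[0] */
          raw_write_seqcount_latch(&tkf->seq);

          /* Update base[1] */
          memcpy(base + 1, tkr, sizeof(*base));
  }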

Note that using seqcount_raw_spinlock_t was of little benefit in the
first place: only the "raw_" subset of the seqcount_t APIs is used in
timekeeping.c, and that subset exists precisely for contexts where
lockdep cannot be used. The raison d'ĂȘtre of seqcount_LOCKTYPE_t --
verifying that the seqcount_t writer serialization lock is held --
could therefore never be exercised here.

References: 0c3351d451ae ("seqlock: Use raw_ prefix instead of _no_lockdep")
References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200827114044.11173-6-a.darwish@linutronix.de

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4c47f388a83f17860fdafa3229bba0cc605ec25a..999c981ae766fa41f880cdf34956af58d0cb04d4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -64,7 +64,7 @@ static struct timekeeper shadow_timekeeper;
  * See @update_fast_timekeeper() below.
  */
 struct tk_fast {
-       seqcount_raw_spinlock_t seq;
+       seqcount_latch_t        seq;
        struct tk_read_base     base[2];
 };
 
@@ -81,13 +81,13 @@ static struct clocksource dummy_clock = {
 };
 
 static struct tk_fast tk_fast_mono ____cacheline_aligned = {
-       .seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
+       .seq     = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
        .base[0] = { .clock = &dummy_clock, },
        .base[1] = { .clock = &dummy_clock, },
 };
 
 static struct tk_fast tk_fast_raw  ____cacheline_aligned = {
-       .seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
+       .seq     = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
        .base[0] = { .clock = &dummy_clock, },
        .base[1] = { .clock = &dummy_clock, },
 };
@@ -467,7 +467,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
                                        tk_clock_read(tkr),
                                        tkr->cycle_last,
                                        tkr->mask));
-       } while (read_seqcount_retry(&tkf->seq, seq));
+       } while (read_seqcount_latch_retry(&tkf->seq, seq));
 
        return now;
 }
@@ -533,7 +533,7 @@ static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
                                        tk_clock_read(tkr),
                                        tkr->cycle_last,
                                        tkr->mask));
-       } while (read_seqcount_retry(&tkf->seq, seq));
+       } while (read_seqcount_latch_retry(&tkf->seq, seq));
 
        return now;
 }