#define vdso_cycles_ok arch_vdso_cycles_ok
/*
- * x86 specific delta calculation.
+ * x86 specific calculation of nanoseconds for the current cycle count
*
* The regular implementation assumes that clocksource reads are globally
* monotonic. The TSC can be slightly off across sockets which can cause
* the regular delta calculation (@cycles - @last) to return a huge time
* jump.
*
* Therefore it needs to be verified that @cycles are greater than
- * @last. If not then use @last, which is the base time of the current
- * conversion period.
+ * @vd->cycle_last. If not then use @vd->cycle_last, which is the base
+ * time of the current conversion period.
*
* This variant also uses a custom mask because while the clocksource mask of
* all the VDSO capable clocksources on x86 is U64_MAX, the above code
* declares everything with the MSB/Sign-bit set as invalid. Therefore the
* effective mask is S64_MAX.
*/
-static __always_inline
-u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
{
/*
* Due to the MSB/Sign-bit being used as invalid marker (see
* arch_vdso_cycles_valid() above), the effective mask is S64_MAX.
*/
- u64 delta = (cycles - last) & S64_MAX;
+ u64 delta = (cycles - vd->cycle_last) & S64_MAX;
/*
* Due to the above mentioned TSC wobbles, filter out negative motion.
* Per the above masking, the effective sign bit is now bit 62.
*/
if (unlikely(delta & (1ULL << 62)))
- return 0;
+ return base >> vd->shift;
- return delta * mult;
+ return ((delta * vd->mult) + base) >> vd->shift;
}
-#define vdso_calc_delta vdso_calc_delta
+#define vdso_calc_ns vdso_calc_ns
#endif /* !__ASSEMBLY__ */
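
To see the masking trick concretely: a minimal userspace sketch (not kernel code; S64_MAX and all values below are supplied locally for illustration) of how a slightly lagging TSC produces a wrapped delta whose bit 62 the filter catches:

#include <stdint.h>
#include <stdio.h>

#define S64_MAX 0x7fffffffffffffffULL

int main(void)
{
	uint64_t cycle_last = 1000;
	uint64_t cycles = 998;	/* this socket's TSC lags cycle_last */

	/* Unsigned subtraction wraps; the mask clears the real sign bit */
	uint64_t delta = (cycles - cycle_last) & S64_MAX;

	/* Bit 62 now acts as the effective sign bit */
	printf("delta = %#llx, negative motion = %d\n",
	       (unsigned long long)delta, !!(delta & (1ULL << 62)));
	return 0;
}

Running this prints negative motion = 1, which is the case where vdso_calc_ns() falls back to the base time.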
#include <vdso/datapage.h>
#include <vdso/helpers.h>
-#ifndef vdso_calc_delta
+#ifndef vdso_calc_ns
#ifdef VDSO_DELTA_NOMASK
-# define VDSO_DELTA_MASK(mask) U64_MAX
+# define VDSO_DELTA_MASK(vd) U64_MAX
#else
-# define VDSO_DELTA_MASK(mask) (mask)
+# define VDSO_DELTA_MASK(vd) (vd->mask)
+#endif
+
+#ifndef vdso_shift_ns
+static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
+{
+ return ns >> shift;
+}
#endif
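
The #ifndef guard above, paired with the #define at the end of the x86 block, is the override hook used throughout these headers: an arch header defines its own helper plus a same-named macro before the generic code is reached, so the default compiles out. A hypothetical sketch of the idiom (my_helper is illustrative, not a kernel symbol):

/* arch header, included first: */
static inline unsigned long long my_helper(unsigned long long x)
{
	return x + 1;	/* arch-specific variant */
}
#define my_helper my_helper

/* generic header: skipped because the macro is already defined */
#ifndef my_helper
static inline unsigned long long my_helper(unsigned long long x)
{
	return x;	/* default variant */
}
#endif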
/*
* Default implementation which works for all sane clocksources. That
* obviously excludes x86/TSC.
*/
-static __always_inline
-u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
{
- return ((cycles - last) & VDSO_DELTA_MASK(mask)) * mult;
-}
-#endif
+ u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
-#ifndef vdso_shift_ns
-static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
-{
- return ns >> shift;
+ return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
}
-#endif
+#endif /* vdso_calc_ns */
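
The mult/shift pair is fixed-point scaling: ns = (delta * mult) >> shift. The nsec base handed to vdso_calc_ns() is kept pre-shifted by the timekeeping core, which is why it is added before the final shift (and why the x86 early-return path shifts it down by vd->shift). A standalone sketch with made-up clocksource parameters (real values come from clocks_calc_mult_shift()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 10 MHz clocksource: 100 ns/cycle, scaled by 2^8 */
	uint32_t mult = 100 << 8;
	uint32_t shift = 8;

	uint64_t delta = 12345;			/* cycles since cycle_last */
	uint64_t base = 500ULL << shift;	/* nsec base, stored pre-shifted */

	uint64_t ns = ((delta * mult) + base) >> shift;

	/* 12345 cycles * 100 ns + 500 ns base = 1235000 ns */
	printf("ns = %llu\n", (unsigned long long)ns);
	return 0;
}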
#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
	return true;
}
#endif
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
- const struct vdso_data *vd;
const struct timens_offset *offs = &vdns->offset[clk];
const struct vdso_timestamp *vdso_ts;
- u64 cycles, last, ns;
+ const struct vdso_data *vd;
+ u64 cycles, ns;
u32 seq;
s64 sec;
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_ts->nsec;
- last = vd->cycle_last;
- ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns = vdso_shift_ns(ns, vd->shift);
+ ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
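
The enclosing do/while is the lockless seqcount read pattern behind vdso_read_begin()/vdso_read_retry(): fetch the sequence count, read the data speculatively, and retry if the count changed (an odd count means a writer is mid-update). A simplified standalone sketch of the pattern, assuming a single writer; the real helpers additionally use smp_rmb() ordering:

#include <stdint.h>

struct snapshot {
	uint32_t seq;		/* odd while the writer is updating */
	uint64_t value;
};

static uint64_t read_consistent(const volatile struct snapshot *s)
{
	uint32_t seq;
	uint64_t v;

	do {
		while ((seq = s->seq) & 1)
			;			/* writer active: wait */
		v = s->value;			/* speculative read */
	} while (s->seq != seq);		/* raced with writer: retry */

	return v;
}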
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
- u64 cycles, last, sec, ns;
+ u64 cycles, sec, ns;
u32 seq;
/* Allows the high resolution parts to be compiled out */
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_ts->nsec;
- last = vd->cycle_last;
- ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns = vdso_shift_ns(ns, vd->shift);
+ ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));