Diffstat (limited to 'cpukit/score/src/kern_tc.c')
-rw-r--r--  cpukit/score/src/kern_tc.c  676
1 file changed, 420 insertions(+), 256 deletions(-)
diff --git a/cpukit/score/src/kern_tc.c b/cpukit/score/src/kern_tc.c
index 1b65cf41ee..56ec4751ce 100644
--- a/cpukit/score/src/kern_tc.c
+++ b/cpukit/score/src/kern_tc.c
@@ -1,4 +1,6 @@
/*-
+ * SPDX-License-Identifier: Beerware
+ *
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
@@ -7,7 +9,6 @@
* ----------------------------------------------------------------------------
*
* Copyright (c) 2011, 2015, 2016 The FreeBSD Foundation
- * All rights reserved.
*
* Portions of this software were developed by Julien Ridoux at the University
* of Melbourne under sponsorship from the FreeBSD Foundation.
@@ -44,9 +45,8 @@
#include <rtems/score/watchdogimpl.h>
#endif /* __rtems__ */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_tc.c 324528 2017-10-11 11:03:11Z kib $");
+__FBSDID("$FreeBSD$");
-#include "opt_compat.h"
#include "opt_ntp.h"
#include "opt_ffclock.h"
@@ -79,16 +79,7 @@ ISR_LOCK_DEFINE(, _Timecounter_Lock, "Timecounter")
_ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, lock_context)
#define hz rtems_clock_get_ticks_per_second()
#define printf(...)
-#define bcopy(x, y, z) memcpy(y, x, z);
#define log(...)
-static inline int
-builtin_fls(int x)
-{
- return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
-}
-#define fls(x) builtin_fls(x)
-/* FIXME: https://devel.rtems.org/ticket/2348 */
-#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
static inline void
atomic_thread_fence_acq(void)
@@ -117,6 +108,24 @@ atomic_store_rel_int(Atomic_Uint *i, u_int val)
_Atomic_Store_uint(i, val, ATOMIC_ORDER_RELEASE);
}
+
+static inline void *
+atomic_load_ptr(void *ptr)
+{
+
+ return ((void *)_Atomic_Load_uintptr(ptr, ATOMIC_ORDER_RELAXED));
+}
+
+static Timecounter_NTP_update_second _Timecounter_NTP_update_second;
+
+void
+_Timecounter_Set_NTP_update_second(Timecounter_NTP_update_second handler)
+{
+
+ _Timecounter_NTP_update_second = handler;
+}
+
+#define ntp_update_second(a, b) (*ntp_update_second_handler)(a, b)
#endif /* __rtems__ */
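
A hypothetical usage sketch, not part of this commit: the windup code below
invokes the handler as (*handler)(&th->th_adjustment, &bt.sec), so a client
of the new RTEMS hook could register itself as follows, assuming the
Timecounter_NTP_update_second typedef matches that call site:

	/* Handler signature inferred from the call site in _Timecounter_Windup(). */
	static void
	my_ntp_update_second(int64_t *adjustment, time_t *newsec)
	{
		/* Apply a clock discipline here; changing *newsec steps the
		   boot time so the uptime stays monotonic. */
		(void)adjustment;
		(void)newsec;
	}

	static void
	install_ntp_hook(void)
	{
		_Timecounter_Set_NTP_update_second(my_ntp_update_second);
	}
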
/*
@@ -158,6 +167,7 @@ struct timehands {
struct timecounter *th_counter;
int64_t th_adjustment;
uint64_t th_scale;
+ uint32_t th_large_delta;
uint32_t th_offset_count;
struct bintime th_offset;
struct bintime th_bintime;
@@ -173,6 +183,40 @@ struct timehands {
struct timehands *th_next;
};
+#ifndef __rtems__
+static struct timehands ths[16] = {
+ [0] = {
+ .th_counter = &dummy_timecounter,
+ .th_scale = (uint64_t)-1 / 1000000,
+ .th_large_delta = 1000000,
+ .th_offset = { .sec = 1 },
+ .th_generation = 1,
+ },
+};
+
+static struct timehands *volatile timehands = &ths[0];
+struct timecounter *timecounter = &dummy_timecounter;
+static struct timecounter *timecounters = &dummy_timecounter;
+
+/* Mutex to protect the timecounter list. */
+static struct mtx tc_lock;
+MTX_SYSINIT(tc_lock, &tc_lock, "tc", MTX_DEF);
+
+int tc_min_ticktock_freq = 1;
+#else /* __rtems__ */
+/*
+ * In FreeBSD, the timehands count is a tuning option from two to 16.  The
+ * tuning option was added since it is inexpensive and some FreeBSD users
+ * asked for it so they could experiment.  The default value is two.  One
+ * system which did not work with two timehands was a system with one
+ * processor and a specific PPS device.
+ *
+ * For RTEMS, in uniprocessor configurations, just use one timehand since the
+ * update is done with interrupt disabled.
+ *
+ * In SMP configurations, use a fixed set of two timehands until someone
+ * reports an issue.
+ */
#if defined(RTEMS_SMP)
static struct timehands th0;
static struct timehands th1 = {
@@ -183,7 +227,8 @@ static struct timehands th0 = {
.th_counter = &dummy_timecounter,
.th_scale = (uint64_t)-1 / 1000000,
.th_offset = { .sec = 1 },
- .th_generation = 1,
+ .th_large_delta = 1000000,
+ .th_generation = UINT_MAX,
#ifdef __rtems__
.th_bintime = { .sec = TOD_SECONDS_1970_THROUGH_1988 },
.th_microtime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
@@ -199,10 +244,6 @@ static struct timehands th0 = {
static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
-#ifndef __rtems__
-static struct timecounter *timecounters = &dummy_timecounter;
-
-int tc_min_ticktock_freq = 1;
#endif /* __rtems__ */
#ifndef __rtems__
@@ -214,17 +255,33 @@ volatile int32_t time_uptime = 1;
#endif /* __rtems__ */
#ifndef __rtems__
+/*
+ * The system time is always computed by summing the estimated boot time and the
+ * system uptime. The timehands track boot time, but it changes when the system
+ * time is set by the user, stepped by ntpd or adjusted when resuming. It
+ * is set to new_time - uptime.
+ */
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
-SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
- NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");
+SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
+ sysctl_kern_boottime, "S,timeval",
+ "Estimated system boottime");
-SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
-static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");
+SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "");
+static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc,
+ CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "");
static int timestepwarnings;
-SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
+SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RWTUN,
&timestepwarnings, 0, "Log time steps");
+static int timehands_count = 2;
+SYSCTL_INT(_kern_timecounter, OID_AUTO, timehands_count,
+ CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+ &timehands_count, 0, "Count of timehands in rotation");
+
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
@@ -242,6 +299,7 @@ SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
volatile int rtc_generation = 1;
static int tc_chosen; /* Non-zero if a specific tc was chosen via sysctl. */
+static char tc_from_tunable[16];
#endif /* __rtems__ */
static void tc_windup(struct bintime *new_boottimebin);
@@ -253,6 +311,7 @@ static void _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
void dtrace_getnanotime(struct timespec *tsp);
+void dtrace_getnanouptime(struct timespec *tsp);
#ifndef __rtems__
static int
@@ -262,7 +321,8 @@ sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
getboottime(&boottime);
-#ifndef __mips__
+/* i386 is the only arch which uses a 32-bit time_t */
+#ifdef __amd64__
#ifdef SCTL_MASK32
int tv[2];
@@ -317,20 +377,85 @@ tc_delta(struct timehands *th)
* the comment in <sys/time.h> for a description of these 12 functions.
*/
-#ifdef FFCLOCK
-void
-fbclock_binuptime(struct bintime *bt)
+static __inline void
+bintime_off(struct bintime *bt, u_int off)
{
struct timehands *th;
- unsigned int gen;
+ struct bintime *btp;
+ uint64_t scale, x;
+#ifndef __rtems__
+ u_int delta, gen, large_delta;
+#else /* __rtems__ */
+ uint32_t delta, large_delta;
+ u_int gen;
+#endif /* __rtems__ */
do {
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- bintime_addx(bt, th->th_scale * tc_delta(th));
+ btp = (struct bintime *)((vm_offset_t)th + off);
+ *bt = *btp;
+ scale = th->th_scale;
+ delta = tc_delta(th);
+ large_delta = th->th_large_delta;
atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
} while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+
+ if (__predict_false(delta >= large_delta)) {
+ /* Avoid overflow for scale * delta. */
+ x = (scale >> 32) * delta;
+ bt->sec += x >> 32;
+ bintime_addx(bt, x << 32);
+ bintime_addx(bt, (scale & 0xffffffff) * delta);
+ } else {
+ bintime_addx(bt, scale * delta);
+ }
+}
+#define GETTHBINTIME(dst, member) \
+do { \
+ _Static_assert(_Generic(((struct timehands *)NULL)->member, \
+ struct bintime: 1, default: 0) == 1, \
+ "struct timehands member is not of struct bintime type"); \
+ bintime_off(dst, __offsetof(struct timehands, member)); \
+} while (0)
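
A minimal standalone sketch, not from this commit, of the overflow split in
bintime_off() above: once delta >= th_large_delta, the full product
scale * delta no longer fits in 64 bits, so it is decomposed into a high
part that carries into the seconds and fractional parts that stay below
2^64 (the real code propagates carries via bintime_addx()):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint64_t scale = (uint64_t)1 << 63;	/* 0.5 s per counter tick */
		uint32_t delta = 3;			/* scale * delta would wrap */
		uint64_t x, sec, frac;

		/* scale * delta == (scale >> 32) * delta * 2^32
		 *                  + (scale & 0xffffffff) * delta */
		x = (scale >> 32) * delta;
		sec = x >> 32;				/* carry into the seconds */
		frac = (x << 32) + (scale & 0xffffffff) * delta;
		printf("sec += %ju, frac += %ju\n", (uintmax_t)sec, (uintmax_t)frac);
		/* Prints "sec += 1, frac += 9223372036854775808": 1.5 seconds. */
		return (0);
	}
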
+
+static __inline void
+getthmember(void *out, size_t out_size, u_int off)
+{
+ struct timehands *th;
+ u_int gen;
+
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ memcpy(out, (char *)th + off, out_size);
+ atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
+ } while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+}
+#define GETTHMEMBER(dst, member) \
+do { \
+ _Static_assert(_Generic(*dst, \
+ __typeof(((struct timehands *)NULL)->member): 1, \
+ default: 0) == 1, \
+ "*dst and struct timehands member have different types"); \
+ getthmember(dst, sizeof(*dst), __offsetof(struct timehands, \
+ member)); \
+} while (0)
+
+#ifdef FFCLOCK
+void
+fbclock_binuptime(struct bintime *bt)
+{
+
+ GETTHBINTIME(bt, th_offset);
}
void
@@ -354,16 +479,8 @@ fbclock_microuptime(struct timeval *tvp)
void
fbclock_bintime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_bintime);
}
void
@@ -387,116 +504,88 @@ fbclock_microtime(struct timeval *tvp)
void
fbclock_getbinuptime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_offset);
}
void
fbclock_getnanouptime(struct timespec *tsp)
{
- struct timehands *th;
- unsigned int gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timespec(&th->th_offset, tsp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
void
fbclock_getmicrouptime(struct timeval *tvp)
{
- struct timehands *th;
- unsigned int gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timeval(&th->th_offset, tvp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timeval(&bt, tvp);
}
void
fbclock_getbintime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_bintime);
}
void
fbclock_getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
}
void
fbclock_getmicrotime(struct timeval *tvp)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tvp = th->th_microtime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tvp, th_microtime);
}
#else /* !FFCLOCK */
+
void
binuptime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_offset);
}
#ifdef __rtems__
sbintime_t
_Timecounter_Sbinuptime(void)
{
struct timehands *th;
- uint32_t gen;
sbintime_t sbt;
+ uint64_t scale;
+ uint32_t delta;
+ uint32_t large_delta;
+ u_int gen;
do {
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
sbt = bttosbt(th->th_offset);
- sbt += (th->th_scale * tc_delta(th)) >> 32;
+ scale = th->th_scale;
+ delta = tc_delta(th);
+ large_delta = th->th_large_delta;
atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
} while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+
+ if (__predict_false(delta >= large_delta)) {
+ /* Avoid overflow for scale * delta. */
+ sbt += (scale >> 32) * delta;
+ sbt += ((scale & 0xffffffff) * delta) >> 32;
+ } else {
+ sbt += (scale * delta) >> 32;
+ }
return (sbt);
}
@@ -523,16 +612,8 @@ microuptime(struct timeval *tvp)
void
bintime(struct bintime *bt)
{
- struct timehands *th;
- u_int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_bintime);
}
void
@@ -556,85 +637,47 @@ microtime(struct timeval *tvp)
void
getbinuptime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_offset);
}
void
getnanouptime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timespec(&th->th_offset, tsp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
void
getmicrouptime(struct timeval *tvp)
{
- struct timehands *th;
- uint32_t gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timeval(&th->th_offset, tvp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timeval(&bt, tvp);
}
void
getbintime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_bintime);
}
void
getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
}
void
getmicrotime(struct timeval *tvp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tvp = th->th_microtime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tvp, th_microtime);
}
#endif /* FFCLOCK */
@@ -650,15 +693,8 @@ getboottime(struct timeval *boottime)
void
getboottimebin(struct bintime *boottimebin)
{
- struct timehands *th;
- u_int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *boottimebin = th->th_boottime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(boottimebin, th_boottime);
}
#ifdef FFCLOCK
@@ -1175,15 +1211,22 @@ getmicrotime(struct timeval *tvp)
void
dtrace_getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
+}
+
+/*
+ * This is a clone of getnanouptime used for time since boot.
+ * The dtrace_ prefix prevents fbt from creating probes for
+ * it, so it yields an uptime that can be safely used in all fbt probes.
+ */
+void
+dtrace_getnanouptime(struct timespec *tsp)
+{
+ struct bintime bt;
+
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
#endif /* __rtems__ */
@@ -1364,26 +1407,32 @@ tc_init(struct timecounter *tc)
tc->tc_quality);
}
- tc->tc_next = timecounters;
- timecounters = tc;
/*
* Set up sysctl tree for this counter.
*/
tc_root = SYSCTL_ADD_NODE_WITH_LABEL(NULL,
SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
- CTLFLAG_RW, 0, "timecounter description", "timecounter");
+ CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "timecounter description", "timecounter");
SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
"mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
"mask for implemented bits");
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
- "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
- sysctl_kern_timecounter_get, "IU", "current timecounter value");
+ "counter", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, tc,
+ sizeof(*tc), sysctl_kern_timecounter_get, "IU",
+ "current timecounter value");
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
- "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
- sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
+ "frequency", CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, tc,
+ sizeof(*tc), sysctl_kern_timecounter_freq, "QU",
+ "timecounter frequency");
SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
"quality", CTLFLAG_RD, &(tc->tc_quality), 0,
"goodness of time counter");
+
+ mtx_lock(&tc_lock);
+ tc->tc_next = timecounters;
+ timecounters = tc;
+
/*
* Do not automatically switch if the current tc was specifically
* chosen. Never automatically use a timecounter with negative quality.
@@ -1391,21 +1440,31 @@ tc_init(struct timecounter *tc)
* worse since this timecounter may not be monotonic.
*/
if (tc_chosen)
- return;
+ goto unlock;
if (tc->tc_quality < 0)
- return;
-#endif /* __rtems__ */
+ goto unlock;
+ if (tc_from_tunable[0] != '\0' &&
+ strcmp(tc->tc_name, tc_from_tunable) == 0) {
+ tc_chosen = 1;
+ tc_from_tunable[0] = '\0';
+ } else {
+ if (tc->tc_quality < timecounter->tc_quality)
+ goto unlock;
+ if (tc->tc_quality == timecounter->tc_quality &&
+ tc->tc_frequency < timecounter->tc_frequency)
+ goto unlock;
+ }
+ (void)tc->tc_get_timecount(tc);
+ timecounter = tc;
+unlock:
+ mtx_unlock(&tc_lock);
+#else /* __rtems__ */
if (tc->tc_quality < timecounter->tc_quality)
return;
if (tc->tc_quality == timecounter->tc_quality &&
tc->tc_frequency < timecounter->tc_frequency)
return;
-#ifndef __rtems__
- (void)tc->tc_get_timecount(tc);
- (void)tc->tc_get_timecount(tc);
-#endif /* __rtems__ */
timecounter = tc;
-#ifdef __rtems__
tc_windup(NULL);
#endif /* __rtems__ */
}
@@ -1493,6 +1552,40 @@ _Timecounter_Set_clock(const struct bintime *_bt,
}
/*
+ * Recalculate the scaling factor. We want the number of 1/2^64
+ * fractions of a second per period of the hardware counter, taking
+ * into account the th_adjustment factor which the NTP PLL/adjtime(2)
+ * processing provides us with.
+ *
+ * The th_adjustment is nanoseconds per second with 32 bit binary
+ * fraction and we want 64 bit binary fraction of second:
+ *
+ * x = a * 2^32 / 10^9 = a * 4.294967296
+ *
+ * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
+ * we can only multiply by about 850 without overflowing, that
+ * leaves no suitably precise fractions for multiply before divide.
+ *
+ * Divide before multiply with a fraction of 2199/512 results in a
+ * systematic undercompensation of 10PPM of th_adjustment. On a
+ * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
+ *
+ * We happily sacrifice the lowest of the 64 bits of our result
+ * to the goddess of code clarity.
+ */
+static void
+recalculate_scaling_factor_and_large_delta(struct timehands *th)
+{
+ uint64_t scale;
+
+ scale = (uint64_t)1 << 63;
+ scale += (th->th_adjustment / 1024) * 2199;
+ scale /= th->th_counter->tc_frequency;
+ th->th_scale = scale * 2;
+ th->th_large_delta = MIN(((uint64_t)1 << 63) / scale, UINT_MAX);
+}
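
A quick check of the arithmetic in the comment above, not part of the
commit: the exact conversion factor is 2^32 / 10^9 = 4.294967296.  The code
computes (th_adjustment / 1024) * 2199 and the final "scale * 2" doubles it,
which amounts to multiplying by 2199/512 = 4.294921875.  The relative
shortfall is 1 - 4.294921875/4.294967296 ~= 1.06e-5, i.e. the documented
~10PPM systematic undercompensation; on a worst-case 5000PPM adjustment
that is about 5000PPM * 1.06e-5 ~= 0.05PPM, matching the comment.
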
+
+/*
* Initialize the next struct timehands in the ring and make
* it the active timehands. Along the way we might switch to a different
* timecounter and/or do seconds processing in NTP. Slightly magic.
@@ -1513,11 +1606,17 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
{
struct bintime bt;
+ struct timecounter *tc;
struct timehands *th, *tho;
- uint64_t scale;
- uint32_t delta, ncount, ogen;
+ uint32_t delta, ncount;
+#if defined(RTEMS_SMP)
+ u_int ogen;
+#endif
int i;
time_t t;
+#ifdef __rtems__
+ Timecounter_NTP_update_second ntp_update_second_handler;
+#endif
/*
* Make the next timehands a copy of the current one, but do
@@ -1531,14 +1630,12 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
tho = timehands;
#if defined(RTEMS_SMP)
th = tho->th_next;
-#else
- th = tho;
-#endif
ogen = th->th_generation;
th->th_generation = 0;
atomic_thread_fence_rel();
-#if defined(RTEMS_SMP)
- bcopy(tho, th, offsetof(struct timehands, th_generation));
+ memcpy(th, tho, offsetof(struct timehands, th_generation));
+#else
+ th = tho;
#endif
if (new_boottimebin != NULL)
th->th_boottime = *new_boottimebin;
@@ -1548,9 +1645,10 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
* changing timecounters, a counter value from the new timecounter.
* Update the offset fields accordingly.
*/
+ tc = atomic_load_ptr(&timecounter);
delta = tc_delta(th);
- if (th->th_counter != timecounter)
- ncount = timecounter->tc_get_timecount(timecounter);
+ if (th->th_counter != tc)
+ ncount = tc->tc_get_timecount(tc);
else
ncount = 0;
#ifdef FFCLOCK
@@ -1584,7 +1682,7 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
/*
- * Deal with NTP second processing. The for loop normally
+ * Deal with NTP second processing. The loop normally
* iterates at most once, but in extreme situations it might
* keep NTP sane if timeouts are not run for several seconds.
* At boot, the time step can be large when the TOD hardware
@@ -1594,69 +1692,57 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
*/
bt = th->th_offset;
bintime_add(&bt, &th->th_boottime);
+#ifdef __rtems__
+ ntp_update_second_handler = _Timecounter_NTP_update_second;
+ if (ntp_update_second_handler != NULL) {
+#endif /* __rtems__ */
i = bt.sec - tho->th_microtime.tv_sec;
- if (i > LARGE_STEP)
- i = 2;
- for (; i > 0; i--) {
- t = bt.sec;
- ntp_update_second(&th->th_adjustment, &bt.sec);
- if (bt.sec != t)
- th->th_boottime.sec += bt.sec - t;
+ if (i > 0) {
+ if (i > LARGE_STEP)
+ i = 2;
+
+ do {
+ t = bt.sec;
+ ntp_update_second(&th->th_adjustment, &bt.sec);
+ if (bt.sec != t)
+ th->th_boottime.sec += bt.sec - t;
+ --i;
+ } while (i > 0);
+
+ recalculate_scaling_factor_and_large_delta(th);
}
+#ifdef __rtems__
+ }
+#endif /* __rtems__ */
+
/* Update the UTC timestamps used by the get*() functions. */
th->th_bintime = bt;
bintime2timeval(&bt, &th->th_microtime);
bintime2timespec(&bt, &th->th_nanotime);
/* Now is a good time to change timecounters. */
- if (th->th_counter != timecounter) {
+ if (th->th_counter != tc) {
#ifndef __rtems__
#ifndef __arm__
- if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
+ if ((tc->tc_flags & TC_FLAGS_C2STOP) != 0)
cpu_disable_c2_sleep++;
if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
cpu_disable_c2_sleep--;
#endif
#endif /* __rtems__ */
- th->th_counter = timecounter;
+ th->th_counter = tc;
th->th_offset_count = ncount;
#ifndef __rtems__
- tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
- (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
+ tc_min_ticktock_freq = max(1, tc->tc_frequency /
+ (((uint64_t)tc->tc_counter_mask + 1) / 3));
#endif /* __rtems__ */
+ recalculate_scaling_factor_and_large_delta(th);
#ifdef FFCLOCK
ffclock_change_tc(th);
#endif
}
- /*-
- * Recalculate the scaling factor. We want the number of 1/2^64
- * fractions of a second per period of the hardware counter, taking
- * into account the th_adjustment factor which the NTP PLL/adjtime(2)
- * processing provides us with.
- *
- * The th_adjustment is nanoseconds per second with 32 bit binary
- * fraction and we want 64 bit binary fraction of second:
- *
- * x = a * 2^32 / 10^9 = a * 4.294967296
- *
- * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
- * we can only multiply by about 850 without overflowing, that
- * leaves no suitably precise fractions for multiply before divide.
- *
- * Divide before multiply with a fraction of 2199/512 results in a
- * systematic undercompensation of 10PPM of th_adjustment. On a
- * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
- *
- * We happily sacrifice the lowest of the 64 bits of our result
- * to the goddess of code clarity.
- *
- */
- scale = (uint64_t)1 << 63;
- scale += (th->th_adjustment / 1024) * 2199;
- scale /= th->th_counter->tc_frequency;
- th->th_scale = scale * 2;
-
+#if defined(RTEMS_SMP)
/*
* Now that the struct timehands is again consistent, set the new
* generation number, making sure to not make it zero.
@@ -1664,6 +1750,9 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
if (++ogen == 0)
ogen = 1;
atomic_store_rel_int(&th->th_generation, ogen);
+#else
+ atomic_store_rel_int(&th->th_generation, th->th_generation + 1);
+#endif
/* Go live with the new struct timehands. */
#ifdef FFCLOCK
@@ -1701,23 +1790,28 @@ sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
struct timecounter *newtc, *tc;
int error;
+ mtx_lock(&tc_lock);
tc = timecounter;
strlcpy(newname, tc->tc_name, sizeof(newname));
+ mtx_unlock(&tc_lock);
error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
if (error != 0 || req->newptr == NULL)
return (error);
+
+ mtx_lock(&tc_lock);
/* Record that the tc in use now was specifically chosen. */
tc_chosen = 1;
- if (strcmp(newname, tc->tc_name) == 0)
+ if (strcmp(newname, tc->tc_name) == 0) {
+ mtx_unlock(&tc_lock);
return (0);
+ }
for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
if (strcmp(newname, newtc->tc_name) != 0)
continue;
/* Warm up new timecounter. */
(void)newtc->tc_get_timecount(newtc);
- (void)newtc->tc_get_timecount(newtc);
timecounter = newtc;
@@ -1729,16 +1823,16 @@ sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
* use any locking and that it can be called in hard interrupt
* context via 'tc_windup()'.
*/
- return (0);
+ break;
}
- return (EINVAL);
+ mtx_unlock(&tc_lock);
+ return (newtc != NULL ? 0 : EINVAL);
}
-
-SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
- 0, 0, sysctl_kern_timecounter_hardware, "A",
+SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware,
+ CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, 0, 0,
+ sysctl_kern_timecounter_hardware, "A",
"Timecounter hardware selected");
-
/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
@@ -1747,19 +1841,26 @@ sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
struct timecounter *tc;
int error;
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+ return (error);
sbuf_new_for_sysctl(&sb, NULL, 0, req);
+ mtx_lock(&tc_lock);
for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
if (tc != timecounters)
sbuf_putc(&sb, ' ');
sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
}
+ mtx_unlock(&tc_lock);
error = sbuf_finish(&sb);
sbuf_delete(&sb);
return (error);
}
-SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
- 0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
+SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice,
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
+ sysctl_kern_timecounter_choice, "A",
+ "Timecounter hardware detected");
#endif /* __rtems__ */
#ifndef __rtems__
@@ -1803,10 +1904,10 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
tv.tv_usec = fapi->timeout.tv_nsec / 1000;
timo = tvtohz(&tv);
}
- aseq = pps->ppsinfo.assert_sequence;
- cseq = pps->ppsinfo.clear_sequence;
- while (aseq == pps->ppsinfo.assert_sequence &&
- cseq == pps->ppsinfo.clear_sequence) {
+ aseq = atomic_load_int(&pps->ppsinfo.assert_sequence);
+ cseq = atomic_load_int(&pps->ppsinfo.clear_sequence);
+ while (aseq == atomic_load_int(&pps->ppsinfo.assert_sequence) &&
+ cseq == atomic_load_int(&pps->ppsinfo.clear_sequence)) {
if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
if (pps->flags & PPSFLAG_MTX_SPIN) {
err = msleep_spin(pps, pps->driver_mtx,
@@ -2144,27 +2245,38 @@ _Timecounter_Tick_simple(uint32_t delta, uint32_t offset,
{
struct bintime bt;
struct timehands *th;
- uint32_t ogen;
+#if defined(RTEMS_SMP)
+ u_int ogen;
+#endif
th = timehands;
+#if defined(RTEMS_SMP)
ogen = th->th_generation;
+ th->th_generation = 0;
+ atomic_thread_fence_rel();
+#endif
+
th->th_offset_count = offset;
bintime_addx(&th->th_offset, th->th_scale * delta);
-
bt = th->th_offset;
bintime_add(&bt, &th->th_boottime);
+
/* Update the UTC timestamps used by the get*() functions. */
th->th_bintime = bt;
bintime2timeval(&bt, &th->th_microtime);
bintime2timespec(&bt, &th->th_nanotime);
+#if defined(RTEMS_SMP)
/*
* Now that the struct timehands is again consistent, set the new
* generation number, making sure to not make it zero.
*/
if (++ogen == 0)
ogen = 1;
- th->th_generation = ogen;
+ atomic_store_rel_int(&th->th_generation, ogen);
+#else
+ atomic_store_rel_int(&th->th_generation, th->th_generation + 1);
+#endif
/* Go live with the new struct timehands. */
time_second = th->th_microtime.tv_sec;
@@ -2218,6 +2330,28 @@ done:
return (0);
}
+/* Set up the requested number of timehands. */
+static void
+inittimehands(void *dummy)
+{
+ struct timehands *thp;
+ int i;
+
+ TUNABLE_INT_FETCH("kern.timecounter.timehands_count",
+ &timehands_count);
+ if (timehands_count < 1)
+ timehands_count = 1;
+ if (timehands_count > nitems(ths))
+ timehands_count = nitems(ths);
+ for (i = 1, thp = &ths[0]; i < timehands_count; thp = &ths[i++])
+ thp->th_next = &ths[i];
+ thp->th_next = &ths[0];
+
+ TUNABLE_STR_FETCH("kern.timecounter.hardware", tc_from_tunable,
+ sizeof(tc_from_tunable));
+}
+SYSINIT(timehands, SI_SUB_TUNABLES, SI_ORDER_ANY, inittimehands, NULL);
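
For illustration, not part of the commit: tracing inittimehands() with
kern.timecounter.timehands_count=3, the for loop sets
ths[0].th_next = &ths[1] and ths[1].th_next = &ths[2], exits with
thp == &ths[2], and the final assignment closes the ring with
ths[2].th_next = &ths[0].  With the default count of two, this yields the
same two-element ring that the RTEMS SMP configuration builds statically
from th0 and th1.
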
+
static void
inittimecounter(void *dummy)
{
@@ -2248,9 +2382,9 @@ inittimecounter(void *dummy)
#ifdef FFCLOCK
ffclock_init();
#endif
+
/* warm up new timecounter (again) and get rolling. */
(void)timecounter->tc_get_timecount(timecounter);
- (void)timecounter->tc_get_timecount(timecounter);
mtx_lock_spin(&tc_setclock_mtx);
tc_windup(NULL);
mtx_unlock_spin(&tc_setclock_mtx);
@@ -2263,8 +2397,8 @@ SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;
-static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
-static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);
+DPCPU_DEFINE_STATIC(uint64_t, tc_cpu_ticks_base);
+DPCPU_DEFINE_STATIC(unsigned, tc_cpu_ticks_last);
static uint64_t
tc_cpu_ticks(void)
@@ -2438,7 +2572,6 @@ tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
enabled = 0;
return (enabled);
}
-#endif /* __rtems__ */
#ifdef COMPAT_FREEBSD32
uint32_t
@@ -2465,3 +2598,34 @@ tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
return (enabled);
}
#endif
+
+#include "opt_ddb.h"
+#ifdef DDB
+#include <ddb/ddb.h>
+
+DB_SHOW_COMMAND(timecounter, db_show_timecounter)
+{
+ struct timehands *th;
+ struct timecounter *tc;
+ u_int val1, val2;
+
+ th = timehands;
+ tc = th->th_counter;
+ val1 = tc->tc_get_timecount(tc);
+ __compiler_membar();
+ val2 = tc->tc_get_timecount(tc);
+
+ db_printf("timecounter %p %s\n", tc, tc->tc_name);
+ db_printf(" mask %#x freq %ju qual %d flags %#x priv %p\n",
+ tc->tc_counter_mask, (uintmax_t)tc->tc_frequency, tc->tc_quality,
+ tc->tc_flags, tc->tc_priv);
+ db_printf(" val %#x %#x\n", val1, val2);
+ db_printf("timehands adj %#jx scale %#jx ldelta %d off_cnt %d gen %d\n",
+ (uintmax_t)th->th_adjustment, (uintmax_t)th->th_scale,
+ th->th_large_delta, th->th_offset_count, th->th_generation);
+ db_printf(" offset %jd %jd boottime %jd %jd\n",
+ (intmax_t)th->th_offset.sec, (uintmax_t)th->th_offset.frac,
+ (intmax_t)th->th_boottime.sec, (uintmax_t)th->th_boottime.frac);
+}
+#endif
+#endif /* __rtems__ */