 cpukit/score/src/kern_tc.c | 925 +++++++++++++++++++++++++++++++-------------
 1 file changed, 609 insertions(+), 316 deletions(-)
diff --git a/cpukit/score/src/kern_tc.c b/cpukit/score/src/kern_tc.c
index b5f761aae0..95ae01b5b4 100644
--- a/cpukit/score/src/kern_tc.c
+++ b/cpukit/score/src/kern_tc.c
@@ -5,9 +5,10 @@
*
* @brief This source file contains the definition of
* ::_Timecounter, ::_Timecounter_Time_second, and ::_Timecounter_Time_uptime
- * and the implementation of _Timecounter_Binuptime(),
- * _Timecounter_Nanouptime(), _Timecounter_Microuptime(),
- * _Timecounter_Bintime(), _Timecounter_Nanotime(), _Timecounter_Microtime(),
+ * and the implementation of _Timecounter_Set_NTP_update_second(),
+ * _Timecounter_Binuptime(), _Timecounter_Nanouptime(),
+ * _Timecounter_Microuptime(), _Timecounter_Bintime(),
+ * _Timecounter_Nanotime(), _Timecounter_Microtime(),
* _Timecounter_Getbinuptime(), _Timecounter_Getnanouptime(),
* _Timecounter_Getmicrouptime(), _Timecounter_Getbintime(),
* _Timecounter_Getnanotime(), _Timecounter_Getmicrotime(),
@@ -16,6 +17,8 @@
*/
/*-
+ * SPDX-License-Identifier: Beerware
+ *
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
@@ -24,7 +27,6 @@
* ----------------------------------------------------------------------------
*
* Copyright (c) 2011, 2015, 2016 The FreeBSD Foundation
- * All rights reserved.
*
* Portions of this software were developed by Julien Ridoux at the University
* of Melbourne under sponsorship from the FreeBSD Foundation.
@@ -54,16 +56,21 @@
#define timecounter _Timecounter
#define time_second _Timecounter_Time_second
#define time_uptime _Timecounter_Time_uptime
+
#include <rtems/score/timecounterimpl.h>
+#include <rtems/score/assert.h>
#include <rtems/score/atomic.h>
#include <rtems/score/smp.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/watchdogimpl.h>
+#include <rtems/rtems/clock.h>
+
+#define ENOIOCTL EINVAL
+#define KASSERT(exp, arg) _Assert(exp)
#endif /* __rtems__ */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_tc.c 324528 2017-10-11 11:03:11Z kib $");
+__FBSDID("$FreeBSD$");
-#include "opt_compat.h"
#include "opt_ntp.h"
#include "opt_ffclock.h"
@@ -88,6 +95,7 @@ __FBSDID("$FreeBSD: head/sys/kern/kern_tc.c 324528 2017-10-11 11:03:11Z kib $");
#include <sys/vdso.h>
#endif /* __rtems__ */
#ifdef __rtems__
+#include <errno.h>
#include <limits.h>
#include <string.h>
#include <rtems.h>
@@ -96,10 +104,7 @@ ISR_LOCK_DEFINE(, _Timecounter_Lock, "Timecounter")
_ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, lock_context)
#define hz rtems_clock_get_ticks_per_second()
#define printf(...)
-#define bcopy(x, y, z) memcpy(y, x, z);
#define log(...)
-/* FIXME: https://devel.rtems.org/ticket/2348 */
-#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
static inline void
atomic_thread_fence_acq(void)
@@ -116,6 +121,13 @@ atomic_thread_fence_rel(void)
}
static inline u_int
+atomic_load_int(Atomic_Uint *i)
+{
+
+ return (_Atomic_Load_uint(i, ATOMIC_ORDER_RELAXED));
+}
+
+static inline u_int
atomic_load_acq_int(Atomic_Uint *i)
{
@@ -128,6 +140,24 @@ atomic_store_rel_int(Atomic_Uint *i, u_int val)
_Atomic_Store_uint(i, val, ATOMIC_ORDER_RELEASE);
}
+
+static inline void *
+atomic_load_ptr(void *ptr)
+{
+
+ return ((void *)_Atomic_Load_uintptr(ptr, ATOMIC_ORDER_RELAXED));
+}
+
+static Timecounter_NTP_update_second _Timecounter_NTP_update_second_handler;
+
+void
+_Timecounter_Set_NTP_update_second(Timecounter_NTP_update_second handler)
+{
+
+ _Timecounter_NTP_update_second_handler = handler;
+}
+
+#define ntp_update_second(a, b) (*ntp_update_second_handler)(a, b)
#endif /* __rtems__ */
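
For RTEMS, ntp_update_second() is routed through a registered handler so that the NTP machinery is linked in only when the application needs it. Below is a minimal sketch of installing a handler; the signature is inferred from the call site later in this file (int64_t *adjustment, time_t *newsec), and the example_* names are illustrative, not part of the patch.

    #include <rtems/score/timecounterimpl.h>

    static void
    example_ntp_update_second(int64_t *adjustment, time_t *newsec)
    {
        /* A real handler, e.g. ntp_update_second() in kern_ntptime.c,
         * applies the NTP PLL/FLL adjustment and may step *newsec for
         * leap seconds.  This sketch leaves the clock untouched. */
        (void)adjustment;
        (void)newsec;
    }

    void
    example_install_ntp_hook(void)
    {
        _Timecounter_Set_NTP_update_second(example_ntp_update_second);
    }
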
/*
@@ -169,6 +199,7 @@ struct timehands {
struct timecounter *th_counter;
int64_t th_adjustment;
uint64_t th_scale;
+ uint32_t th_large_delta;
uint32_t th_offset_count;
struct bintime th_offset;
struct bintime th_bintime;
@@ -184,6 +215,39 @@ struct timehands {
struct timehands *th_next;
};
+#ifndef __rtems__
+static struct timehands ths[16] = {
+ [0] = {
+ .th_counter = &dummy_timecounter,
+ .th_scale = (uint64_t)-1 / 1000000,
+ .th_large_delta = 1000000,
+ .th_offset = { .sec = 1 },
+ .th_generation = 1,
+ },
+};
+
+static struct timehands *volatile timehands = &ths[0];
+struct timecounter *timecounter = &dummy_timecounter;
+static struct timecounter *timecounters = &dummy_timecounter;
+
+/* Mutex to protect the timecounter list. */
+static struct mtx tc_lock;
+
+int tc_min_ticktock_freq = 1;
+#else /* __rtems__ */
+/*
+ * In FreeBSD, the timehands count is a tuning option ranging from two to
+ * 16.  The tuning option was added because it is inexpensive and some
+ * FreeBSD users asked for it so they could experiment with it.  The default
+ * value is two.  One system which did not work with two timehands was a
+ * system with one processor and a specific PPS device.
+ *
+ * For RTEMS, in uniprocessor configurations, just use one timehands
+ * structure, since the update is done with interrupts disabled.
+ *
+ * In SMP configurations, use a fixed set of two timehands until someone
+ * reports an issue.
+ */
#if defined(RTEMS_SMP)
static struct timehands th0;
static struct timehands th1 = {
@@ -194,7 +258,8 @@ static struct timehands th0 = {
.th_counter = &dummy_timecounter,
.th_scale = (uint64_t)-1 / 1000000,
.th_offset = { .sec = 1 },
- .th_generation = 1,
+ .th_large_delta = 1000000,
+ .th_generation = UINT_MAX,
#ifdef __rtems__
.th_bintime = { .sec = TOD_SECONDS_1970_THROUGH_1988 },
.th_microtime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
@@ -210,10 +275,6 @@ static struct timehands th0 = {
static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
-#ifndef __rtems__
-static struct timecounter *timecounters = &dummy_timecounter;
-
-int tc_min_ticktock_freq = 1;
#endif /* __rtems__ */
#ifndef __rtems__
@@ -225,17 +286,33 @@ volatile int32_t time_uptime = 1;
#endif /* __rtems__ */
#ifndef __rtems__
+/*
+ * The system time is always computed by summing the estimated boot time and the
+ * system uptime. The timehands track boot time, but it changes when the system
+ * time is set by the user, stepped by ntpd or adjusted when resuming. It
+ * is set to new_time - uptime.
+ */
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
-SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
- NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");
+SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
+ sysctl_kern_boottime, "S,timeval",
+ "Estimated system boottime");
-SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
-static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");
+SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "");
+static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc,
+ CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "");
static int timestepwarnings;
-SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
+SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RWTUN,
&timestepwarnings, 0, "Log time steps");
+static int timehands_count = 2;
+SYSCTL_INT(_kern_timecounter, OID_AUTO, timehands_count,
+ CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+ &timehands_count, 0, "Count of timehands in rotation");
+
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
@@ -253,6 +330,7 @@ SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
volatile int rtc_generation = 1;
static int tc_chosen; /* Non-zero if a specific tc was chosen via sysctl. */
+static char tc_from_tunable[16];
#endif /* __rtems__ */
static void tc_windup(struct bintime *new_boottimebin);
@@ -264,6 +342,7 @@ static void _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
void dtrace_getnanotime(struct timespec *tsp);
+void dtrace_getnanouptime(struct timespec *tsp);
#ifndef __rtems__
static int
@@ -273,7 +352,8 @@ sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
getboottime(&boottime);
-#ifndef __mips__
+/* i386 is the only arch which uses a 32-bit time_t */
+#ifdef __amd64__
#ifdef SCTL_MASK32
int tv[2];
@@ -322,26 +402,100 @@ tc_delta(struct timehands *th)
tc->tc_counter_mask);
}
+static __inline void
+bintime_add_tc_delta(struct bintime *bt, uint64_t scale,
+ uint64_t large_delta, uint64_t delta)
+{
+ uint64_t x;
+
+ if (__predict_false(delta >= large_delta)) {
+ /* Avoid overflow for scale * delta. */
+ x = (scale >> 32) * delta;
+ bt->sec += x >> 32;
+ bintime_addx(bt, x << 32);
+ bintime_addx(bt, (scale & 0xffffffff) * delta);
+ } else {
+ bintime_addx(bt, scale * delta);
+ }
+}
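
The split avoids 64-bit overflow: delta is limited to 32 bits by the counter mask, so scale * delta needs up to 96 bits, and the product is assembled from the two 32-bit halves of scale. A stand-alone sketch (not part of the patch) that checks the identity against a 128-bit reference, using the GCC/Clang __int128 extension:

    #include <assert.h>
    #include <stdint.h>

    static void
    check_split_multiply(uint64_t scale, uint32_t delta)
    {
        unsigned __int128 ref, split;
        uint64_t x;

        ref = (unsigned __int128)scale * delta;
        x = (scale >> 32) * delta;              /* high half, fits in 64 bits */
        split = (unsigned __int128)(x >> 32) << 64; /* carries into bt->sec */
        split += (uint64_t)(x << 32);               /* remaining fraction bits */
        split += (scale & 0xffffffff) * delta;      /* low half of scale */
        assert(ref == split);
    }

For example, check_split_multiply(UINT64_MAX / 1000000, UINT32_MAX) exercises the dummy-timecounter scale with the largest possible delta, a case where the naive 64-bit product scale * delta would wrap.
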
+
/*
* Functions for reading the time. We have to loop until we are sure that
* the timehands that we operated on was not updated under our feet. See
* the comment in <sys/time.h> for a description of these 12 functions.
*/
-#ifdef FFCLOCK
-void
-fbclock_binuptime(struct bintime *bt)
+static __inline void
+bintime_off(struct bintime *bt, u_int off)
+{
+ struct timehands *th;
+ struct bintime *btp;
+ uint64_t scale;
+#ifndef __rtems__
+ u_int delta, gen, large_delta;
+#else /* __rtems__ */
+ uint32_t delta, large_delta;
+ u_int gen;
+#endif /* __rtems__ */
+
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ btp = (struct bintime *)((vm_offset_t)th + off);
+ *bt = *btp;
+ scale = th->th_scale;
+ delta = tc_delta(th);
+ large_delta = th->th_large_delta;
+ atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
+ } while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+
+ bintime_add_tc_delta(bt, scale, large_delta, delta);
+}
+#define GETTHBINTIME(dst, member) \
+do { \
+ _Static_assert(_Generic(((struct timehands *)NULL)->member, \
+ struct bintime: 1, default: 0) == 1, \
+ "struct timehands member is not of struct bintime type"); \
+ bintime_off(dst, __offsetof(struct timehands, member)); \
+} while (0)
+
+static __inline void
+getthmember(void *out, size_t out_size, u_int off)
{
struct timehands *th;
- unsigned int gen;
+ u_int gen;
do {
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- bintime_addx(bt, th->th_scale * tc_delta(th));
+ memcpy(out, (char *)th + off, out_size);
atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
} while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+}
+#define GETTHMEMBER(dst, member) \
+do { \
+ _Static_assert(_Generic(*dst, \
+ __typeof(((struct timehands *)NULL)->member): 1, \
+ default: 0) == 1, \
+ "*dst and struct timehands member have different types"); \
+ getthmember(dst, sizeof(*dst), __offsetof(struct timehands, \
+ member)); \
+} while (0)
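
Both helpers rely on the same lock-free reader protocol: the writer in tc_windup() zeroes th_generation, updates the timehands, then publishes a new non-zero generation with release semantics, while readers copy the data and retry if the generation was zero or changed. A minimal stand-alone sketch of the pattern, with hypothetical names, using the atomic wrappers defined earlier in this file:

    struct snapshot {
        Atomic_Uint gen;        /* zero while the writer is updating */
        uint64_t payload;
    };

    static uint64_t
    snapshot_read(struct snapshot *s)
    {
        uint64_t v;
        u_int g;

        do {
            g = atomic_load_acq_int(&s->gen);
            v = s->payload;
            atomic_thread_fence_acq();
        } while (g == 0 || g != atomic_load_int(&s->gen));

        return (v);
    }

The RTEMS uniprocessor build drops the gen == 0 test because the single writer runs with interrupts disabled and never zeroes the generation, which is why the retry loops above are split with RTEMS_SMP guards.
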
+
+#ifdef FFCLOCK
+void
+fbclock_binuptime(struct bintime *bt)
+{
+
+ GETTHBINTIME(bt, th_offset);
}
void
@@ -365,16 +519,8 @@ fbclock_microuptime(struct timeval *tvp)
void
fbclock_bintime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_bintime);
}
void
@@ -398,116 +544,88 @@ fbclock_microtime(struct timeval *tvp)
void
fbclock_getbinuptime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_offset);
}
void
fbclock_getnanouptime(struct timespec *tsp)
{
- struct timehands *th;
- unsigned int gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timespec(&th->th_offset, tsp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
void
fbclock_getmicrouptime(struct timeval *tvp)
{
- struct timehands *th;
- unsigned int gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timeval(&th->th_offset, tvp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timeval(&bt, tvp);
}
void
fbclock_getbintime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_bintime);
}
void
fbclock_getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
}
void
fbclock_getmicrotime(struct timeval *tvp)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tvp = th->th_microtime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tvp, th_microtime);
}
#else /* !FFCLOCK */
+
void
binuptime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_offset);
}
#ifdef __rtems__
sbintime_t
_Timecounter_Sbinuptime(void)
{
struct timehands *th;
- uint32_t gen;
sbintime_t sbt;
+ uint64_t scale;
+ uint32_t delta;
+ uint32_t large_delta;
+ u_int gen;
do {
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
sbt = bttosbt(th->th_offset);
- sbt += (th->th_scale * tc_delta(th)) >> 32;
+ scale = th->th_scale;
+ delta = tc_delta(th);
+ large_delta = th->th_large_delta;
atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
} while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+
+ if (__predict_false(delta >= large_delta)) {
+ /* Avoid overflow for scale * delta. */
+ sbt += (scale >> 32) * delta;
+ sbt += ((scale & 0xffffffff) * delta) >> 32;
+ } else {
+ sbt += (scale * delta) >> 32;
+ }
return (sbt);
}
@@ -534,16 +652,8 @@ microuptime(struct timeval *tvp)
void
bintime(struct bintime *bt)
{
- struct timehands *th;
- u_int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_bintime);
}
void
@@ -567,88 +677,60 @@ microtime(struct timeval *tvp)
void
getbinuptime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_offset);
}
void
getnanouptime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timespec(&th->th_offset, tsp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
void
getmicrouptime(struct timeval *tvp)
{
- struct timehands *th;
- uint32_t gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timeval(&th->th_offset, tvp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timeval(&bt, tvp);
}
void
getbintime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_bintime);
}
void
getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
}
void
getmicrotime(struct timeval *tvp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tvp = th->th_microtime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tvp, th_microtime);
}
#endif /* FFCLOCK */
+#ifdef __rtems__
+void
+rtems_clock_get_boot_time(struct timespec *boottime)
+{
+ struct bintime boottimebin;
+
+ getboottimebin(&boottimebin);
+ bintime2timespec(&boottimebin, boottime);
+}
+#endif /* __rtems__ */
void
getboottime(struct timeval *boottime)
{
@@ -661,15 +743,8 @@ getboottime(struct timeval *boottime)
void
getboottimebin(struct bintime *boottimebin)
{
- struct timehands *th;
- u_int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *boottimebin = th->th_boottime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(boottimebin, th_boottime);
}
#ifdef FFCLOCK
@@ -1186,15 +1261,22 @@ getmicrotime(struct timeval *tvp)
void
dtrace_getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
+}
+
+/*
+ * This is a clone of getnanouptime used for time since boot.
+ * The dtrace_ prefix prevents fbt from creating probes for it,
+ * so it provides an uptime that can be safely used in all fbt
+ * probes.
+ */
+void
+dtrace_getnanouptime(struct timespec *tsp)
+{
+ struct bintime bt;
+
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
#endif /* __rtems__ */
@@ -1375,26 +1457,32 @@ tc_init(struct timecounter *tc)
tc->tc_quality);
}
- tc->tc_next = timecounters;
- timecounters = tc;
/*
* Set up sysctl tree for this counter.
*/
tc_root = SYSCTL_ADD_NODE_WITH_LABEL(NULL,
SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
- CTLFLAG_RW, 0, "timecounter description", "timecounter");
+ CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "timecounter description", "timecounter");
SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
"mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
"mask for implemented bits");
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
- "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
- sysctl_kern_timecounter_get, "IU", "current timecounter value");
+ "counter", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, tc,
+ sizeof(*tc), sysctl_kern_timecounter_get, "IU",
+ "current timecounter value");
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
- "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
- sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
+ "frequency", CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, tc,
+ sizeof(*tc), sysctl_kern_timecounter_freq, "QU",
+ "timecounter frequency");
SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
"quality", CTLFLAG_RD, &(tc->tc_quality), 0,
"goodness of time counter");
+
+ mtx_lock(&tc_lock);
+ tc->tc_next = timecounters;
+ timecounters = tc;
+
/*
* Do not automatically switch if the current tc was specifically
* chosen. Never automatically use a timecounter with negative quality.
@@ -1402,26 +1490,35 @@ tc_init(struct timecounter *tc)
* worse since this timecounter may not be monotonic.
*/
if (tc_chosen)
- return;
+ goto unlock;
if (tc->tc_quality < 0)
- return;
-#endif /* __rtems__ */
+ goto unlock;
+ if (tc_from_tunable[0] != '\0' &&
+ strcmp(tc->tc_name, tc_from_tunable) == 0) {
+ tc_chosen = 1;
+ tc_from_tunable[0] = '\0';
+ } else {
+ if (tc->tc_quality < timecounter->tc_quality)
+ goto unlock;
+ if (tc->tc_quality == timecounter->tc_quality &&
+ tc->tc_frequency < timecounter->tc_frequency)
+ goto unlock;
+ }
+ (void)tc->tc_get_timecount(tc);
+ timecounter = tc;
+unlock:
+ mtx_unlock(&tc_lock);
+#else /* __rtems__ */
if (tc->tc_quality < timecounter->tc_quality)
return;
if (tc->tc_quality == timecounter->tc_quality &&
tc->tc_frequency < timecounter->tc_frequency)
return;
-#ifndef __rtems__
- (void)tc->tc_get_timecount(tc);
- (void)tc->tc_get_timecount(tc);
-#endif /* __rtems__ */
timecounter = tc;
-#ifdef __rtems__
tc_windup(NULL);
#endif /* __rtems__ */
}
-#ifndef __rtems__
/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
@@ -1430,6 +1527,7 @@ tc_getfrequency(void)
return (timehands->th_counter->tc_frequency);
}
+#ifndef __rtems__
static bool
sleeping_on_old_rtc(struct thread *td)
{
@@ -1504,6 +1602,40 @@ _Timecounter_Set_clock(const struct bintime *_bt,
}
/*
+ * Recalculate the scaling factor. We want the number of 1/2^64
+ * fractions of a second per period of the hardware counter, taking
+ * into account the th_adjustment factor which the NTP PLL/adjtime(2)
+ * processing provides us with.
+ *
+ * The th_adjustment is nanoseconds per second with 32 bit binary
+ * fraction and we want 64 bit binary fraction of second:
+ *
+ * x = a * 2^32 / 10^9 = a * 4.294967296
+ *
+ * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
+ * we can only multiply by about 850 without overflowing, that
+ * leaves no suitably precise fractions for multiply before divide.
+ *
+ * Divide before multiply with a fraction of 2199/512 results in a
+ * systematic undercompensation of 10PPM of th_adjustment. On a
+ * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
+ *
+ * We happily sacrifice the lowest of the 64 bits of our result
+ * to the goddess of code clarity.
+ */
+static void
+recalculate_scaling_factor_and_large_delta(struct timehands *th)
+{
+ uint64_t scale;
+
+ scale = (uint64_t)1 << 63;
+ scale += (th->th_adjustment / 1024) * 2199;
+ scale /= th->th_counter->tc_frequency;
+ th->th_scale = scale * 2;
+ th->th_large_delta = MIN(((uint64_t)1 << 63) / scale, UINT_MAX);
+}
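
A worked example, assuming a hypothetical 24 MHz counter with zero NTP adjustment: scale = floor(2^63 / 24000000) = 384307168202, so th_scale = 768614336404 (about 2^64 / 24000000 fractions of a second per tick) and th_large_delta = floor(2^63 / 384307168202) = 24000000. The overflow-safe path in bintime_add_tc_delta() therefore only triggers once roughly a second's worth of ticks has accumulated between windups. The 2199/512 fraction approximates 2^32 / 10^9: 2199/512 = 4.294921875 versus 2^32 / 10^9 = 4.294967296, an undercompensation of about 10.6 PPM of th_adjustment, matching the comment above.
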
+
+/*
* Initialize the next struct timehands in the ring and make
* it the active timehands. Along the way we might switch to a different
* timecounter and/or do seconds processing in NTP. Slightly magic.
@@ -1524,11 +1656,17 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
{
struct bintime bt;
+ struct timecounter *tc;
struct timehands *th, *tho;
- uint64_t scale;
- uint32_t delta, ncount, ogen;
+ uint32_t delta, ncount;
+#if defined(RTEMS_SMP)
+ u_int ogen;
+#endif
int i;
time_t t;
+#ifdef __rtems__
+ Timecounter_NTP_update_second ntp_update_second_handler;
+#endif
/*
* Make the next timehands a copy of the current one, but do
@@ -1542,14 +1680,12 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
tho = timehands;
#if defined(RTEMS_SMP)
th = tho->th_next;
-#else
- th = tho;
-#endif
ogen = th->th_generation;
th->th_generation = 0;
atomic_thread_fence_rel();
-#if defined(RTEMS_SMP)
- bcopy(tho, th, offsetof(struct timehands, th_generation));
+ memcpy(th, tho, offsetof(struct timehands, th_generation));
+#else
+ th = tho;
#endif
if (new_boottimebin != NULL)
th->th_boottime = *new_boottimebin;
@@ -1559,9 +1695,10 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
* changing timecounters, a counter value from the new timecounter.
* Update the offset fields accordingly.
*/
+ tc = atomic_load_ptr(&timecounter);
delta = tc_delta(th);
- if (th->th_counter != timecounter)
- ncount = timecounter->tc_get_timecount(timecounter);
+ if (th->th_counter != tc)
+ ncount = tc->tc_get_timecount(tc);
else
ncount = 0;
#ifdef FFCLOCK
@@ -1569,17 +1706,8 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
#endif
th->th_offset_count += delta;
th->th_offset_count &= th->th_counter->tc_counter_mask;
- while (delta > th->th_counter->tc_frequency) {
- /* Eat complete unadjusted seconds. */
- delta -= th->th_counter->tc_frequency;
- th->th_offset.sec++;
- }
- if ((delta > th->th_counter->tc_frequency / 2) &&
- (th->th_scale * delta < ((uint64_t)1 << 63))) {
- /* The product th_scale * delta just barely overflows. */
- th->th_offset.sec++;
- }
- bintime_addx(&th->th_offset, th->th_scale * delta);
+ bintime_add_tc_delta(&th->th_offset, th->th_scale,
+ th->th_large_delta, delta);
#ifndef __rtems__
/*
@@ -1595,7 +1723,7 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
/*
- * Deal with NTP second processing. The for loop normally
+ * Deal with NTP second processing. The loop normally
* iterates at most once, but in extreme situations it might
* keep NTP sane if timeouts are not run for several seconds.
* At boot, the time step can be large when the TOD hardware
@@ -1605,69 +1733,57 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
*/
bt = th->th_offset;
bintime_add(&bt, &th->th_boottime);
+#ifdef __rtems__
+ ntp_update_second_handler = _Timecounter_NTP_update_second_handler;
+ if (ntp_update_second_handler != NULL) {
+#endif /* __rtems__ */
i = bt.sec - tho->th_microtime.tv_sec;
- if (i > LARGE_STEP)
- i = 2;
- for (; i > 0; i--) {
- t = bt.sec;
- ntp_update_second(&th->th_adjustment, &bt.sec);
- if (bt.sec != t)
- th->th_boottime.sec += bt.sec - t;
+ if (i > 0) {
+ if (i > LARGE_STEP)
+ i = 2;
+
+ do {
+ t = bt.sec;
+ ntp_update_second(&th->th_adjustment, &bt.sec);
+ if (bt.sec != t)
+ th->th_boottime.sec += bt.sec - t;
+ --i;
+ } while (i > 0);
+
+ recalculate_scaling_factor_and_large_delta(th);
}
+#ifdef __rtems__
+ }
+#endif /* __rtems__ */
+
/* Update the UTC timestamps used by the get*() functions. */
th->th_bintime = bt;
bintime2timeval(&bt, &th->th_microtime);
bintime2timespec(&bt, &th->th_nanotime);
/* Now is a good time to change timecounters. */
- if (th->th_counter != timecounter) {
+ if (th->th_counter != tc) {
#ifndef __rtems__
#ifndef __arm__
- if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
+ if ((tc->tc_flags & TC_FLAGS_C2STOP) != 0)
cpu_disable_c2_sleep++;
if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
cpu_disable_c2_sleep--;
#endif
#endif /* __rtems__ */
- th->th_counter = timecounter;
+ th->th_counter = tc;
th->th_offset_count = ncount;
#ifndef __rtems__
- tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
- (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
+ tc_min_ticktock_freq = max(1, tc->tc_frequency /
+ (((uint64_t)tc->tc_counter_mask + 1) / 3));
#endif /* __rtems__ */
+ recalculate_scaling_factor_and_large_delta(th);
#ifdef FFCLOCK
ffclock_change_tc(th);
#endif
}
- /*-
- * Recalculate the scaling factor. We want the number of 1/2^64
- * fractions of a second per period of the hardware counter, taking
- * into account the th_adjustment factor which the NTP PLL/adjtime(2)
- * processing provides us with.
- *
- * The th_adjustment is nanoseconds per second with 32 bit binary
- * fraction and we want 64 bit binary fraction of second:
- *
- * x = a * 2^32 / 10^9 = a * 4.294967296
- *
- * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
- * we can only multiply by about 850 without overflowing, that
- * leaves no suitably precise fractions for multiply before divide.
- *
- * Divide before multiply with a fraction of 2199/512 results in a
- * systematic undercompensation of 10PPM of th_adjustment. On a
- * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
- *
- * We happily sacrifice the lowest of the 64 bits of our result
- * to the goddess of code clarity.
- *
- */
- scale = (uint64_t)1 << 63;
- scale += (th->th_adjustment / 1024) * 2199;
- scale /= th->th_counter->tc_frequency;
- th->th_scale = scale * 2;
-
+#if defined(RTEMS_SMP)
/*
* Now that the struct timehands is again consistent, set the new
* generation number, making sure to not make it zero.
@@ -1675,6 +1791,9 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
if (++ogen == 0)
ogen = 1;
atomic_store_rel_int(&th->th_generation, ogen);
+#else
+ atomic_store_rel_int(&th->th_generation, th->th_generation + 1);
+#endif
/* Go live with the new struct timehands. */
#ifdef FFCLOCK
@@ -1712,23 +1831,28 @@ sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
struct timecounter *newtc, *tc;
int error;
+ mtx_lock(&tc_lock);
tc = timecounter;
strlcpy(newname, tc->tc_name, sizeof(newname));
+ mtx_unlock(&tc_lock);
error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
if (error != 0 || req->newptr == NULL)
return (error);
+
+ mtx_lock(&tc_lock);
/* Record that the tc in use now was specifically chosen. */
tc_chosen = 1;
- if (strcmp(newname, tc->tc_name) == 0)
+ if (strcmp(newname, tc->tc_name) == 0) {
+ mtx_unlock(&tc_lock);
return (0);
+ }
for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
if (strcmp(newname, newtc->tc_name) != 0)
continue;
/* Warm up new timecounter. */
(void)newtc->tc_get_timecount(newtc);
- (void)newtc->tc_get_timecount(newtc);
timecounter = newtc;
@@ -1740,16 +1864,16 @@ sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
* use any locking and that it can be called in hard interrupt
* context via 'tc_windup()'.
*/
- return (0);
+ break;
}
- return (EINVAL);
+ mtx_unlock(&tc_lock);
+ return (newtc != NULL ? 0 : EINVAL);
}
-
-SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
- 0, 0, sysctl_kern_timecounter_hardware, "A",
+SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware,
+ CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, 0, 0,
+ sysctl_kern_timecounter_hardware, "A",
"Timecounter hardware selected");
-
/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
@@ -1758,22 +1882,28 @@ sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
struct timecounter *tc;
int error;
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+ return (error);
sbuf_new_for_sysctl(&sb, NULL, 0, req);
+ mtx_lock(&tc_lock);
for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
if (tc != timecounters)
sbuf_putc(&sb, ' ');
sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
}
+ mtx_unlock(&tc_lock);
error = sbuf_finish(&sb);
sbuf_delete(&sb);
return (error);
}
-SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
- 0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
+SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice,
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
+ sysctl_kern_timecounter_choice, "A",
+ "Timecounter hardware detected");
#endif /* __rtems__ */
-#ifndef __rtems__
/*
* RFC 2783 PPS-API implementation.
*/
@@ -1792,9 +1922,15 @@ abi_aware(struct pps_state *pps, int vers)
static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
+#ifndef __rtems__
int err, timo;
+#else /* __rtems__ */
+ int err;
+#endif /* __rtems__ */
pps_seq_t aseq, cseq;
+#ifndef __rtems__
struct timeval tv;
+#endif /* __rtems__ */
if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
return (EINVAL);
@@ -1807,6 +1943,7 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
* sleep a long time.
*/
if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
+#ifndef __rtems__
if (fapi->timeout.tv_sec == -1)
timo = 0x7fffffff;
else {
@@ -1814,10 +1951,12 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
tv.tv_usec = fapi->timeout.tv_nsec / 1000;
timo = tvtohz(&tv);
}
- aseq = pps->ppsinfo.assert_sequence;
- cseq = pps->ppsinfo.clear_sequence;
- while (aseq == pps->ppsinfo.assert_sequence &&
- cseq == pps->ppsinfo.clear_sequence) {
+#endif /* __rtems__ */
+ aseq = atomic_load_int(&pps->ppsinfo.assert_sequence);
+ cseq = atomic_load_int(&pps->ppsinfo.clear_sequence);
+ while (aseq == atomic_load_int(&pps->ppsinfo.assert_sequence) &&
+ cseq == atomic_load_int(&pps->ppsinfo.clear_sequence)) {
+#ifndef __rtems__
if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
if (pps->flags & PPSFLAG_MTX_SPIN) {
err = msleep_spin(pps, pps->driver_mtx,
@@ -1838,6 +1977,12 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
} else if (err != 0) {
return (err);
}
+#else /* __rtems__ */
+ _Assert(pps->wait != NULL);
+ err = (*pps->wait)(pps, fapi->timeout);
+ if (err != 0)
+ return (err);
+#endif /* __rtems__ */
}
}
@@ -1933,9 +2078,43 @@ pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
}
}
+#ifdef __rtems__
+/*
+ * The real implementation of hardpps() is defined in kern_ntptime.c.  It
+ * overrides this weak dummy only if the application needs NTP support.
+ */
+RTEMS_WEAK void
+hardpps(struct timespec *tsp, long nsec)
+{
+
+ (void)tsp;
+ (void)nsec;
+}
+
+static int
+default_wait(struct pps_state *pps, struct timespec timeout)
+{
+
+ (void)pps;
+ (void)timeout;
+
+ return (ETIMEDOUT);
+}
+
+static void
+default_wakeup(struct pps_state *pps)
+{
+
+ (void)pps;
+}
+#endif /* __rtems__ */
void
pps_init(struct pps_state *pps)
{
+#ifdef __rtems__
+ pps->wait = default_wait;
+ pps->wakeup = default_wakeup;
+#endif /* __rtems__ */
pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
if (pps->ppscap & PPS_CAPTUREASSERT)
pps->ppscap |= PPS_OFFSETASSERT;
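
A sketch of how an RTEMS PPS driver could replace the default no-op handlers with a binary-semaphore based wait/wakeup. The semaphore creation, the fractional part of the timeout, and the "wait forever" convention (tv_sec == -1) are omitted for brevity; all example_* names are assumptions, not part of the patch:

    #include <sys/timepps.h>
    #include <errno.h>
    #include <rtems.h>

    static rtems_id example_pps_sema;  /* assumed created with count 0 */

    static int
    example_pps_wait(struct pps_state *pps, struct timespec timeout)
    {
        rtems_interval ticks;
        rtems_status_code sc;

        (void)pps;
        ticks = (rtems_interval)timeout.tv_sec *
            rtems_clock_get_ticks_per_second();
        sc = rtems_semaphore_obtain(example_pps_sema, RTEMS_WAIT, ticks);
        return (sc == RTEMS_SUCCESSFUL ? 0 : ETIMEDOUT);
    }

    static void
    example_pps_wakeup(struct pps_state *pps)
    {
        (void)pps;
        (void)rtems_semaphore_release(example_pps_sema);
    }

After pps_init(pps) has installed the defaults, the driver would set pps->wait = example_pps_wait and pps->wakeup = example_pps_wakeup; the capture interrupt then calls pps_capture() and pps_event(), and pps_event() invokes the wakeup handler to release any thread blocked in pps_fetch().
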
@@ -1962,6 +2141,7 @@ void
pps_capture(struct pps_state *pps)
{
struct timehands *th;
+ struct timecounter *tc;
KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
th = timehands;
@@ -1970,17 +2150,18 @@ pps_capture(struct pps_state *pps)
#ifdef FFCLOCK
pps->capffth = fftimehands;
#endif
- pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
- atomic_thread_fence_acq();
- if (pps->capgen != th->th_generation)
- pps->capgen = 0;
+ tc = th->th_counter;
+ pps->capcount = tc->tc_get_timecount(tc);
}
void
pps_event(struct pps_state *pps, int event)
{
+ struct timehands *capth;
+ struct timecounter *captc;
+ uint64_t capth_scale;
struct bintime bt;
- struct timespec ts, *tsp, *osp;
+ struct timespec *tsp, *osp;
uint32_t tcount, *pcount;
int foff;
pps_seq_t *pseq;
@@ -1997,9 +2178,21 @@ pps_event(struct pps_state *pps, int event)
/* Nothing to do if not currently set to capture this event type. */
if ((event & pps->ppsparam.mode) == 0)
return;
+
+ /* Make a snapshot of the captured timehand */
+ capth = pps->capth;
+ captc = capth->th_counter;
+ capth_scale = capth->th_scale;
+ tcount = capth->th_offset_count;
+ bt = capth->th_bintime;
+
/* If the timecounter was wound up underneath us, bail out. */
- if (pps->capgen == 0 || pps->capgen !=
- atomic_load_acq_int(&pps->capth->th_generation))
+ atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
+ if (pps->capgen == 0 || pps->capgen != capth->th_generation)
+#else
+ if (pps->capgen != capth->th_generation)
+#endif
return;
/* Things would be easier with arrays. */
@@ -2033,32 +2226,25 @@ pps_event(struct pps_state *pps, int event)
#endif
}
+ *pcount = pps->capcount;
+
/*
* If the timecounter changed, we cannot compare the count values, so
* we have to drop the rest of the PPS-stuff until the next event.
*/
- if (pps->ppstc != pps->capth->th_counter) {
- pps->ppstc = pps->capth->th_counter;
- *pcount = pps->capcount;
+ if (__predict_false(pps->ppstc != captc)) {
+ pps->ppstc = captc;
pps->ppscount[2] = pps->capcount;
return;
}
- /* Convert the count to a timespec. */
- tcount = pps->capcount - pps->capth->th_offset_count;
- tcount &= pps->capth->th_counter->tc_counter_mask;
- bt = pps->capth->th_bintime;
- bintime_addx(&bt, pps->capth->th_scale * tcount);
- bintime2timespec(&bt, &ts);
-
- /* If the timecounter was wound up underneath us, bail out. */
- atomic_thread_fence_acq();
- if (pps->capgen != pps->capth->th_generation)
- return;
-
- *pcount = pps->capcount;
(*pseq)++;
- *tsp = ts;
+
+ /* Convert the count to a timespec. */
+ tcount = pps->capcount - tcount;
+ tcount &= captc->tc_counter_mask;
+ bintime_addx(&bt, capth_scale * tcount);
+ bintime2timespec(&bt, tsp);
if (foff) {
timespecadd(tsp, osp, tsp);
@@ -2073,14 +2259,14 @@ pps_event(struct pps_state *pps, int event)
bt = pps->capffth->tick_time;
ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
bintime_add(&bt, &pps->capffth->tick_time);
- bintime2timespec(&bt, &ts);
(*pseq_ffc)++;
- *tsp_ffc = ts;
+ bintime2timespec(&bt, tsp_ffc);
#endif
#ifdef PPS_SYNC
if (fhard) {
- uint64_t scale;
+ uint64_t delta_nsec;
+ uint64_t freq;
/*
* Feed the NTP PLL/FLL.
@@ -2089,24 +2275,23 @@ pps_event(struct pps_state *pps, int event)
*/
tcount = pps->capcount - pps->ppscount[2];
pps->ppscount[2] = pps->capcount;
- tcount &= pps->capth->th_counter->tc_counter_mask;
- scale = (uint64_t)1 << 63;
- scale /= pps->capth->th_counter->tc_frequency;
- scale *= 2;
- bt.sec = 0;
- bt.frac = 0;
- bintime_addx(&bt, scale * tcount);
- bintime2timespec(&bt, &ts);
- hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
+ tcount &= captc->tc_counter_mask;
+ delta_nsec = 1000000000;
+ delta_nsec *= tcount;
+ freq = captc->tc_frequency;
+ delta_nsec = (delta_nsec + freq / 2) / freq;
+ hardpps(tsp, (long)delta_nsec);
}
#endif
/* Wakeup anyone sleeping in pps_fetch(). */
+#ifndef __rtems__
wakeup(pps);
-}
#else /* __rtems__ */
-/* FIXME: https://devel.rtems.org/ticket/2349 */
+ _Assert(pps->wakeup != NULL);
+ (*pps->wakeup)(pps);
#endif /* __rtems__ */
+}
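
The feed to hardpps() now converts the tick count directly to nanoseconds with rounding to nearest: delta_nsec = (10^9 * tcount + freq / 2) / freq. The intermediate product fits in 64 bits because tcount is at most 2^32 - 1, so 10^9 * tcount < 4.3e18 < 2^64. Worked example: tcount = 24000012 ticks at freq = 24000000 Hz gives (24000012000000000 + 12000000) / 24000000 = 1000000500 ns, i.e. exactly 1.0000005 s.
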
/*
* Timecounters need to be updated every so often to prevent the hardware
@@ -2142,9 +2327,13 @@ _Timecounter_Tick(void)
{
Per_CPU_Control *cpu_self = _Per_CPU_Get();
+#if defined(RTEMS_SMP)
if (_Per_CPU_Is_boot_processor(cpu_self)) {
+#endif
tc_windup(NULL);
+#if defined(RTEMS_SMP)
}
+#endif
_Watchdog_Tick(cpu_self);
}
@@ -2155,27 +2344,38 @@ _Timecounter_Tick_simple(uint32_t delta, uint32_t offset,
{
struct bintime bt;
struct timehands *th;
- uint32_t ogen;
+#if defined(RTEMS_SMP)
+ u_int ogen;
+#endif
th = timehands;
+#if defined(RTEMS_SMP)
ogen = th->th_generation;
+ th->th_generation = 0;
+ atomic_thread_fence_rel();
+#endif
+
th->th_offset_count = offset;
bintime_addx(&th->th_offset, th->th_scale * delta);
-
bt = th->th_offset;
bintime_add(&bt, &th->th_boottime);
+
/* Update the UTC timestamps used by the get*() functions. */
th->th_bintime = bt;
bintime2timeval(&bt, &th->th_microtime);
bintime2timespec(&bt, &th->th_nanotime);
+#if defined(RTEMS_SMP)
/*
* Now that the struct timehands is again consistent, set the new
* generation number, making sure to not make it zero.
*/
if (++ogen == 0)
ogen = 1;
- th->th_generation = ogen;
+ atomic_store_rel_int(&th->th_generation, ogen);
+#else
+ atomic_store_rel_int(&th->th_generation, th->th_generation + 1);
+#endif
/* Go live with the new struct timehands. */
time_second = th->th_microtime.tv_sec;
@@ -2229,6 +2429,30 @@ done:
return (0);
}
+/* Set up the requested number of timehands. */
+static void
+inittimehands(void *dummy)
+{
+ struct timehands *thp;
+ int i;
+
+ TUNABLE_INT_FETCH("kern.timecounter.timehands_count",
+ &timehands_count);
+ if (timehands_count < 1)
+ timehands_count = 1;
+ if (timehands_count > nitems(ths))
+ timehands_count = nitems(ths);
+ for (i = 1, thp = &ths[0]; i < timehands_count; thp = &ths[i++])
+ thp->th_next = &ths[i];
+ thp->th_next = &ths[0];
+
+ TUNABLE_STR_FETCH("kern.timecounter.hardware", tc_from_tunable,
+ sizeof(tc_from_tunable));
+
+ mtx_init(&tc_lock, "tc", NULL, MTX_DEF);
+}
+SYSINIT(timehands, SI_SUB_TUNABLES, SI_ORDER_ANY, inittimehands, NULL);
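
The link loop is compact but subtle because of the post-increment in the third clause. For timehands_count == 3 it sets ths[0].th_next = &ths[1] (then thp = &ths[1], i = 2), sets ths[1].th_next = &ths[2] (then thp = &ths[2], i = 3) and exits, and the final assignment closes the ring with ths[2].th_next = &ths[0]. With timehands_count == 1 the loop body never runs and ths[0] points at itself.
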
+
static void
inittimecounter(void *dummy)
{
@@ -2259,9 +2483,9 @@ inittimecounter(void *dummy)
#ifdef FFCLOCK
ffclock_init();
#endif
+
/* warm up new timecounter (again) and get rolling. */
(void)timecounter->tc_get_timecount(timecounter);
- (void)timecounter->tc_get_timecount(timecounter);
mtx_lock_spin(&tc_setclock_mtx);
tc_windup(NULL);
mtx_unlock_spin(&tc_setclock_mtx);
@@ -2271,11 +2495,11 @@ SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
/* Cpu tick handling -------------------------------------------------*/
-static int cpu_tick_variable;
+static bool cpu_tick_variable;
static uint64_t cpu_tick_frequency;
-static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
-static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);
+DPCPU_DEFINE_STATIC(uint64_t, tc_cpu_ticks_base);
+DPCPU_DEFINE_STATIC(unsigned, tc_cpu_ticks_last);
static uint64_t
tc_cpu_ticks(void)
@@ -2364,14 +2588,14 @@ cpu_tick_calibrate(int reset)
}
void
-set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
+set_cputicker(cpu_tick_f *func, uint64_t freq, bool isvariable)
{
if (func == NULL) {
cpu_ticks = tc_cpu_ticks;
} else {
cpu_tick_frequency = freq;
- cpu_tick_variable = var;
+ cpu_tick_variable = isvariable;
cpu_ticks = func;
}
}
@@ -2391,20 +2615,14 @@ cpu_tickrate(void)
* years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
* before divide conversion (to retain precision) we find that the
* margin shrinks to 1.5 hours (one millionth of 146y).
- * With a three prong approach we never lose significant bits, no
- * matter what the cputick rate and length of timeinterval is.
*/
uint64_t
cputick2usec(uint64_t tick)
{
-
- if (tick > 18446744073709551LL) /* floor(2^64 / 1000) */
- return (tick / (cpu_tickrate() / 1000000LL));
- else if (tick > 18446744073709LL) /* floor(2^64 / 1000000) */
- return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
- else
- return ((tick * 1000000LL) / cpu_tickrate());
+ uint64_t tr;
+ tr = cpu_tickrate();
+ return ((tick / tr) * 1000000ULL) + ((tick % tr) * 1000000ULL) / tr;
}
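
The rewritten cputick2usec() splits tick into whole counter periods plus a remainder so that neither term loses precision or overflows at realistic rates: tick % tr is less than tr, and (tick % tr) * 1000000 stays below 2^64 for any tr up to about 1.8e13 Hz, which removes the need for the old three-prong range test. Worked example: tick = 48000000123 at tr = 24000000 Hz yields (2000 * 1000000) + (123 * 1000000) / 24000000 = 2000000000 + 5 = 2000000005 us, against an exact value of 2000000005.125 us.
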
cpu_tick_f *cpu_ticks = tc_cpu_ticks;
@@ -2449,7 +2667,6 @@ tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
enabled = 0;
return (enabled);
}
-#endif /* __rtems__ */
#ifdef COMPAT_FREEBSD32
uint32_t
@@ -2476,3 +2693,79 @@ tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
return (enabled);
}
#endif
+
+#include "opt_ddb.h"
+#ifdef DDB
+#include <ddb/ddb.h>
+
+DB_SHOW_COMMAND(timecounter, db_show_timecounter)
+{
+ struct timehands *th;
+ struct timecounter *tc;
+ u_int val1, val2;
+
+ th = timehands;
+ tc = th->th_counter;
+ val1 = tc->tc_get_timecount(tc);
+ __compiler_membar();
+ val2 = tc->tc_get_timecount(tc);
+
+ db_printf("timecounter %p %s\n", tc, tc->tc_name);
+ db_printf(" mask %#x freq %ju qual %d flags %#x priv %p\n",
+ tc->tc_counter_mask, (uintmax_t)tc->tc_frequency, tc->tc_quality,
+ tc->tc_flags, tc->tc_priv);
+ db_printf(" val %#x %#x\n", val1, val2);
+ db_printf("timehands adj %#jx scale %#jx ldelta %d off_cnt %d gen %d\n",
+ (uintmax_t)th->th_adjustment, (uintmax_t)th->th_scale,
+ th->th_large_delta, th->th_offset_count, th->th_generation);
+ db_printf(" offset %jd %jd boottime %jd %jd\n",
+ (intmax_t)th->th_offset.sec, (uintmax_t)th->th_offset.frac,
+ (intmax_t)th->th_boottime.sec, (uintmax_t)th->th_boottime.frac);
+}
+#endif
+#else /* __rtems__ */
+RTEMS_ALIAS(_Timecounter_Nanotime)
+void rtems_clock_get_realtime(struct timespec *);
+
+RTEMS_ALIAS(_Timecounter_Bintime)
+void rtems_clock_get_realtime_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Microtime)
+void rtems_clock_get_realtime_timeval(struct timeval *);
+
+RTEMS_ALIAS(_Timecounter_Getnanotime)
+void rtems_clock_get_realtime_coarse(struct timespec *);
+
+RTEMS_ALIAS(_Timecounter_Getbintime)
+void rtems_clock_get_realtime_coarse_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Getmicrotime)
+void rtems_clock_get_realtime_coarse_timeval(struct timeval *);
+
+RTEMS_ALIAS(_Timecounter_Nanouptime)
+void rtems_clock_get_monotonic(struct timespec *);
+
+RTEMS_ALIAS(_Timecounter_Binuptime)
+void rtems_clock_get_monotonic_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Sbinuptime)
+sbintime_t rtems_clock_get_monotonic_sbintime(void);
+
+RTEMS_ALIAS(_Timecounter_Microuptime)
+void rtems_clock_get_monotonic_timeval(struct timeval *);
+
+RTEMS_ALIAS(_Timecounter_Getnanouptime)
+void rtems_clock_get_monotonic_coarse(struct timespec *);
+
+RTEMS_ALIAS(_Timecounter_Getbinuptime)
+void rtems_clock_get_monotonic_coarse_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Getmicrouptime)
+void rtems_clock_get_monotonic_coarse_timeval(struct timeval *);
+
+RTEMS_ALIAS(_Timecounter_Getboottimebin)
+void rtems_clock_get_boot_time_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Getboottime)
+void rtems_clock_get_boot_time_timeval(struct timeval *);
+#endif /* __rtems__ */
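
A usage sketch for the aliased clock API declared above; the example_* function is hypothetical application code:

    #include <rtems.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static void
    example_print_clocks(void)
    {
        struct timespec rt;
        struct timespec up;

        rtems_clock_get_realtime(&rt);   /* alias of _Timecounter_Nanotime */
        rtems_clock_get_monotonic(&up);  /* alias of _Timecounter_Nanouptime */
        printf("realtime  %jd.%09ld\n", (intmax_t)rt.tv_sec, rt.tv_nsec);
        printf("monotonic %jd.%09ld\n", (intmax_t)up.tv_sec, up.tv_nsec);
    }
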