author     Konstantin Belousov <kib@FreeBSD.org>  2016-07-27 11:49:41 +0000
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2017-10-12 07:04:11 +0200
commit     b48aeaf4cfdbc289a53ff78f06452298845f8d3a
tree       88919ecbb4cc8da0a950f7aae59d11f1fc11f7b1
parent     timecounter: Merge FreeBSD change r303384
download   rtems-b48aeaf4cfdbc289a53ff78f06452298845f8d3a.tar.bz2

timecounter: Merge FreeBSD change r303387
Prevent parallel tc_windup() calls, both parallel top-level calls from
setclock() and simultaneous top-level and interrupt calls. For this,
tc_windup() is protected with the tc_setclock_mtx spinlock, taken in try
mode when called from the hardclock interrupt. If the spinlock cannot be
obtained without spinning from the interrupt context, this means that the
top-level code is executing tc_windup() on another core, and our attempt
can be skipped.

The boottimebin and boottime variables should be adjusted from
tc_windup(). To be correct, they must be part of the timehands and read
using the lockless protocol. Remove the globals and reimplement the
getboottime(9)/getboottimebin(9) KPI using the timehands read protocol.

Tested by:      pho (as part of the whole patch)
Reviewed by:    jhb (same)
Discussed with: bde
Sponsored by:   The FreeBSD Foundation
MFC after:      1 month
X-Differential revision: https://reviews.freebsd.org/D7302

Update #3175.
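The timehands read protocol referenced in the second paragraph can be
sketched in userspace C11. This is an illustration only: the demo_* names
are invented, and C11 atomics stand in for the kernel's
atomic_load_acq_int()/atomic_thread_fence_acq()/atomic_thread_fence_rel()
primitives. A generation of zero marks an update in flight; readers copy
the fields and retry until they observe the same nonzero generation before
and after the copy.

    /*
     * Hypothetical userspace sketch of the timehands generation
     * protocol; the kernel uses the real struct timehands and the
     * FreeBSD atomic(9) primitives instead.
     */
    #include <stdatomic.h>
    #include <stdint.h>

    struct demo_bintime {
            int64_t  sec;
            uint64_t frac;
    };

    struct demo_timehands {
            struct demo_bintime th_boottime;
            atomic_uint         th_generation; /* 0 while an update is in flight */
    };

    static struct demo_timehands demo_th = { { 0, 0 }, 1 };

    /* Reader: retry until a stable, consistent snapshot is observed. */
    static void
    demo_getboottimebin(struct demo_bintime *bt)
    {
            unsigned int gen;

            do {
                    gen = atomic_load_explicit(&demo_th.th_generation,
                        memory_order_acquire);
                    *bt = demo_th.th_boottime;
                    atomic_thread_fence(memory_order_acquire);
            } while (gen == 0 || gen != atomic_load_explicit(
                &demo_th.th_generation, memory_order_relaxed));
    }

    /* Writer: assumed serialized, like tc_windup() under tc_setclock_mtx. */
    static void
    demo_set_boottime(const struct demo_bintime *bt)
    {
            unsigned int ogen;

            ogen = atomic_load_explicit(&demo_th.th_generation,
                memory_order_relaxed);
            /* Invalidate the snapshot before touching the fields. */
            atomic_store_explicit(&demo_th.th_generation, 0,
                memory_order_relaxed);
            atomic_thread_fence(memory_order_release);
            demo_th.th_boottime = *bt;
            /* Publish with a new nonzero generation. */
            if (++ogen == 0)
                    ogen = 1;
            atomic_store_explicit(&demo_th.th_generation, ogen,
                memory_order_release);
    }

This mirrors the read loops added to fbclock_bintime(), bintime(), and
getboottimebin() in the diff below; in the SMP configuration the kernel
writer additionally advances to the next element of a timehands ring
rather than updating a single structure in place.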
-rw-r--r--  cpukit/score/src/kern_tc.c  120
1 file changed, 74 insertions, 46 deletions
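The try-mode locking described in the first paragraph of the commit
message shows up in the tc_ticktock() hunk below. A rough userspace
analogue using POSIX spinlocks (again with invented demo_* names, and
with pthread_spin_trylock() standing in for mtx_trylock_spin()) might
look like this:

    #include <pthread.h>

    /*
     * Assume pthread_spin_init(&demo_setclock_lock,
     * PTHREAD_PROCESS_PRIVATE) ran at startup.
     */
    static pthread_spinlock_t demo_setclock_lock;
    static int demo_count;
    static const int demo_tc_tick = 1;

    static void
    demo_windup(void)
    {
            /* Stand-in for tc_windup(NULL). */
    }

    /* Per-tick path, analogous to the patched tc_ticktock(). */
    static void
    demo_ticktock(int cnt)
    {
            /*
             * Try mode: if the lock is already held, a top-level caller
             * is running the windup on another core, so this tick is
             * skipped rather than spinning in interrupt context.
             */
            if (pthread_spin_trylock(&demo_setclock_lock) != 0)
                    return;
            demo_count += cnt;
            if (demo_count >= demo_tc_tick) {
                    demo_count = 0;
                    demo_windup();
            }
            pthread_spin_unlock(&demo_setclock_lock);
    }

The top-level path (_Timecounter_Set_clock()) takes the same lock
unconditionally with mtx_lock_spin(), which is what makes skipping a tick
here safe: whoever holds the lock is already performing the windup.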
diff --git a/cpukit/score/src/kern_tc.c b/cpukit/score/src/kern_tc.c
index c1c5821a15..efe2ee904a 100644
--- a/cpukit/score/src/kern_tc.c
+++ b/cpukit/score/src/kern_tc.c
@@ -153,6 +153,7 @@ struct timehands {
struct bintime th_offset;
struct timeval th_microtime;
struct timespec th_nanotime;
+ struct bintime th_boottime;
/* Fields not to be copied in tc_windup start with th_generation. */
#ifndef __rtems__
u_int th_generation;
@@ -176,6 +177,7 @@ static struct timehands th0 = {
#ifdef __rtems__
.th_microtime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
.th_nanotime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
+ .th_boottime = { .sec = TOD_SECONDS_1970_THROUGH_1988 - 1 },
#endif /* __rtems__ */
#if defined(RTEMS_SMP)
.th_next = &th1
@@ -200,14 +202,6 @@ volatile time_t time_second = TOD_SECONDS_1970_THROUGH_1988;
volatile time_t time_uptime = 1;
#ifndef __rtems__
-static struct bintime boottimebin_x;
-#else /* __rtems__ */
-static struct bintime boottimebin_x = {
- .sec = TOD_SECONDS_1970_THROUGH_1988 - 1
-};
-#endif /* __rtems__ */
-#ifndef __rtems__
-static struct timeval boottime_x;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");
@@ -236,11 +230,12 @@ SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
static int tc_chosen; /* Non-zero if a specific tc was chosen via sysctl. */
#endif /* __rtems__ */
-static void tc_windup(void);
+static void tc_windup(struct bintime *new_boottimebin);
#ifndef __rtems__
static void cpu_tick_calibrate(int);
#else /* __rtems__ */
-static void _Timecounter_Windup(ISR_lock_Context *lock_context);
+static void _Timecounter_Windup(struct bintime *new_boottimebin,
+ ISR_lock_Context *lock_context);
#endif /* __rtems__ */
void dtrace_getnanotime(struct timespec *tsp);
@@ -345,9 +340,17 @@ fbclock_microuptime(struct timeval *tvp)
void
fbclock_bintime(struct bintime *bt)
{
+ struct timehands *th;
+ unsigned int gen;
- fbclock_binuptime(bt);
- bintime_add(bt, &boottimebin_x);
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ *bt = th->th_offset;
+ bintime_addx(bt, th->th_scale * tc_delta(th));
+ bintime_add(bt, &th->th_boottime);
+ atomic_thread_fence_acq();
+ } while (gen == 0 || gen != th->th_generation);
}
void
@@ -420,9 +423,9 @@ fbclock_getbintime(struct bintime *bt)
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
*bt = th->th_offset;
+ bintime_add(bt, &th->th_boottime);
atomic_thread_fence_acq();
} while (gen == 0 || gen != th->th_generation);
- bintime_add(bt, &boottimebin_x);
}
void
@@ -508,9 +511,17 @@ microuptime(struct timeval *tvp)
void
bintime(struct bintime *bt)
{
+ struct timehands *th;
+ u_int gen;
- binuptime(bt);
- bintime_add(bt, &boottimebin_x);
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ *bt = th->th_offset;
+ bintime_addx(bt, th->th_scale * tc_delta(th));
+ bintime_add(bt, &th->th_boottime);
+ atomic_thread_fence_acq();
+ } while (gen == 0 || gen != th->th_generation);
}
void
@@ -583,9 +594,9 @@ getbintime(struct bintime *bt)
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
*bt = th->th_offset;
+ bintime_add(bt, &th->th_boottime);
atomic_thread_fence_acq();
} while (gen == 0 || gen != th->th_generation);
- bintime_add(bt, &boottimebin_x);
}
void
@@ -620,15 +631,24 @@ getmicrotime(struct timeval *tvp)
void
getboottime(struct timeval *boottime)
{
+ struct bintime boottimebin;
- *boottime = boottime_x;
+ getboottimebin(&boottimebin);
+ bintime2timeval(&boottimebin, boottime);
}
void
getboottimebin(struct bintime *boottimebin)
{
+ struct timehands *th;
+ u_int gen;
- *boottimebin = boottimebin_x;
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ *boottimebin = th->th_boottime;
+ atomic_thread_fence_acq();
+ } while (gen == 0 || gen != th->th_generation);
}
#ifdef FFCLOCK
@@ -1376,7 +1396,7 @@ tc_init(struct timecounter *tc)
(void)tc->tc_get_timecount(tc);
timecounter = tc;
#ifdef __rtems__
- tc_windup();
+ tc_windup(NULL);
#endif /* __rtems__ */
}
@@ -1388,12 +1408,14 @@ tc_getfrequency(void)
return (timehands->th_counter->tc_frequency);
}
+
+static struct mtx tc_setclock_mtx;
+MTX_SYSINIT(tc_setclock_init, &tc_setclock_mtx, "tcsetc", MTX_SPIN);
#endif /* __rtems__ */
/*
* Step our concept of UTC. This is done by modifying our estimate of
* when we booted.
- * XXX: not locked.
*/
void
#ifndef __rtems__
@@ -1409,32 +1431,30 @@ _Timecounter_Set_clock(const struct bintime *_bt,
struct bintime bt, bt2;
#ifndef __rtems__
- cpu_tick_calibrate(1);
- nanotime(&tbef);
timespec2bintime(ts, &bt);
+ nanotime(&tbef);
+ mtx_lock_spin(&tc_setclock_mtx);
+ cpu_tick_calibrate(1);
#else /* __rtems__ */
bt = *_bt;
#endif /* __rtems__ */
binuptime(&bt2);
bintime_sub(&bt, &bt2);
- bintime_add(&bt2, &boottimebin_x);
- boottimebin_x = bt;
-#ifndef __rtems__
- bintime2timeval(&bt, &boottime_x);
/* XXX fiddle all the little crinkly bits around the fiords... */
- tc_windup();
- nanotime(&taft);
+#ifndef __rtems__
+ tc_windup(&bt);
+ mtx_unlock_spin(&tc_setclock_mtx);
if (timestepwarnings) {
+ nanotime(&taft);
log(LOG_INFO,
"Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
(intmax_t)tbef.tv_sec, tbef.tv_nsec,
(intmax_t)taft.tv_sec, taft.tv_nsec,
(intmax_t)ts->tv_sec, ts->tv_nsec);
}
- cpu_tick_calibrate(1);
#else /* __rtems__ */
- _Timecounter_Windup(lock_context);
+ _Timecounter_Windup(&bt, lock_context);
#endif /* __rtems__ */
}
@@ -1444,17 +1464,18 @@ _Timecounter_Set_clock(const struct bintime *_bt,
* timecounter and/or do seconds processing in NTP. Slightly magic.
*/
static void
-tc_windup(void)
+tc_windup(struct bintime *new_boottimebin)
#ifdef __rtems__
{
ISR_lock_Context lock_context;
_Timecounter_Acquire(&lock_context);
- _Timecounter_Windup(&lock_context);
+ _Timecounter_Windup(new_boottimebin, &lock_context);
}
static void
-_Timecounter_Windup(ISR_lock_Context *lock_context)
+_Timecounter_Windup(struct bintime *new_boottimebin,
+ ISR_lock_Context *lock_context)
#endif /* __rtems__ */
{
struct bintime bt;
@@ -1485,6 +1506,8 @@ _Timecounter_Windup(ISR_lock_Context *lock_context)
#if defined(RTEMS_SMP)
bcopy(tho, th, offsetof(struct timehands, th_generation));
#endif
+ if (new_boottimebin != NULL)
+ th->th_boottime = *new_boottimebin;
/*
* Capture a timecounter delta on the current timecounter and if
@@ -1534,7 +1557,7 @@ _Timecounter_Windup(ISR_lock_Context *lock_context)
* case we missed a leap second.
*/
bt = th->th_offset;
- bintime_add(&bt, &boottimebin_x);
+ bintime_add(&bt, &th->th_boottime);
i = bt.sec - tho->th_microtime.tv_sec;
if (i > LARGE_STEP)
i = 2;
@@ -1542,7 +1565,7 @@ _Timecounter_Windup(ISR_lock_Context *lock_context)
t = bt.sec;
ntp_update_second(&th->th_adjustment, &bt.sec);
if (bt.sec != t)
- boottimebin_x.sec += bt.sec - t;
+ th->th_boottime.sec += bt.sec - t;
}
/* Update the UTC timestamps used by the get*() functions. */
/* XXX shouldn't do this here. Should force non-`get' versions. */
@@ -1979,7 +2002,7 @@ pps_event(struct pps_state *pps, int event)
tcount &= pps->capth->th_counter->tc_counter_mask;
bt = pps->capth->th_offset;
bintime_addx(&bt, pps->capth->th_scale * tcount);
- bintime_add(&bt, &boottimebin_x);
+ bintime_add(&bt, &pps->capth->th_boottime);
bintime2timespec(&bt, &ts);
/* If the timecounter was wound up underneath us, bail out. */
@@ -2058,11 +2081,14 @@ tc_ticktock(int cnt)
{
static int count;
- count += cnt;
- if (count < tc_tick)
- return;
- count = 0;
- tc_windup();
+ if (mtx_trylock_spin(&tc_setclock_mtx)) {
+ count += cnt;
+ if (count >= tc_tick) {
+ count = 0;
+ tc_windup(NULL);
+ }
+ mtx_unlock_spin(&tc_setclock_mtx);
+ }
}
#else /* __rtems__ */
void
@@ -2071,7 +2097,7 @@ _Timecounter_Tick(void)
Per_CPU_Control *cpu_self = _Per_CPU_Get();
if (_Per_CPU_Is_boot_processor(cpu_self)) {
- tc_windup();
+ tc_windup(NULL);
}
_Watchdog_Tick(cpu_self);
@@ -2191,7 +2217,9 @@ inittimecounter(void *dummy)
/* warm up new timecounter (again) and get rolling. */
(void)timecounter->tc_get_timecount(timecounter);
(void)timecounter->tc_get_timecount(timecounter);
- tc_windup();
+ mtx_lock_spin(&tc_setclock_mtx);
+ tc_windup(NULL);
+ mtx_unlock_spin(&tc_setclock_mtx);
}
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
@@ -2367,7 +2395,7 @@ tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
vdso_th->th_offset_count = th->th_offset_count;
vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
vdso_th->th_offset = th->th_offset;
- vdso_th->th_boottime = boottimebin_x;
+ vdso_th->th_boottime = th->th_boottime;
enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter);
if (!vdso_th_enable)
enabled = 0;
@@ -2389,8 +2417,8 @@ tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
vdso_th32->th_offset.sec = th->th_offset.sec;
*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
- vdso_th32->th_boottime.sec = boottimebin_x.sec;
- *(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin_x.frac;
+ vdso_th32->th_boottime.sec = th->th_boottime.sec;
+ *(uint64_t *)&vdso_th32->th_boottime.frac[0] = th->th_boottime.frac;
enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter);
if (!vdso_th_enable)
enabled = 0;