Diffstat (limited to 'cpukit/score/src/kern_tc.c')
-rw-r--r-- | cpukit/score/src/kern_tc.c | 169
1 file changed, 117 insertions, 52 deletions
diff --git a/cpukit/score/src/kern_tc.c b/cpukit/score/src/kern_tc.c
index e57da2c0ca..95ae01b5b4 100644
--- a/cpukit/score/src/kern_tc.c
+++ b/cpukit/score/src/kern_tc.c
@@ -56,12 +56,17 @@
 #define timecounter _Timecounter
 #define time_second _Timecounter_Time_second
 #define time_uptime _Timecounter_Time_uptime
+
 #include <rtems/score/timecounterimpl.h>
+#include <rtems/score/assert.h>
 #include <rtems/score/atomic.h>
 #include <rtems/score/smp.h>
 #include <rtems/score/todimpl.h>
 #include <rtems/score/watchdogimpl.h>
 #include <rtems/rtems/clock.h>
+
+#define ENOIOCTL EINVAL
+#define KASSERT(exp, arg) _Assert(exp)
 #endif /* __rtems__ */
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
@@ -90,6 +95,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/vdso.h>
 #endif /* __rtems__ */
 #ifdef __rtems__
+#include <errno.h>
 #include <limits.h>
 #include <string.h>
 #include <rtems.h>
@@ -115,6 +121,13 @@ atomic_thread_fence_rel(void)
 }
 
 static inline u_int
+atomic_load_int(Atomic_Uint *i)
+{
+
+        return (_Atomic_Load_uint(i, ATOMIC_ORDER_RELAXED));
+}
+
+static inline u_int
 atomic_load_acq_int(Atomic_Uint *i)
 {
 
@@ -1506,7 +1519,6 @@ unlock:
 #endif /* __rtems__ */
 }
 
-#ifndef __rtems__
 /* Report the frequency of the current timecounter. */
 uint64_t
 tc_getfrequency(void)
@@ -1515,6 +1527,7 @@ tc_getfrequency(void)
 {
 
         return (timehands->th_counter->tc_frequency);
 }
 
+#ifndef __rtems__
 static bool
 sleeping_on_old_rtc(struct thread *td)
 {
@@ -1891,7 +1904,6 @@ SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice,
     "Timecounter hardware detected");
 #endif /* __rtems__ */
 
-#ifndef __rtems__
 /*
  * RFC 2783 PPS-API implementation.
  */
@@ -1910,9 +1922,15 @@ abi_aware(struct pps_state *pps, int vers)
 static int
 pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
 {
+#ifndef __rtems__
        int err, timo;
+#else /* __rtems__ */
+       int err;
+#endif /* __rtems__ */
        pps_seq_t aseq, cseq;
+#ifndef __rtems__
        struct timeval tv;
+#endif /* __rtems__ */
 
        if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
                return (EINVAL);
@@ -1925,6 +1943,7 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
         * sleep a long time.
         */
        if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
+#ifndef __rtems__
                if (fapi->timeout.tv_sec == -1)
                        timo = 0x7fffffff;
                else {
@@ -1932,10 +1951,12 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
                        tv.tv_sec = fapi->timeout.tv_sec;
                        tv.tv_usec = fapi->timeout.tv_nsec / 1000;
                        timo = tvtohz(&tv);
                }
+#endif /* __rtems__ */
                aseq = atomic_load_int(&pps->ppsinfo.assert_sequence);
                cseq = atomic_load_int(&pps->ppsinfo.clear_sequence);
                while (aseq == atomic_load_int(&pps->ppsinfo.assert_sequence) &&
                    cseq == atomic_load_int(&pps->ppsinfo.clear_sequence)) {
+#ifndef __rtems__
                        if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
                                if (pps->flags & PPSFLAG_MTX_SPIN) {
                                        err = msleep_spin(pps, pps->driver_mtx,
@@ -1956,6 +1977,12 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
                        } else if (err != 0) {
                                return (err);
                        }
+#else /* __rtems__ */
+                       _Assert(pps->wait != NULL);
+                       err = (*pps->wait)(pps, fapi->timeout);
+                       if (err != 0)
+                               return (err);
+#endif /* __rtems__ */
                }
        }
 
@@ -2051,9 +2078,43 @@ pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
        }
 }
 
+#ifdef __rtems__
+/*
+ * The real implementation of hardpps() is defined in kern_ntptime.c.  Use it
+ * only if the NTP support is needed by the application.
+ */
+RTEMS_WEAK void
+hardpps(struct timespec *tsp, long nsec)
+{
+
+       (void)tsp;
+       (void)nsec;
+}
+
+static int
+default_wait(struct pps_state *pps, struct timespec timeout)
+{
+
+       (void)pps;
+       (void)timeout;
+
+       return (ETIMEDOUT);
+}
+
+static void
+default_wakeup(struct pps_state *pps)
+{
+
+       (void)pps;
+}
+#endif /* __rtems__ */
 void
 pps_init(struct pps_state *pps)
 {
+#ifdef __rtems__
+       pps->wait = default_wait;
+       pps->wakeup = default_wakeup;
+#endif /* __rtems__ */
        pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
        if (pps->ppscap & PPS_CAPTUREASSERT)
                pps->ppscap |= PPS_OFFSETASSERT;
@@ -2080,6 +2141,7 @@ void
 pps_capture(struct pps_state *pps)
 {
        struct timehands *th;
+       struct timecounter *tc;
 
        KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
        th = timehands;
@@ -2088,17 +2150,18 @@ pps_capture(struct pps_state *pps)
 #ifdef FFCLOCK
        pps->capffth = fftimehands;
 #endif
-       pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
-       atomic_thread_fence_acq();
-       if (pps->capgen != th->th_generation)
-               pps->capgen = 0;
+       tc = th->th_counter;
+       pps->capcount = tc->tc_get_timecount(tc);
 }
 
 void
 pps_event(struct pps_state *pps, int event)
 {
+       struct timehands *capth;
+       struct timecounter *captc;
+       uint64_t capth_scale;
        struct bintime bt;
-       struct timespec ts, *tsp, *osp;
+       struct timespec *tsp, *osp;
        uint32_t tcount, *pcount;
        int foff;
        pps_seq_t *pseq;
@@ -2115,9 +2178,21 @@ pps_event(struct pps_state *pps, int event)
        /* Nothing to do if not currently set to capture this event type. */
        if ((event & pps->ppsparam.mode) == 0)
                return;
+
+       /* Make a snapshot of the captured timehand */
+       capth = pps->capth;
+       captc = capth->th_counter;
+       capth_scale = capth->th_scale;
+       tcount = capth->th_offset_count;
+       bt = capth->th_bintime;
+
        /* If the timecounter was wound up underneath us, bail out. */
-       if (pps->capgen == 0 || pps->capgen !=
-           atomic_load_acq_int(&pps->capth->th_generation))
+       atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
+       if (pps->capgen == 0 || pps->capgen != capth->th_generation)
+#else
+       if (pps->capgen != capth->th_generation)
+#endif
                return;
 
        /* Things would be easier with arrays. */
@@ -2151,32 +2226,25 @@ pps_event(struct pps_state *pps, int event)
 #endif
        }
 
+       *pcount = pps->capcount;
+
        /*
         * If the timecounter changed, we cannot compare the count values, so
         * we have to drop the rest of the PPS-stuff until the next event.
         */
-       if (pps->ppstc != pps->capth->th_counter) {
-               pps->ppstc = pps->capth->th_counter;
-               *pcount = pps->capcount;
+       if (__predict_false(pps->ppstc != captc)) {
+               pps->ppstc = captc;
                pps->ppscount[2] = pps->capcount;
                return;
        }
 
-       /* Convert the count to a timespec. */
-       tcount = pps->capcount - pps->capth->th_offset_count;
-       tcount &= pps->capth->th_counter->tc_counter_mask;
-       bt = pps->capth->th_bintime;
-       bintime_addx(&bt, pps->capth->th_scale * tcount);
-       bintime2timespec(&bt, &ts);
-
-       /* If the timecounter was wound up underneath us, bail out. */
-       atomic_thread_fence_acq();
-       if (pps->capgen != pps->capth->th_generation)
-               return;
-
-       *pcount = pps->capcount;
        (*pseq)++;
-       *tsp = ts;
+
+       /* Convert the count to a timespec. */
+       tcount = pps->capcount - tcount;
+       tcount &= captc->tc_counter_mask;
+       bintime_addx(&bt, capth_scale * tcount);
+       bintime2timespec(&bt, tsp);
 
        if (foff) {
                timespecadd(tsp, osp, tsp);
@@ -2191,14 +2259,14 @@ pps_event(struct pps_state *pps, int event)
                bt = pps->capffth->tick_time;
                ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
                bintime_add(&bt, &pps->capffth->tick_time);
-               bintime2timespec(&bt, &ts);
                (*pseq_ffc)++;
-               *tsp_ffc = ts;
+               bintime2timespec(&bt, tsp_ffc);
 #endif
 
 #ifdef PPS_SYNC
        if (fhard) {
-               uint64_t scale;
+               uint64_t delta_nsec;
+               uint64_t freq;
 
                /*
                 * Feed the NTP PLL/FLL.
@@ -2207,24 +2275,23 @@ pps_event(struct pps_state *pps, int event)
                 */
                tcount = pps->capcount - pps->ppscount[2];
                pps->ppscount[2] = pps->capcount;
-               tcount &= pps->capth->th_counter->tc_counter_mask;
-               scale = (uint64_t)1 << 63;
-               scale /= pps->capth->th_counter->tc_frequency;
-               scale *= 2;
-               bt.sec = 0;
-               bt.frac = 0;
-               bintime_addx(&bt, scale * tcount);
-               bintime2timespec(&bt, &ts);
-               hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
+               tcount &= captc->tc_counter_mask;
+               delta_nsec = 1000000000;
+               delta_nsec *= tcount;
+               freq = captc->tc_frequency;
+               delta_nsec = (delta_nsec + freq / 2) / freq;
+               hardpps(tsp, (long)delta_nsec);
        }
 #endif
 
        /* Wakeup anyone sleeping in pps_fetch(). */
+#ifndef __rtems__
        wakeup(pps);
-}
 #else /* __rtems__ */
-/* FIXME: https://devel.rtems.org/ticket/2349 */
+       _Assert(pps->wakeup != NULL);
+       (*pps->wakeup)(pps);
 #endif /* __rtems__ */
+}
 
 /*
  * Timecounters need to be updated every so often to prevent the hardware
@@ -2260,9 +2327,13 @@ _Timecounter_Tick(void)
 {
        Per_CPU_Control *cpu_self = _Per_CPU_Get();
 
+#if defined(RTEMS_SMP)
        if (_Per_CPU_Is_boot_processor(cpu_self)) {
+#endif
                tc_windup(NULL);
+#if defined(RTEMS_SMP)
        }
+#endif
 
        _Watchdog_Tick(cpu_self);
 }
@@ -2424,7 +2495,7 @@ SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
 
 /* Cpu tick handling -------------------------------------------------*/
 
-static int cpu_tick_variable;
+static bool cpu_tick_variable;
 static uint64_t cpu_tick_frequency;
 
 DPCPU_DEFINE_STATIC(uint64_t, tc_cpu_ticks_base);
@@ -2517,14 +2588,14 @@ cpu_tick_calibrate(int reset)
 }
 
 void
-set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
+set_cputicker(cpu_tick_f *func, uint64_t freq, bool isvariable)
 {
 
        if (func == NULL) {
                cpu_ticks = tc_cpu_ticks;
        } else {
                cpu_tick_frequency = freq;
-               cpu_tick_variable = var;
+               cpu_tick_variable = isvariable;
                cpu_ticks = func;
        }
 }
@@ -2544,20 +2615,14 @@ cpu_tickrate(void)
  * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
  * before divide conversion (to retain precision) we find that the
  * margin shrinks to 1.5 hours (one millionth of 146y).
- * With a three prong approach we never lose significant bits, no
- * matter what the cputick rate and length of timeinterval is.
  */
 uint64_t
 cputick2usec(uint64_t tick)
 {
-
-       if (tick > 18446744073709551LL)         /* floor(2^64 / 1000) */
-               return (tick / (cpu_tickrate() / 1000000LL));
-       else if (tick > 18446744073709LL)       /* floor(2^64 / 1000000) */
-               return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
-       else
-               return ((tick * 1000000LL) / cpu_tickrate());
+       uint64_t tr;
+       tr = cpu_tickrate();
+       return ((tick / tr) * 1000000ULL) + ((tick % tr) * 1000000ULL) / tr;
 }
 
 cpu_tick_f *cpu_ticks = tc_cpu_ticks;
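Note on the new PPS wait/wakeup hooks: on RTEMS, pps_fetch() now blocks through a driver-supplied pps->wait handler and pps_event() signals it through pps->wakeup; pps_init() installs default_wait()/default_wakeup() so the PPS_CANWAIT path degrades to a plain timeout when the driver provides nothing. A minimal sketch of wiring the hooks to an RTEMS binary semaphore follows; the example_* names, the semaphore pps_sem, and the tick conversion are illustrative assumptions, not part of this patch, and the "wait forever" convention (timeout.tv_sec == -1) is not handled here:

#include <errno.h>
#include <rtems.h>
#include <sys/timepps.h>

static rtems_id pps_sem;        /* hypothetical, created in example_pps_setup() */

/* Block the caller of pps_fetch() until pps_event() fires or the timeout
 * elapses. */
static int
example_wait(struct pps_state *pps, struct timespec timeout)
{
        uint64_t tps = rtems_clock_get_ticks_per_second();
        rtems_interval ticks = (rtems_interval)((uint64_t)timeout.tv_sec * tps +
            ((uint64_t)timeout.tv_nsec * tps) / 1000000000ULL);
        rtems_status_code sc;

        (void)pps;
        if (ticks == 0)
                ticks = 1;      /* zero would mean RTEMS_NO_TIMEOUT */
        sc = rtems_semaphore_obtain(pps_sem, RTEMS_WAIT, ticks);
        if (sc == RTEMS_TIMEOUT)
                return (ETIMEDOUT);
        return (sc == RTEMS_SUCCESSFUL ? 0 : EINVAL);
}

/* Runs in the context of pps_event(), typically at interrupt level. */
static void
example_wakeup(struct pps_state *pps)
{
        (void)pps;
        (void)rtems_semaphore_release(pps_sem);
}

static void
example_pps_setup(struct pps_state *pps)
{
        (void)rtems_semaphore_create(rtems_build_name('P', 'P', 'S', ' '), 0,
            RTEMS_SIMPLE_BINARY_SEMAPHORE, 0, &pps_sem);
        pps_init(pps);          /* installs default_wait()/default_wakeup() */
        pps->wait = example_wait;
        pps->wakeup = example_wakeup;
}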
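The hardpps() stub is deliberately weak: RTEMS_WEAK lets it satisfy the linker when the application does not need NTP support, while linking kern_ntptime.c (or any other strong definition) replaces it automatically. A hypothetical application-side override, assuming nothing else in the link provides the symbol strongly:

/* app_pps.c -- a strong hardpps() takes precedence over the weak stub at
 * link time, the same mechanism the kern_ntptime.c version relies on. */
#include <time.h>

void
hardpps(struct timespec *tsp, long nsec)
{
        /* Consume the PPS sample here, e.g. feed a custom clock discipline;
         * tsp is the pulse timestamp, nsec the measured pulse-to-pulse
         * interval in nanoseconds. */
        (void)tsp;
        (void)nsec;
}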
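Worked example for the new PPS_SYNC arithmetic: the interval handed to hardpps() is now computed directly as delta_nsec = (10^9 * tcount + freq / 2) / freq, i.e. nanoseconds rounded to nearest, instead of the old detour through a power-of-two scale factor and a bintime. For a hypothetical 24 MHz timecounter with tcount = 24000007 counts between pulses, (10^9 * 24000007 + 12000000) / 24000000 = 1000000292, so hardpps() sees a pulse interval of about 1.000000292 s. The product 10^9 * tcount cannot overflow: tcount is bounded by the at most 32-bit tc_counter_mask, and 10^9 * (2^32 - 1) is roughly 4.3 * 10^18, well below 2^64.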
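The rewritten cputick2usec() replaces the old three-range conversion with an exact split division: writing tick = q * tr + r, the result is q * 10^6 + (r * 10^6) / tr, and since r < tr the partial product r * 10^6 fits in 64 bits for any tick rate below 2^64 / 10^6, roughly 18.4 THz. A standalone self-check against 128-bit arithmetic, assuming a GCC/Clang-style unsigned __int128 and hypothetical helper names:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same formula as the new cputick2usec(), with the rate passed in. */
static uint64_t
ticks_to_usec(uint64_t tick, uint64_t tr)
{
        return ((tick / tr) * 1000000ULL) + ((tick % tr) * 1000000ULL) / tr;
}

int
main(void)
{
        uint64_t tr = 1000000000ULL;    /* hypothetical 1 GHz tick rate */
        uint64_t samples[] = { 1, tr - 1, tr, 123456789ULL,
            UINT64_MAX / 1000ULL, UINT64_MAX };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
                /* 128-bit reference: floor(tick * 10^6 / tr) */
                uint64_t ref = (uint64_t)(((unsigned __int128)samples[i] *
                    1000000ULL) / tr);
                assert(ticks_to_usec(samples[i], tr) == ref);
        }
        printf("split division matches the 128-bit reference\n");
        return (0);
}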