diff options
author | Joel Sherrill <joel.sherrill@oarcorp.com> | 2015-05-21 18:08:32 -0500 |
---|---|---|
committer | Joel Sherrill <joel.sherrill@oarcorp.com> | 2015-05-21 18:08:32 -0500 |
commit | 24acc6d0c092c244881dd6ffadc3782a8136fccc (patch) | |
tree | 864b4985a7a431bd3520e9d815927eee5315c2df | |
parent | Merge branch 'master' of ssh://dispatch.rtems.org/data/git/rtems (diff) | |
parent | doc: Add new documentation section for Epiphany architecture (diff) | |
download | rtems-24acc6d0c092c244881dd6ffadc3782a8136fccc.tar.bz2 |
Merge branch 'master' of ssh://dispatch.rtems.org/data/git/rtems
367 files changed, 13697 insertions, 4862 deletions
diff --git a/SUPPORT b/SUPPORT deleted file mode 100644 index a3ab534660..0000000000 --- a/SUPPORT +++ /dev/null @@ -1,18 +0,0 @@ -On-Line Applications Research Corporation (OAR) offers support, -customization, and training for RTEMS. Custom RTEMS development services -includes porting RTEMS to new processors and the development of custom board -support packages and device drivers. In addition, OAR is available -to assist in the development of your real-time embedded application. - -For more information, email Joel Sherrill joel.sherrill@OARcorp.com -or contact OAR at: - -On-Line Applications Research Corporation -7047 Old Madison Pike Suite 320 -Huntsville AL 35806 -Voice: (256) 722-9985 -Fax: (256) 722-0985 - -RTEMS maintenance and development is funded solely by RTEMS users. -The future of RTEMS depends on its user base. - diff --git a/VERSION b/VERSION deleted file mode 100644 index 56a4f35499..0000000000 --- a/VERSION +++ /dev/null @@ -1,5 +0,0 @@ -# -# This file is automatically generated -- DO NOT EDIT!!! 
-# - -RTEMS Version 4.10.99.0 diff --git a/c/src/aclocal/rtems-cpu-subdirs.m4 b/c/src/aclocal/rtems-cpu-subdirs.m4 index 9593d34a6c..524edac41e 100644 --- a/c/src/aclocal/rtems-cpu-subdirs.m4 +++ b/c/src/aclocal/rtems-cpu-subdirs.m4 @@ -12,6 +12,7 @@ AC_DEFUN([RTEMS_CPU_SUBDIRS], case $RTEMS_CPU in _RTEMS_CPU_SUBDIR([arm],[$1]);; _RTEMS_CPU_SUBDIR([bfin],[$1]);; +_RTEMS_CPU_SUBDIR([epiphany],[$1]);; _RTEMS_CPU_SUBDIR([avr],[$1]);; _RTEMS_CPU_SUBDIR([h8300],[$1]);; _RTEMS_CPU_SUBDIR([i386],[$1]);; diff --git a/c/src/lib/libbsp/arm/altera-cyclone-v/bsp_specs b/c/src/lib/libbsp/arm/altera-cyclone-v/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/altera-cyclone-v/bsp_specs +++ b/c/src/lib/libbsp/arm/altera-cyclone-v/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/beagle/bsp_specs b/c/src/lib/libbsp/arm/beagle/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/beagle/bsp_specs +++ b/c/src/lib/libbsp/arm/beagle/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/beagle/clock.c b/c/src/lib/libbsp/arm/beagle/clock.c index 66aba1b84b..912d904197 100644 --- a/c/src/lib/libbsp/arm/beagle/clock.c +++ b/c/src/lib/libbsp/arm/beagle/clock.c @@ -15,11 +15,12 @@ */ #include <rtems.h> +#include <rtems/timecounter.h> #include <bsp.h> #include <libcpu/omap_timer.h> -#ifdef ARM_MULTILIB_ARCH_V4 +static struct timecounter beagle_clock_tc; static omap_timer_registers_t regs_v1 = { .TIDR = OMAP3_TIMER_TIDR, @@ -115,8 +116,6 @@ static struct omap_timer *timer = &am335x_timer; #endif -static int done = 0; - #if IS_AM335X #define FRCLOCK_HZ (16*1500000) #endif @@ -181,20 
+180,14 @@ omap3_frclock_init(void) /* Start timer, without prescaler */ mmio_set(fr_timer->base + fr_timer->regs->TCLR, OMAP3_TCLR_OVF_TRG | OMAP3_TCLR_AR | OMAP3_TCLR_ST); - done = 1; } -static inline uint32_t -read_frc(void) +static uint32_t +beagle_clock_get_timecount(struct timecounter *tc) { - if (done == 0) { - return 0; - } return mmio_read(fr_timer->base + fr_timer->regs->TCRR); } -static uint32_t last_tick_nanoseconds; - static void beagle_clock_initialize(void) { @@ -262,12 +255,16 @@ beagle_clock_initialize(void) while(mmio_read(AM335X_WDT_BASE+AM335X_WDT_WWPS) != 0) ; #endif + /* Install timecounter */ \ + beagle_clock_tc.tc_get_timecount = beagle_clock_get_timecount; + beagle_clock_tc.tc_counter_mask = 0xffffffff; + beagle_clock_tc.tc_frequency = FRCLOCK_HZ; + beagle_clock_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install(&beagle_clock_tc); } static void beagle_clock_at_tick(void) { - last_tick_nanoseconds = read_frc(); - mmio_write(timer->base + timer->regs->TISR, OMAP3_TISR_MAT_IT_FLAG | OMAP3_TISR_OVF_IT_FLAG | OMAP3_TISR_TCAR_IT_FLAG); @@ -315,14 +312,6 @@ static void beagle_clock_cleanup(void) mmio_clear(fr_timer->base + fr_timer->regs->TCLR, OMAP3_TCLR_ST); } -static inline uint32_t beagle_clock_nanoseconds_since_last_tick(void) -{ - /* this arithmetic also works if read_frc() wraps around, as long - * as the subtraction wraps around too - */ - return (read_frc() - (uint64_t) last_tick_nanoseconds) * 1000000000 / FRCLOCK_HZ; -} - #define Clock_driver_support_at_tick() beagle_clock_at_tick() #define Clock_driver_support_initialize_hardware() beagle_clock_initialize() #define Clock_driver_support_install_isr(isr, old_isr) \ @@ -332,10 +321,6 @@ static inline uint32_t beagle_clock_nanoseconds_since_last_tick(void) } while (0) #define Clock_driver_support_shutdown_hardware() beagle_clock_cleanup() -#define Clock_driver_nanoseconds_since_last_tick \ - beagle_clock_nanoseconds_since_last_tick /* Include shared source 
clock driver code */ #include "../../shared/clockdrv_shell.h" - -#endif /* ARM_MULTILIB_ARCH_V4 */ diff --git a/c/src/lib/libbsp/arm/csb336/bsp_specs b/c/src/lib/libbsp/arm/csb336/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/csb336/bsp_specs +++ b/c/src/lib/libbsp/arm/csb336/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/csb337/bsp_specs b/c/src/lib/libbsp/arm/csb337/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/csb337/bsp_specs +++ b/c/src/lib/libbsp/arm/csb337/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/edb7312/bsp_specs b/c/src/lib/libbsp/arm/edb7312/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/edb7312/bsp_specs +++ b/c/src/lib/libbsp/arm/edb7312/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/edb7312/clock/clockdrv.c b/c/src/lib/libbsp/arm/edb7312/clock/clockdrv.c index 499a27904c..121b2c9d32 100644 --- a/c/src/lib/libbsp/arm/edb7312/clock/clockdrv.c +++ b/c/src/lib/libbsp/arm/edb7312/clock/clockdrv.c @@ -68,4 +68,6 @@ void Clock_isr(void * arg); assert(status == RTEMS_SUCCESSFUL); \ } while (0) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/arm/gba/bsp_specs b/c/src/lib/libbsp/arm/gba/bsp_specs index 5a51d82d2d..554ded6669 100644 --- a/c/src/lib/libbsp/arm/gba/bsp_specs +++ b/c/src/lib/libbsp/arm/gba/bsp_specs @@ -10,5 +10,5 @@ 
%{!qrtems: %(old_link)} %{qrtems: -dc -dp -Bstatic -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/gba/clock/clockdrv.c b/c/src/lib/libbsp/arm/gba/clock/clockdrv.c index 72d0b81e32..ff171110b9 100644 --- a/c/src/lib/libbsp/arm/gba/clock/clockdrv.c +++ b/c/src/lib/libbsp/arm/gba/clock/clockdrv.c @@ -92,4 +92,6 @@ void Clock_driver_support_initialize_hardware(void) GBA_REG_TM3CNT = (0x00c0|GBA_TMCNT_PS); } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/arm/gdbarmsim/bsp_specs b/c/src/lib/libbsp/arm/gdbarmsim/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/gdbarmsim/bsp_specs +++ b/c/src/lib/libbsp/arm/gdbarmsim/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/gp32/bsp_specs b/c/src/lib/libbsp/arm/gp32/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/gp32/bsp_specs +++ b/c/src/lib/libbsp/arm/gp32/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/gumstix/bsp_specs b/c/src/lib/libbsp/arm/gumstix/bsp_specs index 082653ad22..32c105fd0f 100755 --- a/c/src/lib/libbsp/arm/gumstix/bsp_specs +++ b/c/src/lib/libbsp/arm/gumstix/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/lm3s69xx/bsp_specs b/c/src/lib/libbsp/arm/lm3s69xx/bsp_specs index 082653ad22..32c105fd0f 100644 --- 
a/c/src/lib/libbsp/arm/lm3s69xx/bsp_specs +++ b/c/src/lib/libbsp/arm/lm3s69xx/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/lpc176x/bsp_specs b/c/src/lib/libbsp/arm/lpc176x/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/lpc176x/bsp_specs +++ b/c/src/lib/libbsp/arm/lpc176x/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/lpc176x/include/lpc-clock-config.h b/c/src/lib/libbsp/arm/lpc176x/include/lpc-clock-config.h index c72575f719..3eef02152e 100644 --- a/c/src/lib/libbsp/arm/lpc176x/include/lpc-clock-config.h +++ b/c/src/lib/libbsp/arm/lpc176x/include/lpc-clock-config.h @@ -33,6 +33,7 @@ extern "C" { #define LPC_CLOCK_INTERRUPT LPC176X_IRQ_TIMER_0 #define LPC_CLOCK_TIMER_BASE TMR0_BASE_ADDR +#define LPC_CLOCK_TIMECOUNTER_BASE TMR1_BASE_ADDR #define LPC_CLOCK_REFERENCE LPC176X_PCLK #define LPC_CLOCK_MODULE_ENABLE() \ lpc176x_module_enable( LPC176X_MODULE_TIMER_0, LPC176X_MODULE_PCLK_DEFAULT ) diff --git a/c/src/lib/libbsp/arm/lpc24xx/bsp_specs b/c/src/lib/libbsp/arm/lpc24xx/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/lpc24xx/bsp_specs +++ b/c/src/lib/libbsp/arm/lpc24xx/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/lpc24xx/include/lpc-clock-config.h b/c/src/lib/libbsp/arm/lpc24xx/include/lpc-clock-config.h index 1edab4eb02..5e6b469e0f 100644 --- a/c/src/lib/libbsp/arm/lpc24xx/include/lpc-clock-config.h +++ 
b/c/src/lib/libbsp/arm/lpc24xx/include/lpc-clock-config.h @@ -35,6 +35,8 @@ extern "C" { #define LPC_CLOCK_TIMER_BASE TMR0_BASE_ADDR +#define LPC_CLOCK_TIMECOUNTER_BASE TMR1_BASE_ADDR + #define LPC_CLOCK_REFERENCE LPC24XX_PCLK #define LPC_CLOCK_MODULE_ENABLE() \ diff --git a/c/src/lib/libbsp/arm/lpc32xx/bsp_specs b/c/src/lib/libbsp/arm/lpc32xx/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/lpc32xx/bsp_specs +++ b/c/src/lib/libbsp/arm/lpc32xx/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/lpc32xx/include/lpc-clock-config.h b/c/src/lib/libbsp/arm/lpc32xx/include/lpc-clock-config.h index accd0d256a..2b676b433f 100644 --- a/c/src/lib/libbsp/arm/lpc32xx/include/lpc-clock-config.h +++ b/c/src/lib/libbsp/arm/lpc32xx/include/lpc-clock-config.h @@ -44,6 +44,8 @@ extern "C" { #define LPC_CLOCK_TIMER_BASE LPC32XX_BASE_TIMER_0 +#define LPC_CLOCK_TIMECOUNTER_BASE LPC32XX_BASE_TIMER_1 + #define LPC_CLOCK_REFERENCE LPC32XX_PERIPH_CLK #define LPC_CLOCK_MODULE_ENABLE() diff --git a/c/src/lib/libbsp/arm/nds/bsp_specs b/c/src/lib/libbsp/arm/nds/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/nds/bsp_specs +++ b/c/src/lib/libbsp/arm/nds/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/nds/clock/clock.c b/c/src/lib/libbsp/arm/nds/clock/clock.c index a24f8cc788..1e239d44d9 100644 --- a/c/src/lib/libbsp/arm/nds/clock/clock.c +++ b/c/src/lib/libbsp/arm/nds/clock/clock.c @@ -81,4 +81,6 @@ void Clock_driver_support_initialize_hardware (void) TIMER_DATA (0) = TIMER_FREQ_64 ((uint16_t) freq); } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include 
"../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/arm/raspberrypi/bsp_specs b/c/src/lib/libbsp/arm/raspberrypi/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/raspberrypi/bsp_specs +++ b/c/src/lib/libbsp/arm/raspberrypi/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/raspberrypi/clock/clockdrv.c b/c/src/lib/libbsp/arm/raspberrypi/clock/clockdrv.c index 533873cf19..f765485570 100644 --- a/c/src/lib/libbsp/arm/raspberrypi/clock/clockdrv.c +++ b/c/src/lib/libbsp/arm/raspberrypi/clock/clockdrv.c @@ -72,14 +72,6 @@ static void raspberrypi_clock_cleanup(void) } } -/* - * Return the nanoseconds since last tick - */ -static uint32_t raspberrypi_clock_nanoseconds_since_last_tick(void) -{ - return 0; -} - #define Clock_driver_support_at_tick() raspberrypi_clock_at_tick() #define Clock_driver_support_initialize_hardware() raspberrypi_clock_initialize() @@ -92,8 +84,6 @@ static uint32_t raspberrypi_clock_nanoseconds_since_last_tick(void) #define Clock_driver_support_shutdown_hardware() raspberrypi_clock_cleanup() -#define Clock_driver_nanoseconds_since_last_tick \ - raspberrypi_clock_nanoseconds_since_last_tick - +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/arm/realview-pbx-a9/bsp_specs b/c/src/lib/libbsp/arm/realview-pbx-a9/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/realview-pbx-a9/bsp_specs +++ b/c/src/lib/libbsp/arm/realview-pbx-a9/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/rtl22xx/bsp_specs b/c/src/lib/libbsp/arm/rtl22xx/bsp_specs index 
082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/rtl22xx/bsp_specs +++ b/c/src/lib/libbsp/arm/rtl22xx/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/shared/arm-a9mpcore-clock-config.c b/c/src/lib/libbsp/arm/shared/arm-a9mpcore-clock-config.c index f2ce07e3b6..8e2e153b46 100644 --- a/c/src/lib/libbsp/arm/shared/arm-a9mpcore-clock-config.c +++ b/c/src/lib/libbsp/arm/shared/arm-a9mpcore-clock-config.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. + * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved. * * embedded brains GmbH * Dornierstr. 4 @@ -17,14 +17,11 @@ #include <bsp/irq.h> #include <bsp/arm-a9mpcore-regs.h> #include <bsp/arm-a9mpcore-clock.h> +#include <rtems/timecounter.h> #define A9MPCORE_GT ((volatile a9mpcore_gt *) BSP_ARM_A9MPCORE_GT_BASE) -static uint64_t a9mpcore_clock_last_tick_k; - -static uint32_t a9mpcore_clock_last_tick_cmpvallower; - -static uint32_t a9mpcore_clock_autoinc; +static struct timecounter a9mpcore_tc; /* This is defined in clockdrv_shell.h */ void Clock_isr(rtems_irq_hdl_param arg); @@ -39,13 +36,6 @@ static void a9mpcore_clock_at_tick(void) { volatile a9mpcore_gt *gt = A9MPCORE_GT; - /* - * FIXME: Now the _TOD_Get_with_nanoseconds() yields wrong values until - * _TOD_Tickle_ticks() managed to update the uptime. See also PR2180. 
- */ - a9mpcore_clock_last_tick_cmpvallower = - gt->cmpvallower - a9mpcore_clock_autoinc; - gt->irqst = A9MPCORE_GT_IRQST_EFLG; } @@ -80,6 +70,13 @@ static uint64_t a9mpcore_clock_get_counter(volatile a9mpcore_gt *gt) return ((uint64_t) cu2 << 32) | cl; } +static uint32_t a9mpcore_clock_get_timecount(struct timecounter *tc) +{ + volatile a9mpcore_gt *gt = A9MPCORE_GT; + + return gt->cntrlower; +} + static void a9mpcore_clock_initialize(void) { volatile a9mpcore_gt *gt = A9MPCORE_GT; @@ -98,14 +95,16 @@ static void a9mpcore_clock_initialize(void) gt->cmpvalupper = (uint32_t) (cmpval >> 32); gt->autoinc = interval; - a9mpcore_clock_last_tick_k = (UINT64_C(1000000000) << 32) / periphclk; - a9mpcore_clock_last_tick_cmpvallower = (uint32_t) cmpval - interval; - a9mpcore_clock_autoinc = interval; - gt->ctrl = A9MPCORE_GT_CTRL_AUTOINC_EN | A9MPCORE_GT_CTRL_IRQ_EN | A9MPCORE_GT_CTRL_COMP_EN | A9MPCORE_GT_CTRL_TMR_EN; + + a9mpcore_tc.tc_get_timecount = a9mpcore_clock_get_timecount; + a9mpcore_tc.tc_counter_mask = 0xffffffff; + a9mpcore_tc.tc_frequency = periphclk; + a9mpcore_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install(&a9mpcore_tc); } CPU_Counter_ticks _CPU_Counter_read(void) @@ -147,16 +146,6 @@ static void a9mpcore_clock_cleanup(void) } } -static uint32_t a9mpcore_clock_nanoseconds_since_last_tick(void) -{ - volatile a9mpcore_gt *gt = A9MPCORE_GT; - uint64_t k = a9mpcore_clock_last_tick_k; - uint32_t n = a9mpcore_clock_last_tick_cmpvallower; - uint32_t c = gt->cntrlower; - - return (uint32_t) (((c - n) * k) >> 32); -} - #define Clock_driver_support_at_tick() \ a9mpcore_clock_at_tick() @@ -165,15 +154,12 @@ static uint32_t a9mpcore_clock_nanoseconds_since_last_tick(void) #define Clock_driver_support_install_isr(isr, old_isr) \ do { \ - a9mpcore_clock_handler_install(); \ + a9mpcore_clock_handler_install(); \ old_isr = NULL; \ } while (0) #define Clock_driver_support_shutdown_hardware() \ a9mpcore_clock_cleanup() -#define 
Clock_driver_nanoseconds_since_last_tick \ - a9mpcore_clock_nanoseconds_since_last_tick - /* Include shared source clock driver code */ #include "../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/arm/shared/armv7m/clock/armv7m-clock-config.c b/c/src/lib/libbsp/arm/shared/armv7m/clock/armv7m-clock-config.c index 8e4ae338f8..e78684c8d2 100644 --- a/c/src/lib/libbsp/arm/shared/armv7m/clock/armv7m-clock-config.c +++ b/c/src/lib/libbsp/arm/shared/armv7m/clock/armv7m-clock-config.c @@ -13,6 +13,7 @@ */ #include <rtems.h> +#include <rtems/timecounter.h> #include <rtems/score/armv7m.h> #include <bsp.h> @@ -22,15 +23,35 @@ /* This is defined in clockdrv_shell.h */ static void Clock_isr(void *arg); -#define _ARMV7M_Systick_get_factor(freq) \ - ((1000000000ULL << 32) / (freq)) +static rtems_timecounter_simple _ARMV7M_TC; -#ifdef BSP_ARMV7M_SYSTICK_FREQUENCY - #define _ARMV7M_Systick_factor \ - _ARMV7M_Systick_get_factor(BSP_ARMV7M_SYSTICK_FREQUENCY) -#else - static uint64_t _ARMV7M_Systick_factor; -#endif +static uint32_t _ARMV7M_TC_get(rtems_timecounter_simple *tc) +{ + volatile ARMV7M_Systick *systick = _ARMV7M_Systick; + + return systick->cvr; +} + +static bool _ARMV7M_TC_is_pending(rtems_timecounter_simple *tc) +{ + volatile ARMV7M_SCB *scb = _ARMV7M_SCB; + + return ((scb->icsr & ARMV7M_SCB_ICSR_PENDSTSET) != 0); +} + +static uint32_t _ARMV7M_TC_get_timecount(struct timecounter *tc) +{ + return rtems_timecounter_simple_downcounter_get( + tc, + _ARMV7M_TC_get, + _ARMV7M_TC_is_pending + ); +} + +static void _ARMV7M_TC_tick(void) +{ + rtems_timecounter_simple_downcounter_tick(&_ARMV7M_TC, _ARMV7M_TC_get); +} static void _ARMV7M_Systick_at_tick(void) { @@ -67,15 +88,18 @@ static void _ARMV7M_Systick_initialize(void) uint64_t us_per_tick = rtems_configuration_get_microseconds_per_tick(); uint64_t interval = (freq * us_per_tick) / 1000000ULL; - #ifndef BSP_ARMV7M_SYSTICK_FREQUENCY - _ARMV7M_Systick_factor = _ARMV7M_Systick_get_factor(freq); - #endif - systick->rvr = 
(uint32_t) interval; systick->cvr = 0; systick->csr = ARMV7M_SYSTICK_CSR_ENABLE | ARMV7M_SYSTICK_CSR_TICKINT | ARMV7M_SYSTICK_CSR_CLKSOURCE; + + rtems_timecounter_simple_install( + &_ARMV7M_TC, + freq, + interval, + _ARMV7M_TC_get_timecount + ); } static void _ARMV7M_Systick_cleanup(void) @@ -85,19 +109,7 @@ static void _ARMV7M_Systick_cleanup(void) systick->csr = 0; } -static uint32_t _ARMV7M_Systick_nanoseconds_since_last_tick(void) -{ - volatile ARMV7M_Systick *systick = _ARMV7M_Systick; - volatile ARMV7M_SCB *scb = _ARMV7M_SCB; - uint32_t rvr = systick->rvr; - uint32_t c = rvr - systick->cvr; - - if ((scb->icsr & ARMV7M_SCB_ICSR_PENDSTSET) != 0) { - c = rvr - systick->cvr + rvr; - } - - return (uint32_t) ((c * _ARMV7M_Systick_factor) >> 32); -} +#define Clock_driver_timecounter_tick() _ARMV7M_TC_tick() #define Clock_driver_support_at_tick() \ _ARMV7M_Systick_at_tick() @@ -114,9 +126,6 @@ static uint32_t _ARMV7M_Systick_nanoseconds_since_last_tick(void) #define Clock_driver_support_shutdown_hardware() \ _ARMV7M_Systick_cleanup() -#define Clock_driver_nanoseconds_since_last_tick \ - _ARMV7M_Systick_nanoseconds_since_last_tick - /* Include shared source clock driver code */ #include "../../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/arm/shared/lpc/clock/lpc-clock-config.c b/c/src/lib/libbsp/arm/shared/lpc/clock/lpc-clock-config.c index 05c94a6b2c..0a0399939d 100644 --- a/c/src/lib/libbsp/arm/shared/lpc/clock/lpc-clock-config.c +++ b/c/src/lib/libbsp/arm/shared/lpc/clock/lpc-clock-config.c @@ -7,10 +7,10 @@ */ /* - * Copyright (c) 2009-2012 embedded brains GmbH. All rights reserved. + * Copyright (c) 2009-2015 embedded brains GmbH. All rights reserved. * * embedded brains GmbH - * Obere Lagerstr. 30 + * Dornierstr. 
4 * 82178 Puchheim * Germany * <rtems@embedded-brains.de> @@ -21,6 +21,7 @@ */ #include <rtems.h> +#include <rtems/timecounter.h> #include <bsp/lpc-clock-config.h> #include <bsp/lpc-timer.h> @@ -33,6 +34,16 @@ void Clock_isr(rtems_irq_hdl_param arg); static volatile lpc_timer *const lpc_clock = (volatile lpc_timer *) LPC_CLOCK_TIMER_BASE; +static volatile lpc_timer *const lpc_timecounter = + (volatile lpc_timer *) LPC_CLOCK_TIMECOUNTER_BASE; + +static struct timecounter lpc_clock_tc; + +static uint32_t lpc_clock_tc_get_timecount(struct timecounter *tc) +{ + return lpc_timecounter->tc; +} + static void lpc_clock_at_tick(void) { lpc_clock->ir = LPC_TIMER_IR_MR0; @@ -85,6 +96,13 @@ static void lpc_clock_initialize(void) /* Enable timer */ lpc_clock->tcr = LPC_TIMER_TCR_EN; + + /* Install timecounter */ + lpc_clock_tc.tc_get_timecount = lpc_clock_tc_get_timecount; + lpc_clock_tc.tc_counter_mask = 0xffffffff; + lpc_clock_tc.tc_frequency = LPC_CLOCK_REFERENCE; + lpc_clock_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install(&lpc_clock_tc); } static void lpc_clock_cleanup(void) @@ -105,18 +123,6 @@ static void lpc_clock_cleanup(void) } } -static uint32_t lpc_clock_nanoseconds_since_last_tick(void) -{ - uint64_t k = (1000000000ULL << 32) / LPC_CLOCK_REFERENCE; - uint64_t c = lpc_clock->tc; - - if ((lpc_clock->ir & LPC_TIMER_IR_MR0) != 0) { - c = lpc_clock->tc + lpc_clock->mr0; - } - - return (uint32_t) ((c * k) >> 32); -} - #define Clock_driver_support_at_tick() lpc_clock_at_tick() #define Clock_driver_support_initialize_hardware() lpc_clock_initialize() #define Clock_driver_support_install_isr(isr, old_isr) \ @@ -126,8 +132,6 @@ static uint32_t lpc_clock_nanoseconds_since_last_tick(void) } while (0) #define Clock_driver_support_shutdown_hardware() lpc_clock_cleanup() -#define Clock_driver_nanoseconds_since_last_tick \ - lpc_clock_nanoseconds_since_last_tick /* Include shared source clock driver code */ #include 
"../../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/arm/smdk2410/bsp_specs b/c/src/lib/libbsp/arm/smdk2410/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/smdk2410/bsp_specs +++ b/c/src/lib/libbsp/arm/smdk2410/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/stm32f4/bsp_specs b/c/src/lib/libbsp/arm/stm32f4/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/stm32f4/bsp_specs +++ b/c/src/lib/libbsp/arm/stm32f4/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/tms570/bsp_specs b/c/src/lib/libbsp/arm/tms570/bsp_specs index 1afa2ba72e..86af12f071 100644 --- a/c/src/lib/libbsp/arm/tms570/bsp_specs +++ b/c/src/lib/libbsp/arm/tms570/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N -EB } *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/arm/tms570/clock/clock.c b/c/src/lib/libbsp/arm/tms570/clock/clock.c index 4dba949c86..98ee5d9911 100644 --- a/c/src/lib/libbsp/arm/tms570/clock/clock.c +++ b/c/src/lib/libbsp/arm/tms570/clock/clock.c @@ -30,13 +30,14 @@ #include <bsp/irq.h> #include <bsp/tms570-rti.h> #include <rtems/counter.h> +#include <rtems/timecounter.h> -/** - * holds HW counter value since last interrupt event - * sets in tms570_clock_driver_support_at_tick - * used in tms570_clock_driver_nanoseconds_since_last_tick - */ -static uint32_t tms570_rti_last_tick_fcr0; +static struct timecounter tms570_rti_tc; + +static uint32_t tms570_rti_get_timecount(struct timecounter *tc) +{ + return 
TMS570_RTI.RTIFRC0; +} /** * @brief Initialize the HW peripheral for clock driver @@ -72,6 +73,12 @@ static void tms570_clock_driver_support_initialize_hardware( void ) TMS570_RTI.RTISETINTENA = 0x1; /* enable timer */ TMS570_RTI.RTIGCTRL = 1; + /* set timecounter */ + tms570_rti_tc.tc_get_timecount = tms570_rti_get_timecount; + tms570_rti_tc.tc_counter_mask = 0xffffffff; + tms570_rti_tc.tc_frequency = BSP_PLL_OUT_CLOCK; + tms570_rti_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install(&tms570_rti_tc); } /** @@ -82,7 +89,6 @@ static void tms570_clock_driver_support_initialize_hardware( void ) static void tms570_clock_driver_support_at_tick( void ) { TMS570_RTI.RTIINTFLAG = 0x00000001; - tms570_rti_last_tick_fcr0 = TMS570_RTI.RTICOMP0 - TMS570_RTI.RTIUDCP0; } /** @@ -124,24 +130,6 @@ static void tms570_clock_driver_support_shutdown_hardware( void ) TMS570_RTI.RTICLEARINTENA = 0x20000; } -/** - * @brief returns the nanoseconds since last tick - * - * Return the nanoseconds since last tick - * - * @retval x nanoseconds - * - */ -static uint32_t tms570_clock_driver_nanoseconds_since_last_tick( void ) -{ - uint32_t actual_fcr0 = TMS570_RTI.RTIFRC0; - uint32_t usec_since_tick; - - usec_since_tick = actual_fcr0 - tms570_rti_last_tick_fcr0; - - return usec_since_tick * 1000; -} - #define Clock_driver_support_initialize_hardware \ tms570_clock_driver_support_initialize_hardware #define Clock_driver_support_at_tick \ @@ -150,8 +138,6 @@ static uint32_t tms570_clock_driver_nanoseconds_since_last_tick( void ) tms570_clock_driver_support_initialize_hardware #define Clock_driver_support_shutdown_hardware \ tms570_clock_driver_support_shutdown_hardware -#define Clock_driver_nanoseconds_since_last_tick \ - tms570_clock_driver_nanoseconds_since_last_tick #define Clock_driver_support_install_isr(Clock_isr, Old_ticker ) \ tms570_clock_driver_support_install_isr( Clock_isr ) diff --git a/c/src/lib/libbsp/arm/xilinx-zynq/bsp_specs 
b/c/src/lib/libbsp/arm/xilinx-zynq/bsp_specs index 082653ad22..32c105fd0f 100644 --- a/c/src/lib/libbsp/arm/xilinx-zynq/bsp_specs +++ b/c/src/lib/libbsp/arm/xilinx-zynq/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/bfin/TLL6527M/bsp_specs b/c/src/lib/libbsp/bfin/TLL6527M/bsp_specs index 540acd1da9..32c5f572cc 100644 --- a/c/src/lib/libbsp/bfin/TLL6527M/bsp_specs +++ b/c/src/lib/libbsp/bfin/TLL6527M/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/bfin/bf537Stamp/bsp_specs b/c/src/lib/libbsp/bfin/bf537Stamp/bsp_specs index 540acd1da9..32c5f572cc 100644 --- a/c/src/lib/libbsp/bfin/bf537Stamp/bsp_specs +++ b/c/src/lib/libbsp/bfin/bf537Stamp/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/bfin/eZKit533/bsp_specs b/c/src/lib/libbsp/bfin/eZKit533/bsp_specs index 540acd1da9..32c5f572cc 100644 --- a/c/src/lib/libbsp/bfin/eZKit533/bsp_specs +++ b/c/src/lib/libbsp/bfin/eZKit533/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/epiphany/Makefile.am b/c/src/lib/libbsp/epiphany/Makefile.am new file mode 100644 index 0000000000..cdcc3fbd5c --- /dev/null +++ b/c/src/lib/libbsp/epiphany/Makefile.am @@ -0,0 +1,8 @@ +ACLOCAL_AMFLAGS = -I ../../../aclocal +## Descend into the @RTEMS_BSP_FAMILY@ directory +## Currently, the shared directory is not 
explicitly +## added but it is present in the source tree. +SUBDIRS = @RTEMS_BSP_FAMILY@ +include $(srcdir)/preinstall.am +include $(top_srcdir)/../../../automake/subdirs.am +include $(top_srcdir)/../../../automake/local.am diff --git a/c/src/lib/libbsp/epiphany/acinclude.m4 b/c/src/lib/libbsp/epiphany/acinclude.m4 new file mode 100644 index 0000000000..0fe1e2e79d --- /dev/null +++ b/c/src/lib/libbsp/epiphany/acinclude.m4 @@ -0,0 +1,10 @@ +# RTEMS_CHECK_BSPDIR(RTEMS_BSP_FAMILY) +AC_DEFUN([RTEMS_CHECK_BSPDIR], +[ + case "$1" in + epiphany_sim ) + AC_CONFIG_SUBDIRS([epiphany_sim]);; + *) + AC_MSG_ERROR([Invalid BSP]);; + esac +]) diff --git a/c/src/lib/libbsp/epiphany/configure.ac b/c/src/lib/libbsp/epiphany/configure.ac new file mode 100644 index 0000000000..bba5440ca9 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/configure.ac @@ -0,0 +1,19 @@ +# Process this file with autoconf to produce a configure script. + +AC_PREREQ([2.69]) +AC_INIT([rtems-c-src-lib-libbsp-epiphany],[_RTEMS_VERSION],[http://www.rtems.org/bugzilla]) +AC_CONFIG_SRCDIR([epiphany_sim]) +RTEMS_TOP(../../../../..) + +RTEMS_CANONICAL_TARGET_CPU +AM_INIT_AUTOMAKE([no-define foreign subdir-objects 1.12.2]) +AM_MAINTAINER_MODE + +RTEMS_ENV_RTEMSBSP +RTEMS_PROJECT_ROOT + +RTEMS_CHECK_BSPDIR([$RTEMS_BSP_FAMILY]) + +# Explicitly list all Makefiles here +AC_CONFIG_FILES([Makefile]) +AC_OUTPUT diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/Makefile.am b/c/src/lib/libbsp/epiphany/epiphany_sim/Makefile.am new file mode 100644 index 0000000000..0b84843b98 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/Makefile.am @@ -0,0 +1,95 @@ +## +# +# @brief Makefile of LibBSP for the Epiphany simulator. 
+# +# +ACLOCAL_AMFLAGS = -I ../../../../aclocal + +include $(top_srcdir)/../../../../automake/compile.am + +include_bspdir = $(includedir)/bsp +include_libcpudir = $(includedir)/libcpu + +dist_project_lib_DATA = bsp_specs + +############################################################################### +# Header # +############################################################################### + +include_HEADERS = include/bsp.h +include_HEADERS += include/tm27.h +include_HEADERS += ../../shared/include/coverhd.h + +nodist_include_bsp_HEADERS = ../../shared/include/bootcard.h +include_bsp_HEADERS = ../shared/include/linker-symbols.h + +include_bsp_HEADERS += ../../../libbsp/shared/include/mm.h +include_bsp_HEADERS += ../../shared/include/utility.h +include_bsp_HEADERS += ../../shared/include/irq-generic.h +include_bsp_HEADERS += ../../shared/include/irq-info.h +include_bsp_HEADERS += ../../shared/include/stackalloc.h +include_bsp_HEADERS += ../../shared/include/console-polled.h +include_bsp_HEADERS += include/irq.h + +nodist_include_HEADERS = include/bspopts.h + +############################################################################### +# Data # +############################################################################### +noinst_LIBRARIES = libbspstart.a + +libbspstart_a_SOURCES = start/start.S + +project_lib_DATA = start/start.$(OBJEXT) + +project_lib_DATA += startup/linkcmds + +############################################################################### +# LibBSP # +############################################################################### + +noinst_LIBRARIES += libbsp.a + +# Startup +libbsp_a_SOURCES = ../../shared/bspreset.c +libbsp_a_SOURCES += ../../shared/bspstart.c + +# Shared +libbsp_a_SOURCES += ../../shared/bootcard.c +libbsp_a_SOURCES += ../../shared/bspclean.c +libbsp_a_SOURCES += ../../shared/bsplibc.c +libbsp_a_SOURCES += ../../shared/bsppost.c +libbsp_a_SOURCES += ../../shared/bsppredriverhook.c +libbsp_a_SOURCES += 
../../shared/bsppretaskinghook.c +libbsp_a_SOURCES += ../../shared/gnatinstallhandler.c +libbsp_a_SOURCES += ../../shared/sbrk.c +libbsp_a_SOURCES += ../../shared/src/stackalloc.c +libbsp_a_SOURCES += ../../shared/bspgetworkarea.c + +# clock +libbsp_a_SOURCES += ../../shared/clock_driver_simidle.c + +# Timer +libbsp_a_SOURCES += timer/timer.c + +# console +libbsp_a_SOURCES += ../../shared/console-polled.c + +# IRQ +libbsp_a_SOURCES += ../../shared/src/irq-default-handler.c +libbsp_a_SOURCES += ../../shared/src/irq-generic.c +libbsp_a_SOURCES += ../../shared/src/irq-info.c +libbsp_a_SOURCES += irq/irq.c + +# Cache +libbsp_a_SOURCES += ../../../libcpu/shared/src/cache_manager.c +libbsp_a_SOURCES += ../../shared/include/cache_.h +libbsp_a_CPPFLAGS = -I$(srcdir)/../../shared/include + +# debugio +libbsp_a_SOURCES += console/console-io.c + +DISTCLEANFILES = include/bspopts.h + +include $(srcdir)/preinstall.am +include $(top_srcdir)/../../../../automake/local.am diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/README b/c/src/lib/libbsp/epiphany/epiphany_sim/README new file mode 100644 index 0000000000..7127d91a66 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/README @@ -0,0 +1,6 @@ +This BSP is intended to run on epiphany-*-run simulator. 
+ +From command line type: + +$ epiphany-rtems4.11-run -e=on --memory-region 0x8e000000,0x2000000 \ + $PATH_TO_RTEMS_EXE diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/bsp_specs b/c/src/lib/libbsp/epiphany/epiphany_sim/bsp_specs new file mode 100644 index 0000000000..082653ad22 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/bsp_specs @@ -0,0 +1,13 @@ +%rename endfile old_endfile +%rename startfile old_startfile +%rename link old_link + +*startfile: +%{!qrtems: %(old_startfile)} \ +%{!nostdlib: %{qrtems: start.o%s crti.o%s crtbegin.o%s -e _start}} + +*link: +%{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} + +*endfile: +%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtn.o%s } diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/configure.ac b/c/src/lib/libbsp/epiphany/epiphany_sim/configure.ac new file mode 100644 index 0000000000..016b135c34 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/configure.ac @@ -0,0 +1,36 @@ +## +# +# @file +# +# @brief Configure script of LibBSP for epiphany_sim BSP. +# + +AC_PREREQ(2.69) +AC_INIT([rtems-c-src-lib-libbsp-epiphany-sim],[_RTEMS_VERSION],[http://www.rtems.org/bugzilla]) +AC_CONFIG_SRCDIR([bsp_specs]) +RTEMS_TOP(../../../../../..) 
+ +RTEMS_CANONICAL_TARGET_CPU +AM_INIT_AUTOMAKE([no-define nostdinc foreign subdir-objects 1.12.2]) +RTEMS_BSP_CONFIGURE + +RTEMS_BSPOPTS_SET([BSP_START_RESET_VECTOR],[*],[]) +RTEMS_BSPOPTS_HELP([BSP_START_RESET_VECTOR],[reset vector address for BSP +start]) + +RTEMS_BSPOPTS_SET([BSP_EPIPHANY_PERIPHCLK],[*],[100000000U]) +RTEMS_BSPOPTS_HELP([BSP_EPIPHANY_PERIPHCLK],[epiphany PERIPHCLK clock +frequency in Hz]) + +RTEMS_PROG_CC_FOR_TARGET([-ansi -fasm]) +RTEMS_CANONICALIZE_TOOLS +RTEMS_PROG_CCAS + +RTEMS_CHECK_SMP +AM_CONDITIONAL(HAS_SMP,[test "$rtems_cv_HAS_SMP" = "yes"]) + +RTEMS_BSP_CLEANUP_OPTIONS(0, 1) +RTEMS_BSP_LINKCMDS + +AC_CONFIG_FILES([Makefile]) +AC_OUTPUT diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/console/console-io.c b/c/src/lib/libbsp/epiphany/epiphany_sim/console/console-io.c new file mode 100644 index 0000000000..e3da54389f --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/console/console-io.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <bsp.h> +#include <bsp/console-polled.h> +#include <rtems/libio.h> +#include <stdlib.h> +#include <assert.h> +#include <stdio.h> + +static void outbyte_console( char ); +static char inbyte_console( void ); + +void console_initialize_hardware(void) +{ + /* Do nothing */ +} + +/* Epiphany simulator would handle this system call */ +static void outbyte_console(char c) +{ + register int chan asm("r0") = STDOUT_FILENO; + register void* addr asm("r1") = &c; + register int len asm("r2") = 1; + + /* Invoke write system call to be handled by Epiphany simulator */ + __asm__ __volatile__ ("trap 0" : : "r" (chan), "r" (addr), "r" (len)); +} + +static char inbyte_console(void) +{ + char c; + register int chan asm("r0") = STDIN_FILENO; + register void* addr asm("r1") = &c; + register int len asm("r2") = 1; + + /* Invoke read system call to be handled by Epiphany simulator */ + asm ("trap 1" :: "r" (chan), "r" (addr), "r" (len)); + return c; +} + +/* + * console_outbyte_polled + * + * This routine transmits a character using polling. + */ +void console_outbyte_polled( + int port, + char ch +) +{ + outbyte_console( ch ); +} + +/* + * console_inbyte_nonblocking + * + * This routine polls for a character. 
+ */ + +int console_inbyte_nonblocking(int port) +{ + char c; + + c = inbyte_console(); + if (!c) + return -1; + return (int) c; +} + +/* + * To support printk + */ + +#include <rtems/bspIo.h> + +static void Epiphany_output_char(char c) { console_outbyte_polled( 0, c ); } + +BSP_output_char_function_type BSP_output_char = Epiphany_output_char; +BSP_polling_getchar_function_type BSP_poll_char = + (void *)console_inbyte_nonblocking; diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/include/bsp.h b/c/src/lib/libbsp/epiphany/epiphany_sim/include/bsp.h new file mode 100644 index 0000000000..1eb91ce8d6 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/include/bsp.h @@ -0,0 +1,55 @@ +/* + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef LIBBSP_EPIPHANY_EPIPHANY_SIM_H +#define LIBBSP_EPIPHANY_EPIPHANY_SIM_H + +#include <rtems.h> +#include <rtems/clockdrv.h> +#include <rtems/console.h> + +#include <bspopts.h> +#include <bsp/default-initial-extension.h> + +#include <rtems/devnull.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* Constants */ +Thread clock_driver_sim_idle_body(uintptr_t); +#define BSP_IDLE_TASK_BODY clock_driver_sim_idle_body + +#ifdef __cplusplus +} +#endif + +#endif /* LIBBSP_EPIPHANY_PARALLELLA_H */ diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/include/bspopts.h b/c/src/lib/libbsp/epiphany/epiphany_sim/include/bspopts.h new file mode 100644 index 0000000000..47c2f0da7c --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/include/bspopts.h @@ -0,0 +1,68 @@ +/* + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* include/bspopts.h.in. Generated from configure.ac by autoheader. */ + +/* If defined, then the BSP Framework will put a non-zero pattern into the + RTEMS Workspace and C program heap. This should assist in finding code that + assumes memory starts set to zero. */ +#undef BSP_DIRTY_MEMORY + +/* If defined, print a message and wait until pressed before resetting board + when application exits. */ +#undef BSP_PRESS_KEY_FOR_RESET + +/* If defined, prints the exception context when an unexpected exception + occurs. */ +#undef BSP_PRINT_EXCEPTION_CONTEXT + +/* If defined, reset the board when the application exits. */ +#undef BSP_RESET_BOARD_AT_EXIT + +/* reset vector address for BSP start */ +#undef BSP_START_RESET_VECTOR + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. 
*/ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/include/irq.h b/c/src/lib/libbsp/epiphany/epiphany_sim/include/irq.h new file mode 100644 index 0000000000..2b15a4536c --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/include/irq.h @@ -0,0 +1,49 @@ +/** + * @file + * + * @ingroup Epiphany_IRQ + * + * @brief Interrupt definitions. + */ + +/* + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef LIBBSP_GENERIC_EPIPHANY_IRQ_H +#define LIBBSP_GENERIC_EPIPHANY_IRQ_H + +#ifndef ASM + +#include <rtems.h> +#include <rtems/irq.h> +#include <rtems/irq-extension.h> + +#define BSP_INTERRUPT_VECTOR_MIN 0x0 +#define BSP_INTERRUPT_VECTOR_MAX 0x24 + +#endif /* ASM */ +#endif /* LIBBSP_GENERIC_OR1K_IRQ_H */ diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/include/tm27.h b/c/src/lib/libbsp/epiphany/epiphany_sim/include/tm27.h new file mode 100644 index 0000000000..10dac820f1 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/include/tm27.h @@ -0,0 +1,53 @@ +/* + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _RTEMS_TMTEST27 +#error "This is an RTEMS internal file you must not include directly." +#endif + +#ifndef __tm27_h +#define __tm27_h + +/** + * @name Interrupt mechanisms for Time Test 27 + * @{ + */ + +#define MUST_WAIT_FOR_INTERRUPT 0 + +#define Install_tm27_vector( handler ) /* empty */ + +#define Cause_tm27_intr() /* empty */ + +#define Clear_tm27_intr() /* empty */ + +#define Lower_tm27_intr() /* empty */ + +#endif diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/irq/irq.c b/c/src/lib/libbsp/epiphany/epiphany_sim/irq/irq.c new file mode 100644 index 0000000000..323aa0792f --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/irq/irq.c @@ -0,0 +1,62 @@ +/** + * @file + * + * @ingroup epiphany_interrupt + * + * @brief Interrupt support. + */ + +/* + * Epiphany CPU Dependent Source + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <bsp/irq.h> +#include <bsp/irq-generic.h> + +/* Almost all of the jobs that the following functions should + * do are implemented in cpukit + */ + +void bsp_interrupt_handler_default(rtems_vector_number vector) +{ + printk("spurious interrupt: %u\n", vector); +} + +rtems_status_code bsp_interrupt_facility_initialize() +{ + return 0; +} + +rtems_status_code bsp_interrupt_vector_enable(rtems_vector_number vector) +{ + return 0; +} + +rtems_status_code bsp_interrupt_vector_disable(rtems_vector_number vector) +{ + return 0; +} diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/make/custom/epiphany_sim.cfg b/c/src/lib/libbsp/epiphany/epiphany_sim/make/custom/epiphany_sim.cfg new file mode 100644 index 0000000000..9eea2a04c7 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/make/custom/epiphany_sim.cfg @@ -0,0 +1,7 @@ +include $(RTEMS_ROOT)/make/custom/default.cfg + +RTEMS_CPU = epiphany + +CPU_CFLAGS = + +CFLAGS_OPTIMIZE_V ?= -O0 -g diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/preinstall.am b/c/src/lib/libbsp/epiphany/epiphany_sim/preinstall.am new file mode 100644 index 0000000000..e53b646405 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/preinstall.am 
@@ -0,0 +1,104 @@ +## Automatically generated by ampolish3 - Do not edit + +if AMPOLISH3 +$(srcdir)/preinstall.am: Makefile.am + $(AMPOLISH3) $(srcdir)/Makefile.am > $(srcdir)/preinstall.am +endif + +PREINSTALL_DIRS = +DISTCLEANFILES += $(PREINSTALL_DIRS) + +all-am: $(PREINSTALL_FILES) + +PREINSTALL_FILES = +CLEANFILES = $(PREINSTALL_FILES) + +all-local: $(TMPINSTALL_FILES) + +TMPINSTALL_FILES = +CLEANFILES += $(TMPINSTALL_FILES) + +$(PROJECT_LIB)/$(dirstamp): + @$(MKDIR_P) $(PROJECT_LIB) + @: > $(PROJECT_LIB)/$(dirstamp) +PREINSTALL_DIRS += $(PROJECT_LIB)/$(dirstamp) + +$(PROJECT_INCLUDE)/$(dirstamp): + @$(MKDIR_P) $(PROJECT_INCLUDE) + @: > $(PROJECT_INCLUDE)/$(dirstamp) +PREINSTALL_DIRS += $(PROJECT_INCLUDE)/$(dirstamp) + +$(PROJECT_INCLUDE)/bsp/$(dirstamp): + @$(MKDIR_P) $(PROJECT_INCLUDE)/bsp + @: > $(PROJECT_INCLUDE)/bsp/$(dirstamp) +PREINSTALL_DIRS += $(PROJECT_INCLUDE)/bsp/$(dirstamp) + +$(PROJECT_INCLUDE)/libcpu/$(dirstamp): + @$(MKDIR_P) $(PROJECT_INCLUDE)/libcpu + @: > $(PROJECT_INCLUDE)/libcpu/$(dirstamp) +PREINSTALL_DIRS += $(PROJECT_INCLUDE)/libcpu/$(dirstamp) + +$(PROJECT_LIB)/bsp_specs: bsp_specs $(PROJECT_LIB)/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_LIB)/bsp_specs +PREINSTALL_FILES += $(PROJECT_LIB)/bsp_specs + +$(PROJECT_INCLUDE)/bsp.h: include/bsp.h $(PROJECT_INCLUDE)/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp.h + +$(PROJECT_INCLUDE)/tm27.h: include/tm27.h $(PROJECT_INCLUDE)/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/tm27.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/tm27.h + +$(PROJECT_INCLUDE)/coverhd.h: ../../shared/include/coverhd.h $(PROJECT_INCLUDE)/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/coverhd.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/coverhd.h + +$(PROJECT_INCLUDE)/bsp/bootcard.h: ../../shared/include/bootcard.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/bootcard.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/bootcard.h + 
+$(PROJECT_INCLUDE)/bsp/linker-symbols.h: ../shared/include/linker-symbols.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/linker-symbols.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/linker-symbols.h + +$(PROJECT_INCLUDE)/bsp/mm.h: ../../../libbsp/shared/include/mm.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/mm.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/mm.h + +$(PROJECT_INCLUDE)/bsp/utility.h: ../../shared/include/utility.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/utility.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/utility.h + +$(PROJECT_INCLUDE)/bsp/irq-generic.h: ../../shared/include/irq-generic.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/irq-generic.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/irq-generic.h + +$(PROJECT_INCLUDE)/bsp/irq-info.h: ../../shared/include/irq-info.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/irq-info.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/irq-info.h + +$(PROJECT_INCLUDE)/bsp/stackalloc.h: ../../shared/include/stackalloc.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/stackalloc.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/stackalloc.h + +$(PROJECT_INCLUDE)/bsp/console-polled.h: ../../shared/include/console-polled.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/console-polled.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/console-polled.h + +$(PROJECT_INCLUDE)/bsp/irq.h: include/irq.h $(PROJECT_INCLUDE)/bsp/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/irq.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/irq.h + +$(PROJECT_INCLUDE)/bspopts.h: include/bspopts.h $(PROJECT_INCLUDE)/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bspopts.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/bspopts.h + +$(PROJECT_LIB)/start.$(OBJEXT): start/start.$(OBJEXT) $(PROJECT_LIB)/$(dirstamp) + 
$(INSTALL_DATA) $< $(PROJECT_LIB)/start.$(OBJEXT) +TMPINSTALL_FILES += $(PROJECT_LIB)/start.$(OBJEXT) + +$(PROJECT_LIB)/linkcmds: startup/linkcmds $(PROJECT_LIB)/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_LIB)/linkcmds +TMPINSTALL_FILES += $(PROJECT_LIB)/linkcmds + diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/start/start.S b/c/src/lib/libbsp/epiphany/epiphany_sim/start/start.S new file mode 100644 index 0000000000..002cb3b764 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/start/start.S @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#include <bsp/linker-symbols.h> +#include <rtems/asm.h> + +EXTERN(bsp_section_bss_begin) +EXTERN(bsp_section_bss_end) +EXTERN(ISR_Handler) +EXTERN(bsp_start_vector_table_end) +EXTERN(bsp_start_vector_table_size) +EXTERN(bsp_vector_table_size) +EXTERN(bsp_section_stack_begin) + +PUBLIC(EPIPHANY_Exception_default) +PUBLIC(bsp_start_vector_table_begin) +PUBLIC(start) + +.section .vector, "wax" +TYPE_FUNC(start) +SYM(start): + .balign 4 ; + b .normal_start + + .balign 4 ; 0x4 + b .sw_exception + + .balign 4 ; 0x8 + b .normal_start + + .balign 4 ; 0xc + b .clock_isr + + .balign 4 ; 0x10 + b .timer1_isr + + .balign 4 ; 0x14 + b _EPIPHANY_Exception_default + + .balign 4 ; 0x18 + b _EPIPHANY_Exception_default + + .balign 4 ; 0x1c + b _EPIPHANY_Exception_default + + .balign 4 ; 0x20 + b _EPIPHANY_Exception_default + + .balign 4 ; 0x24 + b _EPIPHANY_Exception_default + +_bsp_start_vector_table_begin: + .word .normal_start /* Reset */ + .word _EPIPHANY_Exception_default /* SW exception */ + .word _EPIPHANY_Exception_default /* Data Page Fault */ + .word _EPIPHANY_Exception_default /* Timer 0 */ + .word _EPIPHANY_Exception_default /* Timer 1 */ + .word _EPIPHANY_Exception_default /* Message int */ + .word _EPIPHANY_Exception_default /* DMA0 int */ + .word _EPIPHANY_Exception_default /* DMA1 int */ + .word _EPIPHANY_Exception_default /* WAND */ + .word _EPIPHANY_Exception_default /* User interrupt */ + +_bsp_start_vector_table_end: + +.size _start, .-_start + +.section .start,"ax" +.align 4 +.type _external_start, %function +.normal_start: + /* Initialize the stack and frame pointers */ + mov sp, %low(bsp_section_stack_begin) + movt sp, %high(bsp_section_stack_begin) + mov fp, sp + +cpu0: + /* Zero .bss section */ + mov r0, %low(bsp_section_bss_begin) + movt r0, %high(bsp_section_bss_begin) + mov r1, sp + mov r2,#0 + mov r3,#0 + +_bss_clear_loop: + strd r2, [r0], +#1 + sub r5, r1, r0 + bne _bss_clear_loop + + /* Clear the reset interrupt flag */ + mov r0, %low(_jump_to_c) 
+ movt r0, %high(_jump_to_c) + movts iret, r0 + rti + +_jump_to_c: + /* Jump to bootcard */ + mov r3, %low(_boot_card) + movt r3, %high(_boot_card) + jalr r3 + + /* Should never reach here */ + idle + +.size .normal_start, .-.normal_start + +.balign 4 +.type .sw_exception, %function +.sw_exception: + idle + +.balign 4 +.type .clock_isr, %function +.clock_isr: + /* + * r62 and r63 are saved here, and restored from _ISR_Handler, they + * and hold vector number and _ISR_Handler address repsectively. + */ + add sp, sp, #-8 + str r62, [sp, #0] + str r63, [sp, #4] + mov r62, 3 + mov r63, %low(_ISR_Handler) + movt r63, %high(_ISR_Handler) + jr r6 + +.balign 4 +.type .timer1_isr, %function +.timer1_isr: + /* + * r62 and r63 are saved here, and restored from _ISR_Handler, they + * and hold vector number and _ISR_Handler address repsectively. + */ + add sp, sp, #-8 + str r62, [sp, 0] + str r63, [sp, 4] + mov r62, 4 + mov r63, %low(_ISR_Handler) + movt r63, %high(_ISR_Handler) + jr r63 + +.balign 4 +TYPE_FUNC(EPIPHANY_Exception_default) +SYM(EPIPHANY_Exception_default): + idle diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/startup/linkcmds b/c/src/lib/libbsp/epiphany/epiphany_sim/startup/linkcmds new file mode 100644 index 0000000000..e06b22f7d6 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/startup/linkcmds @@ -0,0 +1,369 @@ +/** + * @file + * + * @ingroup bsp_linker + * + * @brief Memory map + */ + +/* + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +OUTPUT_FORMAT("elf32-epiphany", "elf32-epiphany", "elf32-epiphany") +OUTPUT_ARCH (epiphany) + +ENTRY (_start) + +MEMORY +{ + VECTOR_RAM (AIW) : ORIGIN = 0x00000000, LENGTH = 0xB0 + LOCAL_MEM (AIW) : ORIGIN = 0x000000B0, LENGTH = 0x00007FC0 /* 32KiB Local memory */ + RAM : ORIGIN = 0x8E000000, LENGTH = 0x01000000 /* 16MiB external RAM */ +} + +REGION_ALIAS ("REGION_START", LOCAL_MEM); +REGION_ALIAS ("REGION_VECTOR", VECTOR_RAM); +REGION_ALIAS ("REGION_TEXT", RAM); +REGION_ALIAS ("REGION_TEXT_LOAD", RAM); +REGION_ALIAS ("REGION_RODATA", RAM); +REGION_ALIAS ("REGION_RODATA_LOAD", RAM); +REGION_ALIAS ("REGION_DATA", RAM); +REGION_ALIAS ("REGION_DATA_LOAD", RAM); +REGION_ALIAS ("REGION_FAST_DATA", RAM); +REGION_ALIAS ("REGION_FAST_DATA_LOAD", RAM); +REGION_ALIAS ("REGION_BSS", RAM); +REGION_ALIAS ("REGION_WORK", RAM); +REGION_ALIAS ("REGION_STACK", RAM); + +/* The following address is used for text output */ +bsp_section_outbut_buffer = 0x8F800000; +bsp_section_vector_begin = 
0x00000000; + +/* + * Global symbols that may be defined externally + */ +bsp_vector_table_size = DEFINED (bsp_vector_table_size) ? bsp_vector_table_size : 64; + +bsp_section_xbarrier_align = DEFINED (bsp_section_xbarrier_align) ? bsp_section_xbarrier_align : 1; +bsp_section_robarrier_align = DEFINED (bsp_section_robarrier_align) ? bsp_section_robarrier_align : 1; +bsp_section_rwbarrier_align = DEFINED (bsp_section_rwbarrier_align) ? bsp_section_rwbarrier_align : 1; + +bsp_stack_align = DEFINED (bsp_stack_align) ? bsp_stack_align : 8; + +bsp_stack_main_size = DEFINED (bsp_stack_main_size) ? bsp_stack_main_size : 1024; +bsp_stack_main_size = ALIGN (bsp_stack_main_size, bsp_stack_align); + +_bsp_processor_count = DEFINED (_bsp_processor_count) ? _bsp_processor_count : 1; + +SECTIONS { + + .vector : + { + *(.vector) + . = ALIGN(bsp_vector_table_size); + bsp_section_vector_end = .; + } > REGION_VECTOR AT > REGION_VECTOR + bsp_section_vector_size = bsp_section_vector_end - bsp_section_vector_begin; + bsp_vector_table_begin = bsp_section_vector_begin; + bsp_vector_table_end = bsp_vector_table_begin + bsp_vector_table_size; + + .start : + { + . = ALIGN(8); + bsp_section_start_begin = .; + KEEP (*(.bsp_start_text)) + KEEP (*(.bsp_start_data)) + bsp_section_start_end = .; + } > REGION_START AT > REGION_START + bsp_section_start_size = bsp_section_start_end - bsp_section_start_begin; + +.xbarrier : { + . = ALIGN (bsp_section_xbarrier_align); + } > REGION_VECTOR AT > REGION_VECTOR + +.text : { + . = ALIGN(8); + bsp_section_text_begin = .; + *(.text.unlikely .text.*_unlikely) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } > REGION_TEXT AT > REGION_TEXT_LOAD + .init : { + KEEP (*(.init)) + } > REGION_TEXT AT > REGION_TEXT_LOAD + .fini : { + KEEP (*(.fini)) + . 
= ALIGN(8); + bsp_section_text_end = .; + } > REGION_TEXT AT > REGION_TEXT_LOAD + bsp_section_text_size = bsp_section_text_end - bsp_section_text_begin; + bsp_section_text_load_begin = LOADADDR (.text); + bsp_section_text_load_end = bsp_section_text_load_begin + bsp_section_text_size; + +.robarrier : { + . = ALIGN (bsp_section_robarrier_align); + } > REGION_RODATA AT > REGION_RODATA + +.rodata : { + . = ALIGN(8); + bsp_section_rodata_begin = .; + *(.rodata .rodata.* .gnu.linkonce.r.*) + } > REGION_RODATA AT > REGION_RODATA_LOAD +.eh_frame : { + KEEP (*(.eh_frame)) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .gcc_except_table : { + *(.gcc_except_table .gcc_except_table.*) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .tdata : { + __TLS_Data_begin = .; + *(.tdata .tdata.* .gnu.linkonce.td.*) + __TLS_Data_end = .; + } > REGION_RODATA AT > REGION_RODATA_LOAD + .tbss : { + __TLS_BSS_begin = .; + *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) + __TLS_BSS_end = .; + } > REGION_RODATA AT > REGION_RODATA_LOAD + __TLS_Data_size = __TLS_Data_end - __TLS_Data_begin; + __TLS_Data_begin = __TLS_Data_size != 0 ? __TLS_Data_begin : __TLS_BSS_begin; + __TLS_Data_end = __TLS_Data_size != 0 ? 
__TLS_Data_end : __TLS_BSS_begin; + __TLS_BSS_size = __TLS_BSS_end - __TLS_BSS_begin; + __TLS_Size = __TLS_BSS_end - __TLS_Data_begin; + __TLS_Alignment = MAX (ALIGNOF (.tdata), ALIGNOF (.tbss)); + .preinit_array : { + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + } > REGION_RODATA AT > REGION_RODATA_LOAD + .init_array : { + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array)) + PROVIDE_HIDDEN (__init_array_end = .); + } > REGION_RODATA AT > REGION_RODATA_LOAD + .fini_array : { + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(.fini_array)) + KEEP (*(SORT(.fini_array.*))) + PROVIDE_HIDDEN (__fini_array_end = .); + } > REGION_RODATA AT > REGION_RODATA_LOAD + .ctors : { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin?.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. 
+ The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .dtors : { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin?.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .data.rel.ro : { + *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) + *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .jcr : { + KEEP (*(.jcr)) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .interp : { + *(.interp) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .note.gnu.build-id : { + *(.note.gnu.build-id) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .hash : { + *(.hash) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .gnu.hash : { + *(.gnu.hash) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .dynsym : { + *(.dynsym) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .dynstr : { + *(.dynstr) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .gnu.version : { + *(.gnu.version) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .gnu.version_d : { + *(.gnu.version_d) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .gnu.version_r : { + *(.gnu.version_r) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .rel.dyn : { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + PROVIDE_HIDDEN (__rela_iplt_start = .); + PROVIDE_HIDDEN 
(__rela_iplt_end = .); + } > REGION_RODATA AT > REGION_RODATA_LOAD + .rela.dyn : { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + PROVIDE_HIDDEN (__rel_iplt_end = .); + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } > REGION_RODATA AT > REGION_RODATA_LOAD + .rel.plt : { + *(.rel.plt) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .rela.plt : { + *(.rela.plt) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .plt : { + *(.plt) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .iplt : { + *(.iplt) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .dynamic : { + *(.dynamic) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .got : { + *(.got.plt) *(.igot.plt) *(.got) *(.igot) + } > REGION_RODATA AT > REGION_RODATA_LOAD + .rtemsroset : { + /* Special FreeBSD linker set sections */ + __start_set_sysctl_set = .; + *(set_sysctl_*); + __stop_set_sysctl_set = .; + *(set_domain_*); + *(set_pseudo_*); + + KEEP (*(SORT(.rtemsroset.*))) + . = ALIGN(8); + bsp_section_rodata_end = .; + } > REGION_RODATA AT > REGION_RODATA_LOAD + bsp_section_rodata_size = bsp_section_rodata_end - bsp_section_rodata_begin; + bsp_section_rodata_load_begin = LOADADDR (.rodata); + bsp_section_rodata_load_end = bsp_section_rodata_load_begin + bsp_section_rodata_size; + +.rwbarrier : { + . = ALIGN(8); + . = ALIGN (bsp_section_rwbarrier_align); + } > REGION_DATA AT > REGION_DATA + +.data : { + . 
= ALIGN(8); + bsp_section_data_begin = .; + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } > REGION_DATA AT > REGION_DATA_LOAD + .data1 : { + *(.data1) + } > REGION_DATA AT > REGION_DATA_LOAD + .rtemsrwset : { + KEEP (*(SORT(.rtemsrwset.*))) + . = ALIGN(8); + bsp_section_data_end = .; + } > REGION_DATA AT > REGION_DATA_LOAD + bsp_section_data_size = bsp_section_data_end - bsp_section_data_begin; + bsp_section_data_load_begin = LOADADDR (.data); + bsp_section_data_load_end = bsp_section_data_load_begin + bsp_section_data_size; + + .bss : { + . = ALIGN(8); + bsp_section_bss_begin = .; + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + . = ALIGN(8); + bsp_section_bss_end = .; + } > REGION_BSS AT > REGION_BSS + bsp_section_bss_size = bsp_section_bss_end - bsp_section_bss_begin; + +.work : { + /* + * The work section will occupy the remaining REGION_WORK region and + * contains the RTEMS work space and heap. + */ + . = ALIGN(8); + bsp_section_work_begin = .; + . += ORIGIN (REGION_WORK) + LENGTH (REGION_WORK) - ABSOLUTE (.); + . = ALIGN(8); + bsp_section_work_end = .; + } > REGION_WORK AT > REGION_WORK + bsp_section_work_size = bsp_section_work_end - bsp_section_work_begin; + + .stack : { + . = ALIGN(8); + bsp_section_stack_begin = .; + . += ORIGIN (REGION_STACK) + LENGTH (REGION_STACK) - ABSOLUTE (.); + . = ALIGN(8); + bsp_section_stack_end = .; + } > REGION_STACK AT > REGION_STACK + bsp_section_stack_size = bsp_section_stack_begin - bsp_section_stack_end; + + _RamBase = ORIGIN (REGION_WORK); + _RamSize = LENGTH (REGION_WORK); + _WorkAreaBase = bsp_section_work_begin; + _HeapSize = 0; +} diff --git a/c/src/lib/libbsp/epiphany/epiphany_sim/timer/timer.c b/c/src/lib/libbsp/epiphany/epiphany_sim/timer/timer.c new file mode 100644 index 0000000000..5df9448e01 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/epiphany_sim/timer/timer.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2015 University of York. 
+ * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include <rtems.h> +#include <bsp.h> +#include <rtems/btimer.h> +#include <rtems/score/epiphany-utility.h> + +extern char bsp_start_vector_table_begin[]; + +bool benchmark_timer_find_average_overhead; + +static void benchmark_timer1_interrupt_handler(void) +{ + unsigned int val = 0xFFFFFFFF; + unsigned int event_type = 0x1; + + /* Embed assembly code for setting timer1 */ + __asm__ __volatile__ ("movts ctimer1, %[val] \t \n" :: [val] "r" (val)); + + __asm__ __volatile__ ("movfs r16, config; \t \n" + "mov r17, %%low(0xfffff0ff);\t \n" + "movt r17, %%high(0xffff0ff);\t \n" + "lsl r18, %[event_type], 0x8; \t \n" + "and r16, r16, r17; \t \n" + "orr r16, r16, r18; \t \n" + "movts config, r16; \t \n" + :: [event_type] "r" (event_type)); +} + +/* Start eCore tiemr 1 usef for profiling and timing analysis */ +void benchmark_timer_initialize( void ) +{ + /* Install interrupt handler for timer 1 */ + + proc_ptr *table = + (proc_ptr *) bsp_start_vector_table_begin; + + table[TIMER1] = benchmark_timer1_interrupt_handler; + + benchmark_timer1_interrupt_handler(); +} + +/* + * The following controls the behavior of benchmark_timer_read(). + * + * AVG_OVEREHAD is the overhead for starting and stopping the timer. It + * is usually deducted from the number returned. + * + * LEAST_VALID is the lowest number this routine should trust. Numbers + * below this are "noise" and zero is returned. + */ + +#define AVG_OVERHEAD 0 /* It typically takes X.X microseconds */ + /* (Y countdowns) to start/stop the timer. */ + /* This value is in microseconds. 
*/ +#define LEAST_VALID 1 /* Don't trust a clicks value lower than this */ + +benchmark_timer_t benchmark_timer_read( void ) +{ + uint32_t timer_val = 0; + uint32_t total; + + __asm__ __volatile__ ("movfs %[timer_val], ctimer1 \t \n" + :[timer_val] "=r" (timer_val):); + + total = (0xFFFFFFFF - timer_val); + + if ( benchmark_timer_find_average_overhead == true ) + return total; + else { + if ( total < LEAST_VALID ) + return 0; /* below timer resolution */ + /* + * Somehow convert total into microseconds + */ + return (total - AVG_OVERHEAD); + } +} + +void benchmark_timer_disable_subtracting_average_overhead( + bool find_flag +) +{ + benchmark_timer_find_average_overhead = find_flag; +} diff --git a/c/src/lib/libbsp/epiphany/preinstall.am b/c/src/lib/libbsp/epiphany/preinstall.am new file mode 100644 index 0000000000..dba6cc4d81 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/preinstall.am @@ -0,0 +1,7 @@ +## Automatically generated by ampolish3 - Do not edit + +if AMPOLISH3 +$(srcdir)/preinstall.am: Makefile.am + $(AMPOLISH3) $(srcdir)/Makefile.am > $(srcdir)/preinstall.am +endif + diff --git a/c/src/lib/libbsp/epiphany/shared/include/linker-symbols.h b/c/src/lib/libbsp/epiphany/shared/include/linker-symbols.h new file mode 100644 index 0000000000..c218b7ff99 --- /dev/null +++ b/c/src/lib/libbsp/epiphany/shared/include/linker-symbols.h @@ -0,0 +1,81 @@ +#ifndef LIBBSP_EPIPHANY_SHARED_LINKER_SYMBOLS_H +#define LIBBSP_EPIPHANY_SHARED_LINKER_SYMBOLS_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup epiphany_linker Linker Support + * + * @ingroup epiphany_shared + * + * @brief Linker support. 
+ * + * @{ + */ + +#ifndef ASM + #define LINKER_SYMBOL(sym) extern char sym []; +#else + #define LINKER_SYMBOL(sym) .extern sym +#endif + +LINKER_SYMBOL(bsp_section_start_begin) +LINKER_SYMBOL(bsp_section_start_end) +LINKER_SYMBOL(bsp_section_start_size) + +LINKER_SYMBOL(bsp_section_vector_begin) +LINKER_SYMBOL(bsp_section_vector_end) +LINKER_SYMBOL(bsp_section_vector_size) + +LINKER_SYMBOL(bsp_section_text_begin) +LINKER_SYMBOL(bsp_section_text_end) +LINKER_SYMBOL(bsp_section_text_size) +LINKER_SYMBOL(bsp_section_text_load_begin) +LINKER_SYMBOL(bsp_section_text_load_end) + +LINKER_SYMBOL(bsp_section_rodata_begin) +LINKER_SYMBOL(bsp_section_rodata_end) +LINKER_SYMBOL(bsp_section_rodata_size) +LINKER_SYMBOL(bsp_section_rodata_load_begin) +LINKER_SYMBOL(bsp_section_rodata_load_end) + +LINKER_SYMBOL(bsp_section_data_begin) +LINKER_SYMBOL(bsp_section_data_end) +LINKER_SYMBOL(bsp_section_data_size) +LINKER_SYMBOL(bsp_section_data_load_begin) +LINKER_SYMBOL(bsp_section_data_load_end) + +LINKER_SYMBOL(bsp_section_bss_begin) +LINKER_SYMBOL(bsp_section_bss_end) +LINKER_SYMBOL(bsp_section_bss_size) + +LINKER_SYMBOL(bsp_section_work_begin) +LINKER_SYMBOL(bsp_section_work_end) +LINKER_SYMBOL(bsp_section_work_size) + +LINKER_SYMBOL(bsp_section_stack_begin) +LINKER_SYMBOL(bsp_section_stack_end) +LINKER_SYMBOL(bsp_section_stack_size) + +LINKER_SYMBOL(bsp_vector_table_begin) +LINKER_SYMBOL(bsp_vector_table_end) +LINKER_SYMBOL(bsp_vector_table_size) + +LINKER_SYMBOL(bsp_start_vector_table_begin) +LINKER_SYMBOL(bsp_start_vector_table_end) +LINKER_SYMBOL(bsp_start_vector_table_size) + +LINKER_SYMBOL(bsp_translation_table_base) +LINKER_SYMBOL(bsp_translation_table_end) + +LINKER_SYMBOL(_bsp_processor_count) + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LIBBSP_EPIPHANY_SHARED_LINKER_SYMBOLS_H */ diff --git a/c/src/lib/libbsp/i386/pc386/clock/ckinit.c b/c/src/lib/libbsp/i386/pc386/clock/ckinit.c index 2782252680..04514d19fa 100644 --- 
a/c/src/lib/libbsp/i386/pc386/clock/ckinit.c +++ b/c/src/lib/libbsp/i386/pc386/clock/ckinit.c @@ -28,6 +28,7 @@ #include <bspopts.h> #include <libcpu/cpuModel.h> #include <assert.h> +#include <rtems/timecounter.h> #define CLOCK_VECTOR 0 @@ -37,31 +38,25 @@ uint32_t pc386_clock_click_count; /* forward declaration */ void Clock_isr(void *param); -static void Clock_driver_support_at_tick_empty(void); static void clockOff(void); -static void Clock_driver_support_at_tick_tsc(void); -static uint32_t bsp_clock_nanoseconds_since_last_tick_tsc(void); -static uint32_t bsp_clock_nanoseconds_since_last_tick_i8254(void); static void Clock_isr_handler(void *param); /* - * Roughly the number of cycles per tick and per nanosecond. Note that these + * Roughly the number of cycles per second. Note that these * will be wildly inaccurate if the chip speed changes due to power saving * or thermal modes. * * NOTE: These are only used when the TSC method is used. */ -uint64_t pc586_tsc_per_tick; -uint64_t pc586_nanoseconds_per_tick; +static uint64_t pc586_tsc_frequency; -uint64_t pc586_tsc_at_tick; +static struct timecounter pc386_tc; /* this driver may need to count ISRs per tick */ #define CLOCK_DRIVER_ISRS_PER_TICK 1 #define CLOCK_DRIVER_ISRS_PER_TICK_VALUE pc386_isrs_per_tick -/* The driver uses the count in Clock_driver_support_at_tick */ -extern volatile uint32_t Clock_driver_isrs; +extern volatile uint32_t Clock_driver_ticks; #define READ_8254( _lsb, _msb ) \ do { outport_byte(TIMER_MODE, TIMER_SEL0|TIMER_LATCH); \ @@ -74,60 +69,21 @@ extern volatile uint32_t Clock_driver_isrs; * Hooks which get swapped based upon which nanoseconds since last * tick method is preferred. */ -void (*Clock_driver_support_at_tick)(void) = NULL; -uint32_t (*Clock_driver_nanoseconds_since_last_tick)(void) = NULL; - -/* - * What do we do at each clock tick? 
- */ -static void Clock_driver_support_at_tick_tsc(void) -{ - pc586_tsc_at_tick = rdtsc(); -} - -static void Clock_driver_support_at_tick_empty(void) -{ -} +#define Clock_driver_support_at_tick() #define Clock_driver_support_install_isr( _new, _old ) \ do { \ _old = NULL; \ } while(0) -/* - * Get nanoseconds using Pentium-compatible TSC register - */ -static uint32_t bsp_clock_nanoseconds_since_last_tick_tsc(void) +static uint32_t pc386_get_timecount_tsc(struct timecounter *tc) { - uint64_t diff_nsec; - - diff_nsec = rdtsc() - pc586_tsc_at_tick; - - /* - * At this point, with a hypothetical 10 GHz CPU clock and 100 Hz tick - * clock, diff_nsec <= 27 bits. - */ - diff_nsec *= pc586_nanoseconds_per_tick; /* <= 54 bits */ - diff_nsec /= pc586_tsc_per_tick; - - if (diff_nsec > pc586_nanoseconds_per_tick) - /* - * Hmmm... Some drift or rounding. Pin the value to 1 nanosecond before - * the next tick. - */ - /* diff_nsec = pc586_nanoseconds_per_tick - 1; */ - diff_nsec = 12345; - - return (uint32_t)diff_nsec; + return (uint32_t)rdtsc(); } -/* - * Get nanoseconds using 8254 timer chip - */ -static uint32_t bsp_clock_nanoseconds_since_last_tick_i8254(void) +static uint32_t pc386_get_timecount_i8254(struct timecounter *tc) { - uint32_t usecs, clicks, isrs; - uint32_t usecs1, usecs2; + uint32_t irqs; uint8_t lsb, msb; rtems_interrupt_level level; @@ -136,34 +92,10 @@ static uint32_t bsp_clock_nanoseconds_since_last_tick_i8254(void) */ rtems_interrupt_disable(level); READ_8254(lsb, msb); - isrs = Clock_driver_isrs; + irqs = Clock_driver_ticks; rtems_interrupt_enable(level); - /* - * Now do the math - */ - /* convert values read into counter clicks */ - clicks = ((msb << 8) | lsb); - - /* whole ISRs we have done since the last tick */ - usecs1 = (pc386_isrs_per_tick - isrs - 1) * pc386_microseconds_per_isr; - - /* the partial ISR we in the middle of now */ - usecs2 = pc386_microseconds_per_isr - TICK_TO_US(clicks); - - /* total microseconds */ - usecs = usecs1 + usecs2; - #if 
0 - printk( "usecs1=%d usecs2=%d ", usecs1, usecs2 ); - printk( "maxclicks=%d clicks=%d ISRs=%d ISRsper=%d usersPer=%d usecs=%d\n", - pc386_clock_click_count, clicks, - Clock_driver_isrs, pc386_isrs_per_tick, - pc386_microseconds_per_isr, usecs ); - #endif - - /* return it in nanoseconds */ - return usecs * 1000; - + return (irqs + 1) * pc386_microseconds_per_isr - ((msb << 8) | lsb); } /* @@ -175,9 +107,6 @@ static void calibrate_tsc(void) uint8_t then_lsb, then_msb, now_lsb, now_msb; uint32_t i; - pc586_nanoseconds_per_tick = - rtems_configuration_get_microseconds_per_tick() * 1000; - /* * We just reset the timer, so we know we're at the beginning of a tick. */ @@ -204,16 +133,11 @@ static void calibrate_tsc(void) } while (1); } - pc586_tsc_per_tick = rdtsc() - begin_time; - - /* Initialize "previous tick" counters */ - pc586_tsc_at_tick = rdtsc(); + pc586_tsc_frequency = rdtsc() - begin_time; #if 0 - printk( "CPU clock at %u MHz\n", (uint32_t)(pc586_tsc_per_tick / 1000000)); + printk( "CPU clock at %u MHz\n", (uint32_t)(pc586_tsc_frequency / 1000000)); #endif - - pc586_tsc_per_tick /= rtems_clock_get_ticks_per_second(); } static void clockOn(void) @@ -299,24 +223,18 @@ void Clock_driver_support_initialize_hardware(void) if ( use_8254 ) { /* printk( "Use 8254\n" ); */ - Clock_driver_support_at_tick = Clock_driver_support_at_tick_empty; - Clock_driver_nanoseconds_since_last_tick = - bsp_clock_nanoseconds_since_last_tick_i8254; + pc386_tc.tc_get_timecount = pc386_get_timecount_i8254; + pc386_tc.tc_counter_mask = 0xffffffff; + pc386_tc.tc_frequency = TIMER_TICK; } else { /* printk( "Use TSC\n" ); */ - Clock_driver_support_at_tick = Clock_driver_support_at_tick_tsc; - Clock_driver_nanoseconds_since_last_tick = - bsp_clock_nanoseconds_since_last_tick_tsc; + pc386_tc.tc_get_timecount = pc386_get_timecount_tsc; + pc386_tc.tc_counter_mask = 0xffffffff; + pc386_tc.tc_frequency = pc586_tsc_frequency; } - /* Shell installs nanosecond handler before calling - * 
Clock_driver_support_initialize_hardware() :-( - * so we do it again now that we're ready. - */ - rtems_clock_set_nanoseconds_extension( - Clock_driver_nanoseconds_since_last_tick - ); - + pc386_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install(&pc386_tc); Clock_isr_enabled = true; } diff --git a/c/src/lib/libbsp/lm32/shared/clock/ckinit.c b/c/src/lib/libbsp/lm32/shared/clock/ckinit.c index ae065de0d8..b1f5c8d0b9 100644 --- a/c/src/lib/libbsp/lm32/shared/clock/ckinit.c +++ b/c/src/lib/libbsp/lm32/shared/clock/ckinit.c @@ -75,5 +75,7 @@ static void Clock_driver_support_shutdown_hardware(void) clockwrite(LM32_CLOCK_CR, LM32_CLOCK_CR_STOP); } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/lm32/shared/milkymist_clock/ckinit.c b/c/src/lib/libbsp/lm32/shared/milkymist_clock/ckinit.c index 65651e6470..3230d83325 100644 --- a/c/src/lib/libbsp/lm32/shared/milkymist_clock/ckinit.c +++ b/c/src/lib/libbsp/lm32/shared/milkymist_clock/ckinit.c @@ -46,4 +46,6 @@ static void Clock_driver_support_shutdown_hardware(void) MM_WRITE(MM_TIMER0_CONTROL, 0); } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m32r/m32rsim/bsp_specs b/c/src/lib/libbsp/m32r/m32rsim/bsp_specs index 559ebee5e0..3093175bb8 100644 --- a/c/src/lib/libbsp/m32r/m32rsim/bsp_specs +++ b/c/src/lib/libbsp/m32r/m32rsim/bsp_specs @@ -10,4 +10,4 @@ %{!qrtems: %(old_link)} %{qrtems: -dc -dp -N} *endfile: -%{!qrtems: *(old_endfiles)} %{qrtems: crtend.o%s crtfini.o%s } +%{!qrtems: %(old_endfiles)} %{qrtems: crtend.o%s crtfini.o%s } diff --git a/c/src/lib/libbsp/m68k/av5282/clock/clock.c b/c/src/lib/libbsp/m68k/av5282/clock/clock.c index 3dded8adf1..182693fe03 100644 --- a/c/src/lib/libbsp/m68k/av5282/clock/clock.c +++ b/c/src/lib/libbsp/m68k/av5282/clock/clock.c @@ -58,4 +58,6 @@ MCF5282_PIT_PCSR_EN; \ } while (0) +#define 
CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m68k/gen68360/clock/clock.c b/c/src/lib/libbsp/m68k/gen68360/clock/clock.c index 78deaf9cce..3b9d4ca293 100644 --- a/c/src/lib/libbsp/m68k/gen68360/clock/clock.c +++ b/c/src/lib/libbsp/m68k/gen68360/clock/clock.c @@ -98,4 +98,6 @@ extern int m360_clock_rate; m360.pitr |= divisor; \ } while (0) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m68k/genmcf548x/clock/clock.c b/c/src/lib/libbsp/m68k/genmcf548x/clock/clock.c index d3f6eb467c..d0f28c6b72 100644 --- a/c/src/lib/libbsp/m68k/genmcf548x/clock/clock.c +++ b/c/src/lib/libbsp/m68k/genmcf548x/clock/clock.c @@ -98,5 +98,7 @@ MCF548X_SLT_SCR0 |= (MCF548X_SLT_SCR_TEN | MCF548X_SLT_SCR_RUN | MCF548X_SLT_SCR_IEN); \ } while (0) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m68k/mcf52235/clock/clock.c b/c/src/lib/libbsp/m68k/mcf52235/clock/clock.c index c22393d5ae..fdc1ed6111 100644 --- a/c/src/lib/libbsp/m68k/mcf52235/clock/clock.c +++ b/c/src/lib/libbsp/m68k/mcf52235/clock/clock.c @@ -3,6 +3,7 @@ */ #include <rtems.h> +#include <rtems/timecounter.h> #include <bsp.h> /* @@ -10,34 +11,37 @@ */ #define CLOCK_VECTOR (64+56) -static uint32_t s_pcntrAtTick = 0; -static uint32_t s_nanoScale = 0; +static rtems_timecounter_simple mcf52235_tc; -/* - * Provide nanosecond extension - * Interrupts are disabled when this is called - */ -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) +static uint32_t mcf52235_tc_get(rtems_timecounter_simple *tc) { - uint32_t i; + return MCF_PIT1_PCNTR; +} - if (MCF_PIT1_PCSR & MCF_PIT_PCSR_PIF) { - i = s_pcntrAtTick + (MCF_PIT1_PMR - MCF_PIT1_PCNTR); - } - else { - i = s_pcntrAtTick - MCF_PIT1_PCNTR; - } - return i * s_nanoScale; +static bool mcf52235_tc_is_pending(rtems_timecounter_simple *tc) +{ + return (MCF_PIT1_PCSR & 
MCF_PIT_PCSR_PIF) != 0; } -#define Clock_driver_nanoseconds_since_last_tick bsp_clock_nanoseconds_since_last_tick +static uint32_t mcf52235_tc_get_timecount(struct timecounter *tc) +{ + return rtems_timecounter_simple_downcounter_get( + tc, + mcf52235_tc_get, + mcf52235_tc_is_pending + ); +} + +static void mcf52235_tc_tick(void) +{ + rtems_timecounter_simple_downcounter_tick(&mcf52235_tc, mcf52235_tc_get); +} /* * Periodic interval timer interrupt handler */ #define Clock_driver_support_at_tick() \ do { \ - s_pcntrAtTick = MCF_PIT1_PCNTR; \ MCF_PIT1_PCSR |= MCF_PIT_PCSR_PIF; \ } while (0) \ @@ -76,7 +80,6 @@ static void Clock_driver_support_initialize_hardware(void) break; preScaleCode++; } - s_nanoScale = 1000000000 / (clk >> preScaleCode); MCF_INTC0_ICR56 = MCF_INTC_ICR_IL(PIT3_IRQ_LEVEL) | MCF_INTC_ICR_IP(PIT3_IRQ_PRIORITY); @@ -90,7 +93,15 @@ static void Clock_driver_support_initialize_hardware(void) MCF_PIT1_PMR = pmr; MCF_PIT1_PCSR = MCF_PIT_PCSR_PRE(preScaleCode) | MCF_PIT_PCSR_PIE | MCF_PIT_PCSR_RLD | MCF_PIT_PCSR_EN; - s_pcntrAtTick = MCF_PIT1_PCNTR; + + rtems_timecounter_simple_install( + &mcf52235_tc, + clk >> preScaleCode, + pmr, + mcf52235_tc_get_timecount + ); } +#define Clock_driver_timecounter_tick() mcf52235_tc_tick() + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m68k/mcf52235/make/custom/mcf52235-testsuite.tcfg b/c/src/lib/libbsp/m68k/mcf52235/make/custom/mcf52235-testsuite.tcfg index 72153f716b..72deca158f 100644 --- a/c/src/lib/libbsp/m68k/mcf52235/make/custom/mcf52235-testsuite.tcfg +++ b/c/src/lib/libbsp/m68k/mcf52235/make/custom/mcf52235-testsuite.tcfg @@ -25,5 +25,6 @@ monitor02 paranoia pppd spstkalloc02 +syscall01 tmfine01 utf8proc01 diff --git a/c/src/lib/libbsp/m68k/mcf5225x/clock/clock.c b/c/src/lib/libbsp/m68k/mcf5225x/clock/clock.c index d058126798..743bd1ff94 100644 --- a/c/src/lib/libbsp/m68k/mcf5225x/clock/clock.c +++ b/c/src/lib/libbsp/m68k/mcf5225x/clock/clock.c @@ -3,6 +3,7 @@ */ #include <rtems.h> 
+#include <rtems/timecounter.h> #include <bsp.h> /* @@ -10,26 +11,37 @@ */ #define CLOCK_VECTOR (64+56) -static uint32_t s_pcntrAtTick = 0; -static uint32_t s_nanoScale = 0; +static rtems_timecounter_simple mcf5225x_tc; -/* - * Provide nanosecond extension - * Interrupts are disabled when this is called - */ -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) +static uint32_t mcf5225x_tc_get(rtems_timecounter_simple *tc) +{ + return MCF_PIT1_PCNTR; +} + +static bool mcf5225x_tc_is_pending(rtems_timecounter_simple *tc) +{ + return (MCF_PIT1_PCSR & MCF_PIT_PCSR_PIF) != 0; +} + +static uint32_t mcf5225x_tc_get_timecount(struct timecounter *tc) { - return MCF_PIT1_PCSR & MCF_PIT_PCSR_PIF ? (s_pcntrAtTick + (MCF_PIT1_PMR - MCF_PIT1_PCNTR)) * s_nanoScale : (s_pcntrAtTick - MCF_PIT1_PCNTR) * s_nanoScale; + return rtems_timecounter_simple_downcounter_get( + tc, + mcf5225x_tc_get, + mcf5225x_tc_is_pending + ); } -#define Clock_driver_nanoseconds_since_last_tick bsp_clock_nanoseconds_since_last_tick +static void mcf5225x_tc_tick(void) +{ + rtems_timecounter_simple_downcounter_tick(&mcf5225x_tc, mcf5225x_tc_get); +} /* * Periodic interval timer interrupt handler */ #define Clock_driver_support_at_tick() \ do { \ - s_pcntrAtTick = MCF_PIT1_PCNTR; \ MCF_PIT1_PCSR |= MCF_PIT_PCSR_PIF; \ } while (0) \ @@ -68,7 +80,6 @@ static void Clock_driver_support_initialize_hardware(void) break; preScaleCode++; } - s_nanoScale = 1000000000 / (clk >> preScaleCode); MCF_INTC0_ICR56 = MCF_INTC_ICR_IL(PIT3_IRQ_LEVEL) | MCF_INTC_ICR_IP(PIT3_IRQ_PRIORITY); @@ -82,7 +93,15 @@ static void Clock_driver_support_initialize_hardware(void) MCF_PIT1_PMR = pmr; MCF_PIT1_PCSR = MCF_PIT_PCSR_PRE(preScaleCode) | MCF_PIT_PCSR_PIE | MCF_PIT_PCSR_RLD | MCF_PIT_PCSR_EN; - s_pcntrAtTick = MCF_PIT1_PCNTR; + + rtems_timecounter_simple_install( + &mcf5225x_tc, + clk >> preScaleCode, + pmr, + mcf5225x_tc_get_timecount + ); } +#define Clock_driver_timecounter_tick() mcf5225x_tc_tick() + #include 
"../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m68k/mcf5235/clock/clock.c b/c/src/lib/libbsp/m68k/mcf5235/clock/clock.c index c057796e59..95b7f37bac 100644 --- a/c/src/lib/libbsp/m68k/mcf5235/clock/clock.c +++ b/c/src/lib/libbsp/m68k/mcf5235/clock/clock.c @@ -58,4 +58,6 @@ MCF5235_PIT_PCSR_EN; \ } while (0) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m68k/mcf5329/clock/clock.c b/c/src/lib/libbsp/m68k/mcf5329/clock/clock.c index ed11320bba..94c5bb0b04 100644 --- a/c/src/lib/libbsp/m68k/mcf5329/clock/clock.c +++ b/c/src/lib/libbsp/m68k/mcf5329/clock/clock.c @@ -3,6 +3,7 @@ */ #include <rtems.h> +#include <rtems/timecounter.h> #include <bsp.h> /* @@ -10,36 +11,40 @@ */ #define CLOCK_VECTOR (128+46) -static uint32_t s_pcntrAtTick = 0; -static uint32_t s_nanoScale = 0; +static rtems_timecounter_simple mcf5329_tc; -/* - * Provide nanosecond extension - */ -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) +static uint32_t mcf5329_tc_get(rtems_timecounter_simple *tc) { - uint32_t i; + return MCF_PIT3_PCNTR; +} - if (MCF_PIT3_PCSR & MCF_PIT_PCSR_PIF) { - i = s_pcntrAtTick + (MCF_PIT3_PMR - MCF_PIT3_PCNTR); - } else { - i = s_pcntrAtTick - MCF_PIT3_PCNTR; - } - return i * s_nanoScale; +static bool mcf5329_tc_is_pending(rtems_timecounter_simple *tc) +{ + return (MCF_PIT3_PCSR & MCF_PIT_PCSR_PIF) != 0; +} + +static uint32_t mcf5329_tc_get_timecount(struct timecounter *tc) +{ + return rtems_timecounter_simple_downcounter_get( + tc, + mcf5329_tc_get, + mcf5329_tc_is_pending + ); } -#define Clock_driver_nanoseconds_since_last_tick bsp_clock_nanoseconds_since_last_tick +static void mcf5329_tc_tick(void) +{ + rtems_timecounter_simple_downcounter_tick(&mcf5329_tc, mcf5329_tc_get); +} /* * Periodic interval timer interrupt handler */ #define Clock_driver_support_at_tick() \ do { \ - s_pcntrAtTick = MCF_PIT3_PCNTR; \ MCF_PIT3_PCSR |= MCF_PIT_PCSR_PIF; \ } while (0) \ - /* * Attach 
clock interrupt handler */ @@ -75,8 +80,6 @@ static void Clock_driver_support_initialize_hardware(void) break; preScaleCode++; } - s_nanoScale = 1000000000 / (clk >> preScaleCode); - MCF_INTC1_ICR46 = MCF_INTC_ICR_IL(PIT3_IRQ_LEVEL); rtems_interrupt_disable(level); @@ -89,7 +92,15 @@ static void Clock_driver_support_initialize_hardware(void) MCF_PIT3_PMR = pmr; MCF_PIT3_PCSR = MCF_PIT_PCSR_PRE(preScaleCode) | MCF_PIT_PCSR_PIE | MCF_PIT_PCSR_RLD | MCF_PIT_PCSR_EN; - s_pcntrAtTick = MCF_PIT3_PCNTR; + + rtems_timecounter_simple_install( + &mcf5329_tc, + clk >> preScaleCode, + pmr, + mcf5329_tc_get_timecount + ); } +#define Clock_driver_timecounter_tick() mcf5329_tc_tick() + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m68k/sim68000/clock/clockdrv.c b/c/src/lib/libbsp/m68k/sim68000/clock/clockdrv.c index 3cf981863a..0a78abc15f 100644 --- a/c/src/lib/libbsp/m68k/sim68000/clock/clockdrv.c +++ b/c/src/lib/libbsp/m68k/sim68000/clock/clockdrv.c @@ -48,4 +48,6 @@ static void Clock_driver_support_shutdown_hardware(void) t->cr = 0xA0; /* initialize with timer disabled */ } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/m68k/uC5282/clock/clock.c b/c/src/lib/libbsp/m68k/uC5282/clock/clock.c index 8636f96fe0..46e045cae3 100644 --- a/c/src/lib/libbsp/m68k/uC5282/clock/clock.c +++ b/c/src/lib/libbsp/m68k/uC5282/clock/clock.c @@ -12,6 +12,7 @@ */ #include <rtems.h> +#include <rtems/timecounter.h> #include <bsp.h> #include <mcf5282/mcf5282.h> @@ -20,6 +21,32 @@ */ #define CLOCK_VECTOR (64+58) +static rtems_timecounter_simple uC5282_tc; + +static uint32_t uC5282_tc_get(rtems_timecounter_simple *tc) +{ + return MCF5282_PIT3_PCNTR; +} + +static bool uC5282_tc_is_pending(rtems_timecounter_simple *tc) +{ + return (MCF5282_PIT3_PCSR & MCF5282_PIT_PCSR_PIF) != 0; +} + +static uint32_t uC5282_tc_get_timecount(struct timecounter *tc) +{ + return rtems_timecounter_simple_downcounter_get( + tc, 
+ uC5282_tc_get, + uC5282_tc_is_pending + ); +} + +static void uC5282_tc_tick(void) +{ + rtems_timecounter_simple_downcounter_tick(&uC5282_tc, uC5282_tc_get); +} + /* * CPU load counters * Place in static RAM so updates don't hit the SDRAM @@ -31,17 +58,6 @@ #define NSEC_PER_PITC __SRAMBASE.nsec_per_pitc #define FILTER_SHIFT 6 -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) -{ - int i = MCF5282_PIT3_PCNTR; - if (MCF5282_PIT3_PCSR & MCF5282_PIT_PCSR_PIF) - i = MCF5282_PIT3_PCNTR - PITC_PER_TICK; - return (PITC_PER_TICK - i) * NSEC_PER_PITC; -} - -#define Clock_driver_nanoseconds_since_last_tick \ - bsp_clock_nanoseconds_since_last_tick - /* * Periodic interval timer interrupt handler */ @@ -83,7 +99,7 @@ static uint32_t bsp_clock_nanoseconds_since_last_tick(void) */ #define Clock_driver_support_initialize_hardware() \ do { \ - unsigned long long N; \ + unsigned long long N; \ int level; \ int preScaleCode = 0; \ N = bsp_get_CPU_clock_speed(); \ @@ -116,6 +132,12 @@ static uint32_t bsp_clock_nanoseconds_since_last_tick(void) MCF5282_PIT_PCSR_PIE | \ MCF5282_PIT_PCSR_RLD | \ MCF5282_PIT_PCSR_EN; \ + rtems_timecounter_simple_install( \ + &uC5282_tc, \ + bsp_get_CPU_clock_speed() >> (preScaleCode + 1), \ + PITC_PER_TICK, \ + uC5282_tc_get_timecount \ + ); \ } while (0) /* @@ -135,4 +157,6 @@ int bsp_cpu_load_percentage(void) 0; } +#define Clock_driver_timecounter_tick() uC5282_tc_tick() + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/mips/csb350/clock/clockdrv.c b/c/src/lib/libbsp/mips/csb350/clock/clockdrv.c index c733a1b19a..537bf7ee1e 100644 --- a/c/src/lib/libbsp/mips/csb350/clock/clockdrv.c +++ b/c/src/lib/libbsp/mips/csb350/clock/clockdrv.c @@ -88,4 +88,6 @@ void au1x00_clock_init(void) #define Clock_driver_support_shutdown_hardware() +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/mips/genmongoosev/clock/clockdrv.c 
b/c/src/lib/libbsp/mips/genmongoosev/clock/clockdrv.c index 05c1a92ac4..20f730ad6b 100644 --- a/c/src/lib/libbsp/mips/genmongoosev/clock/clockdrv.c +++ b/c/src/lib/libbsp/mips/genmongoosev/clock/clockdrv.c @@ -54,4 +54,6 @@ #define Clock_driver_support_shutdown_hardware() \ MONGOOSEV_WRITE_REGISTER( CLOCK_BASE, MONGOOSEV_TIMER_CONTROL_REGISTER, 0 ) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/mips/jmr3904/clock/clockdrv.c b/c/src/lib/libbsp/mips/jmr3904/clock/clockdrv.c index 4aa1f9ebb6..f3bcbe25f1 100644 --- a/c/src/lib/libbsp/mips/jmr3904/clock/clockdrv.c +++ b/c/src/lib/libbsp/mips/jmr3904/clock/clockdrv.c @@ -55,4 +55,6 @@ #define Clock_driver_support_shutdown_hardware() +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/mips/rbtx4925/clock/clockdrv.c b/c/src/lib/libbsp/mips/rbtx4925/clock/clockdrv.c index 4a45c9a955..2fb56c6eb9 100644 --- a/c/src/lib/libbsp/mips/rbtx4925/clock/clockdrv.c +++ b/c/src/lib/libbsp/mips/rbtx4925/clock/clockdrv.c @@ -123,5 +123,6 @@ TX4925_REG_WRITE( TX4925_REG_BASE, TX4925_TIMER0_BASE + TX4925_TIMER_TCR, 0x0 ); /* Disable timer */ \ } while(0) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/mips/rbtx4938/clock/clockdrv.c b/c/src/lib/libbsp/mips/rbtx4938/clock/clockdrv.c index f50c6d2fdc..59b3452d88 100644 --- a/c/src/lib/libbsp/mips/rbtx4938/clock/clockdrv.c +++ b/c/src/lib/libbsp/mips/rbtx4938/clock/clockdrv.c @@ -114,4 +114,6 @@ void new_brk_esr(void) } while(0) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/mips/shared/clock/clockdrv.c b/c/src/lib/libbsp/mips/shared/clock/clockdrv.c index 8ee52c63ed..8b178bbd52 100644 --- a/c/src/lib/libbsp/mips/shared/clock/clockdrv.c +++ b/c/src/lib/libbsp/mips/shared/clock/clockdrv.c @@ -43,17 +43,11 @@ static 
uint32_t mips_timer_rate = 0; mips_enable_in_interrupt_mask(CLOCK_VECTOR_MASK); \ } while(0) -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) -{ - return 0; -} - -#define Clock_driver_nanoseconds_since_last_tick \ - bsp_clock_nanoseconds_since_last_tick - #define Clock_driver_support_shutdown_hardware() \ do { \ mips_disable_in_interrupt_mask(CLOCK_VECTOR_MASK); \ } while (0) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/nios2/nios2_iss/clock/clock.c b/c/src/lib/libbsp/nios2/nios2_iss/clock/clock.c index 939af651c5..1656e763c1 100644 --- a/c/src/lib/libbsp/nios2/nios2_iss/clock/clock.c +++ b/c/src/lib/libbsp/nios2/nios2_iss/clock/clock.c @@ -49,5 +49,7 @@ static void Clock_driver_support_initialize_hardware(void) NIOS2_IENABLE(1 << CLOCK_VECTOR); } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/or1k/generic_or1k/clock/clockdrv.c b/c/src/lib/libbsp/or1k/generic_or1k/clock/clockdrv.c index 57e46c1897..e01d2e506d 100644 --- a/c/src/lib/libbsp/or1k/generic_or1k/clock/clockdrv.c +++ b/c/src/lib/libbsp/or1k/generic_or1k/clock/clockdrv.c @@ -22,11 +22,14 @@ #include <bsp/generic_or1k.h> #include <rtems/score/cpu.h> #include <rtems/score/or1k-utility.h> +#include <rtems/timecounter.h> /* The number of clock cycles before generating a tick timer interrupt. 
*/ #define TTMR_NUM_OF_CLOCK_TICKS_INTERRUPT 0x09ED9 #define OR1K_CLOCK_CYCLE_TIME_NANOSECONDS 10 +static struct timecounter or1ksim_tc; + /* CPU counter */ static CPU_Counter_ticks cpu_counter_ticks; @@ -69,8 +72,23 @@ static void generic_or1k_clock_handler_install( } } +static uint32_t or1ksim_get_timecount(struct timecounter *tc) +{ + uint32_t ticks_since_last_timer_interrupt; + + ticks_since_last_timer_interrupt = _OR1K_mfspr(CPU_OR1K_SPR_TTCR); + + return cpu_counter_ticks + ticks_since_last_timer_interrupt; +} + +CPU_Counter_ticks _CPU_Counter_read(void) +{ + return or1ksim_get_timecount(NULL); +} + static void generic_or1k_clock_initialize(void) { + uint64_t frequency = (1000000000 / OR1K_CLOCK_CYCLE_TIME_NANOSECONDS); uint32_t TTMR; /* For TTMR register, @@ -90,11 +108,15 @@ static void generic_or1k_clock_initialize(void) _OR1K_mtspr(CPU_OR1K_SPR_TTMR, TTMR); _OR1K_mtspr(CPU_OR1K_SPR_TTCR, 0); - /* Initialize CPU Counter */ - cpu_counter_ticks = 0; + /* Initialize timecounter */ + or1ksim_tc.tc_get_timecount = or1ksim_get_timecount; + or1ksim_tc.tc_counter_mask = 0xffffffff; + or1ksim_tc.tc_frequency = frequency; + or1ksim_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install(&or1ksim_tc); } - static void generic_or1k_clock_cleanup(void) +static void generic_or1k_clock_cleanup(void) { uint32_t sr; @@ -109,24 +131,6 @@ static void generic_or1k_clock_initialize(void) _OR1K_mtspr(CPU_OR1K_SPR_TTMR, 0); } -/* - * Return the nanoseconds since last tick - */ -static uint32_t generic_or1k_clock_nanoseconds_since_last_tick(void) -{ - return - TTMR_NUM_OF_CLOCK_TICKS_INTERRUPT * OR1K_CLOCK_CYCLE_TIME_NANOSECONDS; -} - -CPU_Counter_ticks _CPU_Counter_read(void) -{ - uint32_t ticks_since_last_timer_interrupt; - - ticks_since_last_timer_interrupt = _OR1K_mfspr(CPU_OR1K_SPR_TTCR); - - return cpu_counter_ticks + ticks_since_last_timer_interrupt; -} - CPU_Counter_ticks _CPU_Counter_difference( CPU_Counter_ticks second, CPU_Counter_ticks first 
@@ -134,6 +138,7 @@ CPU_Counter_ticks _CPU_Counter_difference( { return second - first; } + #define Clock_driver_support_at_tick() generic_or1k_clock_at_tick() #define Clock_driver_support_initialize_hardware() generic_or1k_clock_initialize() @@ -146,7 +151,4 @@ CPU_Counter_ticks _CPU_Counter_difference( #define Clock_driver_support_shutdown_hardware() generic_or1k_clock_cleanup() -#define Clock_driver_nanoseconds_since_last_tick \ - generic_or1k_clock_nanoseconds_since_last_tick - #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/powerpc/mpc55xxevb/clock/clock-config.c b/c/src/lib/libbsp/powerpc/mpc55xxevb/clock/clock-config.c index 5743dcff35..4de9a520ab 100644 --- a/c/src/lib/libbsp/powerpc/mpc55xxevb/clock/clock-config.c +++ b/c/src/lib/libbsp/powerpc/mpc55xxevb/clock/clock-config.c @@ -26,14 +26,40 @@ #include <mpc55xx/regs.h> +#include <rtems/timecounter.h> + void Clock_isr(void *arg); -static uint64_t mpc55xx_clock_factor; +static rtems_timecounter_simple mpc55xx_tc; #if defined(MPC55XX_CLOCK_EMIOS_CHANNEL) #include <mpc55xx/emios.h> +static uint32_t mpc55xx_tc_get(rtems_timecounter_simple *tc) +{ + return EMIOS.CH [MPC55XX_CLOCK_EMIOS_CHANNEL].CCNTR.R; +} + +static bool mpc55xx_tc_is_pending(rtems_timecounter_simple *tc) +{ + return EMIOS.CH [MPC55XX_CLOCK_EMIOS_CHANNEL].CSR.B.FLAG != 0; +} + +static uint32_t mpc55xx_tc_get_timecount(struct timecounter *tc) +{ + return rtems_timecounter_simple_upcounter_get( + tc, + mpc55xx_tc_get, + mpc55xx_tc_is_pending + ); +} + +static void mpc55xx_tc_tick(void) +{ + rtems_timecounter_simple_upcounter_tick(&mpc55xx_tc, mpc55xx_tc_get); +} + static void mpc55xx_clock_at_tick(void) { union EMIOS_CSR_tag csr = MPC55XX_ZERO_FLAGS; @@ -68,8 +94,6 @@ static void mpc55xx_clock_initialize(void) uint64_t us_per_tick = rtems_configuration_get_microseconds_per_tick(); uint64_t interval = (reference_clock * us_per_tick) / 1000000; - mpc55xx_clock_factor = (1000000000ULL << 32) / reference_clock; - /* Apply 
prescaler */ if (prescaler > 0) { interval /= (uint64_t) prescaler; @@ -110,6 +134,13 @@ static void mpc55xx_clock_initialize(void) ccr.B.FEN = 1; ccr.B.FREN = 1; regs->CCR.R = ccr.R; + + rtems_timecounter_simple_install( + &mpc55xx_tc, + reference_clock, + interval, + mpc55xx_tc_get_timecount + ); } static void mpc55xx_clock_cleanup(void) @@ -122,21 +153,31 @@ static void mpc55xx_clock_cleanup(void) regs->CCR.R = ccr.R; } -static uint32_t mpc55xx_clock_nanoseconds_since_last_tick(void) +#elif defined(MPC55XX_CLOCK_PIT_CHANNEL) + +static uint32_t mpc55xx_tc_get(rtems_timecounter_simple *tc) { - volatile struct EMIOS_CH_tag *regs = &EMIOS.CH [MPC55XX_CLOCK_EMIOS_CHANNEL]; - uint64_t c = regs->CCNTR.R; - union EMIOS_CSR_tag csr = { .R = regs->CSR.R }; - uint64_t k = mpc55xx_clock_factor; + return PIT_RTI.CHANNEL [MPC55XX_CLOCK_PIT_CHANNEL].CVAL.R; +} - if (csr.B.FLAG != 0) { - c = regs->CCNTR.R + regs->CADR.R + 1; - } +static bool mpc55xx_tc_is_pending(rtems_timecounter_simple *tc) +{ + return PIT_RTI.CHANNEL [MPC55XX_CLOCK_PIT_CHANNEL].TFLG.B.TIF != 0; +} - return (uint32_t) ((c * k) >> 32); +static uint32_t mpc55xx_tc_get_timecount(struct timecounter *tc) +{ + return rtems_timecounter_simple_downcounter_get( + tc, + mpc55xx_tc_get, + mpc55xx_tc_is_pending + ); } -#elif defined(MPC55XX_CLOCK_PIT_CHANNEL) +static void mpc55xx_tc_tick(void) +{ + rtems_timecounter_simple_downcounter_tick(&mpc55xx_tc, mpc55xx_tc_get); +} static void mpc55xx_clock_at_tick(void) { @@ -174,11 +215,16 @@ static void mpc55xx_clock_initialize(void) PIT_RTI_PITMCR_32B_tag pitmcr = { .B = { .FRZ = 1 } }; PIT_RTI_TCTRL_32B_tag tctrl = { .B = { .TIE = 1, .TEN = 1 } }; - mpc55xx_clock_factor = (1000000000ULL << 32) / reference_clock; - PIT_RTI.PITMCR.R = pitmcr.R; channel->LDVAL.R = interval; channel->TCTRL.R = tctrl.R; + + rtems_timecounter_simple_install( + &mpc55xx_tc, + reference_clock, + interval, + mpc55xx_tc_get_timecount + ); } static void mpc55xx_clock_cleanup(void) @@ -189,23 +235,9 @@ 
static void mpc55xx_clock_cleanup(void) channel->TCTRL.R = 0; } -static uint32_t mpc55xx_clock_nanoseconds_since_last_tick(void) -{ - volatile PIT_RTI_CHANNEL_tag *channel = - &PIT_RTI.CHANNEL [MPC55XX_CLOCK_PIT_CHANNEL]; - uint32_t c = channel->CVAL.R; - uint32_t i = channel->LDVAL.R; - uint64_t k = mpc55xx_clock_factor; - - if (channel->TFLG.B.TIF != 0) { - c = channel->CVAL.R - i; - } - - return (uint32_t) (((i - c) * k) >> 32); -} - #endif +#define Clock_driver_timecounter_tick() mpc55xx_tc_tick() #define Clock_driver_support_at_tick() \ mpc55xx_clock_at_tick() #define Clock_driver_support_initialize_hardware() \ @@ -217,8 +249,6 @@ static uint32_t mpc55xx_clock_nanoseconds_since_last_tick(void) } while (0) #define Clock_driver_support_shutdown_hardware() \ mpc55xx_clock_cleanup() -#define Clock_driver_nanoseconds_since_last_tick \ - mpc55xx_clock_nanoseconds_since_last_tick /* Include shared source clock driver code */ #include "../../../../libbsp/shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/powerpc/qoriq/clock/clock-config.c b/c/src/lib/libbsp/powerpc/qoriq/clock/clock-config.c index 039edde46e..c6300e254f 100644 --- a/c/src/lib/libbsp/powerpc/qoriq/clock/clock-config.c +++ b/c/src/lib/libbsp/powerpc/qoriq/clock/clock-config.c @@ -7,10 +7,10 @@ */ /* - * Copyright (c) 2011-2012 embedded brains GmbH. All rights reserved. + * Copyright (c) 2011-2015 embedded brains GmbH. All rights reserved. * * embedded brains GmbH - * Obere Lagerstr. 30 + * Dornierstr. 4 * 82178 Puchheim * Germany * <rtems@embedded-brains.de> @@ -20,6 +20,8 @@ * http://www.rtems.org/license/LICENSE. 
*/ +#include <rtems/timecounter.h> + #include <libcpu/powerpc-utility.h> #include <bsp.h> @@ -29,10 +31,6 @@ /* This is defined in clockdrv_shell.h */ static rtems_isr Clock_isr(void *arg); -static uint32_t qoriq_clock_last_ccr; - -static uint32_t qoriq_clock_nanoseconds_per_timer_tick; - static volatile qoriq_pic_global_timer *const qoriq_clock = #if QORIQ_CLOCK_TIMER < 4 &qoriq.pic.gta [QORIQ_CLOCK_TIMER]; @@ -40,8 +38,17 @@ static volatile qoriq_pic_global_timer *const qoriq_clock = &qoriq.pic.gtb [QORIQ_CLOCK_TIMER - 4]; #endif +static volatile qoriq_pic_global_timer *const qoriq_timecounter = + #if QORIQ_CLOCK_TIMECOUNTER < 4 + &qoriq.pic.gta [QORIQ_CLOCK_TIMECOUNTER]; + #else + &qoriq.pic.gtb [QORIQ_CLOCK_TIMECOUNTER - 4]; + #endif + #define CLOCK_INTERRUPT (QORIQ_IRQ_GT_BASE + QORIQ_CLOCK_TIMER) +static struct timecounter qoriq_clock_tc; + static void qoriq_clock_handler_install(rtems_isr_entry *old_isr) { rtems_status_code sc = RTEMS_SUCCESSFUL; @@ -77,18 +84,28 @@ static void qoriq_clock_handler_install(rtems_isr_entry *old_isr) } } +static uint32_t qoriq_clock_get_timecount(struct timecounter *tc) +{ + uint32_t ccr = qoriq_timecounter->ccr; + + return GTCCR_COUNT_GET(-ccr); +} + static void qoriq_clock_initialize(void) { uint32_t timer_frequency = BSP_bus_frequency / 8; - uint32_t nanoseconds_per_second = 1000000000; uint32_t interval = (uint32_t) (((uint64_t) timer_frequency * (uint64_t) rtems_configuration_get_microseconds_per_tick()) / 1000000); - qoriq_clock_nanoseconds_per_timer_tick = - nanoseconds_per_second / timer_frequency; - qoriq_clock->bcr = GTBCR_COUNT(interval); - qoriq_clock_last_ccr = qoriq_clock->ccr; + + qoriq_timecounter->bcr = GTBCR_COUNT(0xffffffff); + + qoriq_clock_tc.tc_get_timecount = qoriq_clock_get_timecount; + qoriq_clock_tc.tc_counter_mask = GTCCR_COUNT_GET(0xffffffff); + qoriq_clock_tc.tc_frequency = timer_frequency; + qoriq_clock_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + 
rtems_timecounter_install(&qoriq_clock_tc); } static void qoriq_clock_cleanup(void) @@ -107,33 +124,13 @@ static void qoriq_clock_cleanup(void) } } -static void qoriq_clock_at_tick(void) -{ - qoriq_clock_last_ccr = qoriq_clock->ccr; -} - -static uint32_t qoriq_clock_nanoseconds_since_last_tick(void) -{ - uint32_t ccr = qoriq_clock->ccr; - uint32_t bcr = qoriq_clock->bcr; - - if ((ccr & GTCCR_TOG) != (qoriq_clock_last_ccr & GTCCR_TOG)) { - bcr += bcr; - } - - return (bcr - GTCCR_COUNT_GET(ccr)) * qoriq_clock_nanoseconds_per_timer_tick; -} - -#define Clock_driver_support_at_tick() \ - qoriq_clock_at_tick() +#define Clock_driver_support_at_tick() #define Clock_driver_support_initialize_hardware() \ qoriq_clock_initialize() #define Clock_driver_support_install_isr(clock_isr, old_isr) \ qoriq_clock_handler_install(&old_isr) #define Clock_driver_support_shutdown_hardware() \ qoriq_clock_cleanup() -#define Clock_driver_nanoseconds_since_last_tick \ - qoriq_clock_nanoseconds_since_last_tick /* Include shared source clock driver code */ #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/powerpc/qoriq/configure.ac b/c/src/lib/libbsp/powerpc/qoriq/configure.ac index f1ff103d7c..409ac193c4 100644 --- a/c/src/lib/libbsp/powerpc/qoriq/configure.ac +++ b/c/src/lib/libbsp/powerpc/qoriq/configure.ac @@ -154,6 +154,10 @@ RTEMS_BSPOPTS_SET([QORIQ_CLOCK_TIMER],[qoriq_core_1],[4]) RTEMS_BSPOPTS_SET([QORIQ_CLOCK_TIMER],[*],[0]) RTEMS_BSPOPTS_HELP([QORIQ_CLOCK_TIMER],[global timer used for system clock, 0..3 maps to A0..A3, and 4..7 maps to B0..B3]) +RTEMS_BSPOPTS_SET([QORIQ_CLOCK_TIMECOUNTER],[qoriq_core_1],[5]) +RTEMS_BSPOPTS_SET([QORIQ_CLOCK_TIMECOUNTER],[*],[1]) +RTEMS_BSPOPTS_HELP([QORIQ_CLOCK_TIMECOUNTER],[global timer used for the timecounter, 0..3 maps to A0..A3, and 4..7 maps to B0..B3]) + RTEMS_BSPOPTS_SET([QORIQ_CHIP_VARIANT],[qoriq_t2080*],[QORIQ_CHIP_T2080]) RTEMS_BSPOPTS_SET([QORIQ_CHIP_VARIANT],[qoriq_t4240*],[QORIQ_CHIP_T4240]) 
RTEMS_BSPOPTS_SET([QORIQ_CHIP_VARIANT],[*],[QORIQ_CHIP_P1020]) diff --git a/c/src/lib/libbsp/powerpc/shared/clock/clock.c b/c/src/lib/libbsp/powerpc/shared/clock/clock.c index e9b1d4d24a..431488a901 100644 --- a/c/src/lib/libbsp/powerpc/shared/clock/clock.c +++ b/c/src/lib/libbsp/powerpc/shared/clock/clock.c @@ -7,7 +7,7 @@ */ /* - * Copyright (c) 2008-2013 embedded brains GmbH. All rights reserved. + * Copyright (c) 2008-2015 embedded brains GmbH. All rights reserved. * * embedded brains GmbH * Obere Lagerstr. 30 @@ -22,6 +22,7 @@ #include <rtems.h> #include <rtems/clockdrv.h> +#include <rtems/timecounter.h> #include <libcpu/powerpc-utility.h> #include <bsp/vectors.h> @@ -46,7 +47,12 @@ static uint32_t ppc_clock_decrementer_value = PPC_CLOCK_DECREMENTER_MAX; static uint32_t ppc_clock_next_time_base; -static uint64_t ppc_clock_factor; +static struct timecounter ppc_tc; + +static uint32_t ppc_get_timecount(struct timecounter *tc) +{ + return ppc_time_base(); +} static void ppc_clock_no_tick(void) { @@ -161,28 +167,6 @@ static int ppc_clock_exception_handler_ppc405(BSP_Exception_frame *frame, unsign return 0; } -static uint32_t ppc_clock_nanoseconds_since_last_tick(void) -{ - uint64_t k = ppc_clock_factor; - uint32_t c = ppc_decrementer_register(); - uint32_t i = ppc_clock_decrementer_value + 1; - - return (uint32_t) (((i - c) * k) >> 32); -} - -static uint32_t ppc_clock_nanoseconds_since_last_tick_ppc405(void) -{ - uint64_t k = ppc_clock_factor; - uint32_t i = ppc_clock_decrementer_value; - uint32_t c = i - PPC_SPECIAL_PURPOSE_REGISTER(PPC405_PIT); - - if ((PPC_SPECIAL_PURPOSE_REGISTER(PPC405_TSR) & BOOKE_TSR_DIS) != 0) { - c = i - PPC_SPECIAL_PURPOSE_REGISTER(PPC405_PIT) + i; - } - - return (uint32_t) ((c * k) >> 32); -} - void Clock_exit(void) { /* Set the decrementer to the maximum value */ @@ -204,15 +188,8 @@ rtems_device_driver Clock_initialize( /* * Set default ticker. - * - * The function rtems_clock_tick() returns a status code. 
This value - * will be discarded since the RTEMS documentation claims that it is - * always successful. */ - ppc_clock_tick = (void (*)(void)) rtems_clock_tick; - - /* Factor for nano seconds extension */ - ppc_clock_factor = (1000000000ULL << 32) / frequency; + ppc_clock_tick = rtems_timecounter_tick; if (ppc_cpu_is_bookE() != PPC_BOOKE_405) { /* Decrementer value */ @@ -223,10 +200,6 @@ rtems_device_driver Clock_initialize( ppc_clock_decrementer_value = PPC_CLOCK_DECREMENTER_MAX; RTEMS_SYSLOG_ERROR( "decrementer value would be zero, will be set to maximum value instead\n"); } - - /* Set the nanoseconds since last tick handler */ - rtems_clock_set_nanoseconds_extension( ppc_clock_nanoseconds_since_last_tick); - if (ppc_cpu_is_bookE()) { /* Set decrementer auto-reload value */ PPC_SET_SPECIAL_PURPOSE_REGISTER( BOOKE_DECAR, ppc_clock_decrementer_value); @@ -253,9 +226,6 @@ rtems_device_driver Clock_initialize( /* PIT interval value */ ppc_clock_decrementer_value = interval; - /* Set the nanoseconds since last tick handler */ - rtems_clock_set_nanoseconds_extension(ppc_clock_nanoseconds_since_last_tick_ppc405); - /* Install exception handler */ ppc_exc_set_handler(ASM_BOOKE_DEC_VECTOR, ppc_clock_exception_handler_ppc405); @@ -266,5 +236,12 @@ rtems_device_driver Clock_initialize( PPC_SET_SPECIAL_PURPOSE_REGISTER(PPC405_PIT, interval); } + /* Install timecounter */ + ppc_tc.tc_get_timecount = ppc_get_timecount; + ppc_tc.tc_counter_mask = 0xffffffff; + ppc_tc.tc_frequency = frequency; + ppc_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install(&ppc_tc); + return RTEMS_SUCCESSFUL; } diff --git a/c/src/lib/libbsp/sh/shsim/make/custom/simsh1-testsuite.tcfg b/c/src/lib/libbsp/sh/shsim/make/custom/simsh1-testsuite.tcfg index b83b7ee208..542f689e34 100644 --- a/c/src/lib/libbsp/sh/shsim/make/custom/simsh1-testsuite.tcfg +++ b/c/src/lib/libbsp/sh/shsim/make/custom/simsh1-testsuite.tcfg @@ -7,6 +7,7 @@ include: testdata/require-tick-isr.tcfg 
include: testdata/disable-intrcritical-tests.tcfg +fileio fsdosfsname01 iostream utf8proc01 diff --git a/c/src/lib/libbsp/sh/shsim/make/custom/simsh2e-testsuite.tcfg b/c/src/lib/libbsp/sh/shsim/make/custom/simsh2e-testsuite.tcfg index 33a87eaf6d..9ecaa70db4 100644 --- a/c/src/lib/libbsp/sh/shsim/make/custom/simsh2e-testsuite.tcfg +++ b/c/src/lib/libbsp/sh/shsim/make/custom/simsh2e-testsuite.tcfg @@ -6,6 +6,7 @@ include: testdata/require-tick-isr.tcfg include: testdata/disable-intrcritical-tests.tcfg +fileio fsdosfsname01 iostream utf8proc01 diff --git a/c/src/lib/libbsp/shared/clock_driver_simidle.c b/c/src/lib/libbsp/shared/clock_driver_simidle.c index 0f94b1e56b..ee4b116b40 100644 --- a/c/src/lib/libbsp/shared/clock_driver_simidle.c +++ b/c/src/lib/libbsp/shared/clock_driver_simidle.c @@ -28,6 +28,8 @@ volatile bool clock_driver_enabled; clock_driver_enabled = false; \ } while (0) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "clockdrv_shell.h" /* diff --git a/c/src/lib/libbsp/shared/clockdrv_shell.h b/c/src/lib/libbsp/shared/clockdrv_shell.h index 5dbea1335f..628ba58672 100644 --- a/c/src/lib/libbsp/shared/clockdrv_shell.h +++ b/c/src/lib/libbsp/shared/clockdrv_shell.h @@ -20,6 +20,10 @@ #include <bsp.h> #include <rtems/clockdrv.h> +#ifdef Clock_driver_nanoseconds_since_last_tick +#error "Update driver to use the timecounter instead of nanoseconds extension" +#endif + /** * @defgroup bsp_clock Clock Support * @@ -39,6 +43,18 @@ #define Clock_driver_support_find_timer() #endif +/* + * A specialized clock driver may use for example rtems_timecounter_tick_simple() + * instead of the default. 
+ */ +#ifndef Clock_driver_timecounter_tick + #ifdef CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #define Clock_driver_timecounter_tick() rtems_clock_tick() + #else + #define Clock_driver_timecounter_tick() rtems_timecounter_tick() + #endif +#endif + /** * @brief ISRs until next clock tick */ @@ -78,16 +94,24 @@ rtems_isr Clock_isr( Clock_driver_ticks += 1; #if CLOCK_DRIVER_USE_FAST_IDLE - do { - rtems_clock_tick(); - } while ( - _Thread_Heir == _Thread_Executing - && _Thread_Executing->Start.entry_point - == (Thread_Entry) rtems_configuration_get_idle_task() - ); + { + struct timecounter *tc = _Timecounter; + uint64_t us_per_tick = rtems_configuration_get_microseconds_per_tick(); + uint32_t interval = (uint32_t) + ((tc->tc_frequency * us_per_tick) / 1000000); + + Clock_driver_timecounter_tick(); + + while ( + _Thread_Heir == _Thread_Executing + && _Thread_Executing->Start.entry_point + == (Thread_Entry) rtems_configuration_get_idle_task() + ) { + _Timecounter_Tick_simple(interval, (*tc->tc_get_timecount)(tc)); + } - Clock_driver_support_at_tick(); - return; + Clock_driver_support_at_tick(); + } #else /* * Do the hardware specific per-tick action. @@ -101,7 +125,7 @@ rtems_isr Clock_isr( * The driver is multiple ISRs per clock tick. */ if ( !Clock_driver_isrs ) { - rtems_clock_tick(); + Clock_driver_timecounter_tick(); Clock_driver_isrs = CLOCK_DRIVER_ISRS_PER_TICK; } @@ -110,7 +134,7 @@ rtems_isr Clock_isr( /* * The driver is one ISR per clock tick. */ - rtems_clock_tick(); + Clock_driver_timecounter_tick(); #endif #endif } @@ -160,12 +184,6 @@ rtems_device_driver Clock_initialize( (void) Old_ticker; Clock_driver_support_install_isr( Clock_isr, Old_ticker ); - #if defined(Clock_driver_nanoseconds_since_last_tick) - rtems_clock_set_nanoseconds_extension( - Clock_driver_nanoseconds_since_last_tick - ); - #endif - /* * Now initialize the hardware that is the source of the tick ISR. 
*/ diff --git a/c/src/lib/libbsp/sparc/erc32/clock/ckinit.c b/c/src/lib/libbsp/sparc/erc32/clock/ckinit.c index 2afe770b4b..46f99a5e7d 100644 --- a/c/src/lib/libbsp/sparc/erc32/clock/ckinit.c +++ b/c/src/lib/libbsp/sparc/erc32/clock/ckinit.c @@ -25,6 +25,7 @@ #include <bsp.h> #include <bspopts.h> #include <rtems/counter.h> +#include <rtems/timecounter.h> #if SIMSPARC_FAST_IDLE==1 #define CLOCK_DRIVER_USE_FAST_IDLE 1 @@ -44,24 +45,34 @@ extern int CLOCK_SPEED; -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) +static rtems_timecounter_simple erc32_tc; + +static uint32_t erc32_tc_get( rtems_timecounter_simple *tc ) +{ + return ERC32_MEC.Real_Time_Clock_Counter; +} + +static bool erc32_tc_is_pending( rtems_timecounter_simple *tc ) +{ + return ERC32_Is_interrupt_pending( ERC32_INTERRUPT_REAL_TIME_CLOCK ); +} + +static uint32_t erc32_tc_get_timecount( struct timecounter *tc ) { - uint32_t clicks; - uint32_t usecs; - - clicks = ERC32_MEC.Real_Time_Clock_Counter; - - if ( ERC32_Is_interrupt_pending( ERC32_INTERRUPT_REAL_TIME_CLOCK ) ) { - clicks = ERC32_MEC.Real_Time_Clock_Counter; - usecs = (2*rtems_configuration_get_microseconds_per_tick() - clicks); - } else { - usecs = (rtems_configuration_get_microseconds_per_tick() - clicks); - } - return usecs * 1000; + return rtems_timecounter_simple_downcounter_get( + tc, + erc32_tc_get, + erc32_tc_is_pending + ); } -#define Clock_driver_nanoseconds_since_last_tick \ - bsp_clock_nanoseconds_since_last_tick +static void erc32_tc_tick( void ) +{ + rtems_timecounter_simple_downcounter_tick( + &erc32_tc, + erc32_tc_get + ); +} static CPU_Counter_ticks erc32_counter_difference( CPU_Counter_ticks second, @@ -75,6 +86,7 @@ static CPU_Counter_ticks erc32_counter_difference( #define Clock_driver_support_initialize_hardware() \ do { \ + uint32_t frequency = 1000000; \ /* approximately 1 us per countdown */ \ ERC32_MEC.Real_Time_Clock_Scalar = CLOCK_SPEED - 1; \ ERC32_MEC.Real_Time_Clock_Counter = \ @@ -89,14 +101,22 @@ static 
CPU_Counter_ticks erc32_counter_difference( ERC32_MEC_Set_Real_Time_Clock_Timer_Control( \ ERC32_MEC_TIMER_COUNTER_ENABLE_COUNTING | \ ERC32_MEC_TIMER_COUNTER_RELOAD_AT_ZERO \ + ); \ + rtems_timecounter_simple_install( \ + &erc32_tc, \ + frequency, \ + rtems_configuration_get_microseconds_per_tick(), \ + erc32_tc_get_timecount \ ); \ _SPARC_Counter_initialize( \ &ERC32_MEC.Real_Time_Clock_Counter, \ erc32_counter_difference \ ); \ - rtems_counter_initialize_converter(1000000); \ + rtems_counter_initialize_converter( frequency ); \ } while (0) +#define Clock_driver_timecounter_tick() erc32_tc_tick() + #define Clock_driver_support_shutdown_hardware() \ do { \ ERC32_Mask_interrupt( ERC32_INTERRUPT_REAL_TIME_CLOCK ); \ diff --git a/c/src/lib/libbsp/sparc/leon2/clock/ckinit.c b/c/src/lib/libbsp/sparc/leon2/clock/ckinit.c index ab4efa0e8a..3dd68e080a 100644 --- a/c/src/lib/libbsp/sparc/leon2/clock/ckinit.c +++ b/c/src/lib/libbsp/sparc/leon2/clock/ckinit.c @@ -24,11 +24,38 @@ #include <bsp.h> #include <bspopts.h> +#include <rtems/timecounter.h> #if SIMSPARC_FAST_IDLE==1 #define CLOCK_DRIVER_USE_FAST_IDLE 1 #endif +static rtems_timecounter_simple leon2_tc; + +static uint32_t leon2_tc_get( rtems_timecounter_simple *tc ) +{ + return LEON_REG.Timer_Counter_1; +} + +static bool leon2_tc_is_pending( rtems_timecounter_simple *tc ) +{ + return LEON_Is_interrupt_pending( LEON_INTERRUPT_TIMER1 ); +} + +static uint32_t leon2_tc_get_timecount( struct timecounter *tc ) +{ + return rtems_timecounter_simple_downcounter_get( + tc, + leon2_tc_get, + leon2_tc_is_pending + ); +} + +static void leon2_tc_tick( void ) +{ + rtems_timecounter_simple_downcounter_tick( &leon2_tc, leon2_tc_get ); +} + /* * The Real Time Clock Counter Timer uses this trap type. 
*/ @@ -54,6 +81,12 @@ extern int CLOCK_SPEED; LEON_REG_TIMER_COUNTER_RELOAD_AT_ZERO | \ LEON_REG_TIMER_COUNTER_LOAD_COUNTER \ ); \ + rtems_timecounter_simple_install( \ + &leon2_tc, \ + 1000000, \ + rtems_configuration_get_microseconds_per_tick(), \ + leon2_tc_get_timecount \ + ); \ } while (0) #define Clock_driver_support_shutdown_hardware() \ @@ -62,23 +95,6 @@ extern int CLOCK_SPEED; LEON_REG.Timer_Control_1 = 0; \ } while (0) -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) -{ - uint32_t clicks; - uint32_t usecs; - - clicks = LEON_REG.Timer_Counter_1; - - if ( LEON_Is_interrupt_pending( LEON_INTERRUPT_TIMER1 ) ) { - clicks = LEON_REG.Timer_Counter_1; - usecs = (2*rtems_configuration_get_microseconds_per_tick() - clicks); - } else { - usecs = (rtems_configuration_get_microseconds_per_tick() - clicks); - } - return usecs * 1000; -} - -#define Clock_driver_nanoseconds_since_last_tick \ - bsp_clock_nanoseconds_since_last_tick +#define Clock_driver_timecounter_tick() leon2_tc_tick() #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/sparc/leon3/clock/ckinit.c b/c/src/lib/libbsp/sparc/leon3/clock/ckinit.c index ad226161bc..b82b457866 100644 --- a/c/src/lib/libbsp/sparc/leon3/clock/ckinit.c +++ b/c/src/lib/libbsp/sparc/leon3/clock/ckinit.c @@ -24,6 +24,7 @@ #include <rtems/rtems/intr.h> #include <ambapp.h> #include <rtems/score/profiling.h> +#include <rtems/timecounter.h> /* The LEON3 BSP Timer driver can rely on the Driver Manager if the * DrvMgr is initialized during startup. 
Otherwise the classic driver @@ -40,6 +41,43 @@ /* LEON3 Timer system interrupt number */ static int clkirq; +static bool leon3_tc_use_irqmp; + +static rtems_timecounter_simple leon3_tc; + +static uint32_t leon3_tc_get(rtems_timecounter_simple *tc) +{ + return LEON3_Timer_Regs->timer[LEON3_CLOCK_INDEX].value; +} + +static bool leon3_tc_is_pending(rtems_timecounter_simple *tc) +{ + return LEON_Is_interrupt_pending(clkirq); +} + +static uint32_t leon3_tc_get_timecount(struct timecounter *tc) +{ + return rtems_timecounter_simple_downcounter_get( + tc, + leon3_tc_get, + leon3_tc_is_pending + ); +} + +static uint32_t leon3_tc_get_timecount_irqmp(struct timecounter *tc) +{ + return LEON3_IrqCtrl_Regs->timestamp[0].counter; +} + +static void leon3_tc_tick(void) +{ + if (leon3_tc_use_irqmp) { + rtems_timecounter_tick(); + } else { + rtems_timecounter_simple_downcounter_tick(&leon3_tc, leon3_tc_get); + } +} + static void leon3_clock_profiling_interrupt_delay(void) { #ifdef RTEMS_PROFILING @@ -112,15 +150,36 @@ static void bsp_clock_handler_install(rtems_isr *new) } } +static void leon3_clock_initialize(void) +{ + volatile struct irqmp_timestamp_regs *irqmp_ts = + &LEON3_IrqCtrl_Regs->timestamp[0]; + + LEON3_Timer_Regs->timer[LEON3_CLOCK_INDEX].reload = + rtems_configuration_get_microseconds_per_tick() - 1; + LEON3_Timer_Regs->timer[LEON3_CLOCK_INDEX].ctrl = + GPTIMER_TIMER_CTRL_EN | GPTIMER_TIMER_CTRL_RS | + GPTIMER_TIMER_CTRL_LD | GPTIMER_TIMER_CTRL_IE; + + if (leon3_irqmp_has_timestamp(irqmp_ts)) { + leon3_tc.tc.tc_get_timecount = leon3_tc_get_timecount_irqmp; + leon3_tc.tc.tc_counter_mask = 0xffffffff; + leon3_tc.tc.tc_frequency = ambapp_freq_get(&ambapp_plb, LEON3_Timer_Adev); + leon3_tc.tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + leon3_tc_use_irqmp = true; + rtems_timecounter_install(&leon3_tc.tc); + } else { + rtems_timecounter_simple_install( + &leon3_tc, + LEON3_GPTIMER_0_FREQUENCY_SET_BY_BOOT_LOADER, + rtems_configuration_get_microseconds_per_tick(), 
+ leon3_tc_get_timecount + ); + } +} + #define Clock_driver_support_initialize_hardware() \ - do { \ - LEON3_Timer_Regs->timer[LEON3_CLOCK_INDEX].reload = \ - rtems_configuration_get_microseconds_per_tick() - 1; \ - \ - LEON3_Timer_Regs->timer[LEON3_CLOCK_INDEX].ctrl = \ - GPTIMER_TIMER_CTRL_EN | GPTIMER_TIMER_CTRL_RS | \ - GPTIMER_TIMER_CTRL_LD | GPTIMER_TIMER_CTRL_IE; \ - } while (0) + leon3_clock_initialize() #define Clock_driver_support_shutdown_hardware() \ do { \ @@ -128,27 +187,7 @@ static void bsp_clock_handler_install(rtems_isr *new) LEON3_Timer_Regs->timer[LEON3_CLOCK_INDEX].ctrl = 0; \ } while (0) -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) -{ - uint32_t clicks; - uint32_t usecs; - - if ( !LEON3_Timer_Regs ) - return 0; - - clicks = LEON3_Timer_Regs->timer[LEON3_CLOCK_INDEX].value; - - if ( LEON_Is_interrupt_pending( clkirq ) ) { - clicks = LEON3_Timer_Regs->timer[LEON3_CLOCK_INDEX].value; - usecs = (2*rtems_configuration_get_microseconds_per_tick() - clicks); - } else { - usecs = (rtems_configuration_get_microseconds_per_tick() - clicks); - } - return usecs * 1000; -} - -#define Clock_driver_nanoseconds_since_last_tick \ - bsp_clock_nanoseconds_since_last_tick +#define Clock_driver_timecounter_tick() leon3_tc_tick() #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libbsp/sparc64/shared/clock/ckinit.c b/c/src/lib/libbsp/sparc64/shared/clock/ckinit.c index a3f9450174..ff0464c8f2 100644 --- a/c/src/lib/libbsp/sparc64/shared/clock/ckinit.c +++ b/c/src/lib/libbsp/sparc64/shared/clock/ckinit.c @@ -114,5 +114,7 @@ static void Clock_driver_support_initialize_hardware(void) \ } while ( 0 ) +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../shared/clockdrv_shell.h" diff --git a/c/src/lib/libcpu/arm/at91rm9200/clock/clock.c b/c/src/lib/libcpu/arm/at91rm9200/clock/clock.c index 16cf145b37..42b85f3f98 100644 --- a/c/src/lib/libcpu/arm/at91rm9200/clock/clock.c +++ b/c/src/lib/libcpu/arm/at91rm9200/clock/clock.c @@ 
-22,8 +22,6 @@ #include <at91rm9200.h> #include <at91rm9200_pmc.h> -static unsigned long st_pimr_reload; - /** * Enables clock interrupt. * @@ -77,17 +75,16 @@ rtems_irq_connect_data clock_isr_data = { BSP_install_rtems_irq_handler(&clock_isr_data); \ } while(0) -uint16_t st_pimr_value; static void Clock_driver_support_initialize_hardware(void) { uint32_t st_str; int slck; + unsigned long value; /* the system timer is driven from SLCK */ slck = at91rm9200_get_slck(); - st_pimr_value = (((rtems_configuration_get_microseconds_per_tick() * slck) + + value = (((rtems_configuration_get_microseconds_per_tick() * slck) + (1000000/2))/ 1000000); - st_pimr_reload = st_pimr_value; /* read the status to clear the int */ st_str = ST_REG(ST_SR); @@ -97,21 +94,9 @@ static void Clock_driver_support_initialize_hardware(void) AIC_SMR_REG(AIC_SMR_SYSIRQ) = AIC_SMR_PRIOR(0x7); /* set the timer value */ - ST_REG(ST_PIMR) = st_pimr_reload; + ST_REG(ST_PIMR) = value; } -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) -{ - uint16_t slck_counts; - - slck_counts = st_pimr_value - st_pimr_reload; - return (rtems_configuration_get_microseconds_per_tick() * slck_counts * 1000) - / st_pimr_value; -} - -#define Clock_driver_nanoseconds_since_last_tick \ - bsp_clock_nanoseconds_since_last_tick - #define Clock_driver_support_at_tick() \ do { \ uint32_t st_str; \ @@ -126,4 +111,6 @@ static void Clock_driver_support_shutdown_hardware( void ) BSP_remove_rtems_irq_handler(&clock_isr_data); } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../../libbsp/shared/clockdrv_shell.h" diff --git a/c/src/lib/libcpu/arm/lpc22xx/clock/clockdrv.c b/c/src/lib/libcpu/arm/lpc22xx/clock/clockdrv.c index 02f5b8c5ea..4887d2a8e0 100644 --- a/c/src/lib/libcpu/arm/lpc22xx/clock/clockdrv.c +++ b/c/src/lib/libcpu/arm/lpc22xx/clock/clockdrv.c @@ -17,12 +17,39 @@ #include <bsp/irq.h> #include <lpc22xx.h> #include <rtems/bspIo.h> /* for printk */ +#include <rtems/timecounter.h> void 
Clock_isr(rtems_irq_hdl_param arg); static void clock_isr_on(const rtems_irq_connect_data *unused); static void clock_isr_off(const rtems_irq_connect_data *unused); static int clock_isr_is_on(const rtems_irq_connect_data *irq); +static rtems_timecounter_simple lpc22xx_tc; + +static uint32_t lpc22xx_tc_get(rtems_timecounter_simple *tc) +{ + return T0TC; +} + +static bool lpc22xx_tc_is_pending(rtems_timecounter_simple *tc) +{ + return (T0IR & 0x1) != 0; +} + +static uint32_t lpc22xx_tc_get_timecount(struct timecounter *tc) +{ + return rtems_timecounter_simple_upcounter_get( + tc, + lpc22xx_tc_get, + lpc22xx_tc_is_pending + ); +} + +static void lpc22xx_tc_tick(void) +{ + rtems_timecounter_simple_upcounter_tick(&lpc22xx_tc, lpc22xx_tc_get); +} + /* Replace the first value with the clock's interrupt name. */ rtems_irq_connect_data clock_isr_data = { .name = LPC22xx_INTERRUPT_TIMER0, @@ -76,7 +103,7 @@ rtems_irq_connect_data clock_isr_data = { do { \ /* disable and clear timer 0, set to */ \ T0TCR &= 0; \ - /* TC is incrementet on every pclk.*/ \ + /* TC is incremented on every pclk.*/ \ T0PC = 0; \ /* initialize the timer period and prescaler */ \ T0MR0 = ((LPC22xx_Fpclk/1000 * \ @@ -89,6 +116,13 @@ rtems_irq_connect_data clock_isr_data = { T0TCR = 1; \ /* enable interrupt, skyeye will check this*/ \ T0IR |= 0x01; \ + /* install timecounter */ \ + rtems_timecounter_simple_install( \ + &lpc22xx_tc, \ + LPC22xx_Fpclk, \ + T0MR0, \ + lpc22xx_tc_get_timecount \ + ); \ } while (0) /** @@ -104,20 +138,6 @@ rtems_irq_connect_data clock_isr_data = { BSP_remove_rtems_irq_handler(&clock_isr_data); \ } while (0) -static uint32_t bsp_clock_nanoseconds_since_last_tick(void) -{ - uint32_t clicks; - uint32_t microseconds; - - clicks = T0TC; /* T0TC is the 32bit time counter 0 */ - - microseconds = (rtems_configuration_get_microseconds_per_tick() - clicks); - return microseconds * 1000; -} - -#define Clock_driver_nanoseconds_since_last_tick \ - bsp_clock_nanoseconds_since_last_tick - 
/** * Enables clock interrupt. * @@ -149,6 +169,8 @@ static int clock_isr_is_on(const rtems_irq_connect_data *irq) return T0IR & 0x01; /* MR0 mask */ } +#define Clock_driver_timecounter_tick() lpc22xx_tc_tick() + /* Make sure to include this, and only at the end of the file */ #include "../../../../libbsp/shared/clockdrv_shell.h" diff --git a/c/src/lib/libcpu/arm/mc9328mxl/clock/clockdrv.c b/c/src/lib/libcpu/arm/mc9328mxl/clock/clockdrv.c index 0c3ab5dd93..d5dc69c9a0 100644 --- a/c/src/lib/libcpu/arm/mc9328mxl/clock/clockdrv.c +++ b/c/src/lib/libcpu/arm/mc9328mxl/clock/clockdrv.c @@ -130,5 +130,8 @@ static int clock_isr_is_on(const rtems_irq_connect_data *irq) return MC9328MXL_TMR1_TCTL & MC9328MXL_TMR_TCTL_IRQEN; } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + /* Make sure to include this, and only at the end of the file */ + #include "../../../../libbsp/shared/clockdrv_shell.h" diff --git a/c/src/lib/libcpu/arm/pxa255/clock/clock.c b/c/src/lib/libcpu/arm/pxa255/clock/clock.c index 92d9b21d2b..69b684926c 100644 --- a/c/src/lib/libcpu/arm/pxa255/clock/clock.c +++ b/c/src/lib/libcpu/arm/pxa255/clock/clock.c @@ -116,4 +116,6 @@ static void Clock_driver_support_shutdown_hardware( void ) BSP_remove_rtems_irq_handler(&clock_isr_data); } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + #include "../../../../libbsp/shared/clockdrv_shell.h" diff --git a/c/src/lib/libcpu/arm/s3c24xx/clock/clockdrv.c b/c/src/lib/libcpu/arm/s3c24xx/clock/clockdrv.c index 519d3f850d..d8c7e644ad 100644 --- a/c/src/lib/libcpu/arm/s3c24xx/clock/clockdrv.c +++ b/c/src/lib/libcpu/arm/s3c24xx/clock/clockdrv.c @@ -28,17 +28,6 @@ rtems_irq_connect_data clock_isr_data = { }; /** - * Return the nanoseconds since last tick - */ -static uint32_t clock_driver_get_nanoseconds_since_last_tick(void) -{ - return 0; -} - -#define Clock_driver_nanoseconds_since_last_tick \ - clock_driver_get_nanoseconds_since_last_tick - -/** * When we get the clock interrupt * - clear the interrupt bit? * - restart the timer? 
@@ -131,5 +120,7 @@ static int clock_isr_is_on(const rtems_irq_connect_data *irq) return 1; } +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + /* Make sure to include this, and only at the end of the file */ #include "../../../../libbsp/shared/clockdrv_shell.h" diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c b/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c index 218828cf4a..41b10cb30d 100644 --- a/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c +++ b/c/src/lib/libcpu/powerpc/mpc6xx/clock/c_clock.c @@ -29,6 +29,7 @@ #include <libcpu/spr.h> #include <rtems/bspIo.h> /* for printk() */ #include <libcpu/powerpc-utility.h> +#include <rtems/timecounter.h> #include <bspopts.h> /* for CLOCK_DRIVER_USE_FAST_IDLE */ @@ -48,11 +49,12 @@ volatile uint32_t Clock_driver_ticks; */ static uint32_t Clock_Decrementer_value; -/* - * This is the value by which elapsed count down timer ticks are multiplied to - * give an elapsed duration in nanoseconds, left-shifted by 32 bits - */ -static uint64_t Clock_Decrementer_reference; +static struct timecounter Clock_TC; + +static uint32_t Clock_Get_timecount(struct timecounter *tc) +{ + return ppc_time_base(); +} void clockOff(void* unused) { @@ -94,16 +96,27 @@ void clockOn(void* unused) static void clockHandler(void) { #if (CLOCK_DRIVER_USE_FAST_IDLE == 1) - do { - rtems_clock_tick(); - } while ( + rtems_interrupt_level level; + uint32_t tb; + + rtems_interrupt_disable(level); + + tb = ppc_time_base(); + rtems_timecounter_tick(); + + while ( _Thread_Heir == _Thread_Executing && _Thread_Executing->Start.entry_point == (Thread_Entry) rtems_configuration_get_idle_task() - ); + ) { + tb += Clock_Decrementer_value; + ppc_set_time_base( tb ); + rtems_timecounter_tick(); + } + rtems_interrupt_enable(level); #else - rtems_clock_tick(); + rtems_timecounter_tick(); #endif } @@ -141,7 +154,6 @@ void clockIsr(void *unused) rtems_interrupt_enable(flags); Clock_driver_ticks += 1; - /* * Real Time Clock counter/timer is set to automatically 
reload. */ @@ -187,7 +199,6 @@ int clockIsOn(void* unused) return 0; } - /* * Clock_exit * @@ -199,53 +210,6 @@ void Clock_exit( void ) (void) BSP_disconnect_clock_handler (); } -static uint32_t Clock_driver_nanoseconds_since_last_tick(void) -{ - uint32_t clicks, tmp; - - PPC_Get_decrementer( clicks ); - - /* - * Multiply by 1000 here separately from below so we do not overflow - * and get a negative value. - */ - tmp = (Clock_Decrementer_value - clicks) * 1000; - tmp /= (BSP_bus_frequency/BSP_time_base_divisor); - - return tmp * 1000; -} - -static uint32_t Clock_driver_nanoseconds_since_last_tick_bookE(void) -{ - uint32_t clicks; - uint64_t c; - - PPC_Get_decrementer( clicks ); - c = Clock_Decrementer_value - clicks; - - /* - * Check whether a clock tick interrupt is pending and hence that the - * decrementer's wrapped. If it has, we'll compensate by returning a time one - * tick period longer. - * - * We have to check interrupt status after reading the decrementer. If we - * don't, we may miss an interrupt and read a wrapped decrementer value - * without compensating for it - */ - if ( _read_BOOKE_TSR() & BOOKE_TSR_DIS ) - { - /* - * Re-read the decrementer: The tick interrupt may have been - * generated and the decrementer wrapped during the time since we - * last read it and the time we checked the interrupt status - */ - PPC_Get_decrementer( clicks ); - c = (Clock_Decrementer_value - clicks) + Clock_Decrementer_value; - } - - return (uint32_t)((c * Clock_Decrementer_reference) >> 32); -} - /* * Clock_initialize * @@ -262,9 +226,6 @@ rtems_device_driver Clock_initialize( Clock_Decrementer_value = (BSP_bus_frequency/BSP_time_base_divisor)* rtems_configuration_get_milliseconds_per_tick(); - Clock_Decrementer_reference = ((uint64_t)1000000U<<32)/ - (BSP_bus_frequency/BSP_time_base_divisor); - /* set the decrementer now, prior to installing the handler * so no interrupts will happen in a while. 
*/ @@ -283,24 +244,14 @@ rtems_device_driver Clock_initialize( _write_BOOKE_TCR(tcr); rtems_interrupt_enable(l); - - /* - * Set the nanoseconds since last tick handler - */ - rtems_clock_set_nanoseconds_extension( - Clock_driver_nanoseconds_since_last_tick_bookE - ); - } - else - { - /* - * Set the nanoseconds since last tick handler - */ - rtems_clock_set_nanoseconds_extension( - Clock_driver_nanoseconds_since_last_tick - ); } + Clock_TC.tc_get_timecount = Clock_Get_timecount; + Clock_TC.tc_counter_mask = 0xffffffff; + Clock_TC.tc_frequency = (1000 * BSP_bus_frequency) / BSP_time_base_divisor; + Clock_TC.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install(&Clock_TC); + /* * If a decrementer exception was pending, it is cleared by * executing the default (nop) handler at this point; diff --git a/cpukit/configure.ac b/cpukit/configure.ac index 2b432f6437..46942c9805 100644 --- a/cpukit/configure.ac +++ b/cpukit/configure.ac @@ -453,6 +453,7 @@ score/cpu/Makefile score/cpu/arm/Makefile score/cpu/bfin/Makefile score/cpu/avr/Makefile +score/cpu/epiphany/Makefile score/cpu/h8300/Makefile score/cpu/i386/Makefile score/cpu/lm32/Makefile diff --git a/cpukit/libcsupport/src/__times.c b/cpukit/libcsupport/src/__times.c index 5a1f9eba4d..895ee6b658 100644 --- a/cpukit/libcsupport/src/__times.c +++ b/cpukit/libcsupport/src/__times.c @@ -66,9 +66,10 @@ clock_t _times( #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__ { - Timestamp_Control per_tick; - uint32_t ticks_of_executing; - uint32_t fractional_ticks; + Timestamp_Control per_tick; + uint32_t ticks_of_executing; + uint32_t fractional_ticks; + Per_CPU_Control *cpu_self; _Timestamp_Set( &per_tick, @@ -78,7 +79,7 @@ clock_t _times( TOD_NANOSECONDS_PER_SECOND) ); - _Thread_Disable_dispatch(); + cpu_self = _Thread_Dispatch_disable(); executing = _Thread_Executing; _Thread_Update_cpu_time_used( executing, @@ -90,7 +91,7 @@ clock_t _times( &ticks_of_executing, &fractional_ticks ); - 
_Thread_Enable_dispatch(); + _Thread_Dispatch_enable( cpu_self ); ptms->tms_utime = ticks_of_executing * us_per_tick; } #else diff --git a/cpukit/libcsupport/src/error.c b/cpukit/libcsupport/src/error.c index 81ddae251a..44cc1ee688 100644 --- a/cpukit/libcsupport/src/error.c +++ b/cpukit/libcsupport/src/error.c @@ -36,7 +36,7 @@ int rtems_verror( if (error_flag & RTEMS_ERROR_PANIC) { if (rtems_panic_in_progress++) - _Thread_Disable_dispatch(); /* disable task switches */ + _Thread_Dispatch_disable(); /* disable task switches */ /* don't aggravate things */ if (rtems_panic_in_progress > 2) diff --git a/cpukit/libcsupport/src/kill_noposix.c b/cpukit/libcsupport/src/kill_noposix.c index cfc99f6218..3cb807c95e 100644 --- a/cpukit/libcsupport/src/kill_noposix.c +++ b/cpukit/libcsupport/src/kill_noposix.c @@ -9,7 +9,6 @@ * * + kill * + _kill_r - * + __kill * + sleep */ @@ -45,9 +44,4 @@ int _kill_r( struct _reent *ptr, pid_t pid, int sig ) return 0; } #endif - -int __kill( pid_t pid, int sig ) -{ - return 0; -} #endif diff --git a/cpukit/libcsupport/src/privateenv.c b/cpukit/libcsupport/src/privateenv.c index 57177453c4..29821e4045 100644 --- a/cpukit/libcsupport/src/privateenv.c +++ b/cpukit/libcsupport/src/privateenv.c @@ -23,7 +23,7 @@ #include <stdlib.h> #include <rtems/libio_.h> -#include <rtems/score/threaddispatch.h> +#include <rtems/score/threadimpl.h> /** * Instantiate a private user environment for the calling thread. 
@@ -50,13 +50,6 @@ void rtems_libio_free_user_env(void *arg) } } -static void free_user_env_protected(rtems_user_env_t *env) -{ - _Thread_Disable_dispatch(); - rtems_libio_free_user_env(env); - _Thread_Enable_dispatch(); -} - rtems_status_code rtems_libio_set_private_env(void) { rtems_status_code sc = RTEMS_SUCCESSFUL; @@ -64,6 +57,7 @@ rtems_status_code rtems_libio_set_private_env(void) bool uses_global_env = old_env == &rtems_global_user_env; if (uses_global_env) { + bool life_protection = _Thread_Set_life_protection(true); rtems_user_env_t *new_env = calloc(1, sizeof(*new_env)); if (new_env != NULL) { @@ -83,7 +77,7 @@ rtems_status_code rtems_libio_set_private_env(void) ); if (eno == 0) { - free_user_env_protected(old_env); + rtems_libio_free_user_env(old_env); } else { sc = RTEMS_TOO_MANY; } @@ -97,6 +91,8 @@ rtems_status_code rtems_libio_set_private_env(void) } else { sc = RTEMS_NO_MEMORY; } + + _Thread_Set_life_protection(life_protection); } return sc; @@ -108,7 +104,11 @@ void rtems_libio_use_global_env(void) bool uses_private_env = env != &rtems_global_user_env; if (uses_private_env) { - free_user_env_protected(env); + bool life_protection = _Thread_Set_life_protection(true); + + rtems_libio_free_user_env(env); pthread_setspecific(rtems_current_user_env_key, NULL); + + _Thread_Set_life_protection(life_protection); } } diff --git a/cpukit/libcsupport/src/sup_fs_location.c b/cpukit/libcsupport/src/sup_fs_location.c index 545dfe185f..ddda4d15eb 100644 --- a/cpukit/libcsupport/src/sup_fs_location.c +++ b/cpukit/libcsupport/src/sup_fs_location.c @@ -6,10 +6,10 @@ */ /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. + * Copyright (c) 2012-2015 embedded brains GmbH. All rights reserved. * * embedded brains GmbH - * Obere Lagerstr. 30 + * Dornierstr. 
4 * 82178 Puchheim * Germany * <rtems@embedded-brains.de> @@ -31,6 +31,12 @@ rtems_interrupt_lock rtems_filesystem_mt_entry_lock_control = RTEMS_INTERRUPT_LOCK_INITIALIZER("mount table entry"); +RTEMS_INTERRUPT_LOCK_DEFINE( + static, + deferred_release_lock, + "Filesystem Deferred Release" +) + static rtems_filesystem_global_location_t *deferred_released_global_locations; rtems_filesystem_location_info_t *rtems_filesystem_location_copy( @@ -134,9 +140,10 @@ static void deferred_release(void) rtems_filesystem_global_location_t *current = NULL; do { + rtems_interrupt_lock_context lock_context; int count = 0; - _Thread_Disable_dispatch(); + rtems_interrupt_lock_acquire(&deferred_release_lock, &lock_context); current = deferred_released_global_locations; if (current != NULL) { deferred_released_global_locations = current->deferred_released_next; @@ -144,7 +151,7 @@ static void deferred_release(void) current->deferred_released_next = NULL; current->deferred_released_count = 0; } - _Thread_Enable_dispatch(); + rtems_interrupt_lock_release(&deferred_release_lock, &lock_context); if (current != NULL) { release_with_count(current, count); @@ -182,6 +189,10 @@ void rtems_filesystem_global_location_release( if (_Thread_Dispatch_is_enabled()) { release_with_count(global_loc, 1); } else { + rtems_interrupt_lock_context lock_context; + + rtems_interrupt_lock_acquire(&deferred_release_lock, &lock_context); + if (global_loc->deferred_released_count == 0) { rtems_filesystem_global_location_t *head = deferred_released_global_locations; @@ -192,6 +203,8 @@ void rtems_filesystem_global_location_release( } else { ++global_loc->deferred_released_count; } + + rtems_interrupt_lock_release(&deferred_release_lock, &lock_context); } } diff --git a/cpukit/libcsupport/src/umask.c b/cpukit/libcsupport/src/umask.c index 678cac8c50..fba2405e4a 100644 --- a/cpukit/libcsupport/src/umask.c +++ b/cpukit/libcsupport/src/umask.c @@ -21,7 +21,6 @@ #include <sys/stat.h> #include <rtems/libio_.h> 
-#include <rtems/score/threaddispatch.h> /** * POSIX 1003.1b 5.3.3 - Set File Creation Mask @@ -30,14 +29,10 @@ mode_t umask( mode_t cmask ) { mode_t old_mask; - /* - * We must use the same protection mechanism as in - * rtems_libio_set_private_env(). - */ - _Thread_Disable_dispatch(); + rtems_libio_lock(); old_mask = rtems_filesystem_umask; rtems_filesystem_umask = cmask & (S_IRWXU | S_IRWXG | S_IRWXO); - _Thread_Enable_dispatch(); + rtems_libio_unlock(); return old_mask; } diff --git a/cpukit/libnetworking/rtems/rtems_bsdnet_internal.h b/cpukit/libnetworking/rtems/rtems_bsdnet_internal.h index fc0370ceb5..4de1b0bda6 100644 --- a/cpukit/libnetworking/rtems/rtems_bsdnet_internal.h +++ b/cpukit/libnetworking/rtems/rtems_bsdnet_internal.h @@ -12,6 +12,7 @@ #include <rtems.h> #include <rtems/fs.h> +#include <rtems/bsd.h> #ifdef __cplusplus extern "C" { @@ -61,7 +62,7 @@ void *memset(void *s, int c, size_t n); #define panic rtems_panic #define suser(a,b) 0 -void microtime(struct timeval *tv); +#define microtime(tv) rtems_bsd_microtime(tv) #define hz rtems_bsdnet_ticks_per_second #define tick rtems_bsdnet_microseconds_per_tick diff --git a/cpukit/libnetworking/rtems/rtems_glue.c b/cpukit/libnetworking/rtems/rtems_glue.c index 63d4b8045f..1355fbb838 100644 --- a/cpukit/libnetworking/rtems/rtems_glue.c +++ b/cpukit/libnetworking/rtems/rtems_glue.c @@ -378,9 +378,6 @@ rtems_bsdnet_semaphore_obtain (void) #ifdef RTEMS_FAST_MUTEX ISR_lock_Context lock_context; Thread_Control *executing; -#ifdef RTEMS_SMP - _Thread_Disable_dispatch(); -#endif _ISR_lock_ISR_disable(&lock_context); if (!the_networkSemaphore) rtems_panic ("rtems-net: network sema obtain: network not initialised\n"); @@ -393,9 +390,6 @@ rtems_bsdnet_semaphore_obtain (void) 0, /* forever */ &lock_context ); -#ifdef RTEMS_SMP - _Thread_Enable_dispatch(); -#endif if (executing->Wait.return_code) rtems_panic ("rtems-net: can't obtain network sema: %d\n", executing->Wait.return_code); @@ -416,18 +410,19 @@ void 
rtems_bsdnet_semaphore_release (void) { #ifdef RTEMS_FAST_MUTEX - int i; + ISR_lock_Context lock_context; + CORE_mutex_Status status; - _Thread_Disable_dispatch(); if (!the_networkSemaphore) rtems_panic ("rtems-net: network sema obtain: network not initialised\n"); - i = _CORE_mutex_Surrender ( + _ISR_lock_ISR_disable(&lock_context); + status = _CORE_mutex_Surrender ( &the_networkSemaphore->Core_control.mutex, networkSemaphore, - NULL + NULL, + &lock_context ); - _Thread_Enable_dispatch(); - if (i) + if (status != CORE_MUTEX_STATUS_SUCCESSFUL) rtems_panic ("rtems-net: can't release network sema: %i\n"); #else rtems_status_code sc; @@ -1344,7 +1339,3 @@ m_clalloc(int ncl, int nowait) return 1; } -void microtime(struct timeval *tv) -{ - rtems_clock_get_uptime_timeval(tv); -} diff --git a/cpukit/librpc/src/xdr/xdr_float.c b/cpukit/librpc/src/xdr/xdr_float.c index 925b29499f..ac8c46d49a 100644 --- a/cpukit/librpc/src/xdr/xdr_float.c +++ b/cpukit/librpc/src/xdr/xdr_float.c @@ -61,6 +61,7 @@ static char *rcsid = "$FreeBSD: src/lib/libc/xdr/xdr_float.c,v 1.7 1999/08/28 00 #if defined(__alpha__) || \ defined(_AM29K) || \ defined(__arm__) || \ + defined(__epiphany__) || defined(__EPIPHANY__) || \ defined(__H8300__) || defined(__h8300__) || \ defined(__hppa__) || \ defined(__i386__) || \ diff --git a/cpukit/posix/include/rtems/posix/barrierimpl.h b/cpukit/posix/include/rtems/posix/barrierimpl.h index 240abad45d..22be1c0d6e 100644 --- a/cpukit/posix/include/rtems/posix/barrierimpl.h +++ b/cpukit/posix/include/rtems/posix/barrierimpl.h @@ -80,6 +80,7 @@ RTEMS_INLINE_ROUTINE void _POSIX_Barrier_Free ( POSIX_Barrier_Control *the_barrier ) { + _CORE_barrier_Destroy( &the_barrier->Barrier ); _Objects_Free( &_POSIX_Barrier_Information, &the_barrier->Object ); } diff --git a/cpukit/posix/include/rtems/posix/condimpl.h b/cpukit/posix/include/rtems/posix/condimpl.h index 435127ee3c..7def0f852a 100644 --- a/cpukit/posix/include/rtems/posix/condimpl.h +++ 
b/cpukit/posix/include/rtems/posix/condimpl.h @@ -19,6 +19,7 @@ #include <rtems/posix/cond.h> #include <rtems/score/objectimpl.h> +#include <rtems/score/threadqimpl.h> #include <rtems/score/watchdog.h> #ifdef __cplusplus @@ -72,6 +73,7 @@ RTEMS_INLINE_ROUTINE void _POSIX_Condition_variables_Free ( POSIX_Condition_variables_Control *the_condition_variable ) { + _Thread_queue_Destroy( &the_condition_variable->Wait_queue ); _Objects_Free( &_POSIX_Condition_variables_Information, &the_condition_variable->Object diff --git a/cpukit/posix/include/rtems/posix/mqueueimpl.h b/cpukit/posix/include/rtems/posix/mqueueimpl.h index bfc850d990..90269bf57b 100644 --- a/cpukit/posix/include/rtems/posix/mqueueimpl.h +++ b/cpukit/posix/include/rtems/posix/mqueueimpl.h @@ -250,6 +250,21 @@ RTEMS_INLINE_ROUTINE POSIX_Message_queue_Control_fd *_POSIX_Message_queue_Get_fd location ); } + +RTEMS_INLINE_ROUTINE POSIX_Message_queue_Control_fd * +_POSIX_Message_queue_Get_fd_interrupt_disable( + mqd_t id, + Objects_Locations *location, + ISR_lock_Context *lock_context +) +{ + return (POSIX_Message_queue_Control_fd *) _Objects_Get_isr_disable( + &_POSIX_Message_queue_Information_fds, + (Objects_Id)id, + location, + lock_context + ); +} /** * @see _POSIX_Name_to_id(). 
diff --git a/cpukit/posix/include/rtems/posix/muteximpl.h b/cpukit/posix/include/rtems/posix/muteximpl.h index 821961c0f4..f4c6c686aa 100644 --- a/cpukit/posix/include/rtems/posix/muteximpl.h +++ b/cpukit/posix/include/rtems/posix/muteximpl.h @@ -72,6 +72,7 @@ RTEMS_INLINE_ROUTINE void _POSIX_Mutex_Free( POSIX_Mutex_Control *the_mutex ) { + _CORE_mutex_Destroy( &the_mutex->Mutex ); _Objects_Free( &_POSIX_Mutex_Information, &the_mutex->Object ); } diff --git a/cpukit/posix/include/rtems/posix/psignalimpl.h b/cpukit/posix/include/rtems/posix/psignalimpl.h index 81561e2539..166705b37b 100644 --- a/cpukit/posix/include/rtems/posix/psignalimpl.h +++ b/cpukit/posix/include/rtems/posix/psignalimpl.h @@ -33,7 +33,7 @@ #include <rtems/posix/sigset.h> #include <rtems/score/apiext.h> #include <rtems/score/isrlock.h> -#include <rtems/score/threadq.h> +#include <rtems/score/threadqimpl.h> #define _States_Is_interruptible_signal( _states ) \ ( ((_states) & \ @@ -55,8 +55,6 @@ * Variables */ -extern ISR_lock_Control _POSIX_signals_Lock; - extern sigset_t _POSIX_signals_Pending; extern const struct sigaction _POSIX_signals_Default_vectors[ SIG_ARRAY_MAX ]; @@ -79,10 +77,10 @@ extern Chain_Control _POSIX_signals_Siginfo[ SIG_ARRAY_MAX ]; void _POSIX_signals_Manager_Initialization(void); #define _POSIX_signals_Acquire( lock_context ) \ - _ISR_lock_ISR_disable_and_acquire( &_POSIX_signals_Lock, lock_context ) + _Thread_queue_Acquire( &_POSIX_signals_Wait_queue, lock_context ) #define _POSIX_signals_Release( lock_context ) \ - _ISR_lock_Release_and_ISR_enable( &_POSIX_signals_Lock, lock_context ) + _Thread_queue_Release( &_POSIX_signals_Wait_queue, lock_context ) void _POSIX_signals_Action_handler( Thread_Control *executing, diff --git a/cpukit/posix/include/rtems/posix/rwlockimpl.h b/cpukit/posix/include/rtems/posix/rwlockimpl.h index 48e0a17366..46e33904dc 100644 --- a/cpukit/posix/include/rtems/posix/rwlockimpl.h +++ b/cpukit/posix/include/rtems/posix/rwlockimpl.h @@ -89,6 +89,7 @@ 
RTEMS_INLINE_ROUTINE void _POSIX_RWLock_Free ( POSIX_RWLock_Control *the_RWLock ) { + _CORE_RWLock_Destroy( &the_RWLock->RWLock ); _Objects_Free( &_POSIX_RWLock_Information, &the_RWLock->Object ); } diff --git a/cpukit/posix/include/rtems/posix/semaphoreimpl.h b/cpukit/posix/include/rtems/posix/semaphoreimpl.h index df5e5238de..eeea51c488 100644 --- a/cpukit/posix/include/rtems/posix/semaphoreimpl.h +++ b/cpukit/posix/include/rtems/posix/semaphoreimpl.h @@ -65,6 +65,7 @@ RTEMS_INLINE_ROUTINE void _POSIX_Semaphore_Free ( POSIX_Semaphore_Control *the_semaphore ) { + _CORE_semaphore_Destroy( &the_semaphore->Semaphore ); _Objects_Free( &_POSIX_Semaphore_Information, &the_semaphore->Object ); } @@ -88,6 +89,20 @@ RTEMS_INLINE_ROUTINE POSIX_Semaphore_Control *_POSIX_Semaphore_Get ( _Objects_Get( &_POSIX_Semaphore_Information, (Objects_Id)*id, location ); } +RTEMS_INLINE_ROUTINE POSIX_Semaphore_Control * +_POSIX_Semaphore_Get_interrupt_disable( + sem_t *id, + Objects_Locations *location, + ISR_lock_Context *lock_context +) +{ + return (POSIX_Semaphore_Control *) _Objects_Get_isr_disable( + &_POSIX_Semaphore_Information, + (Objects_Id)*id, + location, + lock_context + ); +} /** * @brief POSIX Semaphore Create Support diff --git a/cpukit/posix/src/alarm.c b/cpukit/posix/src/alarm.c index 48036617d4..6f051d7190 100644 --- a/cpukit/posix/src/alarm.c +++ b/cpukit/posix/src/alarm.c @@ -73,7 +73,7 @@ unsigned int alarm( _Thread_Disable_dispatch(); - state = _Watchdog_Remove( the_timer ); + state = _Watchdog_Remove_seconds( the_timer ); if ( (state == WATCHDOG_ACTIVE) || (state == WATCHDOG_REMOVE_IT) ) { /* * The stop_time and start_time fields are snapshots of ticks since diff --git a/cpukit/posix/src/clockgettime.c b/cpukit/posix/src/clockgettime.c index 2838ba657e..83a35d769a 100644 --- a/cpukit/posix/src/clockgettime.c +++ b/cpukit/posix/src/clockgettime.c @@ -37,19 +37,21 @@ int clock_gettime( rtems_set_errno_and_return_minus_one( EINVAL ); if ( clock_id == CLOCK_REALTIME ) 
{ - _TOD_Get(tp); + _TOD_Get_as_timespec(tp); return 0; } #ifdef CLOCK_MONOTONIC if ( clock_id == CLOCK_MONOTONIC ) { - _TOD_Get_uptime_as_timespec( tp ); + _TOD_Get_zero_based_uptime_as_timespec( tp ); + --tp->tv_sec; return 0; } #endif #ifdef _POSIX_CPUTIME if ( clock_id == CLOCK_PROCESS_CPUTIME_ID ) { - _TOD_Get_uptime_as_timespec( tp ); + _TOD_Get_zero_based_uptime_as_timespec( tp ); + --tp->tv_sec; return 0; } #endif diff --git a/cpukit/posix/src/condinit.c b/cpukit/posix/src/condinit.c index c1c14b8bf9..0b61d14cb9 100644 --- a/cpukit/posix/src/condinit.c +++ b/cpukit/posix/src/condinit.c @@ -64,8 +64,7 @@ int pthread_cond_init( _Thread_queue_Initialize( &the_cond->Wait_queue, - THREAD_QUEUE_DISCIPLINE_FIFO, - ETIMEDOUT + THREAD_QUEUE_DISCIPLINE_FIFO ); _Objects_Open_u32( diff --git a/cpukit/posix/src/condwaitsupp.c b/cpukit/posix/src/condwaitsupp.c index 1abdc426fc..5a71dc3b54 100644 --- a/cpukit/posix/src/condwaitsupp.c +++ b/cpukit/posix/src/condwaitsupp.c @@ -74,10 +74,8 @@ int _POSIX_Condition_variables_Wait_support( if ( !already_timedout ) { the_cond->Mutex = *mutex; - _Thread_queue_Enter_critical_section( &the_cond->Wait_queue ); executing = _Thread_Executing; executing->Wait.return_code = 0; - executing->Wait.queue = &the_cond->Wait_queue; executing->Wait.id = *cond; _Thread_queue_Enqueue( @@ -85,7 +83,8 @@ int _POSIX_Condition_variables_Wait_support( executing, STATES_WAITING_FOR_CONDITION_VARIABLE | STATES_INTERRUPTIBLE_BY_SIGNAL, - timeout + timeout, + ETIMEDOUT ); _Objects_Put( &the_cond->Object ); diff --git a/cpukit/posix/src/killinfo.c b/cpukit/posix/src/killinfo.c index 5d3dded53f..c8c59d6096 100644 --- a/cpukit/posix/src/killinfo.c +++ b/cpukit/posix/src/killinfo.c @@ -146,7 +146,7 @@ int killinfo( !_Chain_Is_tail( the_chain, the_node ) ; the_node = the_node->next ) { - the_thread = (Thread_Control *)the_node; + the_thread = THREAD_CHAIN_NODE_TO_THREAD( the_node ); api = the_thread->API_Extensions[ THREAD_API_POSIX ]; #if 
defined(DEBUG_SIGNAL_PROCESSING) diff --git a/cpukit/posix/src/mqueuerecvsupp.c b/cpukit/posix/src/mqueuerecvsupp.c index bea19fe9ca..2f9bb2d276 100644 --- a/cpukit/posix/src/mqueuerecvsupp.c +++ b/cpukit/posix/src/mqueuerecvsupp.c @@ -54,20 +54,25 @@ ssize_t _POSIX_Message_queue_Receive_support( size_t length_out; bool do_wait; Thread_Control *executing; + ISR_lock_Context lock_context; - the_mq_fd = _POSIX_Message_queue_Get_fd( mqdes, &location ); + the_mq_fd = _POSIX_Message_queue_Get_fd_interrupt_disable( + mqdes, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: if ( (the_mq_fd->oflag & O_ACCMODE) == O_WRONLY ) { - _Objects_Put( &the_mq_fd->Object ); + _ISR_lock_ISR_enable( &lock_context ); rtems_set_errno_and_return_minus_one( EBADF ); } the_mq = the_mq_fd->Queue; if ( msg_len < the_mq->Message_queue.maximum_message_size ) { - _Objects_Put( &the_mq_fd->Object ); + _ISR_lock_ISR_enable( &lock_context ); rtems_set_errno_and_return_minus_one( EMSGSIZE ); } @@ -97,10 +102,10 @@ ssize_t _POSIX_Message_queue_Receive_support( msg_ptr, &length_out, do_wait, - timeout + timeout, + &lock_context ); - _Objects_Put( &the_mq_fd->Object ); if (msg_prio) { *msg_prio = _POSIX_Message_queue_Priority_from_core( executing->Wait.count diff --git a/cpukit/posix/src/mqueuesendsupp.c b/cpukit/posix/src/mqueuesendsupp.c index 2d6ddae859..d73538ae88 100644 --- a/cpukit/posix/src/mqueuesendsupp.c +++ b/cpukit/posix/src/mqueuesendsupp.c @@ -64,6 +64,7 @@ int _POSIX_Message_queue_Send_support( CORE_message_queue_Status msg_status; bool do_wait; Thread_Control *executing; + ISR_lock_Context lock_context; /* * Validate the priority. 
@@ -73,12 +74,16 @@ int _POSIX_Message_queue_Send_support( if ( msg_prio > MQ_PRIO_MAX ) rtems_set_errno_and_return_minus_one( EINVAL ); - the_mq_fd = _POSIX_Message_queue_Get_fd( mqdes, &location ); + the_mq_fd = _POSIX_Message_queue_Get_fd_interrupt_disable( + mqdes, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: if ( (the_mq_fd->oflag & O_ACCMODE) == O_RDONLY ) { - _Objects_Put( &the_mq_fd->Object ); + _ISR_lock_ISR_enable( &lock_context ); rtems_set_errno_and_return_minus_one( EBADF ); } @@ -105,11 +110,10 @@ int _POSIX_Message_queue_Send_support( NULL, _POSIX_Message_queue_Priority_to_core( msg_prio ), do_wait, - timeout /* no timeout */ + timeout, /* no timeout */ + &lock_context ); - _Objects_Put( &the_mq_fd->Object ); - /* * If we had to block, then this is where the task returns * after it wakes up. The returned status is correct for diff --git a/cpukit/posix/src/mutexlocksupp.c b/cpukit/posix/src/mutexlocksupp.c index 9b20f58d13..b63a89aa4e 100644 --- a/cpukit/posix/src/mutexlocksupp.c +++ b/cpukit/posix/src/mutexlocksupp.c @@ -54,9 +54,6 @@ int _POSIX_Mutex_Lock_support( switch ( location ) { case OBJECTS_LOCAL: -#if defined(RTEMS_SMP) - _Thread_Disable_dispatch(); -#endif executing = _Thread_Executing; _CORE_mutex_Seize( &the_mutex->Mutex, @@ -66,10 +63,6 @@ int _POSIX_Mutex_Lock_support( timeout, &lock_context ); -#if defined(RTEMS_SMP) - _Thread_Enable_dispatch(); -#endif - _Objects_Put_for_get_isr_disable( &the_mutex->Object ); return _POSIX_Mutex_Translate_core_mutex_return_code( (CORE_mutex_Status) executing->Wait.return_code ); diff --git a/cpukit/posix/src/mutexsetprioceiling.c b/cpukit/posix/src/mutexsetprioceiling.c index f0fb8f5ee6..32d92107cc 100644 --- a/cpukit/posix/src/mutexsetprioceiling.c +++ b/cpukit/posix/src/mutexsetprioceiling.c @@ -41,6 +41,7 @@ int pthread_mutex_setprioceiling( register POSIX_Mutex_Control *the_mutex; Objects_Locations location; Priority_Control the_priority; + ISR_lock_Context 
lock_context; if ( !old_ceiling ) return EINVAL; @@ -64,7 +65,11 @@ int pthread_mutex_setprioceiling( * NOTE: This makes it easier to get 100% binary coverage since the * bad Id case is handled by the switch. */ - the_mutex = _POSIX_Mutex_Get( mutex, &location ); + the_mutex = _POSIX_Mutex_Get_interrupt_disable( + mutex, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: @@ -78,9 +83,9 @@ int pthread_mutex_setprioceiling( _CORE_mutex_Surrender( &the_mutex->Mutex, the_mutex->Object.id, - NULL + NULL, + &lock_context ); - _Objects_Put( &the_mutex->Object ); return 0; diff --git a/cpukit/posix/src/mutexunlock.c b/cpukit/posix/src/mutexunlock.c index dbb9d1a3e6..c5b2375d1a 100644 --- a/cpukit/posix/src/mutexunlock.c +++ b/cpukit/posix/src/mutexunlock.c @@ -41,17 +41,22 @@ int pthread_mutex_unlock( register POSIX_Mutex_Control *the_mutex; Objects_Locations location; CORE_mutex_Status status; + ISR_lock_Context lock_context; - the_mutex = _POSIX_Mutex_Get( mutex, &location ); + the_mutex = _POSIX_Mutex_Get_interrupt_disable( + mutex, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: status = _CORE_mutex_Surrender( &the_mutex->Mutex, the_mutex->Object.id, - NULL + NULL, + &lock_context ); - _Objects_Put( &the_mutex->Object ); return _POSIX_Mutex_Translate_core_mutex_return_code( status ); #if defined(RTEMS_MULTIPROCESSING) diff --git a/cpukit/posix/src/nanosleep.c b/cpukit/posix/src/nanosleep.c index 39ae84d3e4..46697ae8fc 100644 --- a/cpukit/posix/src/nanosleep.c +++ b/cpukit/posix/src/nanosleep.c @@ -23,9 +23,13 @@ #include <rtems/seterr.h> #include <rtems/score/threadimpl.h> +#include <rtems/score/threadqimpl.h> #include <rtems/score/timespec.h> #include <rtems/score/watchdogimpl.h> +static Thread_queue_Control _Nanosleep_Pseudo_queue = + THREAD_QUEUE_FIFO_INITIALIZER( _Nanosleep_Pseudo_queue, "Nanosleep" ); + /* * 14.2.5 High Resolution Sleep, P1003.1b-1993, p. 
269 */ @@ -38,7 +42,8 @@ int nanosleep( * It is critical to obtain the executing thread after thread dispatching is * disabled on SMP configurations. */ - Thread_Control *executing; + Thread_Control *executing; + Per_CPU_Control *cpu_self; Watchdog_Interval ticks; Watchdog_Interval elapsed; @@ -58,16 +63,17 @@ int nanosleep( */ ticks = _Timespec_To_ticks( rqtp ); + executing = _Thread_Get_executing(); + /* * A nanosleep for zero time is implemented as a yield. * This behavior is also beyond the POSIX specification but is * consistent with the RTEMS API and yields desirable behavior. */ if ( !ticks ) { - _Thread_Disable_dispatch(); - executing = _Thread_Executing; + cpu_self = _Thread_Dispatch_disable(); _Thread_Yield( executing ); - _Thread_Enable_dispatch(); + _Thread_Dispatch_enable( cpu_self ); if ( rmtp ) { rmtp->tv_sec = 0; rmtp->tv_nsec = 0; @@ -78,20 +84,13 @@ int nanosleep( /* * Block for the desired amount of time */ - _Thread_Disable_dispatch(); - executing = _Thread_Executing; - _Thread_Set_state( - executing, - STATES_DELAYING | STATES_INTERRUPTIBLE_BY_SIGNAL - ); - _Watchdog_Initialize( - &executing->Timer, - _Thread_Delay_ended, - 0, - executing - ); - _Watchdog_Insert_ticks( &executing->Timer, ticks ); - _Thread_Enable_dispatch(); + _Thread_queue_Enqueue( + &_Nanosleep_Pseudo_queue, + executing, + STATES_DELAYING | STATES_INTERRUPTIBLE_BY_SIGNAL, + ticks, + 0 + ); /* * Calculate the time that passed while we were sleeping and how diff --git a/cpukit/posix/src/posixtimespecabsolutetimeout.c b/cpukit/posix/src/posixtimespecabsolutetimeout.c index 32da45f3ae..f3029861d1 100644 --- a/cpukit/posix/src/posixtimespecabsolutetimeout.c +++ b/cpukit/posix/src/posixtimespecabsolutetimeout.c @@ -57,7 +57,7 @@ POSIX_Absolute_timeout_conversion_results_t _POSIX_Absolute_timeout_to_ticks( /* * Is the absolute time in the past? 
*/ - _TOD_Get( ¤t_time ); + _TOD_Get_as_timespec( ¤t_time ); if ( _Timespec_Less_than( abstime, ¤t_time ) ) return POSIX_ABSOLUTE_TIMEOUT_IS_IN_PAST; diff --git a/cpukit/posix/src/psignal.c b/cpukit/posix/src/psignal.c index eec4d95fb5..3ca0723860 100644 --- a/cpukit/posix/src/psignal.c +++ b/cpukit/posix/src/psignal.c @@ -45,8 +45,6 @@ RTEMS_STATIC_ASSERT( /*** PROCESS WIDE STUFF ****/ -ISR_lock_Control _POSIX_signals_Lock = ISR_LOCK_INITIALIZER("POSIX signals"); - sigset_t _POSIX_signals_Pending; void _POSIX_signals_Abnormal_termination_handler( @@ -192,8 +190,7 @@ void _POSIX_signals_Manager_Initialization(void) */ _Thread_queue_Initialize( &_POSIX_signals_Wait_queue, - THREAD_QUEUE_DISCIPLINE_FIFO, - EAGAIN + THREAD_QUEUE_DISCIPLINE_FIFO ); /* XXX status codes */ diff --git a/cpukit/posix/src/psignalunblockthread.c b/cpukit/posix/src/psignalunblockthread.c index c56c1502a1..200e9e714b 100644 --- a/cpukit/posix/src/psignalunblockthread.c +++ b/cpukit/posix/src/psignalunblockthread.c @@ -110,16 +110,7 @@ bool _POSIX_signals_Unblock_thread( if ( _States_Is_interruptible_by_signal( the_thread->current_state ) ) { the_thread->Wait.return_code = EINTR; - /* - * In pthread_cond_wait, a thread will be blocking on a thread - * queue, but is also interruptible by a POSIX signal. 
- */ - if ( _States_Is_waiting_on_thread_queue(the_thread->current_state) ) - _Thread_queue_Extract_with_proxy( the_thread ); - else if ( _States_Is_delaying(the_thread->current_state) ) { - (void) _Watchdog_Remove( &the_thread->Timer ); - _Thread_Unblock( the_thread ); - } + _Thread_queue_Extract_with_proxy( the_thread ); } } return _POSIX_signals_Unblock_thread_done( the_thread, api, false ); diff --git a/cpukit/posix/src/pthread.c b/cpukit/posix/src/pthread.c index 4d28de536f..6395ec0ebd 100644 --- a/cpukit/posix/src/pthread.c +++ b/cpukit/posix/src/pthread.c @@ -83,6 +83,23 @@ pthread_attr_t _POSIX_Threads_Default_attributes = { #endif }; +static bool _POSIX_Threads_Sporadic_budget_TSR_filter( + Thread_Control *the_thread, + Priority_Control *new_priority, + void *arg +) +{ + the_thread->real_priority = *new_priority; + + /* + * If holding a resource, then do not change it. + * + * If this would make them less important, then do not change it. + */ + return !_Thread_Owns_resources( the_thread ) && + _Thread_Priority_less_than( the_thread->current_priority, *new_priority ); +} + /* * _POSIX_Threads_Sporadic_budget_TSR */ @@ -92,7 +109,6 @@ void _POSIX_Threads_Sporadic_budget_TSR( ) { uint32_t ticks; - uint32_t new_priority; Thread_Control *the_thread; POSIX_API_Control *api; @@ -105,27 +121,13 @@ void _POSIX_Threads_Sporadic_budget_TSR( the_thread->cpu_time_budget = ticks; - new_priority = _POSIX_Priority_To_core( api->schedparam.sched_priority ); - the_thread->real_priority = new_priority; - - /* - * If holding a resource, then do not change it. - */ - #if 0 - printk( "TSR %d %d %d\n", the_thread->resource_count, - the_thread->current_priority, new_priority ); - #endif - if ( !_Thread_Owns_resources( the_thread ) ) { - /* - * If this would make them less important, then do not change it. 
- */ - if ( the_thread->current_priority > new_priority ) { - _Thread_Change_priority( the_thread, new_priority, true ); - #if 0 - printk( "raise priority\n" ); - #endif - } - } + _Thread_Change_priority( + the_thread, + _POSIX_Priority_To_core( api->schedparam.sched_priority ), + NULL, + _POSIX_Threads_Sporadic_budget_TSR_filter, + true + ); /* ticks is guaranteed to be at least one */ ticks = _Timespec_To_ticks( &api->schedparam.sched_ss_repl_period ); @@ -133,6 +135,25 @@ void _POSIX_Threads_Sporadic_budget_TSR( _Watchdog_Insert_ticks( &api->Sporadic_timer, ticks ); } +static bool _POSIX_Threads_Sporadic_budget_callout_filter( + Thread_Control *the_thread, + Priority_Control *new_priority, + void *arg +) +{ + the_thread->real_priority = *new_priority; + + /* + * If holding a resource, then do not change it. + * + * Make sure we are actually lowering it. If they have lowered it + * to logically lower than sched_ss_low_priority, then we do not want to + * change it. + */ + return !_Thread_Owns_resources( the_thread ) && + _Thread_Priority_less_than( *new_priority, the_thread->current_priority ); +} + /* * _POSIX_Threads_Sporadic_budget_callout */ @@ -141,7 +162,6 @@ void _POSIX_Threads_Sporadic_budget_callout( ) { POSIX_API_Control *api; - uint32_t new_priority; api = the_thread->API_Extensions[ THREAD_API_POSIX ]; @@ -151,29 +171,13 @@ void _POSIX_Threads_Sporadic_budget_callout( */ the_thread->cpu_time_budget = UINT32_MAX; - new_priority = _POSIX_Priority_To_core(api->schedparam.sched_ss_low_priority); - the_thread->real_priority = new_priority; - - /* - * If holding a resource, then do not change it. - */ - #if 0 - printk( "callout %d %d %d\n", the_thread->resource_count, - the_thread->current_priority, new_priority ); - #endif - if ( !_Thread_Owns_resources( the_thread ) ) { - /* - * Make sure we are actually lowering it. If they have lowered it - * to logically lower than sched_ss_low_priority, then we do not want to - * change it. 
- */ - if ( the_thread->current_priority < new_priority ) { - _Thread_Change_priority( the_thread, new_priority, true ); - #if 0 - printk( "lower priority\n" ); - #endif - } - } + _Thread_Change_priority( + the_thread, + _POSIX_Priority_To_core( api->schedparam.sched_ss_low_priority ), + NULL, + _POSIX_Threads_Sporadic_budget_callout_filter, + true + ); } /* @@ -235,11 +239,7 @@ static bool _POSIX_Threads_Create_extension( _POSIX_signals_Action_handler ); - _Thread_queue_Initialize( - &api->Join_List, - THREAD_QUEUE_DISCIPLINE_FIFO, - 0 - ); + _Thread_queue_Initialize( &api->Join_List, THREAD_QUEUE_DISCIPLINE_FIFO ); _Watchdog_Initialize( &api->Sporadic_timer, @@ -286,7 +286,9 @@ static void _POSIX_Threads_Terminate_extension( *(void **)the_thread->Wait.return_argument = value_ptr; if ( api->schedpolicy == SCHED_SPORADIC ) - (void) _Watchdog_Remove( &api->Sporadic_timer ); + _Watchdog_Remove_ticks( &api->Sporadic_timer ); + + _Thread_queue_Destroy( &api->Join_List ); _Thread_Enable_dispatch(); } diff --git a/cpukit/posix/src/pthreadjoin.c b/cpukit/posix/src/pthreadjoin.c index e2b1664b0b..f7361310c4 100644 --- a/cpukit/posix/src/pthreadjoin.c +++ b/cpukit/posix/src/pthreadjoin.c @@ -67,12 +67,12 @@ on_EINTR: _Thread_Clear_state( the_thread, STATES_WAITING_FOR_JOIN_AT_EXIT ); } else { executing->Wait.return_argument = &return_pointer; - _Thread_queue_Enter_critical_section( &api->Join_List ); _Thread_queue_Enqueue( &api->Join_List, executing, STATES_WAITING_FOR_JOIN | STATES_INTERRUPTIBLE_BY_SIGNAL, - WATCHDOG_NO_TIMEOUT + WATCHDOG_NO_TIMEOUT, + 0 ); } _Objects_Put( &the_thread->Object ); diff --git a/cpukit/posix/src/pthreadsetschedparam.c b/cpukit/posix/src/pthreadsetschedparam.c index d5d0a5ba8a..067e6ba509 100644 --- a/cpukit/posix/src/pthreadsetschedparam.c +++ b/cpukit/posix/src/pthreadsetschedparam.c @@ -44,6 +44,7 @@ int pthread_setschedparam( Thread_CPU_budget_algorithm_callout budget_callout; Objects_Locations location; int rc; + Priority_Control unused; 
/* * Check all the parameters @@ -70,7 +71,7 @@ int pthread_setschedparam( api = the_thread->API_Extensions[ THREAD_API_POSIX ]; if ( api->schedpolicy == SCHED_SPORADIC ) - (void) _Watchdog_Remove( &api->Sporadic_timer ); + _Watchdog_Remove_ticks( &api->Sporadic_timer ); api->schedpolicy = policy; api->schedparam = *param; @@ -87,19 +88,17 @@ int pthread_setschedparam( the_thread->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice(); - the_thread->real_priority = - _POSIX_Priority_To_core( api->schedparam.sched_priority ); - - _Thread_Change_priority( - the_thread, - the_thread->real_priority, - true + _Thread_Set_priority( + the_thread, + _POSIX_Priority_To_core( api->schedparam.sched_priority ), + &unused, + true ); break; case SCHED_SPORADIC: api->ss_high_priority = api->schedparam.sched_priority; - _Watchdog_Remove( &api->Sporadic_timer ); + _Watchdog_Remove_ticks( &api->Sporadic_timer ); _POSIX_Threads_Sporadic_budget_TSR( 0, the_thread ); break; } diff --git a/cpukit/posix/src/semaphorewaitsupp.c b/cpukit/posix/src/semaphorewaitsupp.c index cd572c4017..8b36561983 100644 --- a/cpukit/posix/src/semaphorewaitsupp.c +++ b/cpukit/posix/src/semaphorewaitsupp.c @@ -40,8 +40,13 @@ int _POSIX_Semaphore_Wait_support( POSIX_Semaphore_Control *the_semaphore; Objects_Locations location; Thread_Control *executing; + ISR_lock_Context lock_context; - the_semaphore = _POSIX_Semaphore_Get( sem, &location ); + the_semaphore = _POSIX_Semaphore_Get_interrupt_disable( + sem, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: @@ -51,9 +56,9 @@ int _POSIX_Semaphore_Wait_support( executing, the_semaphore->Object.id, blocking, - timeout + timeout, + &lock_context ); - _Objects_Put( &the_semaphore->Object ); if ( !executing->Wait.return_code ) return 0; diff --git a/cpukit/posix/src/sempost.c b/cpukit/posix/src/sempost.c index f139c9ab67..9c8c673ddb 100644 --- a/cpukit/posix/src/sempost.c +++ b/cpukit/posix/src/sempost.c @@ -37,8 +37,13 @@ int 
sem_post( { POSIX_Semaphore_Control *the_semaphore; Objects_Locations location; + ISR_lock_Context lock_context; - the_semaphore = _POSIX_Semaphore_Get( sem, &location ); + the_semaphore = _POSIX_Semaphore_Get_interrupt_disable( + sem, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: @@ -46,12 +51,12 @@ int sem_post( &the_semaphore->Semaphore, the_semaphore->Object.id, #if defined(RTEMS_MULTIPROCESSING) - NULL /* POSIX Semaphores are local only */ + NULL, /* POSIX Semaphores are local only */ #else - NULL + NULL, #endif + &lock_context ); - _Objects_Put( &the_semaphore->Object ); return 0; #if defined(RTEMS_MULTIPROCESSING) diff --git a/cpukit/posix/src/sigtimedwait.c b/cpukit/posix/src/sigtimedwait.c index 8d86ba72a9..fb8a243a0a 100644 --- a/cpukit/posix/src/sigtimedwait.c +++ b/cpukit/posix/src/sigtimedwait.c @@ -152,17 +152,16 @@ int sigtimedwait( the_info->si_signo = -1; _Thread_Disable_dispatch(); - executing->Wait.queue = &_POSIX_signals_Wait_queue; executing->Wait.return_code = EINTR; executing->Wait.option = *set; executing->Wait.return_argument = the_info; - _Thread_queue_Enter_critical_section( &_POSIX_signals_Wait_queue ); - _POSIX_signals_Release( &lock_context ); - _Thread_queue_Enqueue( + _Thread_queue_Enqueue_critical( &_POSIX_signals_Wait_queue, executing, STATES_WAITING_FOR_SIGNAL | STATES_INTERRUPTIBLE_BY_SIGNAL, - interval + interval, + EAGAIN, + &lock_context ); _Thread_Enable_dispatch(); diff --git a/cpukit/posix/src/timerdelete.c b/cpukit/posix/src/timerdelete.c index 71b25faed9..e090be2aca 100644 --- a/cpukit/posix/src/timerdelete.c +++ b/cpukit/posix/src/timerdelete.c @@ -54,7 +54,7 @@ int timer_delete( case OBJECTS_LOCAL: _Objects_Close( &_POSIX_Timer_Information, &ptimer->Object ); ptimer->state = POSIX_TIMER_STATE_FREE; - (void) _Watchdog_Remove( &ptimer->Timer ); + _Watchdog_Remove_ticks( &ptimer->Timer ); _Objects_Put( &ptimer->Object ); _POSIX_Timer_Free( ptimer ); _Objects_Allocator_unlock(); diff --git 
a/cpukit/posix/src/timergettime.c b/cpukit/posix/src/timergettime.c index 2a810ef42d..f065cc927c 100644 --- a/cpukit/posix/src/timergettime.c +++ b/cpukit/posix/src/timergettime.c @@ -49,7 +49,7 @@ int timer_gettime( rtems_set_errno_and_return_minus_one( EINVAL ); /* Reads the current time */ - _TOD_Get( ¤t_time ); + _TOD_Get_as_timespec( ¤t_time ); ptimer = _POSIX_Timer_Get( timerid, &location ); switch ( location ) { diff --git a/cpukit/posix/src/timerinserthelper.c b/cpukit/posix/src/timerinserthelper.c index 9d028350ad..4d7c3fb681 100644 --- a/cpukit/posix/src/timerinserthelper.c +++ b/cpukit/posix/src/timerinserthelper.c @@ -39,7 +39,7 @@ bool _POSIX_Timer_Insert_helper( { ISR_Level level; - (void) _Watchdog_Remove( timer ); + _Watchdog_Remove_ticks( timer ); _ISR_Disable( level ); /* diff --git a/cpukit/posix/src/timersettime.c b/cpukit/posix/src/timersettime.c index 6cc378082a..2a8cec7652 100644 --- a/cpukit/posix/src/timersettime.c +++ b/cpukit/posix/src/timersettime.c @@ -66,7 +66,7 @@ int timer_settime( /* Convert absolute to relative time */ if (flags == TIMER_ABSTIME) { struct timespec now; - _TOD_Get( &now ); + _TOD_Get_as_timespec( &now ); /* Check for seconds in the past */ if ( _Timespec_Greater_than( &now, &normalize.it_value ) ) rtems_set_errno_and_return_minus_one( EINVAL ); @@ -85,7 +85,7 @@ int timer_settime( /* First, it verifies if the timer must be stopped */ if ( normalize.it_value.tv_sec == 0 && normalize.it_value.tv_nsec == 0 ) { /* Stop the timer */ - (void) _Watchdog_Remove( &ptimer->Timer ); + _Watchdog_Remove_ticks( &ptimer->Timer ); /* The old data of the timer are returned */ if ( ovalue ) *ovalue = ptimer->timer_data; @@ -125,7 +125,7 @@ int timer_settime( /* Indicate that the time is running */ ptimer->state = POSIX_TIMER_STATE_CREATE_RUN; - _TOD_Get( &ptimer->time ); + _TOD_Get_as_timespec( &ptimer->time ); _Objects_Put( &ptimer->Object ); return 0; diff --git a/cpukit/posix/src/timertsr.c b/cpukit/posix/src/timertsr.c index 
85554633e9..512dd06d38 100644 --- a/cpukit/posix/src/timertsr.c +++ b/cpukit/posix/src/timertsr.c @@ -58,7 +58,7 @@ void _POSIX_Timer_TSR( return; /* Store the time when the timer was started again */ - _TOD_Get( &ptimer->time ); + _TOD_Get_as_timespec( &ptimer->time ); /* The state really did not change but just to be safe */ ptimer->state = POSIX_TIMER_STATE_CREATE_RUN; diff --git a/cpukit/posix/src/ualarm.c b/cpukit/posix/src/ualarm.c index d9a85e6a92..9235ef165c 100644 --- a/cpukit/posix/src/ualarm.c +++ b/cpukit/posix/src/ualarm.c @@ -72,7 +72,7 @@ useconds_t ualarm( _Thread_Disable_dispatch(); - state = _Watchdog_Remove( the_timer ); + state = _Watchdog_Remove_ticks( the_timer ); if ( (state == WATCHDOG_ACTIVE) || (state == WATCHDOG_REMOVE_IT) ) { /* * The stop_time and start_time fields are snapshots of ticks since diff --git a/cpukit/rtems/Makefile.am b/cpukit/rtems/Makefile.am index 5e6f2eafa0..f38990d2a7 100644 --- a/cpukit/rtems/Makefile.am +++ b/cpukit/rtems/Makefile.am @@ -155,10 +155,8 @@ librtems_a_SOURCES += src/clockgettod.c librtems_a_SOURCES += src/clockgettodtimeval.c librtems_a_SOURCES += src/clockgetuptime.c librtems_a_SOURCES += src/clockgetuptimetimeval.c -librtems_a_SOURCES += src/clockgetuptimeseconds.c librtems_a_SOURCES += src/clockgetuptimenanoseconds.c librtems_a_SOURCES += src/clockset.c -librtems_a_SOURCES += src/clocksetnsecshandler.c librtems_a_SOURCES += src/clocktick.c librtems_a_SOURCES += src/clocktodtoseconds.c librtems_a_SOURCES += src/clocktodvalidate.c @@ -210,7 +208,6 @@ librtems_a_SOURCES += src/eventreceive.c librtems_a_SOURCES += src/eventseize.c librtems_a_SOURCES += src/eventsend.c librtems_a_SOURCES += src/eventsurrender.c -librtems_a_SOURCES += src/eventtimeout.c librtems_a_SOURCES += src/systemeventsend.c librtems_a_SOURCES += src/systemeventreceive.c diff --git a/cpukit/rtems/include/rtems/rtems/barrierimpl.h b/cpukit/rtems/include/rtems/rtems/barrierimpl.h index 963ebd93da..e718028715 100644 --- 
a/cpukit/rtems/include/rtems/rtems/barrierimpl.h +++ b/cpukit/rtems/include/rtems/rtems/barrierimpl.h @@ -86,6 +86,7 @@ RTEMS_INLINE_ROUTINE void _Barrier_Free ( Barrier_Control *the_barrier ) { + _CORE_barrier_Destroy( &the_barrier->Barrier ); _Objects_Free( &_Barrier_Information, &the_barrier->Object ); } diff --git a/cpukit/rtems/include/rtems/rtems/clock.h b/cpukit/rtems/include/rtems/rtems/clock.h index 2a1c77251f..989bf2f5c9 100644 --- a/cpukit/rtems/include/rtems/rtems/clock.h +++ b/cpukit/rtems/include/rtems/rtems/clock.h @@ -14,7 +14,6 @@ * * - set the current date and time * - obtain the current date and time - * - set the nanoseconds since last clock tick handler * - announce a clock tick * - obtain the system uptime */ @@ -35,6 +34,7 @@ #include <rtems/rtems/status.h> #include <rtems/rtems/types.h> #include <rtems/config.h> +#include <rtems/score/timecounterimpl.h> #include <sys/time.h> /* struct timeval */ @@ -69,12 +69,6 @@ typedef enum { } rtems_clock_get_options; /** - * Type for the nanoseconds since last tick BSP extension. - */ -typedef TOD_Nanoseconds_since_last_tick_routine - rtems_nanoseconds_extension_routine; - -/** * @brief Obtain Current Time of Day * * @deprecated rtems_clock_get() is deprecated. Use the more explicit @@ -279,24 +273,6 @@ rtems_status_code rtems_clock_set( rtems_status_code rtems_clock_tick( void ); /** - * @brief Set the BSP specific Nanoseconds Extension - * - * Clock Manager - * - * This directive sets the BSP provided nanoseconds since last tick - * extension. - * - * @param[in] routine is a pointer to the extension routine - * - * @return This method returns RTEMS_SUCCESSFUL if there was not an - * error. Otherwise, a status code is returned indicating the - * source of the error. - */ -rtems_status_code rtems_clock_set_nanoseconds_extension( - rtems_nanoseconds_extension_routine routine -); - -/** * @brief Obtain the System Uptime * * This directive returns the system uptime. 
@@ -328,7 +304,10 @@ void rtems_clock_get_uptime_timeval( struct timeval *uptime ); * * @retval The system uptime in seconds. */ -time_t rtems_clock_get_uptime_seconds( void ); +RTEMS_INLINE_ROUTINE time_t rtems_clock_get_uptime_seconds( void ) +{ + return _Timecounter_Time_uptime - 1; +} /** * @brief Returns the system uptime in nanoseconds. diff --git a/cpukit/rtems/include/rtems/rtems/event.h b/cpukit/rtems/include/rtems/rtems/event.h index dce7de1165..012452a9d8 100644 --- a/cpukit/rtems/include/rtems/rtems/event.h +++ b/cpukit/rtems/include/rtems/rtems/event.h @@ -319,6 +319,11 @@ rtems_status_code rtems_event_receive ( #define RTEMS_EVENT_SYSTEM_NETWORK_CLOSE RTEMS_EVENT_26 /** + * @brief Reserved system event for the timer server. + */ +#define RTEMS_EVENT_SYSTEM_TIMER_SERVER RTEMS_EVENT_30 + +/** * @brief Reserved system event for transient usage. */ #define RTEMS_EVENT_SYSTEM_TRANSIENT RTEMS_EVENT_31 diff --git a/cpukit/rtems/include/rtems/rtems/messageimpl.h b/cpukit/rtems/include/rtems/rtems/messageimpl.h index e2bc88d0db..2399d65f29 100644 --- a/cpukit/rtems/include/rtems/rtems/messageimpl.h +++ b/cpukit/rtems/include/rtems/rtems/messageimpl.h @@ -18,6 +18,7 @@ #include <rtems/rtems/message.h> #include <rtems/score/objectimpl.h> +#include <rtems/score/coremsgimpl.h> #ifdef __cplusplus extern "C" { @@ -138,6 +139,21 @@ RTEMS_INLINE_ROUTINE Message_queue_Control *_Message_queue_Get ( _Objects_Get( &_Message_queue_Information, id, location ); } +RTEMS_INLINE_ROUTINE Message_queue_Control * +_Message_queue_Get_interrupt_disable( + Objects_Id id, + Objects_Locations *location, + ISR_lock_Context *lock_context +) +{ + return (Message_queue_Control *) _Objects_Get_isr_disable( + &_Message_queue_Information, + id, + location, + lock_context + ); +} + RTEMS_INLINE_ROUTINE Message_queue_Control *_Message_queue_Allocate( void ) { return (Message_queue_Control *) diff --git a/cpukit/rtems/include/rtems/rtems/regionimpl.h 
b/cpukit/rtems/include/rtems/rtems/regionimpl.h index 9ff7b966ca..ae1a50d208 100644 --- a/cpukit/rtems/include/rtems/rtems/regionimpl.h +++ b/cpukit/rtems/include/rtems/rtems/regionimpl.h @@ -20,6 +20,7 @@ #include <rtems/rtems/region.h> #include <rtems/score/heapimpl.h> #include <rtems/score/objectimpl.h> +#include <rtems/score/threadqimpl.h> #include <rtems/debug.h> #ifdef __cplusplus @@ -84,6 +85,7 @@ RTEMS_INLINE_ROUTINE void _Region_Free ( Region_Control *the_region ) { + _Thread_queue_Destroy( &the_region->Wait_queue ); _Objects_Free( &_Region_Information, &the_region->Object ); } diff --git a/cpukit/rtems/include/rtems/rtems/timerimpl.h b/cpukit/rtems/include/rtems/rtems/timerimpl.h index b695d5e2fe..e5b37aa5f1 100644 --- a/cpukit/rtems/include/rtems/rtems/timerimpl.h +++ b/cpukit/rtems/include/rtems/rtems/timerimpl.h @@ -50,9 +50,9 @@ extern "C" { typedef struct Timer_server_Control Timer_server_Control; /** - * @brief Method used to schedule the insertion of task based timers. + * @brief Method used for task based timers. */ -typedef void (*Timer_server_Schedule_operation)( +typedef void (*Timer_server_Method)( Timer_server_Control *timer_server, Timer_Control *timer ); @@ -65,28 +65,52 @@ typedef struct { Watchdog_Control System_watchdog; /** + * @brief Remaining delta of the system watchdog. + */ + Watchdog_Interval system_watchdog_delta; + + /** + * @brief Unique identifier of the context which deals currently with the + * system watchdog. + */ + Thread_Control *system_watchdog_helper; + + /** + * @brief Each insert and tickle operation increases the generation count so + * that the system watchdog dealer notices updates of the watchdog chain. + */ + uint32_t generation; + + /** * @brief Watchdog header managed by the timer server. */ Watchdog_Header Header; /** - * @brief Last known time snapshot of the timer server. + * @brief Last time snapshot of the timer server. * * The units may be ticks or seconds. 
*/ - Watchdog_Interval volatile last_snapshot; + Watchdog_Interval last_snapshot; + + /** + * @brief Current time snapshot of the timer server. + * + * The units may be ticks or seconds. + */ + Watchdog_Interval current_snapshot; } Timer_server_Watchdogs; struct Timer_server_Control { /** - * @brief Timer server thread. + * @brief The cancel method of the timer server. */ - Thread_Control *thread; + Timer_server_Method cancel; /** * @brief The schedule operation method of the timer server. */ - Timer_server_Schedule_operation schedule_operation; + Timer_server_Method schedule_operation; /** * @brief Interval watchdogs triggered by the timer server. @@ -97,26 +121,6 @@ struct Timer_server_Control { * @brief TOD watchdogs triggered by the timer server. */ Timer_server_Watchdogs TOD_watchdogs; - - /** - * @brief Chain of timers scheduled for insert. - * - * This pointer is not @c NULL whenever the interval and TOD chains are - * processed. After the processing this list will be checked and if - * necessary the processing will be restarted. Processing of these chains - * can be only interrupted through interrupts. - */ - Chain_Control *volatile insert_chain; - - /** - * @brief Indicates that the timer server is active or not. - * - * The server is active after the delay on a system watchdog. The activity - * period of the server ends when no more watchdogs managed by the server - * fire. The system watchdogs must not be manipulated when the server is - * active. 
- */ - bool volatile active; }; /** @@ -220,6 +224,8 @@ RTEMS_INLINE_ROUTINE bool _Timer_Is_dormant_class ( return ( the_class == TIMER_DORMANT ); } +void _Timer_Cancel( Timer_Control *the_timer ); + /**@}*/ #ifdef __cplusplus diff --git a/cpukit/rtems/src/clockgetuptime.c b/cpukit/rtems/src/clockgetuptime.c index 91ce6c46ee..acbe39a00e 100644 --- a/cpukit/rtems/src/clockgetuptime.c +++ b/cpukit/rtems/src/clockgetuptime.c @@ -42,6 +42,6 @@ rtems_status_code rtems_clock_get_uptime( if ( !uptime ) return RTEMS_INVALID_ADDRESS; - _TOD_Get_uptime_as_timespec( uptime ); + _TOD_Get_zero_based_uptime_as_timespec( uptime ); return RTEMS_SUCCESSFUL; } diff --git a/cpukit/rtems/src/clockgetuptimenanoseconds.c b/cpukit/rtems/src/clockgetuptimenanoseconds.c index 0310e592e9..03ff73bba3 100644 --- a/cpukit/rtems/src/clockgetuptimenanoseconds.c +++ b/cpukit/rtems/src/clockgetuptimenanoseconds.c @@ -23,13 +23,8 @@ uint64_t rtems_clock_get_uptime_nanoseconds( void ) { Timestamp_Control snapshot_as_timestamp; - uint32_t nanoseconds; - ISR_lock_Context lock_context; - _TOD_Acquire( &_TOD, &lock_context ); - snapshot_as_timestamp = _TOD.uptime; - nanoseconds = ( *_TOD.nanoseconds_since_last_tick )(); - _TOD_Release( &_TOD, &lock_context ); + _TOD_Get_zero_based_uptime(&snapshot_as_timestamp); - return _Timestamp_Get_As_nanoseconds( &snapshot_as_timestamp, nanoseconds ); + return _Timestamp_Get_as_nanoseconds(&snapshot_as_timestamp); } diff --git a/cpukit/rtems/src/clockgetuptimeseconds.c b/cpukit/rtems/src/clockgetuptimeseconds.c deleted file mode 100644 index 0312921113..0000000000 --- a/cpukit/rtems/src/clockgetuptimeseconds.c +++ /dev/null @@ -1,43 +0,0 @@ -/** - * @file - * - * @brief Returns the system uptime in seconds. - * @ingroup ClassicClock Clocks - */ - -/* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Obere Lagerstr. 
30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H - #include "config.h" -#endif - -#include <rtems/rtems/clock.h> -#include <rtems/score/todimpl.h> - -time_t rtems_clock_get_uptime_seconds( void ) -{ - TOD_Control *tod = &_TOD; - Timestamp_Control snapshot_as_timestamp; - struct timespec snapshot_as_timespec; - ISR_lock_Context lock_context; - - _TOD_Acquire( tod, &lock_context ); - snapshot_as_timestamp = tod->uptime; - _TOD_Release( tod, &lock_context ); - - _Timestamp_To_timespec( &snapshot_as_timestamp, &snapshot_as_timespec ); - - return snapshot_as_timespec.tv_sec; -} diff --git a/cpukit/rtems/src/clockgetuptimetimeval.c b/cpukit/rtems/src/clockgetuptimetimeval.c index 0cbaa836bc..2e09ae2cd2 100644 --- a/cpukit/rtems/src/clockgetuptimetimeval.c +++ b/cpukit/rtems/src/clockgetuptimetimeval.c @@ -30,6 +30,6 @@ void rtems_clock_get_uptime_timeval( struct timeval *uptime ) { Timestamp_Control snapshot_as_timestamp; - _TOD_Get_uptime( &snapshot_as_timestamp ); + _TOD_Get_zero_based_uptime( &snapshot_as_timestamp ); _Timestamp_To_timeval( &snapshot_as_timestamp, uptime ); } diff --git a/cpukit/rtems/src/clocksetnsecshandler.c b/cpukit/rtems/src/clocksetnsecshandler.c deleted file mode 100644 index ae08246902..0000000000 --- a/cpukit/rtems/src/clocksetnsecshandler.c +++ /dev/null @@ -1,34 +0,0 @@ -/** - * @file - * - * @brief Set the BSP specific Nanoseconds Extension - * @ingroup ClassicClock Clocks - */ - -/* - * COPYRIGHT (c) 1989-2006. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/rtems/clock.h> -#include <rtems/score/todimpl.h> - -rtems_status_code rtems_clock_set_nanoseconds_extension( - rtems_nanoseconds_extension_routine routine -) -{ - if ( !routine ) - return RTEMS_INVALID_ADDRESS; - - _TOD_Set_nanoseconds_since_last_tick_handler( routine ); - - return RTEMS_SUCCESSFUL; -} diff --git a/cpukit/rtems/src/clocktick.c b/cpukit/rtems/src/clocktick.c index a026b44494..e2cd35f5fc 100644 --- a/cpukit/rtems/src/clocktick.c +++ b/cpukit/rtems/src/clocktick.c @@ -19,30 +19,14 @@ #endif #include <rtems/rtems/clock.h> -#include <rtems/score/schedulerimpl.h> -#include <rtems/score/threadimpl.h> -#include <rtems/score/todimpl.h> -#include <rtems/score/watchdogimpl.h> +#include <rtems/score/timecounter.h> rtems_status_code rtems_clock_tick( void ) { -#if defined( RTEMS_SMP ) - _Thread_Disable_dispatch(); -#endif - - _TOD_Tickle_ticks(); - - _Watchdog_Tickle_ticks(); - - _Scheduler_Tick(); - -#if defined( RTEMS_SMP ) - _Thread_Enable_dispatch(); -#else - if ( _Thread_Is_context_switch_necessary() && - _Thread_Dispatch_is_enabled() ) - _Thread_Dispatch(); -#endif + _Timecounter_Tick_simple( + rtems_configuration_get_microseconds_per_tick(), + 0 + ); return RTEMS_SUCCESSFUL; } diff --git a/cpukit/rtems/src/eventmp.c b/cpukit/rtems/src/eventmp.c index b2a5ce8a72..e2e43fcd0d 100644 --- a/cpukit/rtems/src/eventmp.c +++ b/cpukit/rtems/src/eventmp.c @@ -59,7 +59,8 @@ rtems_status_code _Event_MP_Send_request_packet ( _MPCI_Send_request_packet( _Objects_Get_node( event_id ), &the_packet->Prefix, - STATES_READY + STATES_READY, + RTEMS_TIMEOUT ); break; diff --git a/cpukit/rtems/src/eventseize.c b/cpukit/rtems/src/eventseize.c index 929665641c..a9290b38e8 100644 --- a/cpukit/rtems/src/eventseize.c +++ b/cpukit/rtems/src/eventseize.c @@ -84,12 +84,17 @@ void _Event_Seize( executing->Wait.return_argument = event_out; _Thread_Wait_flags_set( executing, intend_to_block ); - cpu_self = 
_Thread_Dispatch_disable_critical(); + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); _Thread_Lock_release_default( executing, lock_context ); - _Giant_Acquire( cpu_self ); if ( ticks ) { - _Watchdog_Initialize( &executing->Timer, _Event_Timeout, 0, executing ); + _Thread_Wait_set_timeout_code( executing, RTEMS_TIMEOUT ); + _Watchdog_Initialize( + &executing->Timer, + _Thread_Timeout, + 0, + executing + ); _Watchdog_Insert_ticks( &executing->Timer, ticks ); } @@ -101,10 +106,9 @@ void _Event_Seize( wait_class | THREAD_WAIT_STATE_BLOCKED ); if ( !success ) { - _Watchdog_Remove( &executing->Timer ); + _Watchdog_Remove_ticks( &executing->Timer ); _Thread_Unblock( executing ); } - _Giant_Release( cpu_self ); _Thread_Dispatch_enable( cpu_self ); } diff --git a/cpukit/rtems/src/eventsend.c b/cpukit/rtems/src/eventsend.c index c9e81fb95f..23eed740a8 100644 --- a/cpukit/rtems/src/eventsend.c +++ b/cpukit/rtems/src/eventsend.c @@ -44,7 +44,6 @@ rtems_status_code rtems_event_send( THREAD_WAIT_CLASS_EVENT, &lock_context ); - _Objects_Put_for_get_isr_disable( &thread->Object ); sc = RTEMS_SUCCESSFUL; break; #ifdef RTEMS_MULTIPROCESSING diff --git a/cpukit/rtems/src/eventsurrender.c b/cpukit/rtems/src/eventsurrender.c index ba4e429f1e..156586023d 100644 --- a/cpukit/rtems/src/eventsurrender.c +++ b/cpukit/rtems/src/eventsurrender.c @@ -85,7 +85,7 @@ void _Event_Surrender( success = _Thread_Wait_flags_try_change_critical( the_thread, intend_to_block, - wait_class | THREAD_WAIT_STATE_INTERRUPT_SATISFIED + wait_class | THREAD_WAIT_STATE_READY_AGAIN ); if ( success ) { _Event_Satisfy( the_thread, event, pending_events, seized_events ); @@ -94,7 +94,7 @@ void _Event_Surrender( _Event_Satisfy( the_thread, event, pending_events, seized_events ); _Thread_Wait_flags_set( the_thread, - wait_class | THREAD_WAIT_STATE_SATISFIED + wait_class | THREAD_WAIT_STATE_READY_AGAIN ); unblock = true; } else { @@ -107,14 +107,12 @@ void _Event_Surrender( if ( unblock ) { 
Per_CPU_Control *cpu_self; - cpu_self = _Thread_Dispatch_disable_critical(); + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); _Thread_Lock_release_default( the_thread, lock_context ); - _Giant_Acquire( cpu_self ); - _Watchdog_Remove( &the_thread->Timer ); + _Watchdog_Remove_ticks( &the_thread->Timer ); _Thread_Unblock( the_thread ); - _Giant_Release( cpu_self ); _Thread_Dispatch_enable( cpu_self ); } else { _Thread_Lock_release_default( the_thread, lock_context ); diff --git a/cpukit/rtems/src/msgmp.c b/cpukit/rtems/src/msgmp.c index 7043138963..83b5a31f52 100644 --- a/cpukit/rtems/src/msgmp.c +++ b/cpukit/rtems/src/msgmp.c @@ -148,7 +148,8 @@ rtems_status_code _Message_queue_MP_Send_request_packet ( return (rtems_status_code) _MPCI_Send_request_packet( _Objects_Get_node(message_queue_id), &the_packet->Prefix, - STATES_WAITING_FOR_MESSAGE + STATES_WAITING_FOR_MESSAGE, + RTEMS_TIMEOUT ); break; @@ -173,7 +174,8 @@ rtems_status_code _Message_queue_MP_Send_request_packet ( return (rtems_status_code) _MPCI_Send_request_packet( _Objects_Get_node(message_queue_id), &the_packet->Prefix, - STATES_WAITING_FOR_MESSAGE + STATES_WAITING_FOR_MESSAGE, + RTEMS_TIMEOUT ); break; @@ -291,7 +293,7 @@ void _Message_queue_MP_Process_packet ( the_thread = _Thread_MP_Find_proxy( the_packet->proxy_id ); if (! 
_Thread_Is_null( the_thread ) ) - _Thread_queue_Extract( the_thread->Wait.queue, the_thread ); + _Thread_queue_Extract( the_thread ); _MPCI_Return_packet( the_packet_prefix ); break; diff --git a/cpukit/rtems/src/msgqbroadcast.c b/cpukit/rtems/src/msgqbroadcast.c index 64ea80e3de..aabbf3f6ac 100644 --- a/cpukit/rtems/src/msgqbroadcast.c +++ b/cpukit/rtems/src/msgqbroadcast.c @@ -40,6 +40,7 @@ rtems_status_code rtems_message_queue_broadcast( Message_queue_Control *the_message_queue; Objects_Locations location; CORE_message_queue_Status core_status; + ISR_lock_Context lock_context; if ( !buffer ) return RTEMS_INVALID_ADDRESS; @@ -47,7 +48,11 @@ rtems_status_code rtems_message_queue_broadcast( if ( !count ) return RTEMS_INVALID_ADDRESS; - the_message_queue = _Message_queue_Get( id, &location ); + the_message_queue = _Message_queue_Get_interrupt_disable( + id, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: @@ -61,10 +66,9 @@ rtems_status_code rtems_message_queue_broadcast( #else NULL, #endif - count + count, + &lock_context ); - - _Objects_Put( &the_message_queue->Object ); return _Message_queue_Translate_core_message_queue_return_code( core_status ); diff --git a/cpukit/rtems/src/msgqflush.c b/cpukit/rtems/src/msgqflush.c index 7ae7ef4544..809c243b52 100644 --- a/cpukit/rtems/src/msgqflush.c +++ b/cpukit/rtems/src/msgqflush.c @@ -54,16 +54,23 @@ rtems_status_code rtems_message_queue_flush( { Message_queue_Control *the_message_queue; Objects_Locations location; + ISR_lock_Context lock_context; if ( !count ) return RTEMS_INVALID_ADDRESS; - the_message_queue = _Message_queue_Get( id, &location ); + the_message_queue = _Message_queue_Get_interrupt_disable( + id, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: - *count = _CORE_message_queue_Flush( &the_message_queue->message_queue ); - _Objects_Put( &the_message_queue->Object ); + *count = _CORE_message_queue_Flush( + &the_message_queue->message_queue, + 
&lock_context + ); return RTEMS_SUCCESSFUL; #if defined(RTEMS_MULTIPROCESSING) diff --git a/cpukit/rtems/src/msgqreceive.c b/cpukit/rtems/src/msgqreceive.c index db09cfe547..2b9a4e742b 100644 --- a/cpukit/rtems/src/msgqreceive.c +++ b/cpukit/rtems/src/msgqreceive.c @@ -42,6 +42,7 @@ rtems_status_code rtems_message_queue_receive( Objects_Locations location; bool wait; Thread_Control *executing; + ISR_lock_Context lock_context; if ( !buffer ) return RTEMS_INVALID_ADDRESS; @@ -49,7 +50,11 @@ rtems_status_code rtems_message_queue_receive( if ( !size ) return RTEMS_INVALID_ADDRESS; - the_message_queue = _Message_queue_Get( id, &location ); + the_message_queue = _Message_queue_Get_interrupt_disable( + id, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: @@ -66,9 +71,9 @@ rtems_status_code rtems_message_queue_receive( buffer, size, wait, - timeout + timeout, + &lock_context ); - _Objects_Put( &the_message_queue->Object ); return _Message_queue_Translate_core_message_queue_return_code( executing->Wait.return_code ); diff --git a/cpukit/rtems/src/msgqsend.c b/cpukit/rtems/src/msgqsend.c index 34b7c29e7c..fb3979ed78 100644 --- a/cpukit/rtems/src/msgqsend.c +++ b/cpukit/rtems/src/msgqsend.c @@ -62,11 +62,16 @@ rtems_status_code rtems_message_queue_send( Message_queue_Control *the_message_queue; Objects_Locations location; CORE_message_queue_Status status; + ISR_lock_Context lock_context; if ( !buffer ) return RTEMS_INVALID_ADDRESS; - the_message_queue = _Message_queue_Get( id, &location ); + the_message_queue = _Message_queue_Get_interrupt_disable( + id, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: @@ -77,11 +82,10 @@ rtems_status_code rtems_message_queue_send( id, MESSAGE_QUEUE_MP_HANDLER, false, /* sender does not block */ - 0 /* no timeout */ + 0, /* no timeout */ + &lock_context ); - _Objects_Put( &the_message_queue->Object ); - /* * Since this API does not allow for blocking sends, we can directly * return the 
returned status. diff --git a/cpukit/rtems/src/msgqurgent.c b/cpukit/rtems/src/msgqurgent.c index 85a9d4f1b4..e6ae5efcf0 100644 --- a/cpukit/rtems/src/msgqurgent.c +++ b/cpukit/rtems/src/msgqurgent.c @@ -45,11 +45,16 @@ rtems_status_code rtems_message_queue_urgent( Message_queue_Control *the_message_queue; Objects_Locations location; CORE_message_queue_Status status; + ISR_lock_Context lock_context; if ( !buffer ) return RTEMS_INVALID_ADDRESS; - the_message_queue = _Message_queue_Get( id, &location ); + the_message_queue = _Message_queue_Get_interrupt_disable( + id, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: @@ -60,9 +65,9 @@ rtems_status_code rtems_message_queue_urgent( id, MESSAGE_QUEUE_MP_HANDLER, false, /* sender does not block */ - 0 /* no timeout */ + 0, /* no timeout */ + &lock_context ); - _Objects_Put( &the_message_queue->Object ); /* * Since this API does not allow for blocking sends, we can directly diff --git a/cpukit/rtems/src/partmp.c b/cpukit/rtems/src/partmp.c index 943c24ac6d..9593ce58a9 100644 --- a/cpukit/rtems/src/partmp.c +++ b/cpukit/rtems/src/partmp.c @@ -104,7 +104,8 @@ rtems_status_code _Partition_MP_Send_request_packet ( _MPCI_Send_request_packet( _Objects_Get_node( partition_id ), &the_packet->Prefix, - STATES_READY /* Not used */ + STATES_READY, /* Not used */ + RTEMS_TIMEOUT ); break; @@ -209,7 +210,7 @@ void _Partition_MP_Process_packet ( the_thread = _Thread_MP_Find_proxy( the_packet->proxy_id ); if ( ! 
_Thread_Is_null( the_thread ) ) - _Thread_queue_Extract( the_thread->Wait.queue, the_thread ); + _Thread_queue_Extract( the_thread ); _MPCI_Return_packet( the_packet_prefix ); break; diff --git a/cpukit/rtems/src/ratemoncancel.c b/cpukit/rtems/src/ratemoncancel.c index d4a9102955..67b230fbfc 100644 --- a/cpukit/rtems/src/ratemoncancel.c +++ b/cpukit/rtems/src/ratemoncancel.c @@ -38,7 +38,7 @@ rtems_status_code rtems_rate_monotonic_cancel( _Objects_Put( &the_period->Object ); return RTEMS_NOT_OWNER_OF_RESOURCE; } - (void) _Watchdog_Remove( &the_period->Timer ); + _Watchdog_Remove_ticks( &the_period->Timer ); the_period->state = RATE_MONOTONIC_INACTIVE; _Scheduler_Release_job( the_period->owner, 0 ); _Objects_Put( &the_period->Object ); diff --git a/cpukit/rtems/src/ratemondelete.c b/cpukit/rtems/src/ratemondelete.c index 971ad8ef3d..77cf3fe306 100644 --- a/cpukit/rtems/src/ratemondelete.c +++ b/cpukit/rtems/src/ratemondelete.c @@ -37,7 +37,7 @@ rtems_status_code rtems_rate_monotonic_delete( case OBJECTS_LOCAL: _Scheduler_Release_job( the_period->owner, 0 ); _Objects_Close( &_Rate_monotonic_Information, &the_period->Object ); - (void) _Watchdog_Remove( &the_period->Timer ); + _Watchdog_Remove_ticks( &the_period->Timer ); the_period->state = RATE_MONOTONIC_INACTIVE; _Objects_Put( &the_period->Object ); _Rate_monotonic_Free( the_period ); diff --git a/cpukit/rtems/src/regioncreate.c b/cpukit/rtems/src/regioncreate.c index 0daf644ce3..409510c892 100644 --- a/cpukit/rtems/src/regioncreate.c +++ b/cpukit/rtems/src/regioncreate.c @@ -71,6 +71,11 @@ rtems_status_code rtems_region_create( return_status = RTEMS_TOO_MANY; else { + _Thread_queue_Initialize( + &the_region->Wait_queue, + _Attributes_Is_priority( attribute_set ) ? 
+ THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO + ); the_region->maximum_segment_size = _Heap_Initialize( &the_region->Memory, starting_address, length, page_size @@ -79,23 +84,13 @@ rtems_status_code rtems_region_create( if ( !the_region->maximum_segment_size ) { _Region_Free( the_region ); return_status = RTEMS_INVALID_SIZE; - } - - else { - + } else { the_region->starting_address = starting_address; the_region->length = length; the_region->page_size = page_size; the_region->attribute_set = attribute_set; the_region->number_of_used_blocks = 0; - _Thread_queue_Initialize( - &the_region->Wait_queue, - _Attributes_Is_priority( attribute_set ) ? - THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO, - RTEMS_TIMEOUT - ); - _Objects_Open( &_Region_Information, &the_region->Object, diff --git a/cpukit/rtems/src/regiondelete.c b/cpukit/rtems/src/regiondelete.c index 5a2df7c009..b5209da890 100644 --- a/cpukit/rtems/src/regiondelete.c +++ b/cpukit/rtems/src/regiondelete.c @@ -52,7 +52,6 @@ rtems_status_code rtems_region_delete( #if defined(RTEMS_MULTIPROCESSING) case OBJECTS_REMOTE: /* this error cannot be returned */ - break; #endif case OBJECTS_ERROR: diff --git a/cpukit/rtems/src/regionextend.c b/cpukit/rtems/src/regionextend.c index 65b68cdf1a..2ee2b992e4 100644 --- a/cpukit/rtems/src/regionextend.c +++ b/cpukit/rtems/src/regionextend.c @@ -65,7 +65,6 @@ rtems_status_code rtems_region_extend( #if defined(RTEMS_MULTIPROCESSING) case OBJECTS_REMOTE: /* this error cannot be returned */ - break; #endif case OBJECTS_ERROR: diff --git a/cpukit/rtems/src/regiongetfreeinfo.c b/cpukit/rtems/src/regiongetfreeinfo.c index be0f0089a9..6ebd1abbd2 100644 --- a/cpukit/rtems/src/regiongetfreeinfo.c +++ b/cpukit/rtems/src/regiongetfreeinfo.c @@ -56,7 +56,6 @@ rtems_status_code rtems_region_get_free_information( #if defined(RTEMS_MULTIPROCESSING) case OBJECTS_REMOTE: /* this error cannot be returned */ - break; #endif case OBJECTS_ERROR: diff --git 
a/cpukit/rtems/src/regiongetinfo.c b/cpukit/rtems/src/regiongetinfo.c index c3466f0566..d5eee727d3 100644 --- a/cpukit/rtems/src/regiongetinfo.c +++ b/cpukit/rtems/src/regiongetinfo.c @@ -50,7 +50,6 @@ rtems_status_code rtems_region_get_information( #if defined(RTEMS_MULTIPROCESSING) case OBJECTS_REMOTE: /* this error cannot be returned */ - break; #endif case OBJECTS_ERROR: diff --git a/cpukit/rtems/src/regiongetsegment.c b/cpukit/rtems/src/regiongetsegment.c index 1a52bc1d59..26437b5188 100644 --- a/cpukit/rtems/src/regiongetsegment.c +++ b/cpukit/rtems/src/regiongetsegment.c @@ -79,18 +79,16 @@ rtems_status_code rtems_region_get_segment( _Thread_Disable_dispatch(); _RTEMS_Unlock_allocator(); - executing->Wait.queue = &the_region->Wait_queue; executing->Wait.id = id; executing->Wait.count = size; executing->Wait.return_argument = segment; - _Thread_queue_Enter_critical_section( &the_region->Wait_queue ); - _Thread_queue_Enqueue( &the_region->Wait_queue, executing, STATES_WAITING_FOR_SEGMENT, - timeout + timeout, + RTEMS_TIMEOUT ); _Objects_Put( &the_region->Object ); @@ -102,7 +100,6 @@ rtems_status_code rtems_region_get_segment( #if defined(RTEMS_MULTIPROCESSING) case OBJECTS_REMOTE: /* this error cannot be returned */ - break; #endif case OBJECTS_ERROR: diff --git a/cpukit/rtems/src/regiongetsegmentsize.c b/cpukit/rtems/src/regiongetsegmentsize.c index 8f823fcec9..ab07a56e9c 100644 --- a/cpukit/rtems/src/regiongetsegmentsize.c +++ b/cpukit/rtems/src/regiongetsegmentsize.c @@ -53,7 +53,6 @@ rtems_status_code rtems_region_get_segment_size( #if defined(RTEMS_MULTIPROCESSING) case OBJECTS_REMOTE: /* this error cannot be returned */ - break; #endif case OBJECTS_ERROR: diff --git a/cpukit/rtems/src/regionmp.c b/cpukit/rtems/src/regionmp.c index 58dfa99dde..135a69dd97 100644 --- a/cpukit/rtems/src/regionmp.c +++ b/cpukit/rtems/src/regionmp.c @@ -102,7 +102,8 @@ rtems_status_code _Region_MP_Send_request_packet ( return (rtems_status_code) _MPCI_Send_request_packet( 
_Objects_Get_node( region_id ), &the_packet->Prefix, - STATES_READY /* Not used */ + STATES_READY, /* Not used */ + RTEMS_TIMEOUT ); break; @@ -195,7 +196,7 @@ void _Region_MP_Process_packet ( the_thread = _Thread_MP_Find_proxy( the_packet->proxy_id ); if ( ! _Thread_Is_null( the_thread ) ) - _Thread_queue_Extract( the_thread->Wait.queue, the_thread ); + _Thread_queue_Extract( the_thread ); _MPCI_Return_packet( the_packet_prefix ); break; diff --git a/cpukit/rtems/src/regionprocessqueue.c b/cpukit/rtems/src/regionprocessqueue.c index 54081af84d..a06a077e3d 100644 --- a/cpukit/rtems/src/regionprocessqueue.c +++ b/cpukit/rtems/src/regionprocessqueue.c @@ -61,7 +61,7 @@ void _Region_Process_queue( *(void **)the_thread->Wait.return_argument = the_segment; the_region->number_of_used_blocks += 1; - _Thread_queue_Extract( &the_region->Wait_queue, the_thread ); + _Thread_queue_Extract( the_thread ); the_thread->Wait.return_code = RTEMS_SUCCESSFUL; } _Thread_Enable_dispatch(); diff --git a/cpukit/rtems/src/regionresizesegment.c b/cpukit/rtems/src/regionresizesegment.c index ee3499a729..b1d9482c79 100644 --- a/cpukit/rtems/src/regionresizesegment.c +++ b/cpukit/rtems/src/regionresizesegment.c @@ -79,7 +79,6 @@ rtems_status_code rtems_region_resize_segment( #if defined(RTEMS_MULTIPROCESSING) case OBJECTS_REMOTE: /* this error cannot be returned */ - break; #endif case OBJECTS_ERROR: diff --git a/cpukit/rtems/src/regionreturnsegment.c b/cpukit/rtems/src/regionreturnsegment.c index 6ae537fd4f..98f2240a23 100644 --- a/cpukit/rtems/src/regionreturnsegment.c +++ b/cpukit/rtems/src/regionreturnsegment.c @@ -82,7 +82,6 @@ rtems_status_code rtems_region_return_segment( #if defined(RTEMS_MULTIPROCESSING) case OBJECTS_REMOTE: /* this error cannot be returned */ - break; #endif case OBJECTS_ERROR: diff --git a/cpukit/rtems/src/semdelete.c b/cpukit/rtems/src/semdelete.c index a805ac61de..e9c3ad21e9 100644 --- a/cpukit/rtems/src/semdelete.c +++ b/cpukit/rtems/src/semdelete.c @@ -76,12 
+76,14 @@ rtems_status_code rtems_semaphore_delete( SEMAPHORE_MP_OBJECT_WAS_DELETED, CORE_MUTEX_WAS_DELETED ); + _CORE_mutex_Destroy( &the_semaphore->Core_control.mutex ); } else { _CORE_semaphore_Flush( &the_semaphore->Core_control.semaphore, SEMAPHORE_MP_OBJECT_WAS_DELETED, CORE_SEMAPHORE_WAS_DELETED ); + _CORE_semaphore_Destroy( &the_semaphore->Core_control.semaphore ); } _Objects_Close( &_Semaphore_Information, &the_semaphore->Object ); diff --git a/cpukit/rtems/src/semmp.c b/cpukit/rtems/src/semmp.c index eabd1b7a0b..f4c5cb73df 100644 --- a/cpukit/rtems/src/semmp.c +++ b/cpukit/rtems/src/semmp.c @@ -95,7 +95,8 @@ rtems_status_code _Semaphore_MP_Send_request_packet ( return _MPCI_Send_request_packet( _Objects_Get_node( semaphore_id ), &the_packet->Prefix, - STATES_WAITING_FOR_SEMAPHORE + STATES_WAITING_FOR_SEMAPHORE, + RTEMS_TIMEOUT ); break; @@ -188,7 +189,7 @@ void _Semaphore_MP_Process_packet ( the_thread = _Thread_MP_Find_proxy( the_packet->proxy_id ); if ( ! _Thread_Is_null( the_thread ) ) - _Thread_queue_Extract( the_thread->Wait.queue, the_thread ); + _Thread_queue_Extract( the_thread ); _MPCI_Return_packet( the_packet_prefix ); break; diff --git a/cpukit/rtems/src/semobtain.c b/cpukit/rtems/src/semobtain.c index 0edac96264..bda39fa80e 100644 --- a/cpukit/rtems/src/semobtain.c +++ b/cpukit/rtems/src/semobtain.c @@ -56,19 +56,16 @@ rtems_status_code rtems_semaphore_obtain( attribute_set = the_semaphore->attribute_set; wait = !_Options_Is_no_wait( option_set ); #if defined(RTEMS_SMP) - _Thread_Disable_dispatch(); if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) { MRSP_Status mrsp_status; - _ISR_lock_ISR_enable( &lock_context ); mrsp_status = _MRSP_Obtain( &the_semaphore->Core_control.mrsp, executing, wait, - timeout + timeout, + &lock_context ); - _Thread_Enable_dispatch(); - _Objects_Put_for_get_isr_disable( &the_semaphore->Object ); return _Semaphore_Translate_MRSP_status_code( mrsp_status ); } else #endif @@ -81,16 +78,12 @@ 
rtems_status_code rtems_semaphore_obtain( timeout, &lock_context ); -#if defined(RTEMS_SMP) - _Thread_Enable_dispatch(); -#endif - _Objects_Put_for_get_isr_disable( &the_semaphore->Object ); return _Semaphore_Translate_core_mutex_return_code( executing->Wait.return_code ); } /* must be a counting semaphore */ - _CORE_semaphore_Seize_isr_disable( + _CORE_semaphore_Seize( &the_semaphore->Core_control.semaphore, executing, id, @@ -98,10 +91,6 @@ rtems_status_code rtems_semaphore_obtain( timeout, &lock_context ); -#if defined(RTEMS_SMP) - _Thread_Enable_dispatch(); -#endif - _Objects_Put_for_get_isr_disable( &the_semaphore->Object ); return _Semaphore_Translate_core_semaphore_return_code( executing->Wait.return_code ); diff --git a/cpukit/rtems/src/semrelease.c b/cpukit/rtems/src/semrelease.c index 37a05b2507..5d41b6cfae 100644 --- a/cpukit/rtems/src/semrelease.c +++ b/cpukit/rtems/src/semrelease.c @@ -62,19 +62,26 @@ rtems_status_code rtems_semaphore_release( CORE_mutex_Status mutex_status; CORE_semaphore_Status semaphore_status; rtems_attribute attribute_set; + ISR_lock_Context lock_context; - the_semaphore = _Semaphore_Get( id, &location ); + the_semaphore = _Semaphore_Get_interrupt_disable( + id, + &location, + &lock_context + ); switch ( location ) { case OBJECTS_LOCAL: attribute_set = the_semaphore->attribute_set; #if defined(RTEMS_SMP) if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) { - MRSP_Status mrsp_status = _MRSP_Release( + MRSP_Status mrsp_status; + + mrsp_status = _MRSP_Release( &the_semaphore->Core_control.mrsp, - _Thread_Get_executing() + _Thread_Executing, + &lock_context ); - _Objects_Put( &the_semaphore->Object ); return _Semaphore_Translate_MRSP_status_code( mrsp_status ); } else #endif @@ -82,17 +89,17 @@ rtems_status_code rtems_semaphore_release( mutex_status = _CORE_mutex_Surrender( &the_semaphore->Core_control.mutex, id, - MUTEX_MP_SUPPORT + MUTEX_MP_SUPPORT, + &lock_context ); - _Objects_Put( &the_semaphore->Object ); 
return _Semaphore_Translate_core_mutex_return_code( mutex_status ); } else { semaphore_status = _CORE_semaphore_Surrender( &the_semaphore->Core_control.semaphore, id, - MUTEX_MP_SUPPORT + MUTEX_MP_SUPPORT, + &lock_context ); - _Objects_Put( &the_semaphore->Object ); return _Semaphore_Translate_core_semaphore_return_code( semaphore_status ); } diff --git a/cpukit/rtems/src/signalmp.c b/cpukit/rtems/src/signalmp.c index 9d2177d214..4a2d5738af 100644 --- a/cpukit/rtems/src/signalmp.c +++ b/cpukit/rtems/src/signalmp.c @@ -60,7 +60,8 @@ rtems_status_code _Signal_MP_Send_request_packet ( return _MPCI_Send_request_packet( _Objects_Get_node( task_id ), &the_packet->Prefix, - STATES_READY /* Not used */ + STATES_READY, /* Not used */ + RTEMS_TIMEOUT ); break; diff --git a/cpukit/rtems/src/systemeventsend.c b/cpukit/rtems/src/systemeventsend.c index 1892c13fe7..3c821c9ef9 100644 --- a/cpukit/rtems/src/systemeventsend.c +++ b/cpukit/rtems/src/systemeventsend.c @@ -50,7 +50,6 @@ rtems_status_code rtems_event_system_send( THREAD_WAIT_CLASS_SYSTEM_EVENT, &lock_context ); - _Objects_Put_for_get_isr_disable( &thread->Object ); sc = RTEMS_SUCCESSFUL; break; #ifdef RTEMS_MULTIPROCESSING diff --git a/cpukit/rtems/src/taskmp.c b/cpukit/rtems/src/taskmp.c index 4b97ffe639..339544cdc4 100644 --- a/cpukit/rtems/src/taskmp.c +++ b/cpukit/rtems/src/taskmp.c @@ -108,7 +108,8 @@ rtems_status_code _RTEMS_tasks_MP_Send_request_packet ( return _MPCI_Send_request_packet( _Objects_Get_node( task_id ), &the_packet->Prefix, - STATES_READY /* Not used */ + STATES_READY, /* Not used */ + RTEMS_TIMEOUT ); break; diff --git a/cpukit/rtems/src/tasksetpriority.c b/cpukit/rtems/src/tasksetpriority.c index 4e4835675d..582c67f9f2 100644 --- a/cpukit/rtems/src/tasksetpriority.c +++ b/cpukit/rtems/src/tasksetpriority.c @@ -41,16 +41,18 @@ rtems_status_code rtems_task_set_priority( switch ( location ) { case OBJECTS_LOCAL: - *old_priority = _RTEMS_tasks_Priority_from_Core( - the_thread->current_priority - ); 
if ( new_priority != RTEMS_CURRENT_PRIORITY ) { - the_thread->real_priority = _RTEMS_tasks_Priority_to_Core( - new_priority - ); - if ( !_Thread_Owns_resources( the_thread ) || - the_thread->current_priority > new_priority ) - _Thread_Change_priority( the_thread, new_priority, false ); + _Thread_Set_priority( + the_thread, + _RTEMS_tasks_Priority_to_Core( new_priority ), + old_priority, + false + ); + *old_priority = _RTEMS_tasks_Priority_from_Core( *old_priority ); + } else { + *old_priority = _RTEMS_tasks_Priority_from_Core( + the_thread->current_priority + ); } _Objects_Put( &the_thread->Object ); return RTEMS_SUCCESSFUL; diff --git a/cpukit/rtems/src/taskwakeafter.c b/cpukit/rtems/src/taskwakeafter.c index 6f0322723a..b7f328f5bc 100644 --- a/cpukit/rtems/src/taskwakeafter.c +++ b/cpukit/rtems/src/taskwakeafter.c @@ -30,23 +30,25 @@ rtems_status_code rtems_task_wake_after( * It is critical to obtain the executing thread after thread dispatching is * disabled on SMP configurations. */ - Thread_Control *executing; + Thread_Control *executing; + Per_CPU_Control *cpu_self; - _Thread_Disable_dispatch(); + cpu_self = _Thread_Dispatch_disable(); executing = _Thread_Executing; if ( ticks == 0 ) { _Thread_Yield( executing ); } else { _Thread_Set_state( executing, STATES_DELAYING ); + _Thread_Wait_flags_set( executing, THREAD_WAIT_STATE_BLOCKED ); _Watchdog_Initialize( &executing->Timer, - _Thread_Delay_ended, + _Thread_Timeout, 0, executing ); _Watchdog_Insert_ticks( &executing->Timer, ticks ); } - _Thread_Enable_dispatch(); + _Thread_Dispatch_enable( cpu_self ); return RTEMS_SUCCESSFUL; } diff --git a/cpukit/rtems/src/taskwakewhen.c b/cpukit/rtems/src/taskwakewhen.c index a1fc15f047..cf0b303cd3 100644 --- a/cpukit/rtems/src/taskwakewhen.c +++ b/cpukit/rtems/src/taskwakewhen.c @@ -30,6 +30,7 @@ rtems_status_code rtems_task_wake_when( { Watchdog_Interval seconds; Thread_Control *executing; + Per_CPU_Control *cpu_self; if ( !_TOD_Is_set() ) return RTEMS_NOT_DEFINED; @@ 
-47,12 +48,13 @@ rtems_status_code rtems_task_wake_when( if ( seconds <= _TOD_Seconds_since_epoch() ) return RTEMS_INVALID_CLOCK; - _Thread_Disable_dispatch(); + cpu_self = _Thread_Dispatch_disable(); executing = _Thread_Executing; _Thread_Set_state( executing, STATES_WAITING_FOR_TIME ); + _Thread_Wait_flags_set( executing, THREAD_WAIT_STATE_BLOCKED ); _Watchdog_Initialize( &executing->Timer, - _Thread_Delay_ended, + _Thread_Timeout, 0, executing ); @@ -60,6 +62,6 @@ rtems_status_code rtems_task_wake_when( &executing->Timer, seconds - _TOD_Seconds_since_epoch() ); - _Thread_Enable_dispatch(); + _Thread_Dispatch_enable( cpu_self ); return RTEMS_SUCCESSFUL; } diff --git a/cpukit/rtems/src/timercancel.c b/cpukit/rtems/src/timercancel.c index a8ce1478da..1e737a25bb 100644 --- a/cpukit/rtems/src/timercancel.c +++ b/cpukit/rtems/src/timercancel.c @@ -45,8 +45,7 @@ rtems_status_code rtems_timer_cancel( switch ( location ) { case OBJECTS_LOCAL: - if ( !_Timer_Is_dormant_class( the_timer->the_class ) ) - (void) _Watchdog_Remove( &the_timer->Ticker ); + _Timer_Cancel( the_timer ); _Objects_Put( &the_timer->Object ); return RTEMS_SUCCESSFUL; diff --git a/cpukit/rtems/src/timercreate.c b/cpukit/rtems/src/timercreate.c index 0b1c44bdc2..13a01feda9 100644 --- a/cpukit/rtems/src/timercreate.c +++ b/cpukit/rtems/src/timercreate.c @@ -21,10 +21,39 @@ #include <rtems/system.h> #include <rtems/rtems/status.h> #include <rtems/rtems/support.h> +#include <rtems/score/assert.h> #include <rtems/score/thread.h> #include <rtems/rtems/timerimpl.h> #include <rtems/score/watchdogimpl.h> +void _Timer_Cancel( Timer_Control *the_timer ) +{ + Timer_server_Control *timer_server; + ISR_Level level; + + /* The timer class must not change during the cancel operation */ + _ISR_Disable( level ); + + switch ( the_timer->the_class ) { + case TIMER_INTERVAL: + _Watchdog_Remove_ticks( &the_timer->Ticker ); + break; + case TIMER_TIME_OF_DAY: + _Watchdog_Remove_seconds( &the_timer->Ticker ); + break; + case 
TIMER_INTERVAL_ON_TASK: + case TIMER_TIME_OF_DAY_ON_TASK: + timer_server = _Timer_server; + (*timer_server->cancel)( timer_server, the_timer ); + break; + default: + _Assert( the_timer->the_class == TIMER_DORMANT ); + break; + } + + _ISR_Enable( level ); +} + rtems_status_code rtems_timer_create( rtems_name name, rtems_id *id diff --git a/cpukit/rtems/src/timerdelete.c b/cpukit/rtems/src/timerdelete.c index 19232c8096..0849ec5ba6 100644 --- a/cpukit/rtems/src/timerdelete.c +++ b/cpukit/rtems/src/timerdelete.c @@ -38,7 +38,7 @@ rtems_status_code rtems_timer_delete( case OBJECTS_LOCAL: _Objects_Close( &_Timer_Information, &the_timer->Object ); - (void) _Watchdog_Remove( &the_timer->Ticker ); + _Timer_Cancel( the_timer ); _Objects_Put( &the_timer->Object ); _Timer_Free( the_timer ); _Objects_Allocator_unlock(); diff --git a/cpukit/rtems/src/timerfireafter.c b/cpukit/rtems/src/timerfireafter.c index 07862cde8c..84cf46bc37 100644 --- a/cpukit/rtems/src/timerfireafter.c +++ b/cpukit/rtems/src/timerfireafter.c @@ -46,7 +46,7 @@ rtems_status_code rtems_timer_fire_after( switch ( location ) { case OBJECTS_LOCAL: - (void) _Watchdog_Remove( &the_timer->Ticker ); + _Timer_Cancel( the_timer ); _ISR_Disable( level ); diff --git a/cpukit/rtems/src/timerfirewhen.c b/cpukit/rtems/src/timerfirewhen.c index 6ac7d17433..1acbaf9b8f 100644 --- a/cpukit/rtems/src/timerfirewhen.c +++ b/cpukit/rtems/src/timerfirewhen.c @@ -51,7 +51,7 @@ rtems_status_code rtems_timer_fire_when( switch ( location ) { case OBJECTS_LOCAL: - (void) _Watchdog_Remove( &the_timer->Ticker ); + _Timer_Cancel( the_timer ); the_timer->the_class = TIMER_TIME_OF_DAY; _Watchdog_Initialize( &the_timer->Ticker, routine, id, user_data ); _Watchdog_Insert_seconds( diff --git a/cpukit/rtems/src/timerreset.c b/cpukit/rtems/src/timerreset.c index 49c4925aa3..7ab172ea4f 100644 --- a/cpukit/rtems/src/timerreset.c +++ b/cpukit/rtems/src/timerreset.c @@ -67,7 +67,7 @@ rtems_status_code rtems_timer_reset( return 
RTEMS_INCORRECT_STATE; } #endif - _Watchdog_Remove( &the_timer->Ticker ); + (*timer_server->cancel)( timer_server, the_timer ); (*timer_server->schedule_operation)( timer_server, the_timer ); } else { /* diff --git a/cpukit/rtems/src/timerserver.c b/cpukit/rtems/src/timerserver.c index 25191e43d7..047fd0978e 100644 --- a/cpukit/rtems/src/timerserver.c +++ b/cpukit/rtems/src/timerserver.c @@ -15,7 +15,7 @@ /* COPYRIGHT (c) 1989-2008. * On-Line Applications Research Corporation (OAR). * - * Copyright (c) 2009 embedded brains GmbH. + * Copyright (c) 2009-2015 embedded brains GmbH. * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -26,188 +26,129 @@ #include "config.h" #endif +#include <rtems.h> #include <rtems/rtems/timerimpl.h> #include <rtems/rtems/tasksimpl.h> -#include <rtems/score/isrlevel.h> -#include <rtems/score/threadimpl.h> +#include <rtems/score/apimutex.h> #include <rtems/score/todimpl.h> static Timer_server_Control _Timer_server_Default; -static void _Timer_server_Stop_interval_system_watchdog( - Timer_server_Control *ts +static void _Timer_server_Cancel_method( + Timer_server_Control *ts, + Timer_Control *timer ) { - _Watchdog_Remove( &ts->Interval_watchdogs.System_watchdog ); + if ( timer->the_class == TIMER_INTERVAL_ON_TASK ) { + _Watchdog_Remove( &ts->Interval_watchdogs.Header, &timer->Ticker ); + } else if ( timer->the_class == TIMER_TIME_OF_DAY_ON_TASK ) { + _Watchdog_Remove( &ts->TOD_watchdogs.Header, &timer->Ticker ); + } } -static void _Timer_server_Reset_interval_system_watchdog( - Timer_server_Control *ts -) +static Watchdog_Interval _Timer_server_Get_ticks( void ) { - ISR_Level level; - - _Timer_server_Stop_interval_system_watchdog( ts ); - - _ISR_Disable( level ); - if ( !_Watchdog_Is_empty( &ts->Interval_watchdogs.Header ) ) { - Watchdog_Interval delta_interval = - _Watchdog_First( &ts->Interval_watchdogs.Header )->delta_interval; - _ISR_Enable( level ); - - /* - * The unit 
is TICKS here. - */ - _Watchdog_Insert_ticks( - &ts->Interval_watchdogs.System_watchdog, - delta_interval - ); - } else { - _ISR_Enable( level ); - } + return _Watchdog_Ticks_since_boot; } -static void _Timer_server_Stop_tod_system_watchdog( - Timer_server_Control *ts -) +static Watchdog_Interval _Timer_server_Get_seconds( void ) { - _Watchdog_Remove( &ts->TOD_watchdogs.System_watchdog ); + return _TOD_Seconds_since_epoch(); } -static void _Timer_server_Reset_tod_system_watchdog( - Timer_server_Control *ts +static void _Timer_server_Update_system_watchdog( + Timer_server_Watchdogs *watchdogs, + Watchdog_Header *system_header ) { - ISR_Level level; + ISR_lock_Context lock_context; - _Timer_server_Stop_tod_system_watchdog( ts ); + _Watchdog_Acquire( &watchdogs->Header, &lock_context ); - _ISR_Disable( level ); - if ( !_Watchdog_Is_empty( &ts->TOD_watchdogs.Header ) ) { - Watchdog_Interval delta_interval = - _Watchdog_First( &ts->TOD_watchdogs.Header )->delta_interval; - _ISR_Enable( level ); + if ( watchdogs->system_watchdog_helper == NULL ) { + Thread_Control *executing; + uint32_t my_generation; - /* - * The unit is SECONDS here. 
- */ - _Watchdog_Insert_seconds( - &ts->TOD_watchdogs.System_watchdog, - delta_interval - ); - } else { - _ISR_Enable( level ); - } -} + executing = _Thread_Executing; + watchdogs->system_watchdog_helper = executing; -static void _Timer_server_Insert_timer( - Timer_server_Control *ts, - Timer_Control *timer -) -{ - if ( timer->the_class == TIMER_INTERVAL_ON_TASK ) { - _Watchdog_Insert( &ts->Interval_watchdogs.Header, &timer->Ticker ); - } else if ( timer->the_class == TIMER_TIME_OF_DAY_ON_TASK ) { - _Watchdog_Insert( &ts->TOD_watchdogs.Header, &timer->Ticker ); + do { + my_generation = watchdogs->generation; + + if ( !_Watchdog_Is_empty( &watchdogs->Header ) ) { + Watchdog_Control *first; + Watchdog_Interval delta; + + first = _Watchdog_First( &watchdogs->Header ); + delta = first->delta_interval; + + if ( + watchdogs->System_watchdog.state == WATCHDOG_INACTIVE + || delta != watchdogs->system_watchdog_delta + ) { + watchdogs->system_watchdog_delta = delta; + _Watchdog_Release( &watchdogs->Header, &lock_context ); + + _Watchdog_Remove( system_header, &watchdogs->System_watchdog ); + watchdogs->System_watchdog.initial = delta; + _Watchdog_Insert( system_header, &watchdogs->System_watchdog ); + + _Watchdog_Acquire( &watchdogs->Header, &lock_context ); + } + } + } while ( watchdogs->generation != my_generation ); + + watchdogs->system_watchdog_helper = NULL; } + + _Watchdog_Release( &watchdogs->Header, &lock_context ); } -static void _Timer_server_Insert_timer_and_make_snapshot( - Timer_server_Control *ts, - Timer_Control *timer +static void _Timer_server_Insert_timer( + Timer_server_Watchdogs *watchdogs, + Timer_Control *timer, + Watchdog_Header *system_header, + Watchdog_Interval (*get_ticks)( void ) ) { - Watchdog_Control *first_watchdog; - Watchdog_Interval delta_interval; - Watchdog_Interval last_snapshot; - Watchdog_Interval snapshot; + ISR_lock_Context lock_context; + Watchdog_Interval now; Watchdog_Interval delta; - ISR_Level level; - /* - * We have to update 
the time snapshots here, because otherwise we may have - * problems with the integer range of the delta values. The time delta DT - * from the last snapshot to now may be arbitrarily long. The last snapshot - * is the reference point for the delta chain. Thus if we do not update the - * reference point we have to add DT to the initial delta of the watchdog - * being inserted. This could result in an integer overflow. - */ + _Watchdog_Acquire( &watchdogs->Header, &lock_context ); - _Thread_Disable_dispatch(); + now = (*get_ticks)(); + delta = now - watchdogs->last_snapshot; + watchdogs->last_snapshot = now; + watchdogs->current_snapshot = now; - if ( timer->the_class == TIMER_INTERVAL_ON_TASK ) { - /* - * We have to advance the last known ticks value of the server and update - * the watchdog chain accordingly. - */ - _ISR_Disable( level ); - snapshot = _Watchdog_Ticks_since_boot; - last_snapshot = ts->Interval_watchdogs.last_snapshot; - if ( !_Watchdog_Is_empty( &ts->Interval_watchdogs.Header ) ) { - first_watchdog = _Watchdog_First( &ts->Interval_watchdogs.Header ); - - /* - * We assume adequate unsigned arithmetic here. 
- */ - delta = snapshot - last_snapshot; - - delta_interval = first_watchdog->delta_interval; - if (delta_interval > delta) { - delta_interval -= delta; - } else { - delta_interval = 0; - } - first_watchdog->delta_interval = delta_interval; - } - ts->Interval_watchdogs.last_snapshot = snapshot; - _ISR_Enable( level ); + if ( watchdogs->system_watchdog_delta > delta ) { + watchdogs->system_watchdog_delta -= delta; + } else { + watchdogs->system_watchdog_delta = 0; + } - _Watchdog_Insert( &ts->Interval_watchdogs.Header, &timer->Ticker ); + if ( !_Watchdog_Is_empty( &watchdogs->Header ) ) { + Watchdog_Control *first = _Watchdog_First( &watchdogs->Header ); - if ( !ts->active ) { - _Timer_server_Reset_interval_system_watchdog( ts ); - } - } else if ( timer->the_class == TIMER_TIME_OF_DAY_ON_TASK ) { - /* - * We have to advance the last known seconds value of the server and update - * the watchdog chain accordingly. - */ - _ISR_Disable( level ); - snapshot = (Watchdog_Interval) _TOD_Seconds_since_epoch(); - last_snapshot = ts->TOD_watchdogs.last_snapshot; - if ( !_Watchdog_Is_empty( &ts->TOD_watchdogs.Header ) ) { - first_watchdog = _Watchdog_First( &ts->TOD_watchdogs.Header ); - delta_interval = first_watchdog->delta_interval; - if ( snapshot > last_snapshot ) { - /* - * We advanced in time. - */ - delta = snapshot - last_snapshot; - if (delta_interval > delta) { - delta_interval -= delta; - } else { - delta_interval = 0; - } - } else { - /* - * Someone put us in the past. 
- */ - delta = last_snapshot - snapshot; - delta_interval += delta; - } - first_watchdog->delta_interval = delta_interval; + if ( first->delta_interval > delta ) { + first->delta_interval -= delta; + } else { + first->delta_interval = 0; } - ts->TOD_watchdogs.last_snapshot = snapshot; - _ISR_Enable( level ); + } - _Watchdog_Insert( &ts->TOD_watchdogs.Header, &timer->Ticker ); + _Watchdog_Insert_locked( + &watchdogs->Header, + &timer->Ticker, + &lock_context + ); - if ( !ts->active ) { - _Timer_server_Reset_tod_system_watchdog( ts ); - } - } + ++watchdogs->generation; + + _Watchdog_Release( &watchdogs->Header, &lock_context ); - _Thread_Enable_dispatch(); + _Timer_server_Update_system_watchdog( watchdogs, system_header ); } static void _Timer_server_Schedule_operation_method( @@ -215,143 +156,71 @@ static void _Timer_server_Schedule_operation_method( Timer_Control *timer ) { - if ( ts->insert_chain == NULL ) { - _Timer_server_Insert_timer_and_make_snapshot( ts, timer ); - } else { - /* - * We interrupted a critical section of the timer server. The timer - * server is not preemptible, so we must be in interrupt context here. No - * thread dispatch will happen until the timer server finishes its - * critical section. We have to use the protected chain methods because - * we may be interrupted by a higher priority interrupt. 
- */ - _Chain_Append( ts->insert_chain, &timer->Object.Node ); + if ( timer->the_class == TIMER_INTERVAL_ON_TASK ) { + _Timer_server_Insert_timer( + &ts->Interval_watchdogs, + timer, + &_Watchdog_Ticks_header, + _Timer_server_Get_ticks + ); + } else if ( timer->the_class == TIMER_TIME_OF_DAY_ON_TASK ) { + _Timer_server_Insert_timer( + &ts->TOD_watchdogs, + timer, + &_Watchdog_Seconds_header, + _Timer_server_Get_seconds + ); } } -static void _Timer_server_Process_interval_watchdogs( +static void _Timer_server_Update_current_snapshot( Timer_server_Watchdogs *watchdogs, - Chain_Control *fire_chain + Watchdog_Interval (*get_ticks)( void ) ) { - Watchdog_Interval snapshot = _Watchdog_Ticks_since_boot; - - /* - * We assume adequate unsigned arithmetic here. - */ - Watchdog_Interval delta = snapshot - watchdogs->last_snapshot; - - watchdogs->last_snapshot = snapshot; + ISR_lock_Context lock_context; - _Watchdog_Adjust_to_chain( &watchdogs->Header, delta, fire_chain ); + _Watchdog_Acquire( &watchdogs->Header, &lock_context ); + watchdogs->current_snapshot = (*get_ticks)(); + watchdogs->system_watchdog_delta = 0; + _Watchdog_Release( &watchdogs->Header, &lock_context ); } -static void _Timer_server_Process_tod_watchdogs( +static void _Timer_server_Tickle( Timer_server_Watchdogs *watchdogs, - Chain_Control *fire_chain + Watchdog_Header *system_header, + Watchdog_Interval (*get_ticks)( void ), + bool ticks ) { - Watchdog_Interval snapshot = (Watchdog_Interval) _TOD_Seconds_since_epoch(); - Watchdog_Interval last_snapshot = watchdogs->last_snapshot; - Watchdog_Interval delta; - - /* - * Process the seconds chain. Start by checking that the Time - * of Day (TOD) has not been set backwards. If it has then - * we want to adjust the watchdogs->Header to indicate this. - */ - if ( snapshot > last_snapshot ) { - /* - * This path is for normal forward movement and cases where the - * TOD has been set forward. 
- */ - delta = snapshot - last_snapshot; - _Watchdog_Adjust_to_chain( &watchdogs->Header, delta, fire_chain ); - - } else if ( snapshot < last_snapshot ) { - /* - * The current TOD is before the last TOD which indicates that - * TOD has been set backwards. - */ - delta = last_snapshot - snapshot; - _Watchdog_Adjust_backward( &watchdogs->Header, delta ); - } - - watchdogs->last_snapshot = snapshot; -} - -static void _Timer_server_Process_insertions( Timer_server_Control *ts ) -{ - while ( true ) { - Timer_Control *timer = (Timer_Control *) _Chain_Get( ts->insert_chain ); - - if ( timer == NULL ) { - break; - } + ISR_lock_Context lock_context; + Watchdog_Interval now; + Watchdog_Interval last; - _Timer_server_Insert_timer( ts, timer ); - } -} - -static void _Timer_server_Get_watchdogs_that_fire_now( - Timer_server_Control *ts, - Chain_Control *insert_chain, - Chain_Control *fire_chain -) -{ - /* - * Afterwards all timer inserts are directed to this chain and the interval - * and TOD chains will be no more modified by other parties. - */ - ts->insert_chain = insert_chain; + _Watchdog_Acquire( &watchdogs->Header, &lock_context ); - while ( true ) { - ISR_Level level; + now = watchdogs->current_snapshot; + last = watchdogs->last_snapshot; + watchdogs->last_snapshot = now; - /* - * Remove all the watchdogs that need to fire so we can invoke them. - */ - _Timer_server_Process_interval_watchdogs( - &ts->Interval_watchdogs, - fire_chain + if ( ticks || now >= last ) { + _Watchdog_Adjust_forward_locked( + &watchdogs->Header, + now - last, + &lock_context + ); + } else { + _Watchdog_Adjust_backward_locked( + &watchdogs->Header, + last - now ); - _Timer_server_Process_tod_watchdogs( &ts->TOD_watchdogs, fire_chain ); - - /* - * The insertions have to take place here, because they reference the - * current time. The previous process methods take a snapshot of the - * current time. 
In case someone inserts a watchdog with an initial value - * of zero it will be processed in the next iteration of the timer server - * body loop. - */ - _Timer_server_Process_insertions( ts ); - - _ISR_Disable( level ); - if ( _Chain_Is_empty( insert_chain ) ) { - ts->insert_chain = NULL; - _ISR_Enable( level ); - - break; - } else { - _ISR_Enable( level ); - } } -} -/* FIXME: This locking approach for SMP is improvable! */ + ++watchdogs->generation; -static void _Timer_server_SMP_lock_aquire( void ) -{ -#if defined( RTEMS_SMP ) - _Thread_Disable_dispatch(); -#endif -} + _Watchdog_Release( &watchdogs->Header, &lock_context ); -static void _Timer_server_SMP_lock_release( void ) -{ -#if defined( RTEMS_SMP ) - _Thread_Enable_dispatch(); -#endif + _Timer_server_Update_system_watchdog( watchdogs, system_header ); } /** @@ -368,81 +237,73 @@ static rtems_task _Timer_server_Body( ) { Timer_server_Control *ts = (Timer_server_Control *) arg; - Chain_Control insert_chain; - Chain_Control fire_chain; - _Chain_Initialize_empty( &insert_chain ); - _Chain_Initialize_empty( &fire_chain ); + while ( true ) { + rtems_event_set events; - _Timer_server_SMP_lock_aquire(); + _Timer_server_Tickle( + &ts->Interval_watchdogs, + &_Watchdog_Ticks_header, + _Timer_server_Get_ticks, + true + ); - while ( true ) { - _Timer_server_Get_watchdogs_that_fire_now( ts, &insert_chain, &fire_chain ); - - if ( !_Chain_Is_empty( &fire_chain ) ) { - /* - * Fire the watchdogs. - */ - while ( true ) { - Watchdog_Control *watchdog; - ISR_Level level; - - /* - * It is essential that interrupts are disable here since an interrupt - * service routine may remove a watchdog from the chain. 
- */ - _ISR_Disable( level ); - watchdog = (Watchdog_Control *) _Chain_Get_unprotected( &fire_chain ); - if ( watchdog != NULL ) { - watchdog->state = WATCHDOG_INACTIVE; - _ISR_Enable( level ); - } else { - _ISR_Enable( level ); - - break; - } + _Timer_server_Tickle( + &ts->TOD_watchdogs, + &_Watchdog_Seconds_header, + _Timer_server_Get_seconds, + false + ); - _Timer_server_SMP_lock_release(); + (void) rtems_event_system_receive( + RTEMS_EVENT_SYSTEM_TIMER_SERVER, + RTEMS_EVENT_ALL | RTEMS_WAIT, + RTEMS_NO_TIMEOUT, + &events + ); + } +} - /* - * The timer server may block here and wait for resources or time. - * The system watchdogs are inactive and will remain inactive since - * the active flag of the timer server is true. - */ - (*watchdog->routine)( watchdog->id, watchdog->user_data ); +static void _Timer_server_Wakeup( + Objects_Id id, + void *arg +) +{ + Timer_server_Control *ts = arg; - _Timer_server_SMP_lock_aquire(); - } - } else { - ts->active = false; + _Timer_server_Update_current_snapshot( + &ts->Interval_watchdogs, + _Timer_server_Get_ticks + ); - /* - * Block until there is something to do. - */ -#if !defined( RTEMS_SMP ) - _Thread_Disable_dispatch(); -#endif - _Thread_Set_state( ts->thread, STATES_DELAYING ); - _Timer_server_Reset_interval_system_watchdog( ts ); - _Timer_server_Reset_tod_system_watchdog( ts ); -#if !defined( RTEMS_SMP ) - _Thread_Enable_dispatch(); -#endif + _Timer_server_Update_current_snapshot( + &ts->TOD_watchdogs, + _Timer_server_Get_seconds + ); - _Timer_server_SMP_lock_release(); - _Timer_server_SMP_lock_aquire(); + (void) rtems_event_system_send( id, RTEMS_EVENT_SYSTEM_TIMER_SERVER ); +} - ts->active = true; +static void _Timer_server_Initialize_watchdogs( + Timer_server_Control *ts, + rtems_id id, + Timer_server_Watchdogs *watchdogs, + Watchdog_Interval (*get_ticks)( void ) +) +{ + Watchdog_Interval now; - /* - * Maybe an interrupt did reset the system timers, so we have to stop - * them here. 
Since we are active now, there will be no more resets - * until we are inactive again. - */ - _Timer_server_Stop_interval_system_watchdog( ts ); - _Timer_server_Stop_tod_system_watchdog( ts ); - } - } + now = (*get_ticks)(); + watchdogs->last_snapshot = now; + watchdogs->current_snapshot = now; + + _Watchdog_Header_initialize( &watchdogs->Header ); + _Watchdog_Initialize( + &watchdogs->System_watchdog, + _Timer_server_Wakeup, + id, + ts + ); } /** @@ -486,10 +347,10 @@ rtems_status_code rtems_timer_initiate_server( /* * Just to make sure this is only called once. */ - _Thread_Disable_dispatch(); + _Once_Lock(); tmpInitialized = initialized; initialized = true; - _Thread_Enable_dispatch(); + _Once_Unlock(); if ( tmpInitialized ) return RTEMS_INCORRECT_STATE; @@ -530,50 +391,27 @@ rtems_status_code rtems_timer_initiate_server( * Timer Server so we do not have to have a critical section. */ - /* - * We work with the TCB pointer, not the ID, so we need to convert - * to a TCB pointer from here out. - */ - ts->thread = (Thread_Control *)_Objects_Get_local_object( - &_RTEMS_tasks_Information, - _Objects_Get_index(id) + _Timer_server_Initialize_watchdogs( + ts, + id, + &ts->Interval_watchdogs, + _Timer_server_Get_ticks ); - /* - * Initialize the timer lists that the server will manage. - */ - _Watchdog_Header_initialize( &ts->Interval_watchdogs.Header ); - _Watchdog_Header_initialize( &ts->TOD_watchdogs.Header ); - - /* - * Initialize the timers that will be used to control when the - * Timer Server wakes up and services the task-based timers. 
- */ - _Watchdog_Initialize( - &ts->Interval_watchdogs.System_watchdog, - _Thread_Delay_ended, - 0, - ts->thread - ); - _Watchdog_Initialize( - &ts->TOD_watchdogs.System_watchdog, - _Thread_Delay_ended, - 0, - ts->thread + _Timer_server_Initialize_watchdogs( + ts, + id, + &ts->TOD_watchdogs, + _Timer_server_Get_seconds ); /* - * Initialize the pointer to the timer schedule method so applications that + * Initialize the pointer to the timer server methods so applications that * do not use the Timer Server do not have to pull it in. */ + ts->cancel = _Timer_server_Cancel_method; ts->schedule_operation = _Timer_server_Schedule_operation_method; - ts->Interval_watchdogs.last_snapshot = _Watchdog_Ticks_since_boot; - ts->TOD_watchdogs.last_snapshot = (Watchdog_Interval) _TOD_Seconds_since_epoch(); - - ts->insert_chain = NULL; - ts->active = false; - /* * The default timer server is now available. */ diff --git a/cpukit/rtems/src/timerserverfireafter.c b/cpukit/rtems/src/timerserverfireafter.c index 125664510f..0636782ae0 100644 --- a/cpukit/rtems/src/timerserverfireafter.c +++ b/cpukit/rtems/src/timerserverfireafter.c @@ -50,7 +50,7 @@ rtems_status_code rtems_timer_server_fire_after( switch ( location ) { case OBJECTS_LOCAL: - (void) _Watchdog_Remove( &the_timer->Ticker ); + _Timer_Cancel( the_timer ); _ISR_Disable( level ); diff --git a/cpukit/rtems/src/timerserverfirewhen.c b/cpukit/rtems/src/timerserverfirewhen.c index 32695fb0e6..0069af1c3b 100644 --- a/cpukit/rtems/src/timerserverfirewhen.c +++ b/cpukit/rtems/src/timerserverfirewhen.c @@ -72,7 +72,7 @@ rtems_status_code rtems_timer_server_fire_when( switch ( location ) { case OBJECTS_LOCAL: - (void) _Watchdog_Remove( &the_timer->Ticker ); + _Timer_Cancel( the_timer ); the_timer->the_class = TIMER_TIME_OF_DAY_ON_TASK; _Watchdog_Initialize( &the_timer->Ticker, routine, id, user_data ); the_timer->Ticker.initial = seconds - _TOD_Seconds_since_epoch(); diff --git a/cpukit/sapi/Makefile.am b/cpukit/sapi/Makefile.am index 
070800e636..d7fd9aac63 100644 --- a/cpukit/sapi/Makefile.am +++ b/cpukit/sapi/Makefile.am @@ -4,6 +4,7 @@ include $(top_srcdir)/automake/compile.am include_rtemsdir = $(includedir)/rtems include_rtems_HEADERS = include/confdefs.h +include_rtems_HEADERS += include/rtems/bsd.h include_rtems_HEADERS += include/rtems/chain.h include_rtems_HEADERS += include/rtems/config.h include_rtems_HEADERS += include/rtems/counter.h @@ -19,6 +20,7 @@ include_rtems_HEADERS += include/rtems/rbheap.h include_rtems_HEADERS += include/rtems/rbtree.h include_rtems_HEADERS += include/rtems/scheduler.h include_rtems_HEADERS += include/rtems/sptables.h +include_rtems_HEADERS += include/rtems/timecounter.h include_rtems_HEADERS += include/rtems/timespec.h EXTRA_DIST = include/rtems/README @@ -39,6 +41,7 @@ libsapi_a_SOURCES += src/delayticks.c libsapi_a_SOURCES += src/delaynano.c libsapi_a_SOURCES += src/profilingiterate.c libsapi_a_SOURCES += src/profilingreportxml.c +libsapi_a_SOURCES += src/tcsimpleinstall.c libsapi_a_CPPFLAGS = $(AM_CPPFLAGS) include $(srcdir)/preinstall.am diff --git a/cpukit/sapi/include/rtems/bsd.h b/cpukit/sapi/include/rtems/bsd.h new file mode 100644 index 0000000000..0c44e3787d --- /dev/null +++ b/cpukit/sapi/include/rtems/bsd.h @@ -0,0 +1,141 @@ +/** + * @file + * + * @ingroup BSD + * + * @brief BSD Compatibility API + */ + +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_BSD_H +#define _RTEMS_BSD_H + +#include <rtems/score/timecounter.h> +#include <rtems/score/basedefs.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup BSD BSD Compatibility Support + * + * @{ + */ + +/** + * @copydoc _Timecounter_Bintime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_bintime( struct bintime *bt ) +{ + _Timecounter_Bintime( bt ); +} + +/** + * @copydoc _Timecounter_Nanotime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_nanotime( struct timespec *ts ) +{ + _Timecounter_Nanotime( ts ); +} + +/** + * @copydoc _Timecounter_Microtime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_microtime( struct timeval *tv ) +{ + _Timecounter_Microtime( tv ); +} + +/** + * @copydoc _Timecounter_Binuptime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_binuptime( struct bintime *bt ) +{ + _Timecounter_Binuptime( bt ); +} + +/** + * @copydoc _Timecounter_Nanouptime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_nanouptime( struct timespec *ts ) +{ + _Timecounter_Nanouptime( ts ); +} + +/** + * @copydoc _Timecounter_Microtime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_microuptime( struct timeval *tv ) +{ + _Timecounter_Microuptime( tv ); +} + +/** + * @copydoc _Timecounter_Getbintime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_getbintime( struct bintime *bt ) +{ + _Timecounter_Getbintime( bt ); +} + +/** + * @copydoc _Timecounter_Getnanotime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_getnanotime( struct timespec *ts ) +{ + _Timecounter_Getnanotime( ts ); +} + +/** + * @copydoc _Timecounter_Getmicrotime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_getmicrotime( struct timeval *tv ) +{ + _Timecounter_Getmicrotime( tv ); +} + +/** + * @copydoc _Timecounter_Getbinuptime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_getbinuptime( struct bintime *bt ) +{ + _Timecounter_Getbinuptime( bt ); +} + +/** + * @copydoc _Timecounter_Getnanouptime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_getnanouptime( struct timespec *ts ) +{ + 
_Timecounter_Getnanouptime( ts ); +} + +/** + * @copydoc _Timecounter_Getmicrouptime() + */ +RTEMS_INLINE_ROUTINE void rtems_bsd_getmicrouptime( struct timeval *tv ) +{ + _Timecounter_Getmicrouptime( tv ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_BSD_H */ diff --git a/cpukit/sapi/include/rtems/timecounter.h b/cpukit/sapi/include/rtems/timecounter.h new file mode 100644 index 0000000000..04bc534d55 --- /dev/null +++ b/cpukit/sapi/include/rtems/timecounter.h @@ -0,0 +1,304 @@ +/** + * @file + * + * @ingroup SAPITimecounter + * + * @brief Timecounter API + */ + +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_TIMECOUNTER_H +#define _RTEMS_TIMECOUNTER_H + +#include <rtems/score/timecounter.h> +#include <rtems/score/basedefs.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup SAPITimecounter Timecounter Support + * + * @{ + */ + +/** + * @brief Timecounter quality for the clock drivers. + * + * Timecounter with higher quality value are used in favour of those with lower + * quality value. + */ +#define RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER 100 + +/** + * @copydoc _Timecounter_Install() + * + * Below is an exemplary code snippet that shows the adjustable parameters and + * the following call of the install routine. 
+ * + * @code + * struct timecounter tc; + * + * uint32_t get_timecount( struct timecounter *tc ) + * { + * return some_free_running_counter; + * } + * + * void install( void ) + * { + * tc.tc_get_timecount = get_timecount; + * tc.tc_counter_mask = 0xffffffff; + * tc.tc_frequency = 123456; + * tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + * rtems_timecounter_install( &tc ); + * } + * @endcode + */ +RTEMS_INLINE_ROUTINE void rtems_timecounter_install( + struct timecounter *tc +) +{ + _Timecounter_Install( tc ); +} + +/** + * @copydoc _Timecounter_Tick() + */ +RTEMS_INLINE_ROUTINE void rtems_timecounter_tick(void) +{ + _Timecounter_Tick(); +} + +/** + * @brief Simple timecounter to support legacy clock drivers. + */ +typedef struct { + struct timecounter tc; + uint64_t scaler; + uint32_t real_interval; + uint32_t binary_interval; +} rtems_timecounter_simple; + +/** + * @brief Returns the current value of a simple timecounter. + */ +typedef uint32_t rtems_timecounter_simple_get( + rtems_timecounter_simple *tc +); + +/** + * @brief Returns true if the interrupt of a simple timecounter is pending, and + * false otherwise. + */ +typedef bool rtems_timecounter_simple_is_pending( + rtems_timecounter_simple *tc +); + +/** + * @brief Initializes and installs a simple timecounter. + * + * A simple timecounter can be used if the hardware provides no free running + * counter or only the module used for the clock tick is available. The period + * of the simple timecounter equals the clock tick interval. The interval is + * scaled up to the next power of two. + * + * @param[in] tc Zero initialized simple timecounter. + * @param[in] frequency_in_hz The timecounter frequency in Hz. + * @param[in] timecounter_ticks_per_clock_tick The timecounter ticks per clock tick. + * @param[in] get_timecount The method to get the current time count. 
+ * + * @code + * #include <rtems/timecounter.h> + * + * static rtems_timecounter_simple some_tc; + * + * static uint32_t some_tc_get( rtems_timecounter_simple *tc ) + * { + * return some.value; + * } + * + * static bool some_tc_is_pending( rtems_timecounter_simple *tc ) + * { + * return some.is_pending; + * } + * + * static uint32_t some_tc_get_timecount( struct timecounter *tc ) + * { + * return rtems_timecounter_simple_downcounter_get( + * tc, + * some_tc_get, + * some_tc_is_pending + * ); + * } + * + * static void some_tc_tick( void ) + * { + * rtems_timecounter_simple_downcounter_tick( &some_tc, some_tc_get ); + * } + * + * void install( void ) + * { + * uint32_t frequency = 123456; + * uint64_t us_per_tick = rtems_configuration_get_microseconds_per_tick(); + * uint32_t timecounter_ticks_per_clock_tick = + * ( frequency * us_per_tick ) / 1000000; + * + * rtems_timecounter_simple_install( + * &some_tc, + * frequency, + * timecounter_ticks_per_clock_tick, + * some_tc_get_timecount + * ); + * } + * @endcode + * + * @see rtems_timecounter_simple_downcounter_get(), + * rtems_timecounter_simple_downcounter_tick(), + * rtems_timecounter_simple_upcounter_get() and + * rtems_timecounter_simple_upcounter_tick(). + */ +void rtems_timecounter_simple_install( + rtems_timecounter_simple *tc, + uint32_t frequency_in_hz, + uint32_t timecounter_ticks_per_clock_tick, + timecounter_get_t *get_timecount +); + +/** + * @brief Maps a simple timecounter value into its binary frequency domain. + * + * @param[in] tc The simple timecounter. + * @param[in] value The value of the simple timecounter. + * + * @return The scaled value. + */ +RTEMS_INLINE_ROUTINE uint32_t rtems_timecounter_simple_scale( + const rtems_timecounter_simple *tc, + uint32_t value +) +{ + return (uint32_t) ( ( value * tc->scaler ) >> 32 ); +} + +/** + * @brief Performs a simple timecounter tick for downcounters. + * + * @param[in] tc The simple timecounter. 
+ * @param[in] get The method to get the value of the simple timecounter. + */ +RTEMS_INLINE_ROUTINE void rtems_timecounter_simple_downcounter_tick( + rtems_timecounter_simple *tc, + rtems_timecounter_simple_get get +) +{ + uint32_t current; + + current = rtems_timecounter_simple_scale( + tc, + tc->real_interval - ( *get )( tc ) + ); + + _Timecounter_Tick_simple( tc->binary_interval, current ); +} + +/** + * @brief Performs a simple timecounter tick for upcounters. + * + * @param[in] tc The simple timecounter. + * @param[in] get The method to get the value of the simple timecounter. + */ +RTEMS_INLINE_ROUTINE void rtems_timecounter_simple_upcounter_tick( + rtems_timecounter_simple *tc, + rtems_timecounter_simple_get get +) +{ + uint32_t current; + + current = rtems_timecounter_simple_scale( tc, ( *get )( tc ) ); + + _Timecounter_Tick_simple( tc->binary_interval, current ); +} + +/** + * @brief Gets the simple timecounter value mapped to its binary frequency + * domain for downcounters. + * + * @param[in] tc The simple timecounter. + * @param[in] get The method to get the value of the simple timecounter. + * @param[in] is_pending The method which indicates if the interrupt of the + * simple timecounter is pending. + */ +RTEMS_INLINE_ROUTINE uint32_t rtems_timecounter_simple_downcounter_get( + struct timecounter *tc_base, + rtems_timecounter_simple_get get, + rtems_timecounter_simple_is_pending is_pending +) +{ + rtems_timecounter_simple *tc; + uint32_t counter; + uint32_t interval; + + tc = (rtems_timecounter_simple *) tc_base; + counter = ( *get )( tc ); + interval = tc->real_interval; + + if ( ( *is_pending )( tc ) ) { + counter = ( *get )( tc ); + interval *= 2; + } + + return rtems_timecounter_simple_scale( tc, interval - counter ); +} + +/** + * @brief Gets the simple timecounter value mapped to its binary frequency + * domain for upcounters. + * + * @param[in] tc The simple timecounter. + * @param[in] get The method to get the value of the simple timecounter. 
+ * @param[in] is_pending The method which indicates if the interrupt of the + * simple timecounter is pending. + */ +RTEMS_INLINE_ROUTINE uint32_t rtems_timecounter_simple_upcounter_get( + struct timecounter *tc_base, + rtems_timecounter_simple_get get, + rtems_timecounter_simple_is_pending is_pending +) +{ + rtems_timecounter_simple *tc; + uint32_t counter; + uint32_t interval; + + tc = (rtems_timecounter_simple *) tc_base; + counter = ( *get )( tc ); + interval = 0; + + if ( ( *is_pending )( tc ) ) { + counter = ( *get )( tc ); + interval = tc->real_interval; + } + + return rtems_timecounter_simple_scale( tc, interval + counter ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_TIMECOUNTER_H */ diff --git a/cpukit/sapi/preinstall.am b/cpukit/sapi/preinstall.am index 3f864bb61d..8a4e54ffd2 100644 --- a/cpukit/sapi/preinstall.am +++ b/cpukit/sapi/preinstall.am @@ -22,6 +22,10 @@ $(PROJECT_INCLUDE)/rtems/confdefs.h: include/confdefs.h $(PROJECT_INCLUDE)/rtems $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/confdefs.h PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/confdefs.h +$(PROJECT_INCLUDE)/rtems/bsd.h: include/rtems/bsd.h $(PROJECT_INCLUDE)/rtems/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/bsd.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/bsd.h + $(PROJECT_INCLUDE)/rtems/chain.h: include/rtems/chain.h $(PROJECT_INCLUDE)/rtems/$(dirstamp) $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/chain.h PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/chain.h @@ -82,6 +86,10 @@ $(PROJECT_INCLUDE)/rtems/sptables.h: include/rtems/sptables.h $(PROJECT_INCLUDE) $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/sptables.h PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/sptables.h +$(PROJECT_INCLUDE)/rtems/timecounter.h: include/rtems/timecounter.h $(PROJECT_INCLUDE)/rtems/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/timecounter.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/timecounter.h + $(PROJECT_INCLUDE)/rtems/timespec.h: 
include/rtems/timespec.h $(PROJECT_INCLUDE)/rtems/$(dirstamp) $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/timespec.h PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/timespec.h diff --git a/cpukit/sapi/src/exinit.c b/cpukit/sapi/src/exinit.c index ba71ec465b..31158995ab 100644 --- a/cpukit/sapi/src/exinit.c +++ b/cpukit/sapi/src/exinit.c @@ -44,6 +44,7 @@ #include <rtems/score/priority.h> #include <rtems/score/schedulerimpl.h> #include <rtems/score/smpimpl.h> +#include <rtems/score/timecounter.h> #include <rtems/score/threadimpl.h> #include <rtems/score/todimpl.h> #include <rtems/score/userextimpl.h> diff --git a/cpukit/sapi/src/tcsimpleinstall.c b/cpukit/sapi/src/tcsimpleinstall.c new file mode 100644 index 0000000000..563edd7c87 --- /dev/null +++ b/cpukit/sapi/src/tcsimpleinstall.c @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#if HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/timecounter.h> + +void rtems_timecounter_simple_install( + rtems_timecounter_simple *tc, + uint32_t frequency_in_hz, + uint32_t counter_ticks_per_clock_tick, + timecounter_get_t *get_timecount +) +{ + uint32_t power_of_two = 1; + uint32_t mask; + uint64_t scaler; + int i; + + for ( i = 0; i < 32; ++i ) { + if ( power_of_two >= counter_ticks_per_clock_tick ) { + break; + } + + power_of_two *= 2; + } + + mask = ( 2 * power_of_two ) - 1; + scaler = ( (uint64_t) power_of_two << 32 ) / counter_ticks_per_clock_tick; + + tc->scaler = scaler; + tc->real_interval = counter_ticks_per_clock_tick; + tc->binary_interval = ( mask + 1 ) / 2; + tc->tc.tc_get_timecount = get_timecount; + tc->tc.tc_counter_mask = mask; + tc->tc.tc_frequency = (uint32_t) ( ( frequency_in_hz * scaler ) >> 32 ); + tc->tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + _Timecounter_Install( &tc->tc ); +} diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am index f8ad60da3a..11399e9c73 100644 --- a/cpukit/score/Makefile.am +++ b/cpukit/score/Makefile.am @@ -5,6 +5,15 @@ SUBDIRS = cpu ## include +include_sysdir = $(includedir)/sys + +include_sys_HEADERS = +include_sys_HEADERS += include/sys/_ffcounter.h +include_sys_HEADERS += include/sys/timeffc.h +include_sys_HEADERS += include/sys/timepps.h +include_sys_HEADERS += include/sys/timetc.h +include_sys_HEADERS += include/sys/timex.h + include_rtemsdir = $(includedir)/rtems include_rtems_HEADERS = include/rtems/debug.h @@ -82,10 +91,10 @@ include_rtems_score_HEADERS += include/rtems/score/threadimpl.h include_rtems_score_HEADERS += include/rtems/score/threaddispatch.h include_rtems_score_HEADERS += include/rtems/score/threadq.h include_rtems_score_HEADERS += include/rtems/score/threadqimpl.h -include_rtems_score_HEADERS += include/rtems/score/threadsync.h include_rtems_score_HEADERS += include/rtems/score/timespec.h include_rtems_score_HEADERS += 
include/rtems/score/timestamp.h -include_rtems_score_HEADERS += include/rtems/score/timestamp64.h +include_rtems_score_HEADERS += include/rtems/score/timecounter.h +include_rtems_score_HEADERS += include/rtems/score/timecounterimpl.h include_rtems_score_HEADERS += include/rtems/score/tls.h include_rtems_score_HEADERS += include/rtems/score/tod.h include_rtems_score_HEADERS += include/rtems/score/todimpl.h @@ -139,6 +148,7 @@ libscore_a_SOURCES += src/schedulerprioritysmp.c libscore_a_SOURCES += src/schedulersimplesmp.c libscore_a_SOURCES += src/schedulersmpdebug.c libscore_a_SOURCES += src/smp.c +libscore_a_SOURCES += src/smplock.c libscore_a_SOURCES += src/smpmulticastaction.c libscore_a_SOURCES += src/cpuset.c libscore_a_SOURCES += src/cpusetprintsupport.c @@ -159,7 +169,7 @@ libscore_a_SOURCES += src/corebarrier.c src/corebarrierrelease.c \ ## CORE_MESSAGE_QUEUE_C_FILES libscore_a_SOURCES += src/coremsg.c src/coremsgbroadcast.c \ src/coremsgclose.c src/coremsgflush.c src/coremsgflushwait.c \ - src/coremsginsert.c src/coremsgflushsupp.c src/coremsgseize.c \ + src/coremsginsert.c src/coremsgseize.c \ src/coremsgsubmit.c ## CORE_MUTEX_C_FILES @@ -178,8 +188,7 @@ libscore_a_SOURCES += src/corerwlock.c src/corerwlockobtainread.c \ endif ## CORE_SEMAPHORE_C_FILES -libscore_a_SOURCES += src/coresem.c src/coresemflush.c src/coresemseize.c \ - src/coresemsurrender.c +libscore_a_SOURCES += src/coresem.c ## CORE_SPINLOCK_C_FILES if HAS_PTHREADS @@ -281,7 +290,7 @@ libscore_a_SOURCES += src/rbtree.c \ ## THREAD_C_FILES libscore_a_SOURCES += src/thread.c src/threadchangepriority.c \ src/threadclearstate.c src/threadcreateidle.c \ - src/threaddelayended.c src/threaddispatch.c \ + src/threaddispatch.c \ src/threadenabledispatch.c src/threaddisabledispatch.c \ src/threadget.c src/threadhandler.c src/threadinitialize.c \ src/threadloadenv.c \ @@ -290,6 +299,7 @@ libscore_a_SOURCES += src/thread.c src/threadchangepriority.c \ src/threadstackallocate.c src/threadstackfree.c 
src/threadstart.c \ src/threadstartmultitasking.c src/iterateoverthreads.c libscore_a_SOURCES += src/threadglobalconstruction.c +libscore_a_SOURCES += src/threadtimeout.c libscore_a_SOURCES += src/threadyield.c if HAS_SMP @@ -300,7 +310,8 @@ endif libscore_a_SOURCES += src/threadq.c \ src/threadqenqueue.c \ src/threadqextractwithproxy.c src/threadqfirst.c \ - src/threadqflush.c src/threadqprocesstimeout.c src/threadqtimeout.c + src/threadqflush.c +libscore_a_SOURCES += src/threadqops.c ## TIMESPEC_C_FILES libscore_a_SOURCES += src/timespecaddto.c src/timespecfromticks.c \ @@ -308,24 +319,16 @@ libscore_a_SOURCES += src/timespecaddto.c src/timespecfromticks.c \ src/timespecsubtract.c src/timespectoticks.c src/timespecdivide.c \ src/timespecdividebyinteger.c src/timespecgetasnanoseconds.c -## TIMESTAMP_INT64_C_FILES -libscore_a_SOURCES += src/ts64addto.c \ - src/ts64divide.c src/ts64equalto.c \ - src/ts64getnanoseconds.c src/ts64getseconds.c \ - src/ts64lessthan.c \ - src/ts64set.c src/ts64settozero.c src/ts64subtract.c \ - src/ts64totimespec.c src/ts64totimeval.c - ## TOD_C_FILES -libscore_a_SOURCES += src/coretod.c src/coretodset.c src/coretodget.c \ - src/coretodgetuptimetimespec.c src/coretodtickle.c \ - src/coretodsecondssinceepoch.c src/coretodtickspersec.c \ +libscore_a_SOURCES += src/coretod.c src/coretodset.c \ + src/coretodtickle.c \ + src/coretodtickspersec.c \ src/coretodadjust.c ## WATCHDOG_C_FILES libscore_a_SOURCES += src/watchdog.c src/watchdogadjust.c \ - src/watchdogadjusttochain.c src/watchdoginsert.c src/watchdogremove.c \ - src/watchdogtickle.c + src/watchdoginsert.c src/watchdogremove.c +libscore_a_SOURCES += src/watchdogtick.c libscore_a_SOURCES += src/watchdogtickssinceboot.c ## USEREXT_C_FILES @@ -344,6 +347,7 @@ libscore_a_SOURCES += src/profilingisrentryexit.c libscore_a_SOURCES += src/once.c libscore_a_SOURCES += src/resourceiterate.c libscore_a_SOURCES += src/smpbarrierwait.c +libscore_a_SOURCES += src/kern_tc.c EXTRA_DIST = 
src/Unlimited.txt diff --git a/cpukit/score/cpu/Makefile.am b/cpukit/score/cpu/Makefile.am index 69abcd6cf6..7279d381f8 100644 --- a/cpukit/score/cpu/Makefile.am +++ b/cpukit/score/cpu/Makefile.am @@ -4,6 +4,7 @@ DIST_SUBDIRS = DIST_SUBDIRS += arm DIST_SUBDIRS += avr DIST_SUBDIRS += bfin +DIST_SUBDIRS += epiphany DIST_SUBDIRS += h8300 DIST_SUBDIRS += i386 DIST_SUBDIRS += lm32 diff --git a/cpukit/score/cpu/epiphany/Makefile.am b/cpukit/score/cpu/epiphany/Makefile.am new file mode 100644 index 0000000000..0099f081ca --- /dev/null +++ b/cpukit/score/cpu/epiphany/Makefile.am @@ -0,0 +1,32 @@ +include $(top_srcdir)/automake/compile.am + +CLEANFILES = +DISTCLEANFILES = + +include_rtemsdir = $(includedir)/rtems + +include_rtems_HEADERS = rtems/asm.h + +include_rtems_scoredir = $(includedir)/rtems/score + +include_rtems_score_HEADERS = +include_rtems_score_HEADERS += rtems/score/cpu.h +include_rtems_score_HEADERS += rtems/score/cpuatomic.h +include_rtems_score_HEADERS += rtems/score/cpu_asm.h +include_rtems_score_HEADERS += rtems/score/types.h +include_rtems_score_HEADERS += rtems/score/epiphany.h +include_rtems_score_HEADERS += rtems/score/epiphany-utility.h + +noinst_LIBRARIES = libscorecpu.a + +libscorecpu_a_SOURCES = cpu.c +libscorecpu_a_SOURCES += epiphany-exception-handler.S +libscorecpu_a_SOURCES += epiphany-context-switch.S +libscorecpu_a_SOURCES += epiphany-context-initialize.c + +libscorecpu_a_CPPFLAGS = $(AM_CPPFLAGS) + +all-local: $(PREINSTALL_FILES) + +include $(srcdir)/preinstall.am +include $(top_srcdir)/automake/local.am diff --git a/cpukit/score/cpu/epiphany/cpu.c b/cpukit/score/cpu/epiphany/cpu.c new file mode 100644 index 0000000000..ada0f8c518 --- /dev/null +++ b/cpukit/score/cpu/epiphany/cpu.c @@ -0,0 +1,114 @@ +/* + * Epiphany CPU Dependent Source + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <rtems/system.h> +#include <rtems/score/isr.h> +#include <rtems/score/wkspace.h> +#include <bsp/linker-symbols.h> +#include <rtems/score/cpu.h> + +void init(void); +void fini(void); + +void _init() +{ +} + +void _fini() +{ +} + +void _CPU_Exception_frame_print (const CPU_Exception_frame *ctx) +{ + /* Do nothing */ +} +/** + * @brief Performs processor dependent initialization. 
+ */ +void _CPU_Initialize(void) +{ + /* Do nothing */ +} + +void _CPU_ISR_Set_level(uint32_t level) +{ + /* Do nothing */ +} + +uint32_t _CPU_ISR_Get_level( void ) +{ + /* Do nothing */ + return 0; +} + +void _CPU_ISR_install_raw_handler( + uint32_t vector, + proc_ptr new_handler, + proc_ptr *old_handler +) +{ + /* Do nothing */ +} + +void _CPU_ISR_install_vector( + uint32_t vector, + proc_ptr new_handler, + proc_ptr *old_handler +) +{ + /* Do nothing */ +} + +void _CPU_Install_interrupt_stack( void ) +{ + /* Do nothing */ +} + +CPU_Counter_ticks _CPU_Counter_read( void ) +{ + static CPU_Counter_ticks counter; + + CPU_Counter_ticks snapshot; + + snapshot = counter; + counter = snapshot + 1; + + return snapshot; +} + +void *_CPU_Thread_Idle_body( uintptr_t ignored ) +{ + do { + __asm__ __volatile__ ("idle"); + } while (1); + + return NULL; +} diff --git a/cpukit/score/cpu/epiphany/epiphany-context-initialize.c b/cpukit/score/cpu/epiphany/epiphany-context-initialize.c new file mode 100644 index 0000000000..b47871e4ab --- /dev/null +++ b/cpukit/score/cpu/epiphany/epiphany-context-initialize.c @@ -0,0 +1,66 @@ +/* + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#include <string.h> + +#include <rtems/score/cpu.h> +#include <rtems/score/interr.h> + +void _CPU_Context_Initialize( + Context_Control *context, + void *stack_area_begin, + size_t stack_area_size, + uint32_t new_level, + void (*entry_point)( void ), + bool is_fp, + void *tls_area +) +{ + uintptr_t stack = ((uintptr_t) stack_area_begin); + uint32_t sr, iret; + + /* Account for red-zone */ + uintptr_t stack_high = stack + stack_area_size - EPIPHANY_GCC_RED_ZONE_SIZE; + + asm volatile ("movfs %0, status \n" : "=r" (sr):); + asm volatile ("movfs %0, iret \n" : "=r" (iret):); + + memset(context, 0, sizeof(*context)); + + context->r[11] = stack_high; + context->r[13] = stack_high; + context->r[14] = (uintptr_t) entry_point; + context->status = sr; + context->iret = iret; +} diff --git a/cpukit/score/cpu/epiphany/epiphany-context-switch.S b/cpukit/score/cpu/epiphany/epiphany-context-switch.S new file mode 100644 index 0000000000..6d08389713 --- /dev/null +++ b/cpukit/score/cpu/epiphany/epiphany-context-switch.S @@ -0,0 +1,216 @@ +/* + * Epiphany CPU Dependent Source + * + * Copyright (c) 2015 University of York. 
+ * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/asm.h> + +.section .text,"ax" +.align 4 + +PUBLIC(_CPU_Context_switch) +PUBLIC(_CPU_Context_restore) +PUBLIC(_CPU_Context_restore_fp) +PUBLIC(_CPU_Context_save_fp) +PUBLIC(restore) + +SYM(_CPU_Context_switch): + /* Disable interrupts and store all registers */ + gid + + str r0, [r0] + str r1, [r0,1] + str r2, [r0,2] + str r3, [r0,3] + str r4, [r0,4] + str r5, [r0,5] + str r6, [r0,6] + str r7, [r0,7] + str r8, [r0,8] + str r9, [r0,9] + str r10, [r0,10] + str fp, [r0,11] + str r12, [r0,12] + str sp, [r0,13] + str lr, [r0,14] + str r15, [r0,15] + str r16, [r0,16] + str r17, [r0,17] + str r18, [r0,18] + str r19, [r0,19] + str r20, [r0,20] + str r21, [r0,21] + str r22, [r0,22] + str r23, [r0,23] + str r24, [r0,24] + str r25, [r0,25] + str r26, [r0,26] + str r27, [r0,27] + str r28, [r0,28] + str r29, [r0,29] + str r30, [r0,30] + str r31, [r0,31] + str r32, [r0,32] + str r33, [r0,33] + str r34, [r0,34] + str r35, [r0,35] + str r36, [r0,36] + str r37, [r0,37] + str r38, [r0,38] + str r39, [r0,39] + str r40, [r0,40] + str r41, [r0,41] + str r42, [r0,42] + str r43, [r0,43] + str r44, [r0,44] + str r45, [r0,45] + str r46, [r0,46] + str r47, [r0,47] + str r48, [r0,48] + str r49, [r0,49] + str r50, [r0,50] + str r51, [r0,51] + str r52, [r0,52] + str r53, [r0,53] + str r54, [r0,54] + str r55, [r0,55] + str r56, [r0,56] + str r57, [r0,57] + str r58, [r0,58] + str r59, [r0,59] + str r60, [r0,60] + str r61, [r0,61] + str r62, [r0,62] + str r63, [r0,63] + + /* Store status register */ + movfs r27, status + str r27, [r0,64] + + /* Store config register */ + movfs r27, config + str r27, [r0,65] + + /* Store interrupt return address register */ + movfs r27, iret + str r27, [r0,66] + +SYM(restore): + + /* r1 contains buffer address, skip it */ + ldr r2, [r1,2] + ldr r3, [r1,3] + ldr r4, [r1,4] + ldr r5, [r1,5] + ldr r6, [r1,6] + ldr r7, [r1,7] + ldr r8, [r1,8] + ldr r9, [r1,9] + ldr r10, [r1,10] + ldr fp, 
[r1,11] + ldr r12, [r1,12] + ldr sp, [r1,13] + ldr lr, [r1,14] + ldr r15, [r1,15] + ldr r16, [r1,16] + ldr r17, [r1,17] + ldr r18, [r1,18] + ldr r19, [r1,19] + ldr r20, [r1,20] + ldr r21, [r1,21] + ldr r22, [r1,22] + ldr r23, [r1,23] + ldr r24, [r1,24] + ldr r25, [r1,25] + ldr r26, [r1,26] + ldr r27, [r1,27] + ldr r32, [r1,32] + ldr r33, [r1,33] + ldr r34, [r1,34] + ldr r35, [r1,35] + ldr r36, [r1,36] + ldr r37, [r1,37] + ldr r38, [r1,38] + ldr r39, [r1,39] + ldr r40, [r1,40] + ldr r41, [r1,41] + ldr r42, [r1,42] + ldr r43, [r1,43] + ldr r44, [r1,44] + ldr r45, [r1,45] + ldr r46, [r1,46] + ldr r47, [r1,47] + ldr r48, [r1,48] + ldr r49, [r1,49] + ldr r50, [r1,50] + ldr r51, [r1,51] + ldr r52, [r1,52] + ldr r53, [r1,53] + ldr r54, [r1,54] + ldr r55, [r1,55] + ldr r56, [r1,56] + ldr r57, [r1,57] + ldr r58, [r1,58] + ldr r59, [r1,59] + ldr r60, [r1,60] + ldr r61, [r1,61] + ldr r62, [r1,62] + ldr r63, [r1,63] + + /* Load status register */ + ldr r0, [r1,64] + movts status, r0 + + /* Load config register */ + ldr r0, [r1,65] + movts config, r0 + + /* Load interrupt return address register */ + ldr r0,[r1,66] + movts iret, r0 + + ldr r0,[r1] + ldr r1,[r1,1] + + /* Enable interrupts and return */ + gie + jr lr + +SYM(_CPU_Context_restore): + mov r1, r0 + b _restore + nop + +/* No FP support for Epiphany yet */ +SYM(_CPU_Context_restore_fp): + nop + + SYM(_CPU_Context_save_fp): + nop diff --git a/cpukit/score/cpu/epiphany/epiphany-exception-handler.S b/cpukit/score/cpu/epiphany/epiphany-exception-handler.S new file mode 100644 index 0000000000..09c88fd861 --- /dev/null +++ b/cpukit/score/cpu/epiphany/epiphany-exception-handler.S @@ -0,0 +1,304 @@ +/** + * @file + * + * @ingroup ScoreCPU + * + * @brief Epiphany exception support implementation. + */ + +/* + * Copyright (c) 2015 University of York. 
+ * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <rtems/score/cpu.h> + +#include <rtems/asm.h> +#include <rtems/score/percpu.h> + +EXTERN(bsp_start_vector_table_begin) +EXTERN(_Thread_Dispatch) +PUBLIC(ISR_Handler) + +.section .text, "ax" +.align 4 +TYPE_FUNC(ISR_Handler) +SYM(ISR_Handler): + /* Reserve space for CPU_Exception_frame */ + sub sp, sp, #(CPU_EXCEPTION_FRAME_SIZE) + + str r0, [sp] + str r1, [sp,1] + str r2, [sp,2] + str r3, [sp,3] + str r4, [sp,4] + str r5, [sp,5] + str r6, [sp,6] + str r7, [sp,7] + str r8, [sp,8] + str r9, [sp,9] + str r10, [sp,10] + str fp, [sp,11] + str r12, [sp,12] + + /* Save interrupted task stack pointer */ + add r1, sp, #(CPU_EXCEPTION_FRAME_SIZE + 8) + str r1,[sp,13] + + str lr, [sp,14] + str r15, [sp,15] + str r16, [sp,16] + str r17, [sp,17] + str r18, [sp,18] + str r19, [sp,19] + str r20, [sp,20] + str r21, [sp,21] + str r22, [sp,22] + str r23, [sp,23] + str r24, [sp,24] + str r25, [sp,25] + str r26, [sp,26] + str r27, [sp,27] + str r28, [sp,28] + str r29, [sp,29] + str r30, [sp,30] + str r31, [sp,31] + str r32, [sp,32] + str r33, [sp,33] + str r34, [sp,34] + str r35, [sp,35] + str r36, [sp,36] + str r37, [sp,37] + str r38, [sp,38] + str r39, [sp,39] + str r40, [sp,40] + str r41, [sp,41] + str r42, [sp,42] + str r43, [sp,43] + str r44, [sp,44] + str r45, [sp,45] + str r46, [sp,46] + str r47, [sp,47] + str r48, [sp,48] + str r49, [sp,49] + str r50, [sp,50] + str r51, [sp,51] + str r52, [sp,52] + str r53, [sp,53] + str r54, [sp,54] + str r55, [sp,55] + str r56, [sp,56] + str r57, [sp,57] + str r58, [sp,58] + str r59, [sp,59] + str r60, [sp,60] + str r61, [sp,61] + /* r62 and r63 are saved from start.S interrupt entry + * and hold vector number and _ISR_Handler address repsectively. 
+ */ + + /* Save status register */ + movfs r1,status + str r1, [sp,62] + + /* Save config register */ + movfs r1,config + str r1, [sp,63] + + /* Save interrupt return address register */ + movfs r1,iret + str r1, [sp,64] + + mov r33, %low(__Per_CPU_Information) + movt r33, %high(__Per_CPU_Information) + + add r6, r33, #(PER_CPU_ISR_NEST_LEVEL) + add r8, r33, #(PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL) + + /* Increment nesting level and disable thread dispatch */ + ldr r5, [r6] + ldr r7, [r8] + add r5, r5, #1 + add r7, r7, #1 + str r5, [r6] + str r7, [r8] + + /* Keep sp (Exception frame address) in r32 - Callee saved */ + mov r32, sp + + /* Keep __Per_CPU_Information address in r33 - Callee saved */ + mov r33, r18 + + /* Call the exception handler from vector table. + * First function arg for C handler is vector number, + * and the second is a pointer to exception frame. + */ + mov r0, r62 + mov r1, sp + + mov r27, r62 + lsl r27, r27, #2 + mov r26, %low(_bsp_start_vector_table_begin) + movt r15, #0 + add r27, r27, r26 + ldr r27, [r27] + + /* Do not switch stacks if we are in a nested interrupt. At + * this point r5 should be holding ISR_NEST_LEVEL value. 
+ */ + sub r37, r5, #1 + bgtu jump_to_c_handler + + /* Switch to RTEMS dedicated interrupt stack */ + add sp, r18, #(PER_CPU_INTERRUPT_STACK_HIGH) + ldr sp, [sp] + +jump_to_c_handler: + jalr r27 + + /* Switch back to the interrupted task stack */ + mov sp, r32 + + /* Get the address of __Per_CPU_Information */ + mov r18, r33 + + /* Decrement nesting level and enable multitasking */ + add r6, r18, #(PER_CPU_ISR_NEST_LEVEL) + add r8, r18, #(PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL) + + ldr r5, [r6] + ldr r7, [r8] + sub r5, r5, #1 + sub r7, r7, #1 + str r5, [r6] + str r7, [r8] + + /* Check if _ISR_Nest_level > 0 */ + sub r37, r5, #0 + bgtu exception_frame_restore + + /* Check if _Thread_Dispatch_disable_level > 0 */ + sub r37, r7, #0 + bgtu exception_frame_restore + + /* Check if dispatch needed */ + add r31, r18, #(PER_CPU_DISPATCH_NEEDED) + ldr r31, [r31] + + sub r35, r31, #0 + beq exception_frame_restore + + mov r35, %low(__Thread_Dispatch) + movt r35, %high(__Thread_Dispatch) + jalr r35 + +exception_frame_restore: + + ldr r1, [sp,1] + ldr r2, [sp,2] + ldr r3, [sp,3] + ldr r4, [sp,4] + ldr r5, [sp,5] + ldr r6, [sp,6] + ldr r7, [sp,7] + ldr r8, [sp,8] + ldr r9, [sp,9] + ldr r10, [sp,10] + ldr fp, [sp,11] + ldr r12, [sp,12] + ldr lr, [sp,14] + ldr r15, [sp,15] + ldr r16, [sp,16] + ldr r17, [sp,17] + ldr r18, [sp,18] + ldr r19, [sp,19] + ldr r20, [sp,20] + ldr r21, [sp,21] + ldr r22, [sp,22] + ldr r23, [sp,23] + ldr r24, [sp,24] + ldr r25, [sp,25] + ldr r26, [sp,26] + ldr r27, [sp,27] + ldr r28, [sp,28] + ldr r29, [sp,29] + ldr r30, [sp,30] + ldr r31, [sp,31] + ldr r32, [sp,32] + ldr r34, [sp,34] + ldr r36, [sp,36] + ldr r38, [sp,38] + ldr r39, [sp,39] + ldr r40, [sp,40] + ldr r41, [sp,41] + ldr r42, [sp,42] + ldr r43, [sp,43] + ldr r44, [sp,44] + ldr r45, [sp,45] + ldr r46, [sp,46] + ldr r47, [sp,47] + ldr r48, [sp,48] + ldr r49, [sp,49] + ldr r50, [sp,50] + ldr r51, [sp,51] + ldr r52, [sp,52] + ldr r53, [sp,53] + ldr r54, [sp,54] + ldr r55, [sp,55] + ldr r56, [sp,56] + 
ldr r57, [sp,57] + ldr r58, [sp,58] + ldr r59, [sp,59] + ldr r60, [sp,60] + ldr r61, [sp,61] + + /* Restore status register */ + ldr r0,[sp,62] + movts status, r0 + + /* Restore config register */ + ldr r0, [sp,63] + movts config, r0 + + /* Restore interrupt return address register */ + ldr r0, [sp,64] + movts iret, r0 + + ldr r0,[sp] + + /* Restore interrupted task's stack pointer */ + ldr sp, [sp,13] + + /* r62 and r63 are saved from start.S interrupt entry + * and hold vector number and _ISR_Handler address repsectively. + */ + ldr r62, [sp, -8] + ldr r63, [sp, -4] + + /* return from interrupt */ + rti diff --git a/cpukit/score/cpu/epiphany/preinstall.am b/cpukit/score/cpu/epiphany/preinstall.am new file mode 100644 index 0000000000..0250d128ed --- /dev/null +++ b/cpukit/score/cpu/epiphany/preinstall.am @@ -0,0 +1,53 @@ +## Automatically generated by ampolish3 - Do not edit + +if AMPOLISH3 +$(srcdir)/preinstall.am: Makefile.am + $(AMPOLISH3) $(srcdir)/Makefile.am > $(srcdir)/preinstall.am +endif + +PREINSTALL_DIRS = +DISTCLEANFILES += $(PREINSTALL_DIRS) + +all-am: $(PREINSTALL_FILES) + +PREINSTALL_FILES = +CLEANFILES += $(PREINSTALL_FILES) + +$(PROJECT_INCLUDE)/rtems/$(dirstamp): + @$(MKDIR_P) $(PROJECT_INCLUDE)/rtems + @: > $(PROJECT_INCLUDE)/rtems/$(dirstamp) +PREINSTALL_DIRS += $(PROJECT_INCLUDE)/rtems/$(dirstamp) + +$(PROJECT_INCLUDE)/rtems/asm.h: rtems/asm.h $(PROJECT_INCLUDE)/rtems/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/asm.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/asm.h + +$(PROJECT_INCLUDE)/rtems/score/$(dirstamp): + @$(MKDIR_P) $(PROJECT_INCLUDE)/rtems/score + @: > $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) +PREINSTALL_DIRS += $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + +$(PROJECT_INCLUDE)/rtems/score/cpu.h: rtems/score/cpu.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpu.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpu.h + +$(PROJECT_INCLUDE)/rtems/score/cpuatomic.h: 
rtems/score/cpuatomic.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpuatomic.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpuatomic.h + +$(PROJECT_INCLUDE)/rtems/score/cpu_asm.h: rtems/score/cpu_asm.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpu_asm.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpu_asm.h + +$(PROJECT_INCLUDE)/rtems/score/types.h: rtems/score/types.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/types.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/types.h + +$(PROJECT_INCLUDE)/rtems/score/epiphany.h: rtems/score/epiphany.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/epiphany.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/epiphany.h + +$(PROJECT_INCLUDE)/rtems/score/epiphany-utility.h: rtems/score/epiphany-utility.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/epiphany-utility.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/epiphany-utility.h + diff --git a/cpukit/score/cpu/epiphany/rtems/asm.h b/cpukit/score/cpu/epiphany/rtems/asm.h new file mode 100644 index 0000000000..87e0cca1cb --- /dev/null +++ b/cpukit/score/cpu/epiphany/rtems/asm.h @@ -0,0 +1,120 @@ +/** + * @file rtems/asm.h + * + * This include file attempts to address the problems + * caused by incompatible flavors of assemblers and + * toolsets. It primarily addresses variations in the + * use of leading underscores on symbols and the requirement + * that register names be preceded by a %. + */ + +/* + * NOTE: The spacing in the use of these macros + * is critical to them working as advertised. + * + * This file is based on similar code found in newlib available + * from ftp.cygnus.com. The file which was used had no copyright + * notice. 
This file is freely distributable as long as the source + * of the file is noted. This file is: + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * + * COPYRIGHT (c) 1994-1997. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef __EPIPHANY_ASM_H +#define __EPIPHANY_ASM_H + +/* + * Indicate we are in an assembly file and get the basic CPU definitions. + */ + +#ifndef ASM +#define ASM +#endif +#include <rtems/score/cpuopts.h> +#include <rtems/score/epiphany.h> + +/* + * Recent versions of GNU cpp define variables which indicate the + * need for underscores and percents. 
If not using GNU cpp or + * the version does not support this, then you will obviously + * have to define these as appropriate. + */ + +#ifndef __USER_LABEL_PREFIX__ +#define __USER_LABEL_PREFIX__ _ +#endif + +#ifndef __REGISTER_PREFIX__ +#define __REGISTER_PREFIX__ +#endif + +/* ANSI concatenation macros. */ + +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ + +#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) + +/* Use the right prefix for registers. */ + +#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x) + +/* + * define macros for all of the registers on this CPU + * + * EXAMPLE: #define d0 REG (d0) + */ + +/* + * Define macros to handle section beginning and ends. + */ +#define BEGIN_CODE_DCL .text +#define END_CODE_DCL +#define BEGIN_DATA_DCL .data +#define END_DATA_DCL +#define BEGIN_CODE .text +#define END_CODE +#define BEGIN_DATA +#define END_DATA +#define BEGIN_BSS +#define END_BSS +#define END + +/* + * Following must be tailor for a particular flavor of the C compiler. + * They may need to put underscores in front of the symbols. + */ + +#define PUBLIC(sym) .global SYM (sym) +#define EXTERN(sym) .extern SYM (sym) +#define TYPE_FUNC(sym) .type SYM (sym), %function + +#endif diff --git a/cpukit/score/cpu/epiphany/rtems/score/cpu.h b/cpukit/score/cpu/epiphany/rtems/score/cpu.h new file mode 100644 index 0000000000..fb5e6b2966 --- /dev/null +++ b/cpukit/score/cpu/epiphany/rtems/score/cpu.h @@ -0,0 +1,1184 @@ +/** + * @file rtems/score/cpu.h + */ + +/* + * + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _EPIPHANY_CPU_H +#define _EPIPHANY_CPU_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rtems/score/epiphany.h> /* pick up machine definitions */ +#include <rtems/score/types.h> +#ifndef ASM +#include <rtems/bspIo.h> +#include <stdint.h> +#include <stdio.h> /* for printk */ +#endif + +/* conditional compilation parameters */ + +/* + * Should the calls to _Thread_Enable_dispatch be inlined? + * + * If TRUE, then they are inlined. + * If FALSE, then a subroutine call is made. + * + * Basically this is an example of the classic trade-off of size + * versus speed. Inlining the call (TRUE) typically increases the + * size of RTEMS while speeding up the enabling of dispatching. 
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will + * only be 0 or 1 unless you are in an interrupt handler and that + * interrupt handler invokes the executive.] When not inlined + * something calls _Thread_Enable_dispatch which in turns calls + * _Thread_Dispatch. If the enable dispatch is inlined, then + * one subroutine call is avoided entirely.] + * + */ + +#define CPU_INLINE_ENABLE_DISPATCH FALSE + +/* + * Should the body of the search loops in _Thread_queue_Enqueue_priority + * be unrolled one time? In unrolled each iteration of the loop examines + * two "nodes" on the chain being searched. Otherwise, only one node + * is examined per iteration. + * + * If TRUE, then the loops are unrolled. + * If FALSE, then the loops are not unrolled. + * + * The primary factor in making this decision is the cost of disabling + * and enabling interrupts (_ISR_Flash) versus the cost of rest of the + * body of the loop. On some CPUs, the flash is more expensive than + * one iteration of the loop body. In this case, it might be desirable + * to unroll the loop. It is important to note that on some CPUs, this + * code is the longest interrupt disable period in RTEMS. So it is + * necessary to strike a balance when setting this parameter. + * + */ + +#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE + +/* + * Does RTEMS manage a dedicated interrupt stack in software? + * + * If TRUE, then a stack is allocated in _ISR_Handler_initialization. + * If FALSE, nothing is done. + * + * If the CPU supports a dedicated interrupt stack in hardware, + * then it is generally the responsibility of the BSP to allocate it + * and set it up. + * + * If the CPU does not support a dedicated interrupt stack, then + * the porter has two options: (1) execute interrupts on the + * stack of the interrupted task, and (2) have RTEMS manage a dedicated + * interrupt stack. + * + * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE. 
+ * + * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and + * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is + * possible that both are FALSE for a particular CPU. Although it + * is unclear what that would imply about the interrupt processing + * procedure on that CPU. + * + * Currently, for epiphany port, _ISR_Handler is responsible for switching to + * RTEMS dedicated interrupt task. + * + */ + +#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE + +/* + * Does this CPU have hardware support for a dedicated interrupt stack? + * + * If TRUE, then it must be installed during initialization. + * If FALSE, then no installation is performed. + * + * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE. + * + * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and + * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is + * possible that both are FALSE for a particular CPU. Although it + * is unclear what that would imply about the interrupt processing + * procedure on that CPU. + * + */ + +#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE + +/* + * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager? + * + * If TRUE, then the memory is allocated during initialization. + * If FALSE, then the memory is allocated during initialization. + * + * This should be TRUE is CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE + * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE. + * + */ + +#define CPU_ALLOCATE_INTERRUPT_STACK TRUE + +/* + * Does the RTEMS invoke the user's ISR with the vector number and + * a pointer to the saved interrupt frame (1) or just the vector + * number (0)? + * + */ + +#define CPU_ISR_PASSES_FRAME_POINTER 1 + +/* + * Does the CPU have hardware floating point? + * + * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported. + * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored. + * + * If there is a FP coprocessor such as the i387 or mc68881, then + * the answer is TRUE. 
+ * + * The macro name "epiphany_HAS_FPU" should be made CPU specific. + * It indicates whether or not this CPU model has FP support. For + * example, it would be possible to have an i386_nofp CPU model + * which set this to false to indicate that you have an i386 without + * an i387 and wish to leave floating point support out of RTEMS. + * + * The CPU_SOFTWARE_FP is used to indicate whether or not there + * is software implemented floating point that must be context + * switched. The determination of whether or not this applies + * is very tool specific and the state saved/restored is also + * compiler specific. + * + * epiphany Specific Information: + * + * At this time there are no implementations of Epiphany that are + * expected to implement floating point. + */ + +#define CPU_HARDWARE_FP FALSE +#define CPU_SOFTWARE_FP FALSE + +/* + * Are all tasks RTEMS_FLOATING_POINT tasks implicitly? + * + * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed. + * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed. + * + * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well. + * + */ + +#define CPU_ALL_TASKS_ARE_FP FALSE + +/* + * Should the IDLE task have a floating point context? + * + * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task + * and it has a floating point context which is switched in and out. + * If FALSE, then the IDLE task does not have a floating point context. + * + * Setting this to TRUE negatively impacts the time required to preempt + * the IDLE task from an interrupt because the floating point context + * must be saved as part of the preemption. + * + */ + +#define CPU_IDLE_TASK_IS_FP FALSE + +/* + * Should the saving of the floating point registers be deferred + * until a context switch is made to another different floating point + * task? + * + * If TRUE, then the floating point context will not be stored until + * necessary. 
It will remain in the floating point registers and not + * disturned until another floating point task is switched to. + * + * If FALSE, then the floating point context is saved when a floating + * point task is switched out and restored when the next floating point + * task is restored. The state of the floating point registers between + * those two operations is not specified. + * + * If the floating point context does NOT have to be saved as part of + * interrupt dispatching, then it should be safe to set this to TRUE. + * + * Setting this flag to TRUE results in using a different algorithm + * for deciding when to save and restore the floating point context. + * The deferred FP switch algorithm minimizes the number of times + * the FP context is saved and restored. The FP context is not saved + * until a context switch is made to another, different FP task. + * Thus in a system with only one FP task, the FP context will never + * be saved or restored. + * + */ + +#define CPU_USE_DEFERRED_FP_SWITCH FALSE + +/* + * Does this port provide a CPU dependent IDLE task implementation? + * + * If TRUE, then the routine _CPU_Thread_Idle_body + * must be provided and is the default IDLE thread body instead of + * _CPU_Thread_Idle_body. + * + * If FALSE, then use the generic IDLE thread body if the BSP does + * not provide one. + * + * This is intended to allow for supporting processors which have + * a low power or idle mode. When the IDLE thread is executed, then + * the CPU can be powered down. + * + * The order of precedence for selecting the IDLE thread body is: + * + * 1. BSP provided + * 2. CPU dependent (if provided) + * 3. generic (if no BSP and no CPU dependent) + * + */ + +#define CPU_PROVIDES_IDLE_THREAD_BODY TRUE + +/* + * Does the stack grow up (toward higher addresses) or down + * (toward lower addresses)? + * + * If TRUE, then the grows upward. + * If FALSE, then the grows toward smaller addresses. 
+ * + */ + +#define CPU_STACK_GROWS_UP FALSE + +/* + * The following is the variable attribute used to force alignment + * of critical RTEMS structures. On some processors it may make + * sense to have these aligned on tighter boundaries than + * the minimum requirements of the compiler in order to have as + * much of the critical data area as possible in a cache line. + * + * The placement of this macro in the declaration of the variables + * is based on the syntactically requirements of the GNU C + * "__attribute__" extension. For example with GNU C, use + * the following to force a structures to a 32 byte boundary. + * + * __attribute__ ((aligned (32))) + * + * NOTE: Currently only the Priority Bit Map table uses this feature. + * To benefit from using this, the data must be heavily + * used so it will stay in the cache and used frequently enough + * in the executive to justify turning this on. + * + */ + +#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64))) + +/* + * Define what is required to specify how the network to host conversion + * routines are handled. + * + * epiphany Specific Information: + * + * This version of RTEMS is designed specifically to run with + * big endian architectures. If you want little endian, you'll + * have to make the appropriate adjustments here and write + * efficient routines for byte swapping. The epiphany architecture + * doesn't do this very well. + */ + +#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE +#define CPU_BIG_ENDIAN FALSE +#define CPU_LITTLE_ENDIAN TRUE + +/* + * The following defines the number of bits actually used in the + * interrupt field of the task mode. How those bits map to the + * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level(). + * + */ + +#define CPU_MODES_INTERRUPT_MASK 0x00000001 + +/* + * Processor defined structures required for cpukit/score. + */ + +/* + * Contexts + * + * Generally there are 2 types of context to save. + * 1. Interrupt registers to save + * 2. 
Task level registers to save + * + * This means we have the following 3 context items: + * 1. task level context stuff:: Context_Control + * 2. floating point task stuff:: Context_Control_fp + * 3. special interrupt level context :: Context_Control_interrupt + * + * On some processors, it is cost-effective to save only the callee + * preserved registers during a task context switch. This means + * that the ISR code needs to save those registers which do not + * persist across function calls. It is not mandatory to make this + * distinctions between the caller/callee saves registers for the + * purpose of minimizing context saved during task switch and on interrupts. + * If the cost of saving extra registers is minimal, simplicity is the + * choice. Save the same context on interrupt entry as for tasks in + * this case. + * + * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then + * care should be used in designing the context area. + * + * On some CPUs with hardware floating point support, the Context_Control_fp + * structure will not be used or it simply consist of an array of a + * fixed number of bytes. This is done when the floating point context + * is dumped by a "FP save context" type instruction and the format + * is not really defined by the CPU. In this case, there is no need + * to figure out the exact format -- only the size. Of course, although + * this is enough information for RTEMS, it is probably not enough for + * a debugger such as gdb. But that is another problem. + * + * + */ +#ifndef ASM + +typedef struct { + uint32_t r[64]; + + uint32_t status; + uint32_t config; + uint32_t iret; + +#ifdef RTEMS_SMP + /** + * @brief On SMP configurations the thread context must contain a boolean + * indicator to signal if this context is executing on a processor. + * + * This field must be updated during a context switch. 
The context switch + * to the heir must wait until the heir context indicates that it is no + * longer executing on a processor. The context switch must also check if + * a thread dispatch is necessary to honor updates of the heir thread for + * this processor. This indicator must be updated using an atomic test and + * set operation to ensure that at most one processor uses the heir + * context at the same time. + * + * @code + * void _CPU_Context_switch( + * Context_Control *executing, + * Context_Control *heir + * ) + * { + * save( executing ); + * + * executing->is_executing = false; + * memory_barrier(); + * + * if ( test_and_set( &heir->is_executing ) ) { + * do { + * Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot(); + * + * if ( cpu_self->dispatch_necessary ) { + * heir = _Thread_Get_heir_and_make_it_executing( cpu_self ); + * } + * } while ( test_and_set( &heir->is_executing ) ); + * } + * + * restore( heir ); + * } + * @endcode + */ + volatile bool is_executing; +#endif +} Context_Control; + +#define _CPU_Context_Get_SP( _context ) \ + (_context)->r[13] + +typedef struct { + /** FPU registers are listed here */ + double some_float_register; +} Context_Control_fp; + +typedef Context_Control CPU_Interrupt_frame; + +/* + * The size of the floating point context area. On some CPUs this + * will not be a "sizeof" because the format of the floating point + * area is not defined -- only the size is. This is usually on + * CPUs with a "floating point save context" instruction. + * + * epiphany Specific Information: + * + */ + +#define CPU_CONTEXT_FP_SIZE 0 +SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context; + +/* + * Amount of extra stack (above minimum stack size) required by + * MPCI receive server thread. Remember that in a multiprocessor + * system this thread must exist and be able to process all directives. + * + */ + +#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0 + +/* + * Should be large enough to run all RTEMS tests. 
This insures + * that a "reasonable" small application should not have any problems. + * + */ + +#define CPU_STACK_MINIMUM_SIZE 4096 + +/* + * CPU's worst alignment requirement for data types on a byte boundary. This + * alignment does not take into account the requirements for the stack. + * + */ + +#define CPU_ALIGNMENT 8 + +/* + * This is defined if the port has a special way to report the ISR nesting + * level. Most ports maintain the variable _ISR_Nest_level. + */ +#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE + +/* + * This number corresponds to the byte alignment requirement for the + * heap handler. This alignment requirement may be stricter than that + * for the data types alignment specified by CPU_ALIGNMENT. It is + * common for the heap to follow the same alignment requirement as + * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap, + * then this should be set to CPU_ALIGNMENT. + * + * NOTE: This does not have to be a power of 2 although it should be + * a multiple of 2 greater than or equal to 2. The requirement + * to be a multiple of 2 is because the heap uses the least + * significant field of the front and back flags to indicate + * that a block is in use or free. So you do not want any odd + * length blocks really putting length data in that bit. + * + * On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will + * have to be greater or equal to than CPU_ALIGNMENT to ensure that + * elements allocated from the heap meet all restrictions. + * + */ + +#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT + +/* + * This number corresponds to the byte alignment requirement for memory + * buffers allocated by the partition manager. This alignment requirement + * may be stricter than that for the data types alignment specified by + * CPU_ALIGNMENT. It is common for the partition to follow the same + * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict + * enough for the partition, then this should be set to CPU_ALIGNMENT. 
+ * + * NOTE: This does not have to be a power of 2. It does have to + * be greater or equal to than CPU_ALIGNMENT. + * + */ + +#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT + +/* + * This number corresponds to the byte alignment requirement for the + * stack. This alignment requirement may be stricter than that for the + * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT + * is strict enough for the stack, then this should be set to 0. + * + * NOTE: This must be a power of 2 either 0 or greater than CPU_ALIGNMENT. + * + */ + +#define CPU_STACK_ALIGNMENT 8 + +/* ISR handler macros */ + +/* + * Support routine to initialize the RTEMS vector table after it is allocated. + * + * NO_CPU Specific Information: + * + * XXX document implementation including references if appropriate + */ + +#define _CPU_Initialize_vectors() + +/* + * Disable all interrupts for an RTEMS critical section. The previous + * level is returned in _level. + * + */ + +static inline uint32_t epiphany_interrupt_disable( void ) +{ + uint32_t sr; + __asm__ __volatile__ ("movfs %[sr], status \n" : [sr] "=r" (sr):); + __asm__ __volatile__("gid \n"); + return sr; +} + +static inline void epiphany_interrupt_enable(uint32_t level) +{ + __asm__ __volatile__("gie \n"); + __asm__ __volatile__ ("movts status, %[level] \n" :: [level] "r" (level):); +} + +#define _CPU_ISR_Disable( _level ) \ + _level = epiphany_interrupt_disable() + +/* + * Enable interrupts to the previous level (returned by _CPU_ISR_Disable). + * This indicates the end of an RTEMS critical section. The parameter + * _level is not modified. + * + */ + +#define _CPU_ISR_Enable( _level ) \ + epiphany_interrupt_enable( _level ) + +/* + * This temporarily restores the interrupt to _level before immediately + * disabling them again. This is used to divide long RTEMS critical + * sections into two or more parts. The parameter _level is not + * modified. 
+ * + */ + +#define _CPU_ISR_Flash( _level ) \ + do{ \ + if ( (_level & 0x2) != 0 ) \ + _CPU_ISR_Enable( _level ); \ + epiphany_interrupt_disable(); \ + } while(0) + +/* + * Map interrupt level in task mode onto the hardware that the CPU + * actually provides. Currently, interrupt levels which do not + * map onto the CPU in a generic fashion are undefined. Someday, + * it would be nice if these were "mapped" by the application + * via a callout. For example, m68k has 8 levels 0 - 7, levels + * 8 - 255 would be available for bsp/application specific meaning. + * This could be used to manage a programmable interrupt controller + * via the rtems_task_mode directive. + * + * The get routine usually must be implemented as a subroutine. + * + */ + +void _CPU_ISR_Set_level( uint32_t level ); + +uint32_t _CPU_ISR_Get_level( void ); + +/* end of ISR handler macros */ + +/* Context handler macros */ + +/* + * Initialize the context to a state suitable for starting a + * task after a context restore operation. Generally, this + * involves: + * + * - setting a starting address + * - preparing the stack + * - preparing the stack and frame pointers + * - setting the proper interrupt level in the context + * - initializing the floating point context + * + * This routine generally does not set any unnecessary register + * in the context. The state of the "general data" registers is + * undefined at task start time. + * + * NOTE: This is_fp parameter is TRUE if the thread is to be a floating + * point thread. This is typically only used on CPUs where the + * FPU may be easily disabled by software such as on the SPARC + * where the PSR contains an enable FPU bit. + * + */ + +/** + * @brief Account for GCC red-zone + * + * The following macro is used when initializing task's stack + * to account for GCC red-zone. + */ + +#define EPIPHANY_GCC_RED_ZONE_SIZE 128 + +/** + * @brief Initializes the CPU context. 
+ * + * The following steps are performed: + * - setting a starting address + * - preparing the stack + * - preparing the stack and frame pointers + * - setting the proper interrupt level in the context + * + * @param[in] context points to the context area + * @param[in] stack_area_begin is the low address of the allocated stack area + * @param[in] stack_area_size is the size of the stack area in bytes + * @param[in] new_level is the interrupt level for the task + * @param[in] entry_point is the task's entry point + * @param[in] is_fp is set to @c true if the task is a floating point task + * @param[in] tls_area is the thread-local storage (TLS) area + */ +void _CPU_Context_Initialize( + Context_Control *context, + void *stack_area_begin, + size_t stack_area_size, + uint32_t new_level, + void (*entry_point)( void ), + bool is_fp, + void *tls_area +); + +/* + * This routine is responsible for somehow restarting the currently + * executing task. If you are lucky, then all that is necessary + * is restoring the context. Otherwise, there will need to be + * a special assembly routine which does something special in this + * case. Context_Restore should work most of the time. It will + * not work if restarting self conflicts with the stack frame + * assumptions of restoring a context. + * + */ + +#define _CPU_Context_Restart_self( _the_context ) \ + _CPU_Context_restore( (_the_context) ) + +/* + * The purpose of this macro is to allow the initial pointer into + * a floating point context area (used to save the floating point + * context) to be at an arbitrary place in the floating point + * context area. + * + * This is necessary because some FP units are designed to have + * their context saved as a stack which grows into lower addresses. + * Other FP units can be saved by simply moving registers into offsets + * from the base of the context area. 
Finally some FP units provide + * a "dump context" instruction which could fill in from high to low + * or low to high based on the whim of the CPU designers. + * + */ + +#define _CPU_Context_Fp_start( _base, _offset ) \ + ( (void *) _Addresses_Add_offset( (_base), (_offset) ) ) + +/* + * This routine initializes the FP context area passed to it to. + * There are a few standard ways in which to initialize the + * floating point context. The code included for this macro assumes + * that this is a CPU in which a "initial" FP context was saved into + * _CPU_Null_fp_context and it simply copies it to the destination + * context passed to it. + * + * Other models include (1) not doing anything, and (2) putting + * a "null FP status word" in the correct place in the FP context. + * + */ + +#define _CPU_Context_Initialize_fp( _destination ) \ + { \ + *(*(_destination)) = _CPU_Null_fp_context; \ + } + +/* end of Context handler macros */ + +/* Fatal Error manager macros */ + +/* + * This routine copies _error into a known place -- typically a stack + * location or a register, optionally disables interrupts, and + * halts/stops the CPU. + * + */ + +#define _CPU_Fatal_halt(_source, _error ) \ + printk("Fatal Error %d.%d Halted\n",_source, _error); \ + asm("trap 3" :: "r" (_error)); \ + for(;;) + +/* end of Fatal Error manager macros */ + +/* Bitfield handler macros */ + +/* + * This routine sets _output to the bit number of the first bit + * set in _value. _value is of CPU dependent type Priority_Bit_map_control. + * This type may be either 16 or 32 bits wide although only the 16 + * least significant bits will be used. + * + * There are a number of variables in using a "find first bit" type + * instruction. + * + * (1) What happens when run on a value of zero? + * (2) Bits may be numbered from MSB to LSB or vice-versa. + * (3) The numbering may be zero or one based. + * (4) The "find first bit" instruction may search from MSB or LSB. 
+ * + * RTEMS guarantees that (1) will never happen so it is not a concern. + * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and + * _CPU_Priority_bits_index(). These three form a set of routines + * which must logically operate together. Bits in the _value are + * set and cleared based on masks built by _CPU_Priority_mask(). + * The basic major and minor values calculated by _Priority_Major() + * and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index() + * to properly range between the values returned by the "find first bit" + * instruction. This makes it possible for _Priority_Get_highest() to + * calculate the major and directly index into the minor table. + * This mapping is necessary to ensure that 0 (a high priority major/minor) + * is the first bit found. + * + * This entire "find first bit" and mapping process depends heavily + * on the manner in which a priority is broken into a major and minor + * components with the major being the 4 MSB of a priority and minor + * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest + * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next + * to the lowest priority. + * + * If your CPU does not have a "find first bit" instruction, then + * there are ways to make do without it. 
Here are a handful of ways + * to implement this in software: + * + * - a series of 16 bit test instructions + * - a "binary search using if's" + * - _number = 0 + * if _value > 0x00ff + * _value >>=8 + * _number = 8; + * + * if _value > 0x0000f + * _value >=8 + * _number += 4 + * + * _number += bit_set_table[ _value ] + * + * where bit_set_table[ 16 ] has values which indicate the first + * bit set + * + */ + + /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */ +#define CPU_USE_GENERIC_BITFIELD_CODE TRUE +#define CPU_USE_GENERIC_BITFIELD_DATA TRUE + +#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE) + +#define _CPU_Bitfield_Find_first_bit( _value, _output ) \ + { \ + (_output) = 0; /* do something to prevent warnings */ \ + } +#endif + +/* end of Bitfield handler macros */ + +/* + * This routine builds the mask which corresponds to the bit fields + * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion + * for that routine. + * + */ + +#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE) + +#define _CPU_Priority_Mask( _bit_number ) \ + (1 << _bit_number) + +#endif + +/* + * This routine translates the bit numbers returned by + * _CPU_Bitfield_Find_first_bit() into something suitable for use as + * a major or minor component of a priority. See the discussion + * for that routine. + * + */ + +#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE) + +#define _CPU_Priority_bits_index( _priority ) \ + (_priority) + +#endif + +#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE +#define CPU_TIMESTAMP_USE_INT64 TRUE +#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE + +typedef struct { +/* There is no CPU specific per-CPU state */ +} CPU_Per_CPU_control; +#endif /* ASM */ + +/** + * Size of a pointer. + * + * This must be an integer literal that can be used by the assembler. This + * value will be used to calculate offsets of structure members. These + * offsets will be used in assembler code. 
+ */ +#define CPU_SIZEOF_POINTER 4 +#define CPU_EXCEPTION_FRAME_SIZE 260 +#define CPU_PER_CPU_CONTROL_SIZE 0 + +#ifndef ASM +typedef uint16_t Priority_bit_map_Word; + +typedef struct { + uint32_t r[62]; + uint32_t status; + uint32_t config; + uint32_t iret; +} CPU_Exception_frame; + +/** + * @brief Prints the exception frame via printk(). + * + * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION. + */ +void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ); + + +/* end of Priority handler macros */ + +/* functions */ + +/* + * _CPU_Initialize + * + * This routine performs CPU dependent initialization. + * + */ + +void _CPU_Initialize( + void +); + +/* + * _CPU_ISR_install_raw_handler + * + * This routine installs a "raw" interrupt handler directly into the + * processor's vector table. + * + */ + +void _CPU_ISR_install_raw_handler( + uint32_t vector, + proc_ptr new_handler, + proc_ptr *old_handler +); + +/* + * _CPU_ISR_install_vector + * + * This routine installs an interrupt vector. + * + * NO_CPU Specific Information: + * + * XXX document implementation including references if appropriate + */ + +void _CPU_ISR_install_vector( + uint32_t vector, + proc_ptr new_handler, + proc_ptr *old_handler +); + +/* + * _CPU_Install_interrupt_stack + * + * This routine installs the hardware interrupt stack pointer. + * + * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK + * is TRUE. + * + */ + +void _CPU_Install_interrupt_stack( void ); + +/* + * _CPU_Thread_Idle_body + * + * This routine is the CPU dependent IDLE thread body. + * + * NOTE: It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY + * is TRUE. + * + */ + +void *_CPU_Thread_Idle_body( uintptr_t ignored ); + +/* + * _CPU_Context_switch + * + * This routine switches from the run context to the heir context. + * + * epiphany Specific Information: + * + * Please see the comments in the .c file for a description of how + * this function works. 
There are several things to be aware of. + */ + +void _CPU_Context_switch( + Context_Control *run, + Context_Control *heir +); + +/* + * _CPU_Context_restore + * + * This routine is generally used only to restart self in an + * efficient manner. It may simply be a label in _CPU_Context_switch. + * + * NOTE: May be unnecessary to reload some registers. + * + */ + +void _CPU_Context_restore( + Context_Control *new_context +) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE; + +/* + * _CPU_Context_save_fp + * + * This routine saves the floating point context passed to it. + * + */ + +void _CPU_Context_save_fp( + void **fp_context_ptr +); + +/* + * _CPU_Context_restore_fp + * + * This routine restores the floating point context passed to it. + * + */ + +void _CPU_Context_restore_fp( + void **fp_context_ptr +); + +/* The following routine swaps the endian format of an unsigned int. + * It must be static because it is referenced indirectly. + * + * This version will work on any processor, but if there is a better + * way for your CPU PLEASE use it. The most common way to do this is to: + * + * swap least significant two bytes with 16-bit rotate + * swap upper and lower 16-bits + * swap most significant two bytes with 16-bit rotate + * + * Some CPUs have special instructions which swap a 32-bit quantity in + * a single instruction (e.g. i486). It is probably best to avoid + * an "endian swapping control bit" in the CPU. One good reason is + * that interrupts would probably have to be disabled to insure that + * an interrupt does not try to access the same "chunk" with the wrong + * endian. Another good reason is that on some CPUs, the endian bit + * endianness for ALL fetches -- both code and data -- so the code + * will be fetched incorrectly. 
+ * + */ + +static inline unsigned int CPU_swap_u32( + unsigned int value +) +{ + uint32_t byte1, byte2, byte3, byte4, swapped; + + byte4 = (value >> 24) & 0xff; + byte3 = (value >> 16) & 0xff; + byte2 = (value >> 8) & 0xff; + byte1 = value & 0xff; + + swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4; + return( swapped ); +} + +#define CPU_swap_u16( value ) \ + (((value&0xff) << 8) | ((value >> 8)&0xff)) + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) +{ + /* TODO */ +} + +static inline void _CPU_Context_validate( uintptr_t pattern ) +{ + while (1) { + /* TODO */ + } +} + +typedef uint32_t CPU_Counter_ticks; + +CPU_Counter_ticks _CPU_Counter_read( void ); + +static inline CPU_Counter_ticks _CPU_Counter_difference( + CPU_Counter_ticks second, + CPU_Counter_ticks first +) +{ + return second - first; +} + +#ifdef RTEMS_SMP + /** + * @brief Performs CPU specific SMP initialization in the context of the boot + * processor. + * + * This function is invoked on the boot processor during system + * initialization. All interrupt stacks are allocated at this point in case + * the CPU port allocates the interrupt stacks. This function is called + * before _CPU_SMP_Start_processor() or _CPU_SMP_Finalize_initialization() is + * used. + * + * @return The count of physically or virtually available processors. + * Depending on the configuration the application may use not all processors. + */ + uint32_t _CPU_SMP_Initialize( void ); + + /** + * @brief Starts a processor specified by its index. + * + * This function is invoked on the boot processor during system + * initialization. + * + * This function will be called after _CPU_SMP_Initialize(). + * + * @param[in] cpu_index The processor index. + * + * @retval true Successful operation. + * @retval false Unable to start this processor. 
+ */ + bool _CPU_SMP_Start_processor( uint32_t cpu_index ); + + /** + * @brief Performs final steps of CPU specific SMP initialization in the + * context of the boot processor. + * + * This function is invoked on the boot processor during system + * initialization. + * + * This function will be called after all processors requested by the + * application have been started. + * + * @param[in] cpu_count The minimum value of the count of processors + * requested by the application configuration and the count of physically or + * virtually available processors. + */ + void _CPU_SMP_Finalize_initialization( uint32_t cpu_count ); + + /** + * @brief Returns the index of the current processor. + * + * An architecture specific method must be used to obtain the index of the + * current processor in the system. The set of processor indices is the + * range of integers starting with zero up to the processor count minus one. + */ + uint32_t _CPU_SMP_Get_current_processor( void ); + + /** + * @brief Sends an inter-processor interrupt to the specified target + * processor. + * + * This operation is undefined for target processor indices out of range. + * + * @param[in] target_processor_index The target processor index. + */ + void _CPU_SMP_Send_interrupt( uint32_t target_processor_index ); + + /** + * @brief Broadcasts a processor event. + * + * Some architectures provide a low-level synchronization primitive for + * processors in a multi-processor environment. Processors waiting for this + * event may go into a low-power state and stop generating system bus + * transactions. This function must ensure that preceding store operations + * can be observed by other processors. + * + * @see _CPU_SMP_Processor_event_receive(). + */ + void _CPU_SMP_Processor_event_broadcast( void ); + + /** + * @brief Receives a processor event. + * + * This function will wait for the processor event and may wait forever if no + * such event arrives. + * + * @see _CPU_SMP_Processor_event_broadcast(). 
+ */ + static inline void _CPU_SMP_Processor_event_receive( void ) + { + __asm__ volatile ( "" : : : "memory" ); + } + + /** + * @brief Gets the is executing indicator of the thread context. + * + * @param[in] context The context. + */ + static inline bool _CPU_Context_Get_is_executing( + const Context_Control *context + ) + { + return context->is_executing; + } + + /** + * @brief Sets the is executing indicator of the thread context. + * + * @param[in] context The context. + * @param[in] is_executing The new value for the is executing indicator. + */ + static inline void _CPU_Context_Set_is_executing( + Context_Control *context, + bool is_executing + ) + { + context->is_executing = is_executing; + } +#endif /* RTEMS_SMP */ + +#endif /* ASM */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/cpukit/score/cpu/epiphany/rtems/score/cpu_asm.h b/cpukit/score/cpu/epiphany/rtems/score/cpu_asm.h new file mode 100644 index 0000000000..cc091fa909 --- /dev/null +++ b/cpukit/score/cpu/epiphany/rtems/score/cpu_asm.h @@ -0,0 +1,74 @@ +/** + * @file + * + * @brief Epiphany Assembly File + * + * Very loose template for an include file for the cpu_asm.? file + * if it is implemented as a ".S" file (preprocessed by cpp) instead + * of a ".s" file (preprocessed by gm4 or gasp). + */ + +/* + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ * + */ + +#ifndef _RTEMS_SCORE_CPU_ASM_H +#define _RTEMS_SCORE_CPU_ASM_H + +/* pull in the generated offsets */ + +/* +#include <rtems/score/offsets.h> +*/ + +/* + * Hardware General Registers + */ + +/* put something here */ + +/* + * Hardware Floating Point Registers + */ + +/* put something here */ + +/* + * Hardware Control Registers + */ + +/* put something here */ + +/* + * Calling Convention + */ + +/* put something here */ + +/* + * Temporary registers + */ + +/* put something here */ + +/* + * Floating Point Registers - SW Conventions + */ + +/* put something here */ + +/* + * Temporary floating point registers + */ + +/* put something here */ + +#endif + +/* end of file */ diff --git a/cpukit/score/cpu/epiphany/rtems/score/cpuatomic.h b/cpukit/score/cpu/epiphany/rtems/score/cpuatomic.h new file mode 100644 index 0000000000..598ee76b20 --- /dev/null +++ b/cpukit/score/cpu/epiphany/rtems/score/cpuatomic.h @@ -0,0 +1,14 @@ +/* + * COPYRIGHT (c) 2012-2013 Deng Hengyi. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_ATOMIC_CPU_H +#define _RTEMS_SCORE_ATOMIC_CPU_H + +#include <rtems/score/cpustdatomic.h> + +#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/epiphany/rtems/score/epiphany-utility.h b/cpukit/score/cpu/epiphany/rtems/score/epiphany-utility.h new file mode 100644 index 0000000000..bf223f93bf --- /dev/null +++ b/cpukit/score/cpu/epiphany/rtems/score/epiphany-utility.h @@ -0,0 +1,180 @@ +/** + * @file + * + * @ingroup ScoreCPU + * + * @brief This include file contains macros pertaining to the + * Epiphany processor family. + */ + +/* + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _EPIPHANY_UTILITY_H +#define _EPIPHANY_UTILITY_H + +/* eCore IRQs */ +typedef enum +{ + START, + SW_EXCEPTION, + MEM_FAULT, + TIMER0, + TIMER1, + SMP_MESSAGE, + DMA0, + DMA1, + SER, +} EPIPHANY_IRQ_PER_CORE_T; + +/* Per-core IO mapped register addresses + * @see Epiphany architecture reference. 
+ */ +#define EPIPHANY_PER_CORE_REG_CONFIG 0xF0400 +#define EPIPHANY_PER_CORE_REG_STATUS 0xF0404 +#define EPIPHANY_PER_CORE_REG_PC 0xF0408 +#define EPIPHANY_PER_CORE_REG_DEBUGSTATUS 0xF040C +#define EPIPHANY_PER_CORE_REG_LC 0xF0414 +#define EPIPHANY_PER_CORE_REG_LS 0xF0418 +#define EPIPHANY_PER_CORE_REG_LE 0xF041C +#define EPIPHANY_PER_CORE_REG_IRET 0xF0420 +#define EPIPHANY_PER_CORE_REG_IMASK 0xF0424 +#define EPIPHANY_PER_CORE_REG_ILAT 0xF0428 +#define EPIPHANY_PER_CORE_REG_ILATST 0xF042C +#define EPIPHANY_PER_CORE_REG_ILATCL 0xF0430 +#define EPIPHANY_PER_CORE_REG_IPEND 0xF0434 +#define EPIPHANY_PER_CORE_REG_FSTATUS 0xF0440 +#define EPIPHANY_PER_CORE_REG_DEBUGCMD 0xF0448 +#define EPIPHANY_PER_CORE_REG_RESETCORE 0xF070C + +/* Event timer registers */ +#define EPIPHANY_PER_CORE_REG_CTIMER0 0xF0438 +#define EPIPHANY_PER_CORE_REG_CTIMER1 0xF043C + +/* Processor control registers */ +#define EPIPHANY_PER_CORE_REG_MEMSTATUS 0xF0604 +#define EPIPHANY_PER_CORE_REG_MEMPROTECT 0xF0608 + +/* DMA Registers */ +#define EPIPHANY_PER_CORE_REG_DMA0CONFIG 0xF0500 +#define EPIPHANY_PER_CORE_REG_DMA0STRIDE 0xF0504 +#define EPIPHANY_PER_CORE_REG_DMA0COUNT 0xF0508 +#define EPIPHANY_PER_CORE_REG_DMA0SRCADDR 0xF050C +#define EPIPHANY_PER_CORE_REG_DMA0DSTADDR 0xF0510 +#define EPIPHANY_PER_CORE_REG_DMA0AUTO0 0xF0514 +#define EPIPHANY_PER_CORE_REG_DMA0AUTO1 0xF0518 +#define EPIPHANY_PER_CORE_REG_DMA0STATUS 0xF051C +#define EPIPHANY_PER_CORE_REG_DMA1CONFIG 0xF0520 +#define EPIPHANY_PER_CORE_REG_DMA1STRIDE 0xF0524 +#define EPIPHANY_PER_CORE_REG_DMA1COUNT 0xF0528 +#define EPIPHANY_PER_CORE_REG_DMA1SRCADDR 0xF052C +#define EPIPHANY_PER_CORE_REG_DMA1DSTADDR 0xF0530 +#define EPIPHANY_PER_CORE_REG_DMA1AUTO0 0xF0534 +#define EPIPHANY_PER_CORE_REG_DMA1AUTO1 0xF0538 +#define EPIPHANY_PER_CORE_REG_DMA1STATUS 0xF053C + +/* Mesh Node Control Registers */ +#define EPIPHANY_PER_CORE_REG_MESHCONFIG 0xF0700 +#define EPIPHANY_PER_CORE_REG_COREID 0xF0704 +#define EPIPHANY_PER_CORE_REG_MULTICAST 0xF0708 
+#define EPIPHANY_PER_CORE_REG_CMESHROUTE 0xF0710 +#define EPIPHANY_PER_CORE_REG_XMESHROUTE 0xF0714 +#define EPIPHANY_PER_CORE_REG_RMESHROUTE 0xF0718 + +/* This macros constructs an address space of epiphany cores + * from their IDs. + */ +#define EPIPHANY_COREID_TO_MSB_ADDR(id) (id) << 20 + +/* Construct a complete/absolute IO mapped address register from + * core ID and register name + */ +#define EPIPHANY_GET_REG_ABSOLUTE_ADDR(coreid, reg) \ + (EPIPHANY_COREID_TO_MSB_ADDR(coreid) | (reg)) + +#define EPIPHANY_REG(reg) (uint32_t *) (reg) + +/* Read register with its absolute address */ +static inline uint32_t read_epiphany_reg(volatile uint32_t reg_addr) +{ + return *(EPIPHANY_REG(reg_addr)); +} + +/* Write register with its abolute address */ +static inline void write_epiphany_reg(volatile uint32_t reg_addr, uint32_t val) +{ + *(EPIPHANY_REG(reg_addr)) = val; +} + +/* Epiphany uses 12 bits for defining core IDs, while RTEMS uses + * linear IDs. The following function converts RTEMS linear IDs to + * Epiphany corresponding ones + */ +static const uint32_t map[16] = +{ + 0x808, 0x809, 0x80A, 0x80B, + 0x848, 0x849, 0x84A, 0x84B, + 0x888, 0x889, 0x88A, 0x88B, + 0x8C8, 0x8C9, 0x8CA, 0x8CB +}; + +static inline uint32_t rtems_coreid_to_epiphany_map(uint32_t rtems_id) +{ + return map[rtems_id]; +} + +/* Epiphany uses 12 bits for defining core IDs, while RTEMS uses + * linear IDs. The following function is used to map Epiphany IDs to + * RTEMS linear IDs. 
+ */ +static inline uint32_t epiphany_coreid_to_rtems_map(uint32_t epiphany_id) +{ + register uint32_t coreid asm ("r17") = epiphany_id; + + /* Mapping from Epiphany IDs to 0-16 IDs macro */ + __asm__ __volatile__(" \ + movfs r17, coreid \ + mov r19, #0x003 \ + mov r20, #0x0F0 \ + and r19, r17, r19 \ + and r20, r17, r20 \ + lsr r20, r20, #4 \ + add r17, r19, r20 \ + "); + + /* coreid or r17 now holds the rtems core id */ + return coreid; +} + +static inline uint32_t _Epiphany_Get_current_processor() +{ + uint32_t coreid; + + asm volatile ("movfs %0, coreid" : "=r" (coreid): ); + + return epiphany_coreid_to_rtems_map(coreid); +} +#endif /* _EPIPHANY_UTILITY_H */ diff --git a/cpukit/score/cpu/epiphany/rtems/score/epiphany.h b/cpukit/score/cpu/epiphany/rtems/score/epiphany.h new file mode 100644 index 0000000000..60d975581f --- /dev/null +++ b/cpukit/score/cpu/epiphany/rtems/score/epiphany.h @@ -0,0 +1,64 @@ +/** + * @file rtems/score/epiphany.h + */ + +/* + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * COPYRIGHT (c) 1989-1999, 2010. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _RTEMS_SCORE_EPIPHANY_H +#define _RTEMS_SCORE_EPIPHANY_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the Epiphany family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + + /* + * Define the name of the CPU family and specific model. + */ + +#define CPU_NAME "EPIPHANY" +#define CPU_MODEL_NAME "EPIPHANY" + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_EPIPHANY_H */ diff --git a/cpukit/score/cpu/epiphany/rtems/score/types.h b/cpukit/score/cpu/epiphany/rtems/score/types.h new file mode 100644 index 0000000000..5b6c503739 --- /dev/null +++ b/cpukit/score/cpu/epiphany/rtems/score/types.h @@ -0,0 +1,68 @@ +/** + * @file + * + * @brief Epiphany Architecture Types API + */ + +/* + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + #ifndef _RTEMS_SCORE_TYPES_H +#define _RTEMS_SCORE_TYPES_H + +#include <rtems/score/basedefs.h> + +#ifndef ASM + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreCPU + */ +/**@{**/ + +/* + * This section defines the basic types for this processor. + */ + +/** Type that can store a 32-bit integer or a pointer. 
*/ +typedef uintptr_t CPU_Uint32ptr; + +typedef uint16_t Priority_bit_map_Word; +typedef void epiphany_isr; +typedef void ( *epiphany_isr_entry )( void ); + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* !ASM */ + +#endif diff --git a/cpukit/score/cpu/sparc/Makefile.am b/cpukit/score/cpu/sparc/Makefile.am index c6ea1c3da9..8cf4f4a65a 100644 --- a/cpukit/score/cpu/sparc/Makefile.am +++ b/cpukit/score/cpu/sparc/Makefile.am @@ -11,6 +11,8 @@ include_rtems_score_HEADERS += rtems/score/cpuatomic.h noinst_LIBRARIES = libscorecpu.a libscorecpu_a_SOURCES = cpu.c cpu_asm.S +libscorecpu_a_SOURCES += sparc-context-volatile-clobber.S +libscorecpu_a_SOURCES += sparc-context-validate.S libscorecpu_a_SOURCES += sparc-counter.c libscorecpu_a_SOURCES += sparcv8-atomic.c libscorecpu_a_CPPFLAGS = $(AM_CPPFLAGS) diff --git a/cpukit/score/cpu/sparc/rtems/score/cpu.h b/cpukit/score/cpu/sparc/rtems/score/cpu.h index 235b3652ac..64e87509b4 100644 --- a/cpukit/score/cpu/sparc/rtems/score/cpu.h +++ b/cpukit/score/cpu/sparc/rtems/score/cpu.h @@ -1216,17 +1216,9 @@ void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr ); -static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) -{ - /* TODO */ -} +void _CPU_Context_volatile_clobber( uintptr_t pattern ); -static inline void _CPU_Context_validate( uintptr_t pattern ) -{ - while (1) { - /* TODO */ - } -} +void _CPU_Context_validate( uintptr_t pattern ); typedef struct { uint32_t trap; diff --git a/cpukit/score/cpu/sparc/sparc-context-validate.S b/cpukit/score/cpu/sparc/sparc-context-validate.S new file mode 100644 index 0000000000..0ee7177d73 --- /dev/null +++ b/cpukit/score/cpu/sparc/sparc-context-validate.S @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 
4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/asm.h> +#include <rtems/score/cpu.h> + +#define FRAME_OFFSET_BUFFER (CPU_MINIMUM_STACK_FRAME_SIZE) +#define FRAME_OFFSET_L0 (FRAME_OFFSET_BUFFER + 0x04) +#define FRAME_OFFSET_L1 (FRAME_OFFSET_L0 + 0x04) +#define FRAME_OFFSET_L2 (FRAME_OFFSET_L1 + 0x04) +#define FRAME_OFFSET_L3 (FRAME_OFFSET_L2 + 0x04) +#define FRAME_OFFSET_L4 (FRAME_OFFSET_L3 + 0x04) +#define FRAME_OFFSET_L5 (FRAME_OFFSET_L4 + 0x04) +#define FRAME_OFFSET_L6 (FRAME_OFFSET_L5 + 0x04) +#define FRAME_OFFSET_L7 (FRAME_OFFSET_L6 + 0x04) +#define FRAME_OFFSET_I0 (FRAME_OFFSET_L7 + 0x04) +#define FRAME_OFFSET_I1 (FRAME_OFFSET_I0 + 0x04) +#define FRAME_OFFSET_I2 (FRAME_OFFSET_I1 + 0x04) +#define FRAME_OFFSET_I3 (FRAME_OFFSET_I2 + 0x04) +#define FRAME_OFFSET_I4 (FRAME_OFFSET_I3 + 0x04) +#define FRAME_OFFSET_I5 (FRAME_OFFSET_I4 + 0x04) +#define FRAME_OFFSET_I6 (FRAME_OFFSET_I5 + 0x04) +#define FRAME_OFFSET_I7 (FRAME_OFFSET_I6 + 0x04) +#define FRAME_OFFSET_SP (FRAME_OFFSET_I7 + 0x04) +#define FRAME_END (FRAME_OFFSET_SP + 0x04) +#define FRAME_SIZE \ + ((FRAME_END + CPU_STACK_ALIGNMENT - 1) & ~(CPU_STACK_ALIGNMENT - 1)) + +.macro check_register reg + sub %g1, 1, %g1 + cmp %g1, \reg + bne restore_registers + nop +.endm + +.macro check_float_register reg + sub %g1, 1, %g1 + st \reg, [%sp + FRAME_OFFSET_BUFFER] + ld [%sp + FRAME_OFFSET_BUFFER], %o1 + cmp %g1, %o1 + bne restore_registers + nop +.endm + +.macro check_fsr_register reg + st \reg, [%sp + FRAME_OFFSET_BUFFER] + ld [%sp + FRAME_OFFSET_BUFFER], %o1 + sub %g1, 1, %g1 + clr %g3 + sethi %hi(0xCF800000), %g3 + or %g3, %lo(0x0FFF), %g3 + and %g1, %g3, %g3 + and %o1, %g3, %o1 + cmp %o1, %g3 + bne restore_registers + nop +.endm + +.macro write_register reg + add 
%g1, 1, %g1 + mov %g1, \reg +.endm + +.macro write_float_register reg + add %g1, 1, %g1 + st %g1, [%sp + FRAME_OFFSET_BUFFER] + ld [%sp + FRAME_OFFSET_BUFFER], \reg +.endm + +.macro write_fsr_register reg + st \reg, [%sp + FRAME_OFFSET_BUFFER] + ld [%sp + FRAME_OFFSET_BUFFER], %o1 + add %g1, 1, %g1 + clr %g3 + + /* + * FSR is masked with undefined, reserved or system-specific values + * (e.g. FPU architecture version, FP queue). + */ + sethi %hi(0xCF800000), %g3 + or %g3, %lo(0x0FFF), %g3 + and %g1, %g3, %g3 + or %o1, %g3, %g3 + st %g3, [%sp + FRAME_OFFSET_BUFFER] + ld [%sp + FRAME_OFFSET_BUFFER], \reg +.endm + + .align 4 + PUBLIC(_CPU_Context_validate) +SYM(_CPU_Context_validate): + + /* + * g2 checks if the Floating Point Unit in the Processor Status + * Register (PSR) is set. + */ + mov %psr, %g2 + sethi %hi(SPARC_PSR_EF_MASK), %g3 + and %g2, %g3, %g2 + + /* g1 is used to save the original pattern */ + mov %o0, %g1 + + /* g4 establishes window counter */ + clr %g4 + + add %sp, -FRAME_SIZE, %sp + + st %l0, [%sp + FRAME_OFFSET_L0] + st %l1, [%sp + FRAME_OFFSET_L1] + st %l2, [%sp + FRAME_OFFSET_L2] + st %l3, [%sp + FRAME_OFFSET_L3] + st %l4, [%sp + FRAME_OFFSET_L4] + st %l5, [%sp + FRAME_OFFSET_L5] + st %l6, [%sp + FRAME_OFFSET_L6] + st %l7, [%sp + FRAME_OFFSET_L7] + st %i0, [%sp + FRAME_OFFSET_I0] + st %i1, [%sp + FRAME_OFFSET_I1] + st %i2, [%sp + FRAME_OFFSET_I2] + st %i3, [%sp + FRAME_OFFSET_I3] + st %i4, [%sp + FRAME_OFFSET_I4] + st %i5, [%sp + FRAME_OFFSET_I5] + st %i6, [%sp + FRAME_OFFSET_I6] + st %i7, [%sp + FRAME_OFFSET_I7] + st %sp, [%sp + FRAME_OFFSET_SP] + + cmp %g4, 0 + bne write_locals_and_outputs + nop + be check_for_fp + nop + +new_check_cycle: + clr %g4 + sub %g1, 1, %g1 + + /* Write pattern values into registers */ + +check_for_fp: + cmp %g2, 0 + be write_y + nop + + write_fsr_register %fsr + write_float_register %f0 + write_float_register %f1 + write_float_register %f2 + write_float_register %f3 + write_float_register %f4 + write_float_register 
%f5 + write_float_register %f6 + write_float_register %f7 + write_float_register %f8 + write_float_register %f9 + write_float_register %f10 + write_float_register %f11 + write_float_register %f12 + write_float_register %f13 + write_float_register %f14 + write_float_register %f15 + write_float_register %f16 + write_float_register %f17 + write_float_register %f18 + write_float_register %f19 + write_float_register %f20 + write_float_register %f21 + write_float_register %f22 + write_float_register %f23 + write_float_register %f24 + write_float_register %f25 + write_float_register %f26 + write_float_register %f27 + write_float_register %f28 + write_float_register %f29 + write_float_register %f30 + write_float_register %f31 + +write_y: + write_register %y + + write_register %i0 + write_register %i1 + write_register %i2 + write_register %i3 + write_register %i4 + write_register %i5 + /* Don't write register $i6 => frame pointer */ + /* Don't write register $i7 => return address */ + b write_locals_and_outputs + nop + +switch_to_next_window: + save %sp, -FRAME_SIZE, %sp + +write_locals_and_outputs: + /* l0 is used as a scratch register */ + write_register %l1 + write_register %l2 + write_register %l3 + write_register %l4 + write_register %l5 + write_register %l6 + write_register %l7 + write_register %o1 + write_register %o2 + write_register %o3 + write_register %o4 + write_register %o5 + /* Don't write register $o6 => stack pointer */ + /* Don't write register $o7 => return address */ + + add %g4, 1, %g4 + cmp %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS + bne switch_to_next_window + nop + + /* Dummy increment to set up reverse mechanism for checking process */ + add %g1, 1, %g1 + clr %g4 + + /* Checking begins here */ +window_checking: + cmp %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS + be y_checking + nop + +further_checking: + cmp %g4, 0 + bne goto_local_registers + nop + + /* Check normal registers */ + check_register %o5 + check_register %o4 + check_register %o3 + check_register 
%o2 + check_register %o1 + +goto_local_registers: + check_register %l7 + check_register %l6 + check_register %l5 + check_register %l4 + check_register %l3 + check_register %l2 + check_register %l1 + + check_register %i5 + check_register %i4 + check_register %i3 + check_register %i2 + check_register %i1 + /* + For the last window i0 also needs to be checked as this variable + is not overwritten by the outputs of another window. + */ + add %g4, 1, %g4 + cmp %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS + bne dont_check_i0 + nop + check_register %i0 + b y_checking + nop + +dont_check_i0: + restore + + ba window_checking + nop + + /* Check Y register */ +y_checking: + mov %y, %o1 + check_register %o1 + cmp %g2, 0 + be new_check_cycle + nop + + /* Check floating point registers */ + check_float_register %f31 + check_float_register %f30 + check_float_register %f29 + check_float_register %f28 + check_float_register %f27 + check_float_register %f26 + check_float_register %f25 + check_float_register %f24 + check_float_register %f23 + check_float_register %f22 + check_float_register %f21 + check_float_register %f20 + check_float_register %f19 + check_float_register %f18 + check_float_register %f17 + check_float_register %f16 + check_float_register %f15 + check_float_register %f14 + check_float_register %f13 + check_float_register %f12 + check_float_register %f11 + check_float_register %f10 + check_float_register %f9 + check_float_register %f8 + check_float_register %f7 + check_float_register %f6 + check_float_register %f5 + check_float_register %f4 + check_float_register %f3 + check_float_register %f2 + check_float_register %f1 + check_float_register %f0 + check_fsr_register %fsr + + be new_check_cycle + nop + + /****** RESTORE STARTS HERE *******/ + + /* Restore non-volatile registers */ + +restore_registers: + and %g4, (SPARC_NUMBER_OF_REGISTER_WINDOWS - 1), %g4 + cmp %g4, 0 + be real_restore + nop + restore + sub %g4, 1, %g4 + bne restore_registers + nop + +real_restore: + ld 
[%sp + FRAME_OFFSET_L0], %l0 + ld [%sp + FRAME_OFFSET_L1], %l1 + ld [%sp + FRAME_OFFSET_L2], %l2 + ld [%sp + FRAME_OFFSET_L3], %l3 + ld [%sp + FRAME_OFFSET_L4], %l4 + ld [%sp + FRAME_OFFSET_L5], %l5 + ld [%sp + FRAME_OFFSET_L6], %l6 + ld [%sp + FRAME_OFFSET_L7], %l7 + ld [%sp + FRAME_OFFSET_I0], %i0 + ld [%sp + FRAME_OFFSET_I1], %i1 + ld [%sp + FRAME_OFFSET_I2], %i2 + ld [%sp + FRAME_OFFSET_I3], %i3 + ld [%sp + FRAME_OFFSET_I4], %i4 + ld [%sp + FRAME_OFFSET_I5], %i5 + ld [%sp + FRAME_OFFSET_I6], %i6 + ld [%sp + FRAME_OFFSET_I7], %i7 + + sub %sp, -FRAME_SIZE, %sp + +return_value: + /* Load callback address and jump back */ + jmp %o7 + 8 + add %sp, FRAME_SIZE, %sp diff --git a/cpukit/score/cpu/sparc/sparc-context-volatile-clobber.S b/cpukit/score/cpu/sparc/sparc-context-volatile-clobber.S new file mode 100644 index 0000000000..6e364cd3f6 --- /dev/null +++ b/cpukit/score/cpu/sparc/sparc-context-volatile-clobber.S @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/asm.h> +#include <rtems/score/cpu.h> + +#define SCRATCH_0 (CPU_MINIMUM_STACK_FRAME_SIZE) +#define SCRATCH_1 (SCRATCH_0 + 0x04) +#define FRAME_END (SCRATCH_1 + 0x04) +#define FRAME_SIZE \ + ((FRAME_END + CPU_STACK_ALIGNMENT - 1) & ~(CPU_STACK_ALIGNMENT - 1)) + +.macro clobber_register reg + sub %g2, 1, %g2 + mov %g2, \reg +.endm + +.macro clobber_fp_register reg + sub %g2, 1, %g2 + st %g2, [%sp + SCRATCH_0] + ld [%sp + SCRATCH_0], \reg +.endm + + .section ".bss" + .align 4 + + /* + * Use a global variable to vary the clobbered windows in each + * invocation to test the window overflow and underflow conditions. 
+ */ +window_clobber_count: + .skip 4 + + .section ".text" + .align 4 + + PUBLIC(_CPU_Context_volatile_clobber) +SYM(_CPU_Context_volatile_clobber): + + /* Increment number of flushed windows by one */ + sethi %hi(window_clobber_count), %o1 + ld [%o1 + %lo(window_clobber_count)], %o2 + add %o2, 1, %o2 + st %o2, [%o1 + %lo(window_clobber_count)] + + /* Clear window counter number */ + clr %g1 + + /* Save pattern to global register */ + mov %o0, %g2 + +window_clobber: + + /* Switch window */ + + save %sp, -FRAME_SIZE, %sp + + /* Check how many windows shall be flushed */ + sethi %hi(window_clobber_count), %o1 + ld [%o1 + %lo(window_clobber_count)], %o2 + st %o2, [%o1 + %lo(window_clobber_count)] + and %o2, (SPARC_NUMBER_OF_REGISTER_WINDOWS - 1), %o1 + cmp %o1, 0 + bne no_manual_update + nop + add %o1, SPARC_NUMBER_OF_REGISTER_WINDOWS, %o1 + +no_manual_update: + /* Register to determine whether FPU is switched on */ + mov %psr, %o2 + sethi %hi(SPARC_PSR_EF_MASK), %o3 + and %o3, %o2, %o2 + + clobber_register %o3 + clobber_register %o4 + clobber_register %o5 + /* Don't overwrite return address $o7 */ + clobber_register %g3 + clobber_register %g4 + clobber_register %y + + cmp %o2, 0 + be window_update_check + nop + + clobber_fp_register %f0 + clobber_fp_register %f1 + clobber_fp_register %f2 + clobber_fp_register %f3 + clobber_fp_register %f4 + clobber_fp_register %f5 + clobber_fp_register %f6 + clobber_fp_register %f7 + clobber_fp_register %f8 + clobber_fp_register %f9 + clobber_fp_register %f10 + clobber_fp_register %f11 + clobber_fp_register %f12 + clobber_fp_register %f13 + clobber_fp_register %f14 + clobber_fp_register %f15 + clobber_fp_register %f16 + clobber_fp_register %f17 + clobber_fp_register %f18 + clobber_fp_register %f19 + clobber_fp_register %f20 + clobber_fp_register %f21 + clobber_fp_register %f22 + clobber_fp_register %f23 + clobber_fp_register %f24 + clobber_fp_register %f25 + clobber_fp_register %f26 + clobber_fp_register %f27 + clobber_fp_register 
%f28 + clobber_fp_register %f29 + clobber_fp_register %f30 + clobber_fp_register %f31 + +window_update_check: + + /* Counter to how many windows were switched */ + add %g1, 1, %g1 + cmp %g1, %o1 + bl window_clobber + nop + +restore_check: + + cmp %g1, 0 + be clobber_return + nop + + restore + sub %g1, 1, %g1 + ba restore_check + nop + +clobber_return: + + jmp %o7 + 8 + add %sp, FRAME_SIZE, %sp diff --git a/cpukit/score/include/rtems/score/assert.h b/cpukit/score/include/rtems/score/assert.h index 43ec2d0e38..63083784fa 100644 --- a/cpukit/score/include/rtems/score/assert.h +++ b/cpukit/score/include/rtems/score/assert.h @@ -83,6 +83,15 @@ extern "C" { #endif /** + * @brief Like _Assert(), but only armed if RTEMS_SMP is defined. + */ +#if defined( RTEMS_SMP ) + #define _SMP_Assert( _e ) _Assert( _e ) +#else + #define _SMP_Assert( _e ) ( ( void ) 0 ) +#endif + +/** * @brief Returns true if thread dispatching is allowed. * * Thread dispatching can be repressed via _Thread_Disable_dispatch() or diff --git a/cpukit/score/include/rtems/score/corebarrierimpl.h b/cpukit/score/include/rtems/score/corebarrierimpl.h index 124ecabf05..e8b330dcb6 100644 --- a/cpukit/score/include/rtems/score/corebarrierimpl.h +++ b/cpukit/score/include/rtems/score/corebarrierimpl.h @@ -21,6 +21,7 @@ #include <rtems/score/corebarrier.h> #include <rtems/score/thread.h> +#include <rtems/score/threadqimpl.h> #include <rtems/score/watchdog.h> #ifdef __cplusplus @@ -83,6 +84,13 @@ void _CORE_barrier_Initialize( CORE_barrier_Attributes *the_barrier_attributes ); +RTEMS_INLINE_ROUTINE void _CORE_barrier_Destroy( + CORE_barrier_Control *the_barrier +) +{ + _Thread_queue_Destroy( &the_barrier->Wait_queue ); +} + /** * @brief Wait for the barrier. 
* diff --git a/cpukit/score/include/rtems/score/coremsgimpl.h b/cpukit/score/include/rtems/score/coremsgimpl.h index cedf2760e8..51b5f3780f 100644 --- a/cpukit/score/include/rtems/score/coremsgimpl.h +++ b/cpukit/score/include/rtems/score/coremsgimpl.h @@ -21,6 +21,7 @@ #include <rtems/score/coremsg.h> #include <rtems/score/chainimpl.h> +#include <rtems/score/threaddispatch.h> #include <rtems/score/threadqimpl.h> #include <limits.h> @@ -172,28 +173,13 @@ void _CORE_message_queue_Close( * number of messages flushed from the queue is returned. * * @param[in] the_message_queue points to the message queue to flush + * @param[in] lock_context The lock context of the interrupt disable. * * @retval This method returns the number of message pending messages flushed. */ uint32_t _CORE_message_queue_Flush( - CORE_message_queue_Control *the_message_queue -); - -/** - * @brief Flush all outstanding messages. - * - * This routine flushes all outstanding messages and returns - * them to the inactive message chain. - * - * @param[in] the_message_queue points to the message queue to flush - * - * @retval This method returns the number of pending messages flushed. - * - * - INTERRUPT LATENCY: - * + single case - */ -uint32_t _CORE_message_queue_Flush_support( - CORE_message_queue_Control *the_message_queue + CORE_message_queue_Control *the_message_queue, + ISR_lock_Context *lock_context ); #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API) @@ -232,6 +218,7 @@ uint32_t _CORE_message_queue_Flush_support( * a thread that is unblocked is actually a remote thread. * @param[out] count points to the variable that will contain the * number of tasks that are sent this message + * @param[in] lock_context The lock context of the interrupt disable. 
* @retval @a *count will contain the number of messages sent * @retval indication of the successful completion or reason for failure */ @@ -241,7 +228,8 @@ CORE_message_queue_Status _CORE_message_queue_Broadcast( size_t size, Objects_Id id, CORE_message_queue_API_mp_support_callout api_message_queue_mp_support, - uint32_t *count + uint32_t *count, + ISR_lock_Context *lock_context ); /** @@ -267,6 +255,7 @@ CORE_message_queue_Status _CORE_message_queue_Broadcast( * if the message queue is full. * @param[in] timeout is the maximum number of clock ticks that the calling * thread is willing to block if the message queue is full. + * @param[in] lock_context The lock context of the interrupt disable. * @retval indication of the successful completion or reason for failure */ CORE_message_queue_Status _CORE_message_queue_Submit( @@ -278,7 +267,8 @@ CORE_message_queue_Status _CORE_message_queue_Submit( CORE_message_queue_API_mp_support_callout api_message_queue_mp_support, CORE_message_queue_Submit_types submit_type, bool wait, - Watchdog_Interval timeout + Watchdog_Interval timeout, + ISR_lock_Context *lock_context ); /** @@ -304,6 +294,7 @@ CORE_message_queue_Status _CORE_message_queue_Submit( * if the message queue is empty. * @param[in] timeout is the maximum number of clock ticks that the calling * thread is willing to block if the message queue is empty. + * @param[in] lock_context The lock context of the interrupt disable. * * @retval indication of the successful completion or reason for failure. 
* On success, the location pointed to @a size_p will contain the @@ -322,7 +313,8 @@ void _CORE_message_queue_Seize( void *buffer, size_t *size_p, bool wait, - Watchdog_Interval timeout + Watchdog_Interval timeout, + ISR_lock_Context *lock_context ); /** @@ -355,8 +347,9 @@ RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Send( size_t size, Objects_Id id, CORE_message_queue_API_mp_support_callout api_message_queue_mp_support, - bool wait, - Watchdog_Interval timeout + bool wait, + Watchdog_Interval timeout, + ISR_lock_Context *lock_context ) { return _CORE_message_queue_Submit( @@ -368,7 +361,8 @@ RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Send( api_message_queue_mp_support, CORE_MESSAGE_QUEUE_SEND_REQUEST, wait, /* sender may block */ - timeout /* timeout interval */ + timeout, /* timeout interval */ + lock_context ); } @@ -381,8 +375,9 @@ RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Urgent( size_t size, Objects_Id id, CORE_message_queue_API_mp_support_callout api_message_queue_mp_support, - bool wait, - Watchdog_Interval timeout + bool wait, + Watchdog_Interval timeout, + ISR_lock_Context *lock_context ) { return _CORE_message_queue_Submit( @@ -394,10 +389,46 @@ RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Urgent( api_message_queue_mp_support, CORE_MESSAGE_QUEUE_URGENT_REQUEST, wait, /* sender may block */ - timeout /* timeout interval */ + timeout, /* timeout interval */ + lock_context ); } +RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire( + CORE_message_queue_Control *the_message_queue, + ISR_lock_Context *lock_context +) +{ + _Thread_queue_Acquire( &the_message_queue->Wait_queue, lock_context ); +} + +RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire_critical( + CORE_message_queue_Control *the_message_queue, + ISR_lock_Context *lock_context +) +{ + _Thread_queue_Acquire_critical( &the_message_queue->Wait_queue, lock_context ); + + #if defined(RTEMS_MULTIPROCESSING) 
+ /* + * In case RTEMS_MULTIPROCESSING is enabled, then we have to prevent + * deletion of the executing thread after the thread queue operations. + */ + _Thread_Dispatch_disable(); + #endif +} + +RTEMS_INLINE_ROUTINE void _CORE_message_queue_Release( + CORE_message_queue_Control *the_message_queue, + ISR_lock_Context *lock_context +) +{ + _Thread_queue_Release( &the_message_queue->Wait_queue, lock_context ); + #if defined(RTEMS_MULTIPROCESSING) + _Thread_Dispatch_enable( _Per_CPU_Get() ); + #endif +} + /** * This routine copies the contents of the source message buffer * to the destination message buffer. @@ -421,7 +452,7 @@ _CORE_message_queue_Allocate_message_buffer ( ) { return (CORE_message_queue_Buffer_control *) - _Chain_Get( &the_message_queue->Inactive_messages ); + _Chain_Get_unprotected( &the_message_queue->Inactive_messages ); } /** @@ -433,7 +464,7 @@ RTEMS_INLINE_ROUTINE void _CORE_message_queue_Free_message_buffer ( CORE_message_queue_Buffer_control *the_message ) { - _Chain_Append( &the_message_queue->Inactive_messages, &the_message->Node ); + _Chain_Append_unprotected( &the_message_queue->Inactive_messages, &the_message->Node ); } /** @@ -527,6 +558,55 @@ RTEMS_INLINE_ROUTINE bool _CORE_message_queue_Is_priority( the_message_queue, the_handler, the_argument ) #endif +RTEMS_INLINE_ROUTINE Thread_Control *_CORE_message_queue_Dequeue_receiver( + CORE_message_queue_Control *the_message_queue, + const void *buffer, + size_t size, + CORE_message_queue_Submit_types submit_type, + ISR_lock_Context *lock_context +) +{ + Thread_Control *the_thread; + + /* + * If there are pending messages, then there can't be threads + * waiting for us to send them a message. + * + * NOTE: This check is critical because threads can block on + * send and receive and this ensures that we are broadcasting + * the message to threads waiting to receive -- not to send. 
+ */ + if ( the_message_queue->number_of_pending_messages != 0 ) { + return NULL; + } + + /* + * There must be no pending messages if there is a thread waiting to + * receive a message. + */ + the_thread = _Thread_queue_First_locked( &the_message_queue->Wait_queue ); + if ( the_thread == NULL ) { + return NULL; + } + + *(size_t *) the_thread->Wait.return_argument = size; + the_thread->Wait.count = (uint32_t) submit_type; + + _CORE_message_queue_Copy_buffer( + buffer, + the_thread->Wait.return_argument_second.mutable_object, + size + ); + + _Thread_queue_Extract_critical( + &the_message_queue->Wait_queue, + the_thread, + lock_context + ); + + return the_thread; +} + /** @} */ #ifdef __cplusplus diff --git a/cpukit/score/include/rtems/score/coremuteximpl.h b/cpukit/score/include/rtems/score/coremuteximpl.h index e019b0abab..f57fdfcfd5 100644 --- a/cpukit/score/include/rtems/score/coremuteximpl.h +++ b/cpukit/score/include/rtems/score/coremuteximpl.h @@ -119,6 +119,11 @@ CORE_mutex_Status _CORE_mutex_Initialize( bool initially_locked ); +RTEMS_INLINE_ROUTINE void _CORE_mutex_Destroy( CORE_mutex_Control *the_mutex ) +{ + _Thread_queue_Destroy( &the_mutex->Wait_queue ); +} + /** * @brief Attempt to receive a unit from the_mutex. * @@ -205,17 +210,10 @@ void _CORE_mutex_Seize_interrupt_blocking( * * @retval this method returns true if dispatch is in an unsafe state. */ -#ifdef RTEMS_SMP - #define _CORE_mutex_Check_dispatch_for_seize(_wait) \ - (_Thread_Dispatch_get_disable_level() != 1 \ - && (_wait) \ - && (_System_state_Get() >= SYSTEM_STATE_UP)) -#else - #define _CORE_mutex_Check_dispatch_for_seize(_wait) \ - (!_Thread_Dispatch_is_enabled() \ - && (_wait) \ - && (_System_state_Get() >= SYSTEM_STATE_UP)) -#endif +#define _CORE_mutex_Check_dispatch_for_seize(_wait) \ + (!_Thread_Dispatch_is_enabled() \ + && (_wait) \ + && (_System_state_Get() >= SYSTEM_STATE_UP)) /** * @brief Attempt to obtain the mutex. 
@@ -260,9 +258,10 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Seize_body( INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE ); } + _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context ); if ( _CORE_mutex_Seize_interrupt_trylock( the_mutex, executing, lock_context ) ) { if ( !wait ) { - _ISR_lock_ISR_enable( lock_context ); + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); executing->Wait.return_code = CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT; } else { @@ -315,13 +314,15 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Seize_body( * @param[in] id is the id of the RTEMS Object associated with this mutex * @param[in] api_mutex_mp_support is the routine that will be called when * unblocking a remote mutex + * @param[in] lock_context is the interrupt level * * @retval an indication of whether the routine succeeded or failed */ CORE_mutex_Status _CORE_mutex_Surrender( CORE_mutex_Control *the_mutex, Objects_Id id, - CORE_mutex_API_mp_support_callout api_mutex_mp_support + CORE_mutex_API_mp_support_callout api_mutex_mp_support, + ISR_lock_Context *lock_context ); /** @@ -467,7 +468,7 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock_body( } if ( !_CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) { - _ISR_lock_ISR_enable( lock_context ); + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return 0; } /* else must be CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING * @@ -481,19 +482,17 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock_body( ceiling = the_mutex->Attributes.priority_ceiling; current = executing->current_priority; if ( current == ceiling ) { - _ISR_lock_ISR_enable( lock_context ); + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return 0; } if ( current > ceiling ) { - _Thread_Disable_dispatch(); - _ISR_lock_ISR_enable( lock_context ); - _Thread_Change_priority( - executing, - ceiling, - false - ); - _Thread_Enable_dispatch(); + Per_CPU_Control *cpu_self; + + cpu_self = 
_Thread_Dispatch_disable_critical( lock_context ); + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + _Thread_Raise_priority( executing, ceiling ); + _Thread_Dispatch_enable( cpu_self ); return 0; } /* if ( current < ceiling ) */ { @@ -501,7 +500,7 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock_body( the_mutex->holder = NULL; the_mutex->nest_count = 0; /* undo locking above */ executing->resource_count--; /* undo locking above */ - _ISR_lock_ISR_enable( lock_context ); + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return 0; } } @@ -517,12 +516,12 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock_body( switch ( the_mutex->Attributes.lock_nesting_behavior ) { case CORE_MUTEX_NESTING_ACQUIRES: the_mutex->nest_count++; - _ISR_lock_ISR_enable( lock_context ); + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return 0; #if defined(RTEMS_POSIX_API) case CORE_MUTEX_NESTING_IS_ERROR: executing->Wait.return_code = CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED; - _ISR_lock_ISR_enable( lock_context ); + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return 0; #endif case CORE_MUTEX_NESTING_BLOCKS: diff --git a/cpukit/score/include/rtems/score/corerwlockimpl.h b/cpukit/score/include/rtems/score/corerwlockimpl.h index 331510ba02..e619574d89 100644 --- a/cpukit/score/include/rtems/score/corerwlockimpl.h +++ b/cpukit/score/include/rtems/score/corerwlockimpl.h @@ -21,6 +21,7 @@ #include <rtems/score/corerwlock.h> #include <rtems/score/thread.h> +#include <rtems/score/threadqimpl.h> #include <rtems/score/watchdog.h> #ifdef __cplusplus @@ -87,6 +88,13 @@ void _CORE_RWLock_Initialize( CORE_RWLock_Attributes *the_rwlock_attributes ); +RTEMS_INLINE_ROUTINE void _CORE_RWLock_Destroy( + CORE_RWLock_Control *the_rwlock +) +{ + _Thread_queue_Destroy( &the_rwlock->Wait_queue ); +} + /** * @brief Obtain RWLock for reading. 
* diff --git a/cpukit/score/include/rtems/score/coresemimpl.h b/cpukit/score/include/rtems/score/coresemimpl.h index 6a3a212f34..da57ad1dd4 100644 --- a/cpukit/score/include/rtems/score/coresemimpl.h +++ b/cpukit/score/include/rtems/score/coresemimpl.h @@ -20,6 +20,7 @@ #define _RTEMS_SCORE_CORESEMIMPL_H #include <rtems/score/coresem.h> +#include <rtems/score/objectimpl.h> #include <rtems/score/threaddispatch.h> #include <rtems/score/threadqimpl.h> #include <rtems/score/statesimpl.h> @@ -33,10 +34,6 @@ extern "C" { */ /**@{**/ -#if defined(RTEMS_POSIX_API) || defined(RTEMS_MULTIPROCESSING) - #define RTEMS_SCORE_CORESEM_ENABLE_SEIZE_BODY -#endif - /** * Core Semaphore handler return statuses. */ @@ -98,29 +95,12 @@ void _CORE_semaphore_Initialize( uint32_t initial_value ); -#if defined(RTEMS_SCORE_CORESEM_ENABLE_SEIZE_BODY) - /** - * This routine attempts to receive a unit from @a the_semaphore. - * If a unit is available or if the wait flag is false, then the routine - * returns. Otherwise, the calling task is blocked until a unit becomes - * available. - * - * @param[in] the_semaphore is the semaphore to seize - * @param[in,out] executing The currently executing thread. - * @param[in] id is the Id of the API level Semaphore object associated - * with this instance of a SuperCore Semaphore - * @param[in] wait indicates if the caller is willing to block - * @param[in] timeout is the number of ticks the calling thread is willing - * to wait if @a wait is true. - */ - void _CORE_semaphore_Seize( - CORE_semaphore_Control *the_semaphore, - Thread_Control *executing, - Objects_Id id, - bool wait, - Watchdog_Interval timeout - ); -#endif +RTEMS_INLINE_ROUTINE void _CORE_semaphore_Destroy( + CORE_semaphore_Control *the_semaphore +) +{ + _Thread_queue_Destroy( &the_semaphore->Wait_queue ); +} /** * @brief Surrender a unit to a semaphore. 
@@ -134,14 +114,54 @@ void _CORE_semaphore_Initialize( * with this instance of a SuperCore Semaphore * @param[in] api_semaphore_mp_support is the routine to invoke if the * thread unblocked is remote + * @param[in] lock_context is a temporary variable used to contain the ISR + * disable level cookie * * @retval an indication of whether the routine succeeded or failed */ -CORE_semaphore_Status _CORE_semaphore_Surrender( +RTEMS_INLINE_ROUTINE CORE_semaphore_Status _CORE_semaphore_Surrender( CORE_semaphore_Control *the_semaphore, Objects_Id id, - CORE_semaphore_API_mp_support_callout api_semaphore_mp_support -); + CORE_semaphore_API_mp_support_callout api_semaphore_mp_support, + ISR_lock_Context *lock_context +) +{ + Thread_Control *the_thread; + CORE_semaphore_Status status; + + status = CORE_SEMAPHORE_STATUS_SUCCESSFUL; + + _Thread_queue_Acquire_critical( &the_semaphore->Wait_queue, lock_context ); + + the_thread = _Thread_queue_First_locked( &the_semaphore->Wait_queue ); + if ( the_thread != NULL ) { +#if defined(RTEMS_MULTIPROCESSING) + _Thread_Dispatch_disable(); +#endif + + _Thread_queue_Extract_critical( + &the_semaphore->Wait_queue, + the_thread, + lock_context + ); + +#if defined(RTEMS_MULTIPROCESSING) + if ( !_Objects_Is_local_id( the_thread->Object.id ) ) + (*api_semaphore_mp_support) ( the_thread, id ); + + _Thread_Dispatch_enable( _Per_CPU_Get() ); +#endif + } else { + if ( the_semaphore->count < the_semaphore->Attributes.maximum_count ) + the_semaphore->count += 1; + else + status = CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED; + + _Thread_queue_Release( &the_semaphore->Wait_queue, lock_context ); + } + + return status; +} /** * @brief Core semaphore flush. 
@@ -158,11 +178,18 @@ CORE_semaphore_Status _CORE_semaphore_Surrender( * thread unblocked is remote * @param[in] status is the status to be returned to the unblocked thread */ -void _CORE_semaphore_Flush( +RTEMS_INLINE_ROUTINE void _CORE_semaphore_Flush( CORE_semaphore_Control *the_semaphore, Thread_queue_Flush_callout remote_extract_callout, uint32_t status -); +) +{ + _Thread_queue_Flush( + &the_semaphore->Wait_queue, + remote_extract_callout, + status + ); +} /** * This function returns true if the priority attribute is @@ -209,7 +236,7 @@ RTEMS_INLINE_ROUTINE uint32_t _CORE_semaphore_Get_count( * * @note There is currently no MACRO version of this routine. */ -RTEMS_INLINE_ROUTINE void _CORE_semaphore_Seize_isr_disable( +RTEMS_INLINE_ROUTINE void _CORE_semaphore_Seize( CORE_semaphore_Control *the_semaphore, Thread_Control *executing, Objects_Id id, @@ -221,31 +248,28 @@ RTEMS_INLINE_ROUTINE void _CORE_semaphore_Seize_isr_disable( /* disabled when you get here */ executing->Wait.return_code = CORE_SEMAPHORE_STATUS_SUCCESSFUL; + _Thread_queue_Acquire_critical( &the_semaphore->Wait_queue, lock_context ); if ( the_semaphore->count != 0 ) { the_semaphore->count -= 1; - _ISR_lock_ISR_enable( lock_context ); + _Thread_queue_Release( &the_semaphore->Wait_queue, lock_context ); return; } if ( !wait ) { - _ISR_lock_ISR_enable( lock_context ); + _Thread_queue_Release( &the_semaphore->Wait_queue, lock_context ); executing->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT; return; } - _Thread_Disable_dispatch(); - _Thread_queue_Enter_critical_section( &the_semaphore->Wait_queue ); - executing->Wait.queue = &the_semaphore->Wait_queue; - executing->Wait.id = id; - _ISR_lock_ISR_enable( lock_context ); - - _Thread_queue_Enqueue( + executing->Wait.id = id; + _Thread_queue_Enqueue_critical( &the_semaphore->Wait_queue, executing, STATES_WAITING_FOR_SEMAPHORE, - timeout + timeout, + CORE_SEMAPHORE_TIMEOUT, + lock_context ); - _Thread_Enable_dispatch(); } /** @} */ diff 
--git a/cpukit/score/include/rtems/score/isrlock.h b/cpukit/score/include/rtems/score/isrlock.h index 5a68937b4d..994eb48ed0 100644 --- a/cpukit/score/include/rtems/score/isrlock.h +++ b/cpukit/score/include/rtems/score/isrlock.h @@ -68,6 +68,12 @@ typedef struct { #else ISR_Level isr_level; #endif +#if defined( RTEMS_PROFILING ) + /** + * @brief The last interrupt disable instant in CPU counter ticks. + */ + CPU_Counter_ticks ISR_disable_instant; +#endif } ISR_lock_Context; /** @@ -304,6 +310,13 @@ typedef struct { _ISR_Flash( ( _context )->isr_level ) #endif +#if defined( RTEMS_PROFILING ) + #define _ISR_lock_ISR_disable_profile( _context ) \ + ( _context )->ISR_disable_instant = _CPU_Counter_read(); +#else + #define _ISR_lock_ISR_disable_profile( _context ) +#endif + /** * @brief Disables interrupts and saves the previous interrupt state in the ISR * lock context. @@ -316,10 +329,16 @@ typedef struct { */ #if defined( RTEMS_SMP ) #define _ISR_lock_ISR_disable( _context ) \ - _ISR_Disable_without_giant( ( _context )->Lock_context.isr_level ) + do { \ + _ISR_Disable_without_giant( ( _context )->Lock_context.isr_level ); \ + _ISR_lock_ISR_disable_profile( _context ) \ + } while ( 0 ) #else #define _ISR_lock_ISR_disable( _context ) \ - _ISR_Disable( ( _context )->isr_level ) + do { \ + _ISR_Disable( ( _context )->isr_level ); \ + _ISR_lock_ISR_disable_profile( _context ) \ + } while ( 0 ) #endif /** diff --git a/cpukit/score/include/rtems/score/mpciimpl.h b/cpukit/score/include/rtems/score/mpciimpl.h index 22dff883a2..5652f6afa1 100644 --- a/cpukit/score/include/rtems/score/mpciimpl.h +++ b/cpukit/score/include/rtems/score/mpciimpl.h @@ -189,13 +189,15 @@ void _MPCI_Send_process_packet ( * set in addition to the remote operation pending state. It * may indicate the caller is blocking on a message queue * operation. + * @param[in] timeout_code is the timeout code * * @retval This method returns the operation status from the remote node. 
*/ uint32_t _MPCI_Send_request_packet ( uint32_t destination, MP_packet_Prefix *the_packet, - States_Control extra_state + States_Control extra_state, + uint32_t timeout_code ); /** diff --git a/cpukit/score/include/rtems/score/mrsp.h b/cpukit/score/include/rtems/score/mrsp.h index 9eb2887766..08f96ac4ac 100644 --- a/cpukit/score/include/rtems/score/mrsp.h +++ b/cpukit/score/include/rtems/score/mrsp.h @@ -20,6 +20,7 @@ #if defined(RTEMS_SMP) #include <rtems/score/chain.h> +#include <rtems/score/isrlock.h> #include <rtems/score/scheduler.h> #include <rtems/score/thread.h> @@ -75,24 +76,30 @@ typedef enum { MRSP_WAIT_FOR_OWNERSHIP = 255 } MRSP_Status; +typedef struct MRSP_Control MRSP_Control; + /** * @brief MrsP rival. * * The rivals are used by threads waiting for resource ownership. They are - * registered in the MRSP control block. + * registered in the MrsP control block. */ typedef struct { /** - * @brief The node for registration in the MRSP rival chain. + * @brief The node for registration in the MrsP rival chain. * - * The chain operations are protected by the Giant lock and disabled - * interrupts. + * The chain operations are protected by the MrsP control lock. * * @see MRSP_Control::Rivals. */ Chain_Node Node; /** + * @brief The corresponding MrsP control block. + */ + MRSP_Control *resource; + + /** * @brief Identification of the rival thread. */ Thread_Control *thread; @@ -118,8 +125,7 @@ typedef struct { * * Initially the status is set to MRSP_WAIT_FOR_OWNERSHIP. The rival will * busy wait until a status change happens. This can be MRSP_SUCCESSFUL or - * MRSP_TIMEOUT. State changes are protected by the Giant lock and disabled - * interrupts. + * MRSP_TIMEOUT. State changes are protected by the MrsP control lock. */ volatile MRSP_Status status; } MRSP_Rival; @@ -127,7 +133,7 @@ typedef struct { /** * @brief MrsP control block. */ -typedef struct { +struct MRSP_Control { /** * @brief Basic resource control. 
*/ @@ -141,6 +147,11 @@ typedef struct { Chain_Control Rivals; /** + * @brief Lock to protect the resource dependency tree. + */ + ISR_LOCK_MEMBER( Lock ) + + /** * @brief The initial priority of the owner before it was elevated to the * ceiling priority. */ @@ -150,7 +161,7 @@ typedef struct { * @brief One ceiling priority per scheduler instance. */ Priority_Control *ceiling_priorities; -} MRSP_Control; +}; /** @} */ diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h index c40f41f716..bc9ed4b511 100644 --- a/cpukit/score/include/rtems/score/mrspimpl.h +++ b/cpukit/score/include/rtems/score/mrspimpl.h @@ -36,13 +36,35 @@ extern "C" { * @{ */ -RTEMS_INLINE_ROUTINE void _MRSP_Elevate_priority( - MRSP_Control *mrsp, - Thread_Control *new_owner, - Priority_Control ceiling_priority +/* + * FIXME: Operations with the resource dependency tree are protected by the + * global scheduler lock. Since the scheduler lock should be scheduler + * instance specific in the future this will only work temporarily. A more + * sophisticated locking strategy is necessary. + */ + +RTEMS_INLINE_ROUTINE void _MRSP_Giant_acquire( ISR_lock_Context *lock_context ) +{ + _ISR_lock_Acquire( &_Scheduler_Lock, lock_context ); +} + +RTEMS_INLINE_ROUTINE void _MRSP_Giant_release( ISR_lock_Context *lock_context ) +{ + _ISR_lock_Release( &_Scheduler_Lock, lock_context ); +} + +RTEMS_INLINE_ROUTINE bool _MRSP_Restore_priority_filter( + Thread_Control *thread, + Priority_Control *new_priority, + void *arg ) { - _Thread_Change_priority( new_owner, ceiling_priority, false ); + *new_priority = _Thread_Priority_highest( + thread->real_priority, + *new_priority + ); + + return *new_priority != thread->current_priority; } RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority( @@ -55,13 +77,13 @@ RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority( * or priority inheritance semaphores. 
*/ if ( thread->resource_count == 0 ) { - Priority_Control new_priority = _Scheduler_Highest_priority_of_two( - _Scheduler_Get( thread ), + _Thread_Change_priority( + thread, initial_priority, - thread->real_priority + NULL, + _MRSP_Restore_priority_filter, + true ); - - _Thread_Change_priority( thread, new_priority, true ); } } @@ -69,14 +91,23 @@ RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership( MRSP_Control *mrsp, Thread_Control *new_owner, Priority_Control initial_priority, - Priority_Control ceiling_priority + Priority_Control ceiling_priority, + ISR_lock_Context *lock_context ) { + Per_CPU_Control *cpu_self; + _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource ); _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node ); mrsp->initial_priority_of_owner = initial_priority; - _MRSP_Elevate_priority( mrsp, new_owner, ceiling_priority ); _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER ); + + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context ); + + _Thread_Raise_priority( new_owner, ceiling_priority ); + + _Thread_Dispatch_enable( cpu_self ); } RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize( @@ -106,6 +137,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize( _Resource_Initialize( &mrsp->Resource ); _Chain_Initialize_empty( &mrsp->Rivals ); + _ISR_lock_Initialize( &mrsp->Lock, "MrsP" ); return MRSP_SUCCESSFUL; } @@ -133,27 +165,32 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout( ) { MRSP_Rival *rival = arg; + MRSP_Control *mrsp = rival->resource; Thread_Control *thread = rival->thread; - ISR_Level level; + ISR_lock_Context lock_context; (void) id; - _ISR_Disable( level ); + _ISR_lock_ISR_disable_and_acquire( &mrsp->Lock, &lock_context ); if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) { - rival->status = MRSP_TIMEOUT; + ISR_lock_Context giant_lock_context; + + _MRSP_Giant_acquire( &giant_lock_context ); _Chain_Extract_unprotected( 
&rival->Node ); _Resource_Node_extract( &thread->Resource_node ); _Resource_Node_set_dependency( &thread->Resource_node, NULL ); - - _ISR_Enable( level ); - _Scheduler_Thread_change_help_state( thread, rival->initial_help_state ); _Scheduler_Thread_change_resource_root( thread, thread ); - _MRSP_Restore_priority( thread, rival->initial_priority ); + + _MRSP_Giant_release( &giant_lock_context ); + + rival->status = MRSP_TIMEOUT; + + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, &lock_context ); } else { - _ISR_Enable( level ); + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, &lock_context ); } } @@ -163,35 +200,41 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership( Thread_Control *executing, Priority_Control initial_priority, Priority_Control ceiling_priority, - Watchdog_Interval timeout + Watchdog_Interval timeout, + ISR_lock_Context *lock_context ) { MRSP_Status status; MRSP_Rival rival; bool initial_life_protection; - ISR_Level level; + Per_CPU_Control *cpu_self; + ISR_lock_Context giant_lock_context; rival.thread = executing; + rival.resource = mrsp; rival.initial_priority = initial_priority; + + _MRSP_Giant_acquire( &giant_lock_context ); + rival.initial_help_state = _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL ); rival.status = MRSP_WAIT_FOR_OWNERSHIP; - _MRSP_Elevate_priority( mrsp, executing, ceiling_priority ); - - _ISR_Disable( level ); - _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node ); _Resource_Add_rival( &mrsp->Resource, &executing->Resource_node ); _Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource ); - - _ISR_Enable( level ); - _Scheduler_Thread_change_resource_root( executing, THREAD_RESOURCE_NODE_TO_THREAD( _Resource_Node_get_root( owner ) ) ); + _MRSP_Giant_release( &giant_lock_context ); + + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context ); + + _Thread_Raise_priority( executing, ceiling_priority ); + 
if ( timeout > 0 ) { _Watchdog_Initialize( &executing->Timer, @@ -203,7 +246,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership( } initial_life_protection = _Thread_Set_life_protection( true ); - _Thread_Enable_dispatch(); + _Thread_Dispatch_enable( cpu_self ); _Assert( _Debug_Is_thread_dispatching_allowed() ); @@ -212,11 +255,14 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership( status = rival.status; } while ( status == MRSP_WAIT_FOR_OWNERSHIP ); - _Thread_Disable_dispatch(); _Thread_Set_life_protection( initial_life_protection ); if ( timeout > 0 ) { - _Watchdog_Remove( &executing->Timer ); + _Watchdog_Remove_ticks( &executing->Timer ); + + if ( status == MRSP_TIMEOUT ) { + _MRSP_Restore_priority( executing, initial_priority ); + } } return status; @@ -226,7 +272,8 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain( MRSP_Control *mrsp, Thread_Control *executing, bool wait, - Watchdog_Interval timeout + Watchdog_Interval timeout, + ISR_lock_Context *lock_context ) { MRSP_Status status; @@ -235,39 +282,44 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain( Priority_Control initial_priority = executing->current_priority; Priority_Control ceiling_priority = _MRSP_Get_ceiling_priority( mrsp, scheduler_index ); - bool priority_ok = !_Scheduler_Is_priority_higher_than( - scheduler, - initial_priority, - ceiling_priority + bool priority_ok = !_Thread_Priority_less_than( + ceiling_priority, + initial_priority ); Resource_Node *owner; if ( !priority_ok) { + _ISR_lock_ISR_enable( lock_context ); return MRSP_INVALID_PRIORITY; } + _ISR_lock_Acquire( &mrsp->Lock, lock_context ); owner = _Resource_Get_owner( &mrsp->Resource ); if ( owner == NULL ) { _MRSP_Claim_ownership( mrsp, executing, initial_priority, - ceiling_priority + ceiling_priority, + lock_context ); status = MRSP_SUCCESSFUL; - } else if ( _Resource_Node_get_root( owner ) == &executing->Resource_node ) { - /* Nested access or deadlock */ - status = MRSP_UNSATISFIED; - } else if ( wait ) { + } 
else if ( + wait + && _Resource_Node_get_root( owner ) != &executing->Resource_node + ) { status = _MRSP_Wait_for_ownership( mrsp, owner, executing, initial_priority, ceiling_priority, - timeout + timeout, + lock_context ); } else { + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context ); + /* Not available, nested access or deadlock */ status = MRSP_UNSATISFIED; } @@ -275,13 +327,17 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain( } RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release( - MRSP_Control *mrsp, - Thread_Control *executing + MRSP_Control *mrsp, + Thread_Control *executing, + ISR_lock_Context *lock_context ) { - ISR_Level level; + Priority_Control initial_priority; + Per_CPU_Control *cpu_self; + ISR_lock_Context giant_lock_context; if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) { + _ISR_lock_ISR_enable( lock_context ); return MRSP_NOT_OWNER_OF_RESOURCE; } @@ -291,18 +347,19 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release( &executing->Resource_node ) ) { + _ISR_lock_ISR_enable( lock_context ); return MRSP_INCORRECT_STATE; } - _MRSP_Restore_priority( executing, mrsp->initial_priority_of_owner ); + initial_priority = mrsp->initial_priority_of_owner; + + _ISR_lock_Acquire( &mrsp->Lock, lock_context ); - _ISR_Disable( level ); + _MRSP_Giant_acquire( &giant_lock_context ); _Resource_Extract( &mrsp->Resource ); if ( _Chain_Is_empty( &mrsp->Rivals ) ) { - _ISR_Enable( level ); - _Resource_Set_owner( &mrsp->Resource, NULL ); } else { MRSP_Rival *rival = (MRSP_Rival *) @@ -321,9 +378,6 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release( _Resource_Node_set_dependency( &new_owner->Resource_node, NULL ); _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource ); _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node ); - - _ISR_Enable( level ); - _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER ); _Scheduler_Thread_change_resource_root( new_owner, new_owner ); } @@ -332,6 +386,15 @@ 
RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release( _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_YOURSELF ); } + _MRSP_Giant_release( &giant_lock_context ); + + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context ); + + _MRSP_Restore_priority( executing, initial_priority ); + + _Thread_Dispatch_enable( cpu_self ); + return MRSP_SUCCESSFUL; } @@ -341,6 +404,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Destroy( MRSP_Control *mrsp ) return MRSP_RESOUCE_IN_USE; } + _ISR_lock_Destroy( &mrsp->Lock ); _Workspace_Free( mrsp->ceiling_priorities ); return MRSP_SUCCESSFUL; diff --git a/cpukit/score/include/rtems/score/objectimpl.h b/cpukit/score/include/rtems/score/objectimpl.h index 4080a249cf..a5a3b7ef5f 100644 --- a/cpukit/score/include/rtems/score/objectimpl.h +++ b/cpukit/score/include/rtems/score/objectimpl.h @@ -991,16 +991,6 @@ RTEMS_INLINE_ROUTINE void _Objects_Put_without_thread_dispatch( } /** - * @brief Puts back an object obtained with _Objects_Get_isr_disable(). - */ -RTEMS_INLINE_ROUTINE void _Objects_Put_for_get_isr_disable( - Objects_Control *the_object -) -{ - (void) the_object; -} - -/** * @brief Locks the object allocator mutex. 
* * While holding the allocator mutex the executing thread is protected from diff --git a/cpukit/score/include/rtems/score/profiling.h b/cpukit/score/include/rtems/score/profiling.h index f5fa6cc241..a6ab283ae6 100644 --- a/cpukit/score/include/rtems/score/profiling.h +++ b/cpukit/score/include/rtems/score/profiling.h @@ -24,6 +24,7 @@ #define _RTEMS_SCORE_PROFILING #include <rtems/score/percpu.h> +#include <rtems/score/isrlock.h> #ifdef __cplusplus extern "C" { @@ -55,6 +56,26 @@ static inline void _Profiling_Thread_dispatch_disable( #endif } +static inline void _Profiling_Thread_dispatch_disable_critical( + Per_CPU_Control *cpu, + uint32_t previous_thread_dispatch_disable_level, + const ISR_lock_Context *lock_context +) +{ +#if defined( RTEMS_PROFILING ) + if ( previous_thread_dispatch_disable_level == 0 ) { + Per_CPU_Stats *stats = &cpu->Stats; + + stats->thread_dispatch_disabled_instant = lock_context->ISR_disable_instant; + ++stats->thread_dispatch_disabled_count; + } +#else + (void) cpu; + (void) previous_thread_dispatch_disable_level; + (void) lock_context; +#endif +} + static inline void _Profiling_Thread_dispatch_enable( Per_CPU_Control *cpu, uint32_t new_thread_dispatch_disable_level diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h index 212bace075..cadebfd02f 100644 --- a/cpukit/score/include/rtems/score/schedulerimpl.h +++ b/cpukit/score/include/rtems/score/schedulerimpl.h @@ -693,54 +693,6 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_higher_than( return _Scheduler_Priority_compare( scheduler, p1, p2 ) > 0; } -/** - * @brief Returns the priority encoding @a p1 or @a p2 with the higher priority - * in the intuitive sense of priority. - */ -RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Highest_priority_of_two( - const Scheduler_Control *scheduler, - Priority_Control p1, - Priority_Control p2 -) -{ - return _Scheduler_Is_priority_higher_than( scheduler, p1, p2 ) ? 
p1 : p2; -} - -/** - * @brief Sets the thread priority to @a priority if it is higher than the - * current priority of the thread in the intuitive sense of priority. - */ -RTEMS_INLINE_ROUTINE void _Scheduler_Set_priority_if_higher( - const Scheduler_Control *scheduler, - Thread_Control *the_thread, - Priority_Control priority -) -{ - Priority_Control current = the_thread->current_priority; - - if ( _Scheduler_Is_priority_higher_than( scheduler, priority, current ) ) { - _Thread_Set_priority( the_thread, priority ); - } -} - -/** - * @brief Changes the thread priority to @a priority if it is higher than the - * current priority of the thread in the intuitive sense of priority. - */ -RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority_if_higher( - const Scheduler_Control *scheduler, - Thread_Control *the_thread, - Priority_Control priority, - bool prepend_it -) -{ - Priority_Control current = the_thread->current_priority; - - if ( _Scheduler_Is_priority_higher_than( scheduler, priority, current ) ) { - _Thread_Change_priority( the_thread, priority, prepend_it ); - } -} - RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count( const Scheduler_Control *scheduler ) diff --git a/cpukit/score/include/rtems/score/smplock.h b/cpukit/score/include/rtems/score/smplock.h index 5eb6ef344f..50a0662ca9 100644 --- a/cpukit/score/include/rtems/score/smplock.h +++ b/cpukit/score/include/rtems/score/smplock.h @@ -10,7 +10,7 @@ * COPYRIGHT (c) 1989-2011. * On-Line Applications Research Corporation (OAR). 
* - * Copyright (c) 2013-2014 embedded brains GmbH + * Copyright (c) 2013-2015 embedded brains GmbH * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -32,6 +32,10 @@ #include <string.h> #endif +#if defined( RTEMS_PROFILING ) +#define RTEMS_SMP_LOCK_DO_NOT_INLINE +#endif + #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ @@ -368,7 +372,16 @@ typedef struct { * @param[in] name The name for the SMP lock statistics. This name must be * persistent throughout the life time of this statistics block. */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Initialize( + SMP_lock_Control *lock, + const char *name +); + +static inline void _SMP_lock_Initialize_body( +#else static inline void _SMP_lock_Initialize( +#endif SMP_lock_Control *lock, const char *name ) @@ -383,7 +396,13 @@ static inline void _SMP_lock_Initialize( * * @param[in,out] lock The SMP lock control. */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Destroy( SMP_lock_Control *lock ); + +static inline void _SMP_lock_Destroy_body( SMP_lock_Control *lock ) +#else static inline void _SMP_lock_Destroy( SMP_lock_Control *lock ) +#endif { _SMP_ticket_lock_Destroy( &lock->ticket_lock ); } @@ -399,7 +418,16 @@ static inline void _SMP_lock_Destroy( SMP_lock_Control *lock ) * @param[in,out] context The local SMP lock context for an acquire and release * pair. */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Acquire( + SMP_lock_Control *lock, + SMP_lock_Context *context +); + +static inline void _SMP_lock_Acquire_body( +#else static inline void _SMP_lock_Acquire( +#endif SMP_lock_Control *lock, SMP_lock_Context *context ) @@ -415,7 +443,16 @@ static inline void _SMP_lock_Acquire( * @param[in,out] context The local SMP lock context for an acquire and release * pair. 
*/ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Release( + SMP_lock_Control *lock, + SMP_lock_Context *context +); + +static inline void _SMP_lock_Release_body( +#else static inline void _SMP_lock_Release( +#endif SMP_lock_Control *lock, SMP_lock_Context *context ) @@ -431,7 +468,16 @@ static inline void _SMP_lock_Release( * @param[in,out] context The local SMP lock context for an acquire and release * pair. */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_ISR_disable_and_acquire( + SMP_lock_Control *lock, + SMP_lock_Context *context +); + +static inline void _SMP_lock_ISR_disable_and_acquire_body( +#else static inline void _SMP_lock_ISR_disable_and_acquire( +#endif SMP_lock_Control *lock, SMP_lock_Context *context ) @@ -447,7 +493,16 @@ static inline void _SMP_lock_ISR_disable_and_acquire( * @param[in,out] context The local SMP lock context for an acquire and release * pair. */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Release_and_ISR_enable( + SMP_lock_Control *lock, + SMP_lock_Context *context +); + +static inline void _SMP_lock_Release_and_ISR_enable_body( +#else static inline void _SMP_lock_Release_and_ISR_enable( +#endif SMP_lock_Control *lock, SMP_lock_Context *context ) diff --git a/cpukit/score/include/rtems/score/statesimpl.h b/cpukit/score/include/rtems/score/statesimpl.h index 9f94675c89..d1c402e643 100644 --- a/cpukit/score/include/rtems/score/statesimpl.h +++ b/cpukit/score/include/rtems/score/statesimpl.h @@ -105,19 +105,14 @@ extern "C" { STATES_WAITING_FOR_BSD_WAKEUP | \ STATES_WAITING_FOR_RWLOCK ) -/** This macro corresponds to a task waiting which is blocked on - * a thread queue. */ -#define STATES_WAITING_ON_THREAD_QUEUE \ - ( STATES_LOCALLY_BLOCKED | \ - STATES_WAITING_FOR_RPC_REPLY ) - /** This macro corresponds to a task waiting which is blocked. 
*/ #define STATES_BLOCKED ( STATES_DELAYING | \ + STATES_LOCALLY_BLOCKED | \ STATES_WAITING_FOR_TIME | \ STATES_WAITING_FOR_PERIOD | \ STATES_WAITING_FOR_EVENT | \ + STATES_WAITING_FOR_RPC_REPLY | \ STATES_WAITING_FOR_SYSTEM_EVENT | \ - STATES_WAITING_ON_THREAD_QUEUE | \ STATES_INTERRUPTIBLE_BY_SIGNAL ) /** All state bits set to one (provided for _Thread_Ready()) */ @@ -416,23 +411,6 @@ RTEMS_INLINE_ROUTINE bool _States_Is_locally_blocked ( /** * This function returns true if one of the states which indicates - * that a task is blocked waiting for a local resource is set in - * the_states, and false otherwise. - * - * @param[in] the_states is the task state set to test - * - * @return This method returns true if the state indicates that the - * assocated thread is waiting on a thread queue. - */ -RTEMS_INLINE_ROUTINE bool _States_Is_waiting_on_thread_queue ( - States_Control the_states -) -{ - return (the_states & STATES_WAITING_ON_THREAD_QUEUE); -} - -/** - * This function returns true if one of the states which indicates * that a task is blocked is set in the_states, and false otherwise. * * @param[in] the_states is the task state set to test diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h index 72e011ccff..39fcb1707f 100644 --- a/cpukit/score/include/rtems/score/thread.h +++ b/cpukit/score/include/rtems/score/thread.h @@ -249,56 +249,6 @@ typedef struct { } Thread_Start_information; /** - * @brief Priority change handler. - * - * @param[in] the_thread The thread. - * @param[in] new_priority The new priority value. - * @param[in] context The handler context. - * - * @see _Thread_Priority_set_change_handler(). - */ -typedef void (*Thread_Priority_change_handler)( - Thread_Control *the_thread, - Priority_Control new_priority, - void *context -); - -/** - * @brief Thread priority control. - */ -typedef struct { - /** - * @brief Generation of the current priority value. 
- * - * It is used in _Thread_Change_priority() to serialize the update of - * priority related data structures. - */ - uint32_t generation; - - /** - * @brief Priority change handler. - * - * Called by _Thread_Change_priority() to notify a thread about a priority - * change. In case this thread waits currently for a resource the handler - * may adjust its data structures according to the new priority value. This - * handler must not be NULL, instead the default handler - * _Thread_Priority_change_do_nothing() should be used in case nothing needs - * to be done during a priority change. - * - * @see _Thread_Priority_set_change_handler() and - * _Thread_Priority_restore_default_change_handler(). - */ - Thread_Priority_change_handler change_handler; - - /** - * @brief Context for priority change handler. - * - * @see _Thread_Priority_set_change_handler(). - */ - void *change_handler_context; -} Thread_Priority_control; - -/** * @brief Union type to hold a pointer to an immutable or a mutable object. * * The main purpose is to enable passing of pointers to read-only send buffers @@ -324,11 +274,8 @@ typedef union { * * The mutually exclusive wait state flags are * - @ref THREAD_WAIT_STATE_INTEND_TO_BLOCK, - * - @ref THREAD_WAIT_STATE_BLOCKED, - * - @ref THREAD_WAIT_STATE_SATISFIED, - * - @ref THREAD_WAIT_STATE_TIMEOUT, - * - @ref THREAD_WAIT_STATE_INTERRUPT_SATISFIED, and - * - @ref THREAD_WAIT_STATE_INTERRUPT_TIMEOUT, + * - @ref THREAD_WAIT_STATE_BLOCKED, and + * - @ref THREAD_WAIT_STATE_READY_AGAIN. */ typedef unsigned int Thread_Wait_flags; @@ -339,6 +286,21 @@ typedef unsigned int Thread_Wait_flags; * blocked and to return information to it. */ typedef struct { + /** + * @brief Node for thread queues. + */ + union { + /** + * @brief A node for chains. + */ + Chain_Node Chain; + + /** + * @brief A node for red-black trees. + */ + RBTree_Node RBTree; + } Node; + /** This field is the Id of the object this thread is waiting upon. 
*/ Objects_Id id; /** This field is used to return an integer while when blocked. */ @@ -357,7 +319,19 @@ typedef struct { */ uint32_t return_code; - /** This field points to the thread queue on which this thread is blocked. */ + /** + * @brief Code to set the timeout return code in _Thread_Timeout(). + */ + uint32_t timeout_code; + + /** + * @brief The current thread queue. + * + * In case this field is @c NULL, then the thread is not blocked on a thread + * queue. This field is protected by the thread lock. + * + * @see _Thread_Lock_set() and _Thread_Wait_set_queue(). + */ Thread_queue_Control *queue; /** @@ -369,6 +343,15 @@ typedef struct { #else Thread_Wait_flags flags; #endif + + /** + * @brief The current thread queue operations. + * + * This field is protected by the thread lock. + * + * @see _Thread_Lock_set() and _Thread_Wait_set_operations(). + */ + const Thread_queue_Operations *operations; } Thread_Wait_information; /** @@ -381,19 +364,43 @@ typedef struct { typedef struct { /** This field is the object management structure for each proxy. */ Objects_Control Object; - /** This field is used to enqueue the thread on RBTrees. */ - RBTree_Node RBNode; /** This field is the current execution state of this proxy. */ States_Control current_state; - /** This field is the current priority state of this proxy. */ + + /** + * @brief This field is the current priority state of this thread. + * + * Writes to this field are only allowed in _Thread_Initialize() or via + * _Thread_Change_priority(). + */ Priority_Control current_priority; - /** This field is the base priority of this proxy. */ + + /** + * @brief This field is the base priority of this thread. + * + * Writes to this field are only allowed in _Thread_Initialize() or via + * _Thread_Change_priority(). + */ Priority_Control real_priority; /** - * @brief Thread priority control. + * @brief Generation of the current priority value. 
+ * + * It is used in _Thread_Change_priority() to serialize the update of + * priority related data structures. + */ + uint32_t priority_generation; + + /** + * @brief Hints if a priority restore is necessary once the resource count + * changes from one to zero. + * + * This is an optimization to speed up the mutex surrender sequence in case + * no attempt to change the priority was made during the mutex ownership. On + * SMP configurations atomic fences must synchronize writes to + * Thread_Control::priority_restore_hint and Thread_Control::resource_count. */ - Thread_Priority_control Priority; + bool priority_restore_hint; /** This field is the number of mutexes currently held by this proxy. */ uint32_t resource_count; @@ -636,8 +643,8 @@ typedef struct { * * The thread lock protects the following thread variables * - Thread_Control::current_priority, - * - Thread_Control::Priority::change_handler, and - * - Thread_Control::Priority::change_handler_context. + * - Thread_Control::Wait::queue, and + * - Thread_Control::Wait::operations. * * @see _Thread_Lock_acquire(), _Thread_Lock_release(), _Thread_Lock_set() and * _Thread_Lock_restore_default(). @@ -667,19 +674,43 @@ typedef struct { struct Thread_Control_struct { /** This field is the object management structure for each thread. */ Objects_Control Object; - /** This field is used to enqueue the thread on RBTrees. */ - RBTree_Node RBNode; /** This field is the current execution state of this thread. */ States_Control current_state; - /** This field is the current priority state of this thread. */ + + /** + * @brief This field is the current priority state of this thread. + * + * Writes to this field are only allowed in _Thread_Initialize() or via + * _Thread_Change_priority(). + */ Priority_Control current_priority; - /** This field is the base priority of this thread. */ + + /** + * @brief This field is the base priority of this thread. 
+ * + * Writes to this field are only allowed in _Thread_Initialize() or via + * _Thread_Change_priority(). + */ Priority_Control real_priority; /** - * @brief Thread priority control. + * @brief Generation of the current priority value. + * + * It is used in _Thread_Change_priority() to serialize the update of + * priority related data structures. + */ + uint32_t priority_generation; + + /** + * @brief Hints if a priority restore is necessary once the resource count + * changes from one to zero. + * + * This is an optimization to speed up the mutex surrender sequence in case + * no attempt to change the priority was made during the mutex ownership. On + * SMP configurations atomic fences must synchronize writes to + * Thread_Control::priority_restore_hint and Thread_Control::resource_count. */ - Thread_Priority_control Priority; + bool priority_restore_hint; /** This field is the number of mutexes currently held by this thread. */ uint32_t resource_count; diff --git a/cpukit/score/include/rtems/score/threaddispatch.h b/cpukit/score/include/rtems/score/threaddispatch.h index 89f5c0ba5b..4ef5538f7e 100644 --- a/cpukit/score/include/rtems/score/threaddispatch.h +++ b/cpukit/score/include/rtems/score/threaddispatch.h @@ -15,7 +15,7 @@ #define _RTEMS_SCORE_THREADDISPATCH_H #include <rtems/score/percpu.h> -#include <rtems/score/smplock.h> +#include <rtems/score/isrlock.h> #include <rtems/score/profiling.h> #ifdef __cplusplus @@ -242,16 +242,25 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level ); * * This function does not acquire the Giant lock. * + * @param[in] lock_context The lock context of the corresponding + * _ISR_lock_ISR_disable() that started the critical section. + * * @return The current processor. 
*/ -RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_critical( void ) +RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_critical( + const ISR_lock_Context *lock_context +) { Per_CPU_Control *cpu_self; uint32_t disable_level; cpu_self = _Per_CPU_Get(); disable_level = cpu_self->thread_dispatch_disable_level; - _Profiling_Thread_dispatch_disable( cpu_self, disable_level ); + _Profiling_Thread_dispatch_disable_critical( + cpu_self, + disable_level, + lock_context + ); cpu_self->thread_dispatch_disable_level = disable_level + 1; return cpu_self; @@ -266,18 +275,17 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_critical( void ) */ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable( void ) { - Per_CPU_Control *cpu_self; + Per_CPU_Control *cpu_self; + ISR_lock_Context lock_context; #if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING ) - ISR_Level level; - - _ISR_Disable_without_giant( level ); + _ISR_lock_ISR_disable( &lock_context ); #endif - cpu_self = _Thread_Dispatch_disable_critical(); + cpu_self = _Thread_Dispatch_disable_critical( &lock_context ); #if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING ) - _ISR_Enable_without_giant( level ); + _ISR_lock_ISR_enable( &lock_context ); #endif return cpu_self; diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h index 84c9ac316f..ffff220288 100644 --- a/cpukit/score/include/rtems/score/threadimpl.h +++ b/cpukit/score/include/rtems/score/threadimpl.h @@ -30,6 +30,7 @@ #include <rtems/score/resourceimpl.h> #include <rtems/score/statesimpl.h> #include <rtems/score/sysstate.h> +#include <rtems/score/threadqimpl.h> #include <rtems/score/todimpl.h> #include <rtems/config.h> @@ -77,8 +78,11 @@ SCORE_EXTERN Thread_Control *_Thread_Allocated_fp; SCORE_EXTERN struct _reent **_Thread_libc_reent; #endif +#define THREAD_CHAIN_NODE_TO_THREAD( node ) \ + RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain ) + #define 
THREAD_RBTREE_NODE_TO_THREAD( node ) \ - RTEMS_CONTAINER_OF( node, Thread_Control, RBNode ) + RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree ) #if defined(RTEMS_SMP) #define THREAD_RESOURCE_NODE_TO_THREAD( node ) \ @@ -329,34 +333,120 @@ void _Thread_Delay_ended( ); /** - * @brief Change the priority of a thread. + * @brief Returns true if the left thread priority is less than the right + * thread priority in the intuitive sense of priority and false otherwise. + */ +RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than( + Priority_Control left, + Priority_Control right +) +{ + return left > right; +} + +/** + * @brief Returns the highest priority of the left and right thread priorities + * in the intuitive sense of priority. + */ +RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest( + Priority_Control left, + Priority_Control right +) +{ + return _Thread_Priority_less_than( left, right ) ? right : left; +} + +/** + * @brief Filters a thread priority change. + * + * Called by _Thread_Change_priority() under the protection of the thread lock. * - * This routine changes the current priority of @a the_thread to - * @a new_priority. It performs any necessary scheduling operations - * including the selection of a new heir thread. + * @param[in] the_thread The thread. + * @param[in, out] new_priority The new priority of the thread. The filter may + * alter this value. + * @param[in] arg The argument passed to _Thread_Change_priority(). * - * @param[in] the_thread is the thread to change - * @param[in] new_priority is the priority to set @a the_thread to - * @param[in] prepend_it is a switch to prepend the thread + * @retval true Change the current priority. + * @retval false Otherwise. */ -void _Thread_Change_priority ( +typedef bool ( *Thread_Change_priority_filter )( Thread_Control *the_thread, - Priority_Control new_priority, - bool prepend_it + Priority_Control *new_priority, + void *arg ); /** - * @brief Set thread priority. 
+ * @brief Changes the priority of a thread if allowed by the filter function. + * + * It changes current priority of the thread to the new priority in case the + * filter function returns true. In this case the scheduler is notified of the + * priority change as well. * - * This routine updates the priority related fields in the_thread - * control block to indicate the current priority is now new_priority. + * @param[in] the_thread The thread. + * @param[in] new_priority The new priority of the thread. + * @param[in] arg The argument for the filter function. + * @param[in] filter The filter function to determine if a priority change is + * allowed and optionally perform other actions under the protection of the + * thread lock simultaneously with the update of the current priority. + * @param[in] prepend_it In case this is true, then the thread is prepended to + * its priority group in its scheduler instance, otherwise it is appended. + */ +void _Thread_Change_priority( + Thread_Control *the_thread, + Priority_Control new_priority, + void *arg, + Thread_Change_priority_filter filter, + bool prepend_it +); + +/** + * @brief Raises the priority of a thread. + * + * It changes the current priority of the thread to the new priority if the new + * priority is higher than the current priority. In this case the thread is + * appended to its new priority group in its scheduler instance. + * + * @param[in] the_thread The thread. + * @param[in] new_priority The new priority of the thread. + * + * @see _Thread_Change_priority(). */ -void _Thread_Set_priority( +void _Thread_Raise_priority( Thread_Control *the_thread, Priority_Control new_priority ); /** + * @brief Sets the current to the real priority of a thread. + * + * Sets the priority restore hint to false. + */ +void _Thread_Restore_priority( Thread_Control *the_thread ); + +/** + * @brief Sets the priority of a thread. + * + * It sets the real priority of the thread. 
In addition it changes the current + * priority of the thread if the new priority is higher than the current + * priority or the thread owns no resources. + * + * @param[in] the_thread The thread. + * @param[in] new_priority The new priority of the thread. + * @param[out] old_priority The old real priority of the thread. This pointer + * must not be @c NULL. + * @param[in] prepend_it In case this is true, then the thread is prepended to + * its priority group in its scheduler instance, otherwise it is appended. + * + * @see _Thread_Change_priority(). + */ +void _Thread_Set_priority( + Thread_Control *the_thread, + Priority_Control new_priority, + Priority_Control *old_priority, + bool prepend_it +); + +/** * @brief Maps thread Id to a TCB pointer. * * This function maps thread IDs to thread control @@ -1078,41 +1168,6 @@ RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default( do { } while ( 0 ) #endif -void _Thread_Priority_change_do_nothing( - Thread_Control *the_thread, - Priority_Control new_priority, - void *context -); - -/** - * @brief Sets the thread priority change handler and its context. - * - * @param[in] the_thread The thread. - * @param[in] new_handler The new handler. - * @param[in] new_context The new handler context. - */ -RTEMS_INLINE_ROUTINE void _Thread_Priority_set_change_handler( - Thread_Control *the_thread, - Thread_Priority_change_handler new_handler, - void *new_context -) -{ - the_thread->Priority.change_handler = new_handler; - the_thread->Priority.change_handler_context = new_context; -} - -/** - * @brief Restores the thread priority change default handler and its context. - * - * @param[in] the_thread The thread. - */ -RTEMS_INLINE_ROUTINE void _Thread_Priority_restore_default_change_handler( - Thread_Control *the_thread -) -{ - the_thread->Priority.change_handler = _Thread_Priority_change_do_nothing; -} - /** * @brief The initial thread wait flags value set by _Thread_Initialize(). 
*/ @@ -1138,28 +1193,11 @@ RTEMS_INLINE_ROUTINE void _Thread_Priority_restore_default_change_handler( #define THREAD_WAIT_STATE_BLOCKED 0x2U /** - * @brief Indicates that the thread progress condition is satisfied and it is - * ready to resume execution. - */ -#define THREAD_WAIT_STATE_SATISFIED 0x4U - -/** - * @brief Indicates that a timeout occurred and the thread is ready to resume - * execution. - */ -#define THREAD_WAIT_STATE_TIMEOUT 0x8U - -/** - * @brief Indicates that the thread progress condition was satisfied during the - * blocking operation and it is ready to resume execution. - */ -#define THREAD_WAIT_STATE_INTERRUPT_SATISFIED 0x10U - -/** - * @brief Indicates that a timeout occurred during the blocking operation and - * the thread is ready to resume execution. + * @brief Indicates that a condition to end the thread wait occurred. + * + * This could be a timeout, a signal, an event or a resource availability. */ -#define THREAD_WAIT_STATE_INTERRUPT_TIMEOUT 0x20U +#define THREAD_WAIT_STATE_READY_AGAIN 0x4U /** * @brief Mask to get the thread wait class flags. @@ -1274,6 +1312,80 @@ RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change( return success; } +/** + * @brief Sets the thread queue. + * + * The caller must be the owner of the thread lock. + * + * @param[in] the_thread The thread. + * @param[in] new_queue The new queue. + * + * @see _Thread_Lock_set(). + */ +RTEMS_INLINE_ROUTINE void _Thread_Wait_set_queue( + Thread_Control *the_thread, + Thread_queue_Control *new_queue +) +{ + the_thread->Wait.queue = new_queue; +} + +/** + * @brief Sets the thread queue operations. + * + * The caller must be the owner of the thread lock. + * + * @param[in] the_thread The thread. + * @param[in] new_operations The new queue operations. + * + * @see _Thread_Lock_set() and _Thread_Wait_restore_default_operations(). 
+ */ +RTEMS_INLINE_ROUTINE void _Thread_Wait_set_operations( + Thread_Control *the_thread, + const Thread_queue_Operations *new_operations +) +{ + the_thread->Wait.operations = new_operations; +} + +/** + * @brief Restores the default thread queue operations. + * + * The caller must be the owner of the thread lock. + * + * @param[in] the_thread The thread. + * + * @see _Thread_Wait_set_operations(). + */ +RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default_operations( + Thread_Control *the_thread +) +{ + the_thread->Wait.operations = &_Thread_queue_Operations_default; +} + +/** + * @brief Sets the thread wait timeout code. + * + * @param[in] the_thread The thread. + * @param[in] timeout_code The new thread wait timeout code. + */ +RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code( + Thread_Control *the_thread, + uint32_t timeout_code +) +{ + the_thread->Wait.timeout_code = timeout_code; +} + +/** + * @brief General purpose thread wait timeout. + * + * @param[in] id Unused. + * @param[in] arg The thread. + */ +void _Thread_Timeout( Objects_Id id, void *arg ); + RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor( Thread_Control *the_thread, Per_CPU_Control *cpu diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h index 00b9221356..4a8db7c59d 100644 --- a/cpukit/score/include/rtems/score/threadq.h +++ b/cpukit/score/include/rtems/score/threadq.h @@ -20,9 +20,11 @@ #define _RTEMS_SCORE_THREADQ_H #include <rtems/score/chain.h> -#include <rtems/score/states.h> -#include <rtems/score/threadsync.h> +#include <rtems/score/isrlock.h> +#include <rtems/score/percpu.h> +#include <rtems/score/priority.h> #include <rtems/score/rbtree.h> +#include <rtems/score/states.h> #ifdef __cplusplus extern "C" { @@ -39,6 +41,120 @@ extern "C" { */ /**@{*/ +typedef struct Thread_queue_Control Thread_queue_Control; + +/** + * @brief Thread queue priority change operation. + * + * @param[in] the_thread The thread. 
+ * @param[in] new_priority The new priority value. + * @param[in] the_thread_queue The thread queue. + * + * @see Thread_queue_Operations. + */ +typedef void ( *Thread_queue_Priority_change_operation )( + Thread_Control *the_thread, + Priority_Control new_priority, + Thread_queue_Control *the_thread_queue +); + +/** + * @brief Thread queue initialize operation. + * + * @param[in] the_thread_queue The thread queue. + * + * @see _Thread_Wait_set_operations(). + */ +typedef void ( *Thread_queue_Initialize_operation )( + Thread_queue_Control *the_thread_queue +); + +/** + * @brief Thread queue enqueue operation. + * + * @param[in] the_thread_queue The thread queue. + * @param[in] the_thread The thread to enqueue on the queue. + * + * @see _Thread_Wait_set_operations(). + */ +typedef void ( *Thread_queue_Enqueue_operation )( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread +); + +/** + * @brief Thread queue extract operation. + * + * @param[in] the_thread_queue The thread queue. + * @param[in] the_thread The thread to extract from the thread queue. + * + * @see _Thread_Wait_set_operations(). + */ +typedef void ( *Thread_queue_Extract_operation )( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread +); + +/** + * @brief Thread queue first operation. + * + * @param[in] the_thread_queue The thread queue. + * + * @retval NULL No thread is present on the thread queue. + * @retval first The first thread of the thread queue according to the insert + * order. This thread remains on the thread queue. + * + * @see _Thread_Wait_set_operations(). + */ +typedef Thread_Control *( *Thread_queue_First_operation )( + Thread_queue_Control *the_thread_queue +); + +/** + * @brief Thread queue operations. + * + * @see _Thread_wait_Set_operations(). + */ +typedef struct { + /** + * @brief Thread queue priority change operation. + * + * Called by _Thread_Change_priority() to notify a thread about a priority + * change. 
In case this thread waits currently for a resource the handler + * may adjust its data structures according to the new priority value. This + * handler must not be NULL, instead the default handler + * _Thread_Do_nothing_priority_change() should be used in case nothing needs + * to be done during a priority change. + */ + Thread_queue_Priority_change_operation priority_change; + + /** + * @brief Thread queue initialize operation. + * + * Called by object initialization routines. + */ + Thread_queue_Initialize_operation initialize; + + /** + * @brief Thread queue enqueue operation. + * + * Called by object routines to enqueue the thread. + */ + Thread_queue_Enqueue_operation enqueue; + + /** + * @brief Thread queue extract operation. + * + * Called by object routines to extract a thread from a thread queue. + */ + Thread_queue_Extract_operation extract; + + /** + * @brief Thread queue first operation. + */ + Thread_queue_First_operation first; +} Thread_queue_Operations; + /** * The following enumerated type details all of the disciplines * supported by the Thread Queue Handler. @@ -52,7 +168,7 @@ typedef enum { * This is the structure used to manage sets of tasks which are blocked * waiting to acquire a resource. */ -typedef struct { +struct Thread_queue_Control { /** This union contains the data structures used to manage the blocked * set of tasks which varies based upon the discipline. */ @@ -62,15 +178,23 @@ typedef struct { /** This is the set of threads for priority discipline waiting. */ RBTree_Control Priority; } Queues; - /** This field is used to manage the critical section. */ - Thread_blocking_operation_States sync_state; - /** This field indicates the thread queue's blocking discipline. */ - Thread_queue_Disciplines discipline; - /** This is the status value returned to threads which timeout while - * waiting on this thread queue. + + /** + * @brief The operations for this thread queue. 
+ */ + const Thread_queue_Operations *operations; + + /** + * @brief Lock to protect this thread queue. + * + * It may be used to protect additional state of the object embedding this + * thread queue. + * + * @see _Thread_queue_Acquire(), _Thread_queue_Acquire_critical() and + * _Thread_queue_Release(). */ - uint32_t timeout_status; -} Thread_queue_Control; + ISR_LOCK_MEMBER( Lock ) +}; /**@}*/ diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h index 6eac3643f4..32217ad73e 100644 --- a/cpukit/score/include/rtems/score/threadqimpl.h +++ b/cpukit/score/include/rtems/score/threadqimpl.h @@ -20,6 +20,8 @@ #define _RTEMS_SCORE_THREADQIMPL_H #include <rtems/score/threadq.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/rbtreeimpl.h> #include <rtems/score/thread.h> #ifdef __cplusplus @@ -31,6 +33,31 @@ extern "C" { */ /**@{*/ +RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire_critical( + Thread_queue_Control *the_thread_queue, + ISR_lock_Context *lock_context +) +{ + _ISR_lock_Acquire( &the_thread_queue->Lock, lock_context ); +} + +RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire( + Thread_queue_Control *the_thread_queue, + ISR_lock_Context *lock_context +) +{ + _ISR_lock_ISR_disable( lock_context ); + _Thread_queue_Acquire_critical( the_thread_queue, lock_context ); +} + +RTEMS_INLINE_ROUTINE void _Thread_queue_Release( + Thread_queue_Control *the_thread_queue, + ISR_lock_Context *lock_context +) +{ + _ISR_lock_Release_and_ISR_enable( &the_thread_queue->Lock, lock_context ); +} + /** * The following type defines the callout used when a remote task * is extracted from a local thread queue. @@ -55,62 +82,191 @@ Thread_Control *_Thread_queue_Dequeue( ); /** - * @brief Blocks a thread and places it on a thread. + * @brief Blocks the thread and places it on the thread queue. * - * This routine blocks a thread, places it on a thread, and optionally - * starts a timeout timer. 
+ * This enqueues the thread on the thread queue, blocks the thread, and + * optionally starts the thread timer in case the timeout interval is not + * WATCHDOG_NO_TIMEOUT. * - * @param[in] the_thread_queue pointer to threadq - * @param[in] the_thread the thread to enqueue - * @param[in] state is the new state of the thread - * @param[in] timeout interval to wait + * The caller must be the owner of the thread queue lock. This function will + * release the thread queue lock and register it as the new thread lock. + * Thread dispatching is disabled before the thread queue lock is released. + * Thread dispatching is enabled once the sequence to block the thread is + * complete. The operation to enqueue the thread on the queue is protected by + * the thread queue lock. This makes it possible to use the thread queue lock + * to protect the state of objects embedding the thread queue and directly + * enter _Thread_queue_Enqueue_critical() in case the thread must block. * - * - INTERRUPT LATENCY: - * + single case + * @code + * #include <rtems/score/threadqimpl.h> + * #include <rtems/score/statesimpl.h> + * + * typedef struct { + * Thread_queue_Control Queue; + * Thread_Control *owner; + * } Mutex; + * + * void _Mutex_Obtain( Mutex *mutex ) + * { + * ISR_lock_Context lock_context; + * Thread_Control *executing; + * + * _Thread_queue_Acquire( &mutex->Queue, &lock_context ); + * + * executing = _Thread_Executing; + * + * if ( mutex->owner == NULL ) { + * mutex->owner = executing; + * _Thread_queue_Release( &mutex->Queue, &lock_context ); + * } else { + * _Thread_queue_Enqueue_critical( + * &mutex->Queue, + * executing, + * STATES_WAITING_FOR_MUTEX, + * WATCHDOG_NO_TIMEOUT, + * 0, + * &lock_context + * ); + * } + * } + * @endcode + * + * @param[in] the_thread_queue The thread queue. + * @param[in] the_thread The thread to enqueue. + * @param[in] state The new state of the thread. + * @param[in] timeout Interval to wait. 
Use WATCHDOG_NO_TIMEOUT to block + * potentially forever. + * @param[in] timeout_code The return code in case a timeout occurs. + * @param[in] lock_context The lock context of the lock acquire. */ -void _Thread_queue_Enqueue( +void _Thread_queue_Enqueue_critical( Thread_queue_Control *the_thread_queue, Thread_Control *the_thread, States_Control state, - Watchdog_Interval timeout + Watchdog_Interval timeout, + uint32_t timeout_code, + ISR_lock_Context *lock_context ); /** - * @brief Extracts thread from thread queue. + * @brief Acquires the thread queue lock and calls + * _Thread_queue_Enqueue_critical(). + */ +RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread, + States_Control state, + Watchdog_Interval timeout, + uint32_t timeout_code +) +{ + ISR_lock_Context lock_context; + + _Thread_queue_Acquire( the_thread_queue, &lock_context ); + _Thread_queue_Enqueue_critical( + the_thread_queue, + the_thread, + state, + timeout, + timeout_code, + &lock_context + ); +} + +/** + * @brief Extracts the thread from the thread queue, restores the default wait + * operations and restores the default thread lock. * - * This routine removes @a the_thread from @a the_thread_queue - * and cancels any timeouts associated with this blocking. + * The caller must be the owner of the thread queue lock. The thread queue + * lock is not released. * - * @param[in] the_thread_queue is the pointer to the ThreadQ header - * @param[in] the_thread is the pointer to a thread control block that - * is to be removed + * @param[in] the_thread_queue The thread queue. + * @param[in] the_thread The thread to extract. */ -void _Thread_queue_Extract( +void _Thread_queue_Extract_locked( Thread_queue_Control *the_thread_queue, Thread_Control *the_thread ); /** - * @brief Extracts thread from thread queue (w/return code). + * @brief Unblocks the thread which was on the thread queue before. 
* - * This routine removes @a the_thread from @a the_thread_queue - * and cancels any timeouts associated with this blocking. + * The caller must be the owner of the thread queue lock. This function will + * release the thread queue lock. Thread dispatching is disabled before the + * thread queue lock is released and an unblock is necessary. Thread + * dispatching is enabled once the sequence to unblock the thread is complete. * - * @param[in] the_thread_queue is the pointer to the ThreadQ header - * @param[in] the_thread is the pointer to a thread control block that - * is to be removed - * @param[in] return_code specifies the status to be returned. + * @param[in] the_thread_queue The thread queue. + * @param[in] the_thread The thread to extract. + * @param[in] lock_context The lock context of the lock acquire. + */ +void _Thread_queue_Unblock_critical( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread, + ISR_lock_Context *lock_context +); + +/** + * @brief Extracts the thread from the thread queue and unblocks it. * - * - INTERRUPT LATENCY: - * + single case + * The caller must be the owner of the thread queue lock. This function will + * release the thread queue lock and restore the default thread lock. Thread + * dispatching is disabled before the thread queue lock is released and an + * unblock is necessary. Thread dispatching is enabled once the sequence to + * unblock the thread is complete. This makes it possible to use the thread + * queue lock to protect the state of objects embedding the thread queue and + * directly enter _Thread_queue_Extract_critical() to finalize an operation in + * case a waiting thread exists. 
+ * + * @code + * #include <rtems/score/threadqimpl.h> + * + * typedef struct { + * Thread_queue_Control Queue; + * Thread_Control *owner; + * } Mutex; + * + * void _Mutex_Release( Mutex *mutex ) + * { + * ISR_lock_Context lock_context; + * Thread_Control *first; + * + * _Thread_queue_Acquire( &mutex->Queue, &lock_context ); + * + * first = _Thread_queue_First_locked( &mutex->Queue ); + * mutex->owner = first; + * + * if ( first != NULL ) { + * _Thread_queue_Extract_critical( + * &mutex->Queue, + * first, + * &lock_context + * ); + * } + * @endcode + * + * @param[in] the_thread_queue The thread queue. + * @param[in] the_thread The thread to extract. + * @param[in] lock_context The lock context of the lock acquire. */ -void _Thread_queue_Extract_with_return_code( +void _Thread_queue_Extract_critical( Thread_queue_Control *the_thread_queue, Thread_Control *the_thread, - uint32_t return_code + ISR_lock_Context *lock_context ); /** + * @brief Extracts thread from thread queue. + * + * This routine removes @a the_thread its thread queue + * and cancels any timeouts associated with this blocking. + * + * @param[in] the_thread is the pointer to a thread control block that + * is to be removed + */ +void _Thread_queue_Extract( Thread_Control *the_thread ); + +/** * @brief Extracts the_thread from the_thread_queue. * * This routine extracts the_thread from the_thread_queue @@ -122,15 +278,34 @@ void _Thread_queue_Extract_with_proxy( ); /** - * @brief Gets a pointer to the "first" thread on the_thread_queue. + * @brief Returns the first thread on the thread queue if it exists, otherwise + * @c NULL. + * + * The caller must be the owner of the thread queue lock. The thread queue + * lock is not released. + * + * @param[in] the_thread_queue The thread queue. * - * This function returns a pointer to the "first" thread - * on the_thread_queue. The "first" thread is selected - * based on the discipline of the_thread_queue. 
+ * @retval NULL No thread is present on the thread queue. + * @retval first The first thread on the thread queue according to the enqueue + * order. + */ +RTEMS_INLINE_ROUTINE Thread_Control *_Thread_queue_First_locked( + Thread_queue_Control *the_thread_queue +) +{ + return ( *the_thread_queue->operations->first )( the_thread_queue ); +} + +/** + * @brief Returns the first thread on the thread queue if it exists, otherwise + * @c NULL. * - * @param[in] the_thread_queue pointer to thread queue + * @param[in] the_thread_queue The thread queue. * - * @retval first thread or NULL + * @retval NULL No thread is present on the thread queue. + * @retval first The first thread on the thread queue according to the enqueue + * order. */ Thread_Control *_Thread_queue_First( Thread_queue_Control *the_thread_queue @@ -163,45 +338,50 @@ void _Thread_queue_Flush( * * @param[in] the_thread_queue is the pointer to a threadq header * @param[in] the_discipline is the queueing discipline - * @param[in] timeout_status is the return on a timeout */ void _Thread_queue_Initialize( - Thread_queue_Control *the_thread_queue, - Thread_queue_Disciplines the_discipline, - uint32_t timeout_status + Thread_queue_Control *the_thread_queue, + Thread_queue_Disciplines the_discipline ); -/** - * @brief Thread queue timeout. - * - * This routine is invoked when a task's request has not - * been satisfied after the timeout interval specified to - * enqueue. The task represented by ID will be unblocked and - * its status code will be set in it's control block to indicate - * that a timeout has occurred. 
- * - * @param[in] id thread id - */ -void _Thread_queue_Timeout( - Objects_Id id, - void *ignored -); +#if defined(RTEMS_SMP) + #define THREAD_QUEUE_FIFO_INITIALIZER( designator, name ) { \ + .Queues = { \ + .Fifo = CHAIN_INITIALIZER_EMPTY( designator.Queues.Fifo ) \ + }, \ + .operations = &_Thread_queue_Operations_FIFO, \ + .Lock = ISR_LOCK_INITIALIZER( name ) \ + } -/** - * @brief Process thread queue timeout. - * - * This is a shared helper routine which makes it easier to have multiple - * object class specific timeout routines. - * - * @param[in] the_thread is the thread to extract - * - * @note This method assumes thread dispatching is disabled - * and is expected to be called via the processing of - * a clock tick. - */ -void _Thread_queue_Process_timeout( - Thread_Control *the_thread -); + #define THREAD_QUEUE_PRIORIY_INITIALIZER( designator, name ) { \ + .Queues = { \ + .Priority = RBTREE_INITIALIZER_EMPTY( designator.Queues.Priority ) \ + }, \ + .operations = &_Thread_queue_Operations_priority, \ + .Lock = ISR_LOCK_INITIALIZER( name ) \ + } +#else + #define THREAD_QUEUE_FIFO_INITIALIZER( designator, name ) { \ + .Queues = { \ + .Fifo = CHAIN_INITIALIZER_EMPTY( designator.Queues.Fifo ) \ + }, \ + .operations = &_Thread_queue_Operations_FIFO \ + } + + #define THREAD_QUEUE_PRIORIY_INITIALIZER( designator, name ) { \ + .Queues = { \ + .Priority = RBTREE_INITIALIZER_EMPTY( designator.Queues.Priority ) \ + }, \ + .operations = &_Thread_queue_Operations_priority \ + } +#endif + +RTEMS_INLINE_ROUTINE void _Thread_queue_Destroy( + Thread_queue_Control *the_thread_queue +) +{ + _ISR_lock_Destroy( &the_thread_queue->Lock ); +} /** * @brief Compare two thread's priority for RBTree Insertion. @@ -218,16 +398,11 @@ RBTree_Compare_result _Thread_queue_Compare_priority( const RBTree_Node *right ); -/** - * This routine is invoked to indicate that the specified thread queue is - * entering a critical section. 
- */ -RTEMS_INLINE_ROUTINE void _Thread_queue_Enter_critical_section ( - Thread_queue_Control *the_thread_queue -) -{ - the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED; -} +extern const Thread_queue_Operations _Thread_queue_Operations_default; + +extern const Thread_queue_Operations _Thread_queue_Operations_FIFO; + +extern const Thread_queue_Operations _Thread_queue_Operations_priority; /**@}*/ diff --git a/cpukit/score/include/rtems/score/threadsync.h b/cpukit/score/include/rtems/score/threadsync.h deleted file mode 100644 index fa7aeeb003..0000000000 --- a/cpukit/score/include/rtems/score/threadsync.h +++ /dev/null @@ -1,60 +0,0 @@ -/** - * @file rtems/score/threadsync.h - * - * @brief Synchronize Thread Blocking Operations with Actions in an ISR - * - * This include file contains all constants and structures associated - * with synchronizing a thread blocking operation with potential - * actions in an ISR. - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_THREAD_SYNC_H -#define _RTEMS_SCORE_THREAD_SYNC_H - -/** - * @defgroup ScoreThreadSync Thread Blocking Operation Synchronization Handler - * - * @ingroup Score - * - * This handler encapsulates functionality related to the management of - * synchronization critical sections during blocking operations. - */ -/**@{*/ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * The following enumerated types indicate what happened while the thread - * blocking was in the synchronization window. 
- */ -typedef enum { - THREAD_BLOCKING_OPERATION_SYNCHRONIZED, - THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED, - THREAD_BLOCKING_OPERATION_TIMEOUT, - THREAD_BLOCKING_OPERATION_SATISFIED -} Thread_blocking_operation_States; - -/* - * Operations require a thread pointer so they are prototyped - * in thread.h - */ - -#ifdef __cplusplus -} -#endif - -/**@}*/ - -#endif -/* end of include file */ diff --git a/cpukit/score/include/rtems/score/timecounter.h b/cpukit/score/include/rtems/score/timecounter.h new file mode 100644 index 0000000000..0d17cc7ce3 --- /dev/null +++ b/cpukit/score/include/rtems/score/timecounter.h @@ -0,0 +1,199 @@ +/** + * @file + * + * @ingroup ScoreTimecounter + * + * @brief Timecounter API + */ + +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_TIMECOUNTER_H +#define _RTEMS_SCORE_TIMECOUNTER_H + +#include <sys/time.h> +#include <sys/timetc.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreTimecounter Timecounter Handler + * + * @ingroup Score + * + * @{ + */ + +/** + * @brief Returns the wall clock time in the bintime format. + * + * @param[out] bt Returns the wall clock time. + */ +void _Timecounter_Bintime( struct bintime *bt ); + +/** + * @brief Returns the wall clock time in the timespec format. + * + * @param[out] ts Returns the wall clock time. + */ +void _Timecounter_Nanotime( struct timespec *ts ); + +/** + * @brief Returns the wall clock time in the timeval format. + * + * @param[out] tv Returns the wall clock time. + */ +void _Timecounter_Microtime( struct timeval *tv ); + +/** + * @brief Returns the uptime in the bintime format. + * + * @param[out] bt Returns the uptime. 
+ */ +void _Timecounter_Binuptime( struct bintime *bt ); + +/** + * @brief Returns the uptime in the timespec format. + * + * @param[out] ts Returns the uptime. + */ +void _Timecounter_Nanouptime( struct timespec *ts ); + +/** + * @brief Returns the uptime in the timeval format. + * + * @param[out] tv Returns the uptime. + */ +void _Timecounter_Microuptime( struct timeval *tv ); + +/** + * @brief Returns the wall clock time in the bintime format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Bintime() variant. + * + * @param[out] ts Returns the wall clock time. + */ +void _Timecounter_Getbintime( struct bintime *bt ); + +/** + * @brief Returns the wall clock time in the timespec format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Nanotime() variant. + * + * @param[out] ts Returns the wall clock time. + * + * @see _Timecounter_Getbintime(). + */ +void _Timecounter_Getnanotime( struct timespec *ts ); + +/** + * @brief Returns the wall clock time in the timeval format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Microtime() variant. + * + * @param[out] tv Returns the wall clock time. + * + * @see _Timecounter_Getbintime(). + */ +void _Timecounter_Getmicrotime( struct timeval *tv ); + +/** + * @brief Returns the uptime in the bintime format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Binuptime() variant. + * + * @param[out] ts Returns the uptime. + */ +void _Timecounter_Getbinuptime( struct bintime *bt ); + +/** + * @brief Returns the uptime in the timespec format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Nanouptime() variant. + * + * @param[out] ts Returns the uptime. 
+ */ +void _Timecounter_Getnanouptime( struct timespec *ts ); + +/** + * @brief Returns the uptime in the timeval format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Microuptime() variant. + * + * @param[out] tv Returns the uptime. + */ +void _Timecounter_Getmicrouptime( struct timeval *tv ); + +/** + * @brief Installs the timecounter. + * + * The timecounter structure must contain valid values in the fields + * tc_get_timecount, tc_counter_mask, tc_frequency and tc_quality. All other + * fields must be zero initialized. + * + * @param[in] tc The timecounter. + */ +void _Timecounter_Install( struct timecounter *tc ); + +/** + * @brief Performs a timecounter tick. + */ +void _Timecounter_Tick( void ); + +/** + * @brief Performs a simple timecounter tick. + * + * This is a special purpose tick function for simple timecounter to support + * legacy clock drivers. + * + * @param[in] delta The time in timecounter ticks elapsed since the last call + * to _Timecounter_Tick_simple(). + * @param[in] offset The current value of the timecounter. + */ +void _Timecounter_Tick_simple( uint32_t delta, uint32_t offset ); + +/** + * @brief The wall clock time in seconds. + */ +extern volatile time_t _Timecounter_Time_second; + +/** + * @brief The uptime in seconds. + * + * For compatibility with the FreeBSD network stack the initial value is one + * second. + */ +extern volatile time_t _Timecounter_Time_uptime; + +/** + * @brief The current timecounter. 
+ */ +extern struct timecounter *_Timecounter; + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_TIMECOUNTER_H */ diff --git a/cpukit/score/include/rtems/score/timecounterimpl.h b/cpukit/score/include/rtems/score/timecounterimpl.h new file mode 100644 index 0000000000..dd47aacc04 --- /dev/null +++ b/cpukit/score/include/rtems/score/timecounterimpl.h @@ -0,0 +1,49 @@ +/** + * @file + * + * @ingroup ScoreTimecounter + * + * @brief Timecounter Implementation + */ + +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_TIMECOUNTERIMPL_H +#define _RTEMS_SCORE_TIMECOUNTERIMPL_H + +#include <rtems/score/timecounter.h> +#include <sys/timetc.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @addtogroup ScoreTimecounter + * + * @{ + */ + +void _Timecounter_Initialize( void ); + +void _Timecounter_Set_clock( const struct timespec *ts ); + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_TIMECOUNTERIMPL_H */ diff --git a/cpukit/score/include/rtems/score/timespec.h b/cpukit/score/include/rtems/score/timespec.h index 9200880222..72a000177f 100644 --- a/cpukit/score/include/rtems/score/timespec.h +++ b/cpukit/score/include/rtems/score/timespec.h @@ -94,13 +94,11 @@ extern "C" { * This method returns the timestamp as nanoseconds. * * @param[in] time points to the timestamp. - * @param[in] nanoseconds the nanoseconds since the last tick. * * @retval The time in nanoseconds. 
*/ -uint64_t _Timespec_Get_As_nanoseconds( - const struct timespec *time, - const uint32_t nanoseconds +uint64_t _Timespec_Get_as_nanoseconds( + const struct timespec *time ); /** diff --git a/cpukit/score/include/rtems/score/timestamp.h b/cpukit/score/include/rtems/score/timestamp.h index dbd0425ff8..9d25943a8c 100644 --- a/cpukit/score/include/rtems/score/timestamp.h +++ b/cpukit/score/include/rtems/score/timestamp.h @@ -42,37 +42,17 @@ #include <sys/time.h> -#include <rtems/score/cpu.h> +#include <rtems/score/basedefs.h> #include <rtems/score/timespec.h> #ifdef __cplusplus extern "C" { #endif -#if ! ( ( CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE \ - && CPU_TIMESTAMP_USE_INT64 == FALSE \ - && CPU_TIMESTAMP_USE_INT64_INLINE == FALSE ) \ - || ( CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == FALSE \ - && CPU_TIMESTAMP_USE_INT64 == TRUE \ - && CPU_TIMESTAMP_USE_INT64_INLINE == FALSE ) \ - || ( CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == FALSE \ - && CPU_TIMESTAMP_USE_INT64 == FALSE \ - && CPU_TIMESTAMP_USE_INT64_INLINE == TRUE ) ) - #error "Invalid SuperCore Timestamp implementations selection." -#endif - -#if CPU_TIMESTAMP_USE_INT64 == TRUE || CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #include <rtems/score/timestamp64.h> -#endif - /** * Define the Timestamp control type. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - typedef struct timespec Timestamp_Control; -#else - typedef Timestamp64_Control Timestamp_Control; -#endif +typedef struct bintime Timestamp_Control; /** * @brief Set timestamp to specified seconds and nanoseconds. 
@@ -84,13 +64,19 @@ extern "C" { * @param[in] _seconds is the seconds portion of the timestamp * @param[in] _nanoseconds is the nanoseconds portion of the timestamp */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Set( _time, _seconds, _nanoseconds ) \ - _Timespec_Set( _time, _seconds, _nanoseconds ) -#else - #define _Timestamp_Set( _time, _seconds, _nanoseconds ) \ - _Timestamp64_Set( _time, _seconds, _nanoseconds ) -#endif +RTEMS_INLINE_ROUTINE void _Timestamp_Set( + Timestamp_Control *_time, + time_t _seconds, + long _nanoseconds +) +{ + struct timespec _ts; + + _ts.tv_sec = _seconds; + _ts.tv_nsec = _nanoseconds; + + timespec2bintime( &_ts, _time ); +} /** * @brief Sets the timestamp to zero. @@ -100,13 +86,14 @@ extern "C" { * * @param[in] _time points to the timestamp instance to zero. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Set_to_zero( _time ) \ - _Timespec_Set_to_zero( _time ) -#else - #define _Timestamp_Set_to_zero( _time ) \ - _Timestamp64_Set_to_zero( _time ) -#endif + +RTEMS_INLINE_ROUTINE void _Timestamp_Set_to_zero( + Timestamp_Control *_time +) +{ + _time->sec = 0; + _time->frac = 0; +} /** * @brief Less than operator for timestamps. @@ -119,13 +106,20 @@ extern "C" { * @retval This method returns true if @a _lhs is less than the @a _rhs and * false otherwise. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Less_than( _lhs, _rhs ) \ - _Timespec_Less_than( _lhs, _rhs ) -#else - #define _Timestamp_Less_than( _lhs, _rhs ) \ - _Timestamp64_Less_than( _lhs, _rhs ) -#endif + +RTEMS_INLINE_ROUTINE bool _Timestamp_Less_than( + const Timestamp_Control *_lhs, + const Timestamp_Control *_rhs +) +{ + if ( _lhs->sec < _rhs->sec ) + return true; + + if ( _lhs->sec > _rhs->sec ) + return false; + + return _lhs->frac < _rhs->frac; +} /** * @brief Greater than operator for timestamps. 
@@ -138,8 +132,20 @@ extern "C" { * @retval This method returns true if @a _lhs is greater than the @a _rhs and * false otherwise. */ -#define _Timestamp_Greater_than( _lhs, _rhs ) \ - _Timestamp_Less_than( _rhs, _lhs ) + +RTEMS_INLINE_ROUTINE bool _Timestamp_Greater_than( + const Timestamp_Control *_lhs, + const Timestamp_Control *_rhs +) +{ + if ( _lhs->sec > _rhs->sec ) + return true; + + if ( _lhs->sec < _rhs->sec ) + return false; + + return _lhs->frac > _rhs->frac; +} /** * @brief Equal to than operator for timestamps. @@ -152,13 +158,14 @@ extern "C" { * @retval This method returns true if @a _lhs is equal to @a _rhs and * false otherwise. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Equal_to( _lhs, _rhs ) \ - _Timespec_Equal_to( _lhs, _rhs ) -#else - #define _Timestamp_Equal_to( _lhs, _rhs ) \ - _Timestamp64_Equal_to( _lhs, _rhs ) -#endif + +RTEMS_INLINE_ROUTINE bool _Timestamp_Equal_to( + const Timestamp_Control *_lhs, + const Timestamp_Control *_rhs +) +{ + return _lhs->sec == _rhs->sec && _lhs->frac == _rhs->frac; +} /** * @brief Adds two timestamps. @@ -171,13 +178,17 @@ extern "C" { * * @retval This method returns the number of seconds @a time increased by. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Add_to( _time, _add ) \ - _Timespec_Add_to( _time, _add ) -#else - #define _Timestamp_Add_to( _time, _add ) \ - _Timestamp64_Add_to( _time, _add ) -#endif +RTEMS_INLINE_ROUTINE time_t _Timestamp_Add_to( + Timestamp_Control *_time, + const Timestamp_Control *_add +) +{ + time_t seconds = _time->sec; + + bintime_add( _time, _add ); + + return _time->sec - seconds; +} /** * @brief Subtracts two timestamps. @@ -192,13 +203,17 @@ extern "C" { * * @retval This method fills in @a _result. 
*/ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Subtract( _start, _end, _result ) \ - _Timespec_Subtract( _start, _end, _result ) -#else - #define _Timestamp_Subtract( _start, _end, _result ) \ - _Timestamp64_Subtract( _start, _end, _result ) -#endif +RTEMS_INLINE_ROUTINE void _Timestamp_Subtract( + const Timestamp_Control *_start, + const Timestamp_Control *_end, + Timestamp_Control *_result +) +{ + _result->sec = _end->sec; + _result->frac = _end->frac; + + bintime_sub( _result, _start ); +} /** * @brief Divides a timestamp by another timestamp. @@ -213,13 +228,26 @@ extern "C" { * * @retval This method fills in @a result. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Divide( _lhs, _rhs, _ival_percentage, _fval_percentage ) \ - _Timespec_Divide( _lhs, _rhs, _ival_percentage, _fval_percentage ) -#else - #define _Timestamp_Divide( _lhs, _rhs, _ival_percentage, _fval_percentage ) \ - _Timestamp64_Divide( _lhs, _rhs, _ival_percentage, _fval_percentage ) -#endif +RTEMS_INLINE_ROUTINE void _Timestamp_Divide( + const Timestamp_Control *_lhs, + const Timestamp_Control *_rhs, + uint32_t *_ival_percentage, + uint32_t *_fval_percentage +) +{ + struct timespec _ts_left; + struct timespec _ts_right; + + bintime2timespec( _lhs, &_ts_left ); + bintime2timespec( _rhs, &_ts_right ); + + _Timespec_Divide( + &_ts_left, + &_ts_right, + _ival_percentage, + _fval_percentage + ); +} /** * @brief Get seconds portion of timestamp. @@ -230,13 +258,12 @@ extern "C" { * * @retval The seconds portion of @a _time. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Get_seconds( _time ) \ - _Timespec_Get_seconds( _time ) -#else - #define _Timestamp_Get_seconds( _time ) \ - _Timestamp64_Get_seconds( _time ) -#endif +RTEMS_INLINE_ROUTINE time_t _Timestamp_Get_seconds( + const Timestamp_Control *_time +) +{ + return _time->sec; +} /** * @brief Get nanoseconds portion of timestamp. 
@@ -247,13 +274,16 @@ extern "C" { * * @retval The nanoseconds portion of @a _time. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Get_nanoseconds( _time ) \ - _Timespec_Get_nanoseconds( _time ) -#else - #define _Timestamp_Get_nanoseconds( _time ) \ - _Timestamp64_Get_nanoseconds( _time ) -#endif +RTEMS_INLINE_ROUTINE uint32_t _Timestamp_Get_nanoseconds( + const Timestamp_Control *_time +) +{ + struct timespec _ts; + + bintime2timespec( _time, &_ts ); + + return _ts.tv_nsec; +} /** * @brief Get the timestamp as nanoseconds. @@ -264,13 +294,16 @@ extern "C" { * * @retval The time in nanoseconds. */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_Get_As_nanoseconds( _timestamp, _nanoseconds ) \ - _Timespec_Get_As_nanoseconds( _timestamp, _nanoseconds ) -#else - #define _Timestamp_Get_As_nanoseconds( _timestamp, _nanoseconds ) \ - _Timestamp64_Get_As_nanoseconds( _timestamp, _nanoseconds ) -#endif +RTEMS_INLINE_ROUTINE uint64_t _Timestamp_Get_as_nanoseconds( + const Timestamp_Control *_time +) +{ + struct timespec _ts; + + bintime2timespec( _time, &_ts ); + + return _Timespec_Get_as_nanoseconds( &_ts ); +} /** * @brief Convert timestamp to struct timespec. @@ -280,14 +313,13 @@ extern "C" { * @param[in] _timestamp points to the timestamp * @param[in] _timespec points to the timespec */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - /* in this case we know they are the same type so use simple assignment */ - #define _Timestamp_To_timespec( _timestamp, _timespec ) \ - *(_timespec) = *(_timestamp) -#else - #define _Timestamp_To_timespec( _timestamp, _timespec ) \ - _Timestamp64_To_timespec( _timestamp, _timespec ) -#endif +RTEMS_INLINE_ROUTINE void _Timestamp_To_timespec( + const Timestamp_Control *_timestamp, + struct timespec *_timespec +) +{ + bintime2timespec( _timestamp, _timespec ); +} /** * @brief Convert timestamp to struct timeval. 
@@ -295,16 +327,13 @@ extern "C" { * @param[in] _timestamp points to the timestamp * @param[in] _timeval points to the timeval */ -#if CPU_TIMESTAMP_USE_STRUCT_TIMESPEC == TRUE - #define _Timestamp_To_timeval( _timestamp, _timeval ) \ - do { \ - (_timeval)->tv_sec = (_timestamp)->tv_sec; \ - (_timeval)->tv_usec = (_timestamp)->tv_nsec / 1000; \ - } while (0) -#else - #define _Timestamp_To_timeval( _timestamp, _timeval ) \ - _Timestamp64_To_timeval( _timestamp, _timeval ) -#endif +RTEMS_INLINE_ROUTINE void _Timestamp_To_timeval( + const Timestamp_Control *_timestamp, + struct timeval *_timeval +) +{ + bintime2timeval( _timestamp, _timeval ); +} #ifdef __cplusplus } diff --git a/cpukit/score/include/rtems/score/timestamp64.h b/cpukit/score/include/rtems/score/timestamp64.h deleted file mode 100644 index 39b4965a8c..0000000000 --- a/cpukit/score/include/rtems/score/timestamp64.h +++ /dev/null @@ -1,379 +0,0 @@ -/** - * @file rtems/score/timestamp64.h - * - * @brief Helpers for Manipulating 64-bit Integer Timestamps - * - * This include file contains helpers for manipulating - * 64-bit integer timestamps. - */ - -/* - * COPYRIGHT (c) 1989-2009. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_TIMESTAMP64_H -#define _RTEMS_SCORE_TIMESTAMP64_H - -/** - * @defgroup SuperCoreTimestamp64 SuperCore Sixty-Four Bit Timestamps - * - * @ingroup Score - * - * This handler encapsulates functionality related to manipulating - * the 64 bit integer implementation of SuperCore Timestamps. - */ -/**@{*/ - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * This .h file is not for general use. It is an alternative - * implementation of Timestamps and should only be used that way. 
- */ -#ifndef _RTEMS_SCORE_TIMESTAMP_H - #error "Should only be included by rtems/score/timestamp.h" -#endif - -/* - * Verify something is defined. - */ -#if CPU_TIMESTAMP_USE_INT64 != TRUE && CPU_TIMESTAMP_USE_INT64_INLINE != TRUE - #error "SuperCore Timestamp64 implementation included but not defined." -#endif - -/** - * Define the Timestamp control type. - */ -typedef int64_t Timestamp64_Control; - -static inline void _Timestamp64_implementation_Set( - Timestamp64_Control *_time, - Timestamp64_Control _seconds, - Timestamp64_Control _nanoseconds -) -{ - *_time = _seconds * 1000000000L + _nanoseconds; -} - -/** - * @brief Set 64-bit timestamp to seconds nanosecond. - * - * This method sets the timestamp to the specified seconds and nanoseconds - * value. - * - * @param[in] _time points to the timestamp instance to validate. - * @param[in] _seconds is the seconds portion of the timestamp - * @param[in] _nanoseconds is the nanoseconds portion of the timestamp - */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_Set( _time, _seconds, _nanoseconds ) \ - _Timestamp64_implementation_Set( _time, _seconds, _nanoseconds ) -#else - void _Timestamp64_Set( - Timestamp64_Control *_time, - Timestamp64_Control _seconds, - Timestamp64_Control _nanoseconds - ); -#endif - -static inline void _Timestamp64_implementation_Set_to_zero( - Timestamp64_Control *_time -) -{ - *_time = 0; -} - -/** - * @brief Sets the 64-bit timestamp to zero. - * - * This method sets the timestamp to zero value. - * - * @param[in] _time points to the timestamp instance to zero. 
- */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_Set_to_zero( _time ) \ - _Timestamp64_implementation_Set_to_zero( _time ) -#else - void _Timestamp64_Set_to_zero( - Timestamp64_Control *_time - ); -#endif - -static inline bool _Timestamp64_implementation_Less_than( - const Timestamp64_Control *_lhs, - const Timestamp64_Control *_rhs -) -{ - return *_lhs < *_rhs; -} - -/** - * @brief The "less than" operator for 64-bit timestamps. - * - * This method is the less than operator for timestamps. - * - * @param[in] _lhs points to the left hand side timestamp - * @param[in] _rhs points to the right hand side timestamp - * - * @retval This method returns true if @a _lhs is less than the @a _rhs and - * false otherwise. - */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_Less_than( _lhs, _rhs ) \ - _Timestamp64_implementation_Less_than( _lhs, _rhs ) -#else - bool _Timestamp64_Less_than( - const Timestamp64_Control *_lhs, - const Timestamp64_Control *_rhs - ); -#endif - -static inline bool _Timestamp64_implementation_Equal_to( - const Timestamp64_Control *_lhs, - const Timestamp64_Control *_rhs -) -{ - return *_lhs == *_rhs; -} - -#define _Timestamp64_Greater_than( _lhs, _rhs ) \ - _Timestamp64_Less_than( _rhs, _lhs ) - -/** - * @brief The "equal to" operator for 64-bit timestamps. - * - * This method is the is equal to than operator for timestamps. - * - * @param[in] _lhs points to the left hand side timestamp - * @param[in] _rhs points to the right hand side timestamp - * - * @retval This method returns true if @a _lhs is equal to @a _rhs and - * false otherwise. 
- */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_Equal_to( _lhs, _rhs ) \ - _Timestamp64_implementation_Equal_to( _lhs, _rhs ) -#else - bool _Timestamp64_Equal_to( - const Timestamp64_Control *_lhs, - const Timestamp64_Control *_rhs - ); -#endif - -static inline void _Timestamp64_implementation_Add_to( - Timestamp64_Control *_time, - const Timestamp64_Control *_add -) -{ - *_time += *_add; -} - -/** - * @brief Add two 64-bit timestamps. - * - * This routine adds two timestamps. The second argument is added - * to the first. - * - * @param[in] _time points to the base time to be added to - * @param[in] _add points to the timestamp to add to the first argument - * - * @retval This method returns the number of seconds @a time increased by. - */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_Add_to( _time, _add ) \ - _Timestamp64_implementation_Add_to( _time, _add ) -#else - void _Timestamp64_Add_to( - Timestamp64_Control *_time, - const Timestamp64_Control *_add - ); -#endif - -static inline void _Timestamp64_implementation_Subtract( - const Timestamp64_Control *_start, - const Timestamp64_Control *_end, - Timestamp64_Control *_result -) -{ - *_result = *_end - *_start; -} - -/** - * @brief Subtract two 64-bit timestamps. - * - * This routine subtracts two timestamps. @a result is set to - * @a end - @a start. - * - * @param[in] _start points to the starting time - * @param[in] _end points to the ending time - * @param[out] _result points to the difference between - * starting and ending time. - */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_Subtract( _start, _end, _result ) \ - _Timestamp64_implementation_Subtract( _start, _end, _result ) -#else - void _Timestamp64_Subtract( - const Timestamp64_Control *_start, - const Timestamp64_Control *_end, - Timestamp64_Control *_result - ); -#endif - -/** - * @brief Divide 64-bit timestamp by another 64-bit timestamp. 
- * - * This routine divides a timestamp by another timestamp. The - * intended use is for calculating percentages to three decimal points. - * - * @param[in] _lhs points to the left hand number - * @param[in] _rhs points to the right hand number - * @param[out] _ival_percentage points to the integer portion of the average - * @param[out] _fval_percentage points to the thousandths of percentage - */ -void _Timestamp64_Divide( - const Timestamp64_Control *_lhs, - const Timestamp64_Control *_rhs, - uint32_t *_ival_percentage, - uint32_t *_fval_percentage -); - -static inline uint32_t _Timestamp64_implementation_Get_seconds( - const Timestamp64_Control *_time -) -{ - return (uint32_t) (*_time / 1000000000L); -} - -/** - * @brief Get seconds portion of a 64-bit timestamp. - * - * This method returns the seconds portion of the specified timestamp - * - * @param[in] _time points to the timestamp - * - * @retval The seconds portion of @a _time. - */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_Get_seconds( _time ) \ - _Timestamp64_implementation_Get_seconds( _time ) -#else - uint32_t _Timestamp64_Get_seconds( - const Timestamp64_Control *_time - ); -#endif - -static inline uint32_t _Timestamp64_implementation_Get_nanoseconds( - const Timestamp64_Control *_time -) -{ - return (uint32_t) (*_time % 1000000000L); -} - -/** - * @brief Get nanoseconds portion of a 64-bit timestamp. - * - * This method returns the nanoseconds portion of the specified timestamp - * - * @param[in] _time points to the timestamp - * - * @retval The nanoseconds portion of @a _time. 
- */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_Get_nanoseconds( _time ) \ - _Timestamp64_implementation_Get_nanoseconds( _time ) -#else - uint32_t _Timestamp64_Get_nanoseconds( - const Timestamp64_Control *_time - ); -#endif - -static inline uint64_t _Timestamp64_implementation_Get_As_nanoseconds( - const Timestamp64_Control *_time, - const uint32_t nanoseconds -) -{ - return *_time + (uint64_t) nanoseconds; -} - -/** - * @brief Get the 64-bit timestamp as nanoseconds. - * - * This method returns the 64-bit timestamp as it is already in nanoseconds. - * - * @param[in] _time points to the timestamp - * - * @retval The nanoseconds portion of @a _time. - */ -#define _Timestamp64_Get_As_nanoseconds( _time, _nanoseconds ) \ - _Timestamp64_implementation_Get_As_nanoseconds( _time, _nanoseconds ) - -static inline void _Timestamp64_implementation_To_timespec( - const Timestamp64_Control *_timestamp, - struct timespec *_timespec -) -{ - _timespec->tv_sec = (time_t) (*_timestamp / 1000000000L); - _timespec->tv_nsec = (long) (*_timestamp % 1000000000L); -} - -/** - * @brief Convert 64-bit timestamp to struct timespec. - * - * This method returns the seconds portion of the specified timestamp - * - * @param[in] _timestamp points to the timestamp - * @param[out] _timespec points to the timespec - */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_To_timespec( _timestamp, _timespec ) \ - _Timestamp64_implementation_To_timespec( _timestamp, _timespec ) -#else - void _Timestamp64_To_timespec( - const Timestamp64_Control *_timestamp, - struct timespec *_timespec - ); -#endif - -static inline void _Timestamp64_implementation_To_timeval( - const Timestamp64_Control *_timestamp, - struct timeval *_timeval -) -{ - _timeval->tv_sec = (time_t) (*_timestamp / 1000000000U); - _timeval->tv_usec = (suseconds_t) ((*_timestamp % 1000000000U) / 1000U); -} - -/** - * @brief Convert 64-bit timestamp to struct timeval. 
- * - * This method returns the seconds portion of the specified timestamp - * - * @param[in] _timestamp points to the timestamp - * @param[out] _timeval points to the timeval - */ -#if CPU_TIMESTAMP_USE_INT64_INLINE == TRUE - #define _Timestamp64_To_timeval( _timestamp, _timeval ) \ - _Timestamp64_implementation_To_timeval( _timestamp, _timeval ) -#else - void _Timestamp64_To_timeval( - const Timestamp64_Control *_timestamp, - struct timeval *_timeval - ); -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ - -#endif -/* end of include file */ diff --git a/cpukit/score/include/rtems/score/tod.h b/cpukit/score/include/rtems/score/tod.h index 1972b0fa72..c0ab5e795d 100644 --- a/cpukit/score/include/rtems/score/tod.h +++ b/cpukit/score/include/rtems/score/tod.h @@ -24,15 +24,6 @@ extern "C" { #endif -/** - * @brief Returns the nanoseconds since the last clock tick. - * - * @ingroup ScoreTOD - * - * @return The nanoseconds since the last clock tick. - */ -typedef uint32_t ( *TOD_Nanoseconds_since_last_tick_routine )( void ); - #ifdef __cplusplus } #endif diff --git a/cpukit/score/include/rtems/score/todimpl.h b/cpukit/score/include/rtems/score/todimpl.h index ad5ed39dab..56c176d920 100644 --- a/cpukit/score/include/rtems/score/todimpl.h +++ b/cpukit/score/include/rtems/score/todimpl.h @@ -19,8 +19,8 @@ #define _RTEMS_SCORE_TODIMPL_H #include <rtems/score/tod.h> -#include <rtems/score/isrlock.h> #include <rtems/score/timestamp.h> +#include <rtems/score/timecounterimpl.h> #include <sys/time.h> #include <time.h> @@ -131,25 +131,6 @@ extern "C" { */ typedef struct { /** - * @brief Current time of day value. - * - * This field is protected by the lock. - */ - Timestamp_Control now; - - /** - * @brief System uptime. - * - * This field is protected by the lock. - */ - Timestamp_Control uptime; - - /** - * @brief Lock to protect the now and uptime fields. - */ - ISR_lock_Control lock; - - /** * @brief Time of day seconds trigger. 
* * This value specifies the nanoseconds since the last time of day second. @@ -159,13 +140,6 @@ typedef struct { uint32_t seconds_trigger; /** - * @brief The current nanoseconds since last tick handler. - * - * This field must not be NULL after initialization. - */ - TOD_Nanoseconds_since_last_tick_routine nanoseconds_since_last_tick; - - /** * @brief Indicates if the time of day is set. * * This is true if the application has set the current @@ -176,12 +150,6 @@ typedef struct { SCORE_EXTERN TOD_Control _TOD; -#define _TOD_Acquire( _tod, lock_context ) \ - _ISR_lock_ISR_disable_and_acquire( &( _tod )->lock, lock_context ) - -#define _TOD_Release( _tod, lock_context ) \ - _ISR_lock_Release_and_ISR_enable( &( _tod )->lock, lock_context ) - /** * @brief Initializes the time of day handler. * @@ -201,6 +169,18 @@ void _TOD_Set_with_timestamp( const Timestamp_Control *tod_as_timestamp ); +/** + * @brief Sets the time of day from timespec. + * + * The @a tod_as_timestamp timestamp represents the time since UNIX epoch. + * The watchdog seconds chain will be adjusted. + * + * In the process the input given as timespec will be transformed to FreeBSD + * bintime format to guarantee the right format for later setting it with a + * timestamp. + * + * @param[in] tod_as_timespec is the constant of the time of day as a timespec + */ static inline void _TOD_Set( const struct timespec *tod_as_timespec ) @@ -216,31 +196,27 @@ static inline void _TOD_Set( } /** - * @brief Returns a snapshot of a clock. - * - * This function invokes the nanoseconds extension. + * @brief Gets the current time in the bintime format. 
* - * @param[out] snapshot points to an area that will contain the current - * TOD plus the BSP nanoseconds since last tick adjustment - * @param[in] clock contains the current TOD - * - * @retval @a snapshot + * @param[out] time is the value gathered by the bintime request */ -Timestamp_Control *_TOD_Get_with_nanoseconds( - Timestamp_Control *snapshot, - const Timestamp_Control *clock -); - static inline void _TOD_Get( - struct timespec *tod_as_timespec + Timestamp_Control *time ) { - Timestamp_Control tod_as_timestamp; - Timestamp_Control *tod_as_timestamp_ptr; + _Timecounter_Bintime(time); +} - tod_as_timestamp_ptr = - _TOD_Get_with_nanoseconds( &tod_as_timestamp, &_TOD.now ); - _Timestamp_To_timespec( tod_as_timestamp_ptr, tod_as_timespec ); +/** + * @brief Gets the current time in the timespec format. + * + * @param[out] time is the value gathered by the nanotime request + */ +static inline void _TOD_Get_as_timespec( + struct timespec *time +) +{ + _Timecounter_Nanotime(time); } /** @@ -249,26 +225,47 @@ static inline void _TOD_Get( * This routine returns the system uptime with potential accuracy * to the nanosecond. * + * The initial uptime value is undefined. + * * @param[in] time is a pointer to the uptime to be returned */ static inline void _TOD_Get_uptime( Timestamp_Control *time ) { - _TOD_Get_with_nanoseconds( time, &_TOD.uptime ); + _Timecounter_Binuptime( time ); } /** * @brief Gets the system uptime with potential accuracy to the nanosecond. - * - * This routine returns the system uptime with potential accuracy * to the nanosecond. * + * The initial uptime value is zero. + * + * @param[in] time is a pointer to the uptime to be returned + */ +static inline void _TOD_Get_zero_based_uptime( + Timestamp_Control *time +) +{ + _Timecounter_Binuptime( time ); + --time->sec; +} + +/** + * @brief Gets the system uptime with potential accuracy to the nanosecond. + * + * The initial uptime value is zero. 
+ * * @param[in] time is a pointer to the uptime to be returned */ -void _TOD_Get_uptime_as_timespec( +static inline void _TOD_Get_zero_based_uptime_as_timespec( struct timespec *time -); +) +{ + _Timecounter_Nanouptime( time ); + --time->tv_sec; +} /** * @brief Number of seconds Since RTEMS epoch. @@ -276,7 +273,10 @@ void _TOD_Get_uptime_as_timespec( * The following contains the number of seconds from 00:00:00 * January 1, TOD_BASE_YEAR until the current time of day. */ -uint32_t _TOD_Seconds_since_epoch( void ); +static inline uint32_t _TOD_Seconds_since_epoch( void ) +{ + return (uint32_t) _Timecounter_Time_second; +} /** * @brief Increments time of day at each clock tick. @@ -314,12 +314,7 @@ RTEMS_INLINE_ROUTINE void _TOD_Get_timeval( struct timeval *time ) { - Timestamp_Control snapshot_as_timestamp; - Timestamp_Control *snapshot_as_timestamp_ptr; - - snapshot_as_timestamp_ptr = - _TOD_Get_with_nanoseconds( &snapshot_as_timestamp, &_TOD.now ); - _Timestamp_To_timeval( snapshot_as_timestamp_ptr, time ); + _Timecounter_Microtime( time ); } /** @@ -335,18 +330,6 @@ void _TOD_Adjust( ); /** - * @brief Install the BSP's nanoseconds since clock tick handler - * - * @param[in] routine is the BSP's nanoseconds since clock tick method - */ -RTEMS_INLINE_ROUTINE void _TOD_Set_nanoseconds_since_last_tick_handler( - TOD_Nanoseconds_since_last_tick_routine routine -) -{ - _TOD.nanoseconds_since_last_tick = routine; -} - -/** * @brief Check if the TOD is Set * * @return TRUE is the time is set. FALSE otherwise. 
diff --git a/cpukit/score/include/rtems/score/watchdogimpl.h b/cpukit/score/include/rtems/score/watchdogimpl.h index e548e7025c..8405232a87 100644 --- a/cpukit/score/include/rtems/score/watchdogimpl.h +++ b/cpukit/score/include/rtems/score/watchdogimpl.h @@ -21,6 +21,7 @@ #include <rtems/score/watchdog.h> #include <rtems/score/chainimpl.h> +#include <rtems/score/isrlock.h> #ifdef __cplusplus extern "C" { @@ -45,30 +46,49 @@ extern "C" { } /** - * @brief Watchdog header. + * @brief Iterator item to synchronize concurrent insert, remove and tickle + * operations. */ typedef struct { /** - * @brief The chain of active or transient watchdogs. + * @brief A node for a Watchdog_Header::Iterators chain. */ - Chain_Control Watchdogs; -} Watchdog_Header; + Chain_Node Node; -/** - * @brief Watchdog synchronization level. - * - * This used for synchronization purposes - * during an insert on a watchdog delta chain. - */ -SCORE_EXTERN volatile uint32_t _Watchdog_Sync_level; + /** + * @brief The current delta interval of the new watchdog to insert. + */ + Watchdog_Interval delta_interval; + + /** + * @brief The current watchdog of the chain on the way to insert the new + * watchdog. + */ + Chain_Node *current; +} Watchdog_Iterator; /** - * @brief Watchdog synchronization count. - * - * This used for synchronization purposes - * during an insert on a watchdog delta chain. + * @brief Watchdog header. */ -SCORE_EXTERN volatile uint32_t _Watchdog_Sync_count; +typedef struct { + /** + * @brief ISR lock to protect this watchdog chain. + */ + ISR_LOCK_MEMBER( Lock ) + + /** + * @brief The chain of active or transient watchdogs. + */ + Chain_Control Watchdogs; + + /** + * @brief Currently active iterators. + * + * The iterators are registered in _Watchdog_Insert() and updated in case the + * watchdog chain changes. + */ + Chain_Control Iterators; +} Watchdog_Header; /** * @brief Watchdog chain which is managed at ticks. 
@@ -84,6 +104,30 @@ SCORE_EXTERN Watchdog_Header _Watchdog_Ticks_header; */ SCORE_EXTERN Watchdog_Header _Watchdog_Seconds_header; +RTEMS_INLINE_ROUTINE void _Watchdog_Acquire( + Watchdog_Header *header, + ISR_lock_Context *lock_context +) +{ + _ISR_lock_ISR_disable_and_acquire( &header->Lock, lock_context ); +} + +RTEMS_INLINE_ROUTINE void _Watchdog_Release( + Watchdog_Header *header, + ISR_lock_Context *lock_context +) +{ + _ISR_lock_Release_and_ISR_enable( &header->Lock, lock_context ); +} + +RTEMS_INLINE_ROUTINE void _Watchdog_Flash( + Watchdog_Header *header, + ISR_lock_Context *lock_context +) +{ + _ISR_lock_Flash( &header->Lock, lock_context ); +} + /** * @brief Initialize the watchdog handler. * @@ -94,15 +138,24 @@ SCORE_EXTERN Watchdog_Header _Watchdog_Seconds_header; void _Watchdog_Handler_initialization( void ); /** + * @brief Triggers a watchdog tick. + * + * This routine executes TOD, watchdog and scheduler ticks. + */ +void _Watchdog_Tick( void ); + +/** * @brief Removes @a the_watchdog from the watchdog chain. * * This routine removes @a the_watchdog from the watchdog chain on which * it resides and returns the state @a the_watchdog timer was in. * + * @param[in] header The watchdog chain. * @param[in] the_watchdog will be removed * @retval the state in which @a the_watchdog was in when removed */ Watchdog_States _Watchdog_Remove ( + Watchdog_Header *header, Watchdog_Control *the_watchdog ); @@ -119,6 +172,22 @@ void _Watchdog_Adjust_backward( ); /** + * @brief Adjusts the watchdogs in backward direction in a locked context. + * + * The caller must be the owner of the watchdog lock and will be the owner + * after the call. + * + * @param[in] header The watchdog header. + * @param[in] units The units of ticks to adjust. + * + * @see _Watchdog_Adjust_forward(). 
+ */ +void _Watchdog_Adjust_backward_locked( + Watchdog_Header *header, + Watchdog_Interval units +); + +/** * @brief Adjusts the header watchdog chain in the forward direction for units * ticks. * @@ -133,24 +202,22 @@ void _Watchdog_Adjust_forward( ); /** - * @brief Adjusts the @a header watchdog chain in the forward - * @a direction for @a units_arg ticks. + * @brief Adjusts the watchdogs in forward direction in a locked context. * - * This routine adjusts the @a header watchdog chain in the forward - * @a direction for @a units_arg ticks. + * The caller must be the owner of the watchdog lock and will be the owner + * after the call. This function may release and acquire the watchdog lock + * internally. * - * @param[in] header is the watchdog chain to adjust - * @param[in] units_arg is the number of units to adjust @a header - * @param[in] to_fire is a pointer to an initialized Chain_Control to which - * all watchdog instances that are to be fired will be placed. + * @param[in] header The watchdog header. + * @param[in] units The units of ticks to adjust. + * @param[in] lock_context The lock context. * - * @note This always adjusts forward. + * @see _Watchdog_Adjust_forward(). */ -void _Watchdog_Adjust_to_chain( +void _Watchdog_Adjust_forward_locked( Watchdog_Header *header, - Watchdog_Interval units_arg, - Chain_Control *to_fire - + Watchdog_Interval units, + ISR_lock_Context *lock_context ); /** @@ -170,6 +237,25 @@ void _Watchdog_Insert ( ); /** + * @brief Inserts the watchdog in a locked context. + * + * The caller must be the owner of the watchdog lock and will be the owner + * after the call. This function may release and acquire the watchdog lock + * internally. + * + * @param[in] header The watchdog header. + * @param[in] the_watchdog The watchdog. + * @param[in] lock_context The lock context. + * + * @see _Watchdog_Insert(). 
+ */ +void _Watchdog_Insert_locked( + Watchdog_Header *header, + Watchdog_Control *the_watchdog, + ISR_lock_Context *lock_context +); + +/** * @brief This routine is invoked at appropriate intervals to update * the @a header watchdog chain. * @@ -306,6 +392,20 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Insert_seconds( } +RTEMS_INLINE_ROUTINE Watchdog_States _Watchdog_Remove_ticks( + Watchdog_Control *the_watchdog +) +{ + return _Watchdog_Remove( &_Watchdog_Ticks_header, the_watchdog ); +} + +RTEMS_INLINE_ROUTINE Watchdog_States _Watchdog_Remove_seconds( + Watchdog_Control *the_watchdog +) +{ + return _Watchdog_Remove( &_Watchdog_Seconds_header, the_watchdog ); +} + /** * This routine resets THE_WATCHDOG timer to its state at INSERT * time. This routine is valid only on interval watchdog timers @@ -318,7 +418,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Reset_ticks( ) { - (void) _Watchdog_Remove( the_watchdog ); + _Watchdog_Remove_ticks( the_watchdog ); _Watchdog_Insert( &_Watchdog_Ticks_header, the_watchdog ); @@ -391,7 +491,9 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Header_initialize( Watchdog_Header *header ) { + _ISR_lock_Initialize( &header->Lock, "Watchdog" ); _Chain_Initialize_empty( &header->Watchdogs ); + _Chain_Initialize_empty( &header->Iterators ); } /** @} */ diff --git a/cpukit/score/include/sys/_ffcounter.h b/cpukit/score/include/sys/_ffcounter.h new file mode 100644 index 0000000000..d83c48cd44 --- /dev/null +++ b/cpukit/score/include/sys/_ffcounter.h @@ -0,0 +1,42 @@ +/*- + * Copyright (c) 2011 The University of Melbourne + * All rights reserved. + * + * This software was developed by Julien Ridoux at the University of Melbourne + * under sponsorship from the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD r277406 2015-01-20T03:54:30Z$ + */ + +#ifndef _SYS__FFCOUNTER_H_ +#define _SYS__FFCOUNTER_H_ + +/* + * The feed-forward clock counter. The fundamental element of a feed-forward + * clock is a wide monotonically increasing counter that accumulates at the same + * rate as the selected timecounter. + */ +typedef uint64_t ffcounter; + +#endif /* _SYS__FFCOUNTER_H_ */ diff --git a/cpukit/score/include/sys/timeffc.h b/cpukit/score/include/sys/timeffc.h new file mode 100644 index 0000000000..b3a1cd9ce7 --- /dev/null +++ b/cpukit/score/include/sys/timeffc.h @@ -0,0 +1,391 @@ +/*- + * Copyright (c) 2011 The University of Melbourne + * All rights reserved. + * + * This software was developed by Julien Ridoux at the University of Melbourne + * under sponsorship from the FreeBSD Foundation. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD r277406 2015-01-20T03:54:30Z$ + */ + +#ifndef _SYS_TIMEFF_H_ +#define _SYS_TIMEFF_H_ + +#include <sys/_ffcounter.h> + +/* + * Feed-forward clock estimate + * Holds time mark as a ffcounter and conversion to bintime based on current + * timecounter period and offset estimate passed by the synchronization daemon. + * Provides time of last daemon update, clock status and bound on error. + */ +struct ffclock_estimate { + struct bintime update_time; /* Time of last estimates update. */ + ffcounter update_ffcount; /* Counter value at last update. */ + ffcounter leapsec_next; /* Counter value of next leap second. */ + uint64_t period; /* Estimate of counter period. 
*/ + uint32_t errb_abs; /* Bound on absolute clock error [ns]. */ + uint32_t errb_rate; /* Bound on counter rate error [ps/s]. */ + uint32_t status; /* Clock status. */ + int16_t leapsec_total; /* All leap seconds seen so far. */ + int8_t leapsec; /* Next leap second (in {-1,0,1}). */ +}; + +#if __BSD_VISIBLE +#ifdef _KERNEL + +#ifndef __rtems__ +/* Define the kern.sysclock sysctl tree. */ +SYSCTL_DECL(_kern_sysclock); + +/* Define the kern.sysclock.ffclock sysctl tree. */ +SYSCTL_DECL(_kern_sysclock_ffclock); +#endif /* __rtems__ */ + +/* + * Index into the sysclocks array for obtaining the ASCII name of a particular + * sysclock. + */ +#define SYSCLOCK_FBCK 0 +#define SYSCLOCK_FFWD 1 +extern int sysclock_active; + +/* + * Parameters of counter characterisation required by feed-forward algorithms. + */ +#define FFCLOCK_SKM_SCALE 1024 + +/* + * Feed-forward clock status + */ +#define FFCLOCK_STA_UNSYNC 1 +#define FFCLOCK_STA_WARMUP 2 + +/* + * Flags for use by sysclock_snap2bintime() and various ffclock_ functions to + * control how the timecounter hardware is read and how the hardware snapshot is + * converted into absolute time. + * {FB|FF}CLOCK_FAST: Do not read the hardware counter, instead using the + * value at last tick. The time returned has a resolution + * of the kernel tick timer (1/hz [s]). + * FFCLOCK_LERP: Linear interpolation of ffclock time to guarantee + * monotonic time. + * FFCLOCK_LEAPSEC: Include leap seconds. + * {FB|FF}CLOCK_UPTIME: Time stamp should be relative to system boot, not epoch. + */ +#define FFCLOCK_FAST 0x00000001 +#define FFCLOCK_LERP 0x00000002 +#define FFCLOCK_LEAPSEC 0x00000004 +#define FFCLOCK_UPTIME 0x00000008 +#define FFCLOCK_MASK 0x0000ffff + +#define FBCLOCK_FAST 0x00010000 /* Currently unused. */ +#define FBCLOCK_UPTIME 0x00020000 +#define FBCLOCK_MASK 0xffff0000 + +/* + * Feedback clock specific info structure. The feedback clock's estimation of + * clock error is an absolute figure determined by the NTP algorithm. 
The status + * is determined by the userland daemon. + */ +struct fbclock_info { + struct bintime error; + struct bintime tick_time; + uint64_t th_scale; + int status; +}; + +/* + * Feed-forward clock specific info structure. The feed-forward clock's + * estimation of clock error is an upper bound, which although potentially + * looser than the feedback clock equivalent, is much more reliable. The status + * is determined by the userland daemon. + */ +struct ffclock_info { + struct bintime error; + struct bintime tick_time; + struct bintime tick_time_lerp; + uint64_t period; + uint64_t period_lerp; + int leapsec_adjustment; + int status; +}; + +/* + * Snapshot of system clocks and related information. Holds time read from each + * clock based on a single read of the active hardware timecounter, as well as + * respective clock information such as error estimates and the ffcounter value + * at the time of the read. + */ +struct sysclock_snap { + struct fbclock_info fb_info; + struct ffclock_info ff_info; + ffcounter ffcount; + unsigned int delta; + int sysclock_active; +}; + +/* Take a snapshot of the system clocks and related information. */ +void sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast); + +/* Convert a timestamp from the selected system clock into bintime. */ +int sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt, + int whichclock, uint32_t flags); + +/* Resets feed-forward clock from RTC */ +void ffclock_reset_clock(struct timespec *ts); + +/* + * Return the current value of the feed-forward clock counter. Essential to + * measure time interval in counter units. If a fast timecounter is used by the + * system, may also allow fast but accurate timestamping. + */ +void ffclock_read_counter(ffcounter *ffcount); + +/* + * Retrieve feed-forward counter value and time of last kernel tick. This + * accepts the FFCLOCK_LERP flag. 
+ */ +void ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags); + +/* + * Low level routines to convert a counter timestamp into absolute time and a + * counter timestamp interval into an interval in seconds. The absolute time + * conversion accepts the FFCLOCK_LERP flag. + */ +void ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags); +void ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt); + +/* + * Feed-forward clock routines. + * + * These functions rely on the timecounters and ffclock_estimates stored in + * fftimehands. Note that the error_bound parameter is not the error of the + * clock but an upper bound on the error of the absolute time or time interval + * returned. + * + * ffclock_abstime(): retrieves current time as counter value and convert this + * timestamp in seconds. The value (in seconds) of the converted timestamp + * depends on the flags passed: for a given counter value, different + * conversions are possible. Different clock models can be selected by + * combining flags (for example (FFCLOCK_LERP|FFCLOCK_UPTIME) produces + * linearly interpolated uptime). + * ffclock_difftime(): computes a time interval in seconds based on an interval + * measured in ffcounter units. This should be the preferred way to measure + * small time intervals very accurately. + */ +void ffclock_abstime(ffcounter *ffcount, struct bintime *bt, + struct bintime *error_bound, uint32_t flags); +void ffclock_difftime(ffcounter ffdelta, struct bintime *bt, + struct bintime *error_bound); + +/* + * Wrapper routines to return current absolute time using the feed-forward + * clock. These functions are named after those defined in <sys/time.h>, which + * contains a description of the original ones. 
+ */ +void ffclock_bintime(struct bintime *bt); +void ffclock_nanotime(struct timespec *tsp); +void ffclock_microtime(struct timeval *tvp); + +void ffclock_getbintime(struct bintime *bt); +void ffclock_getnanotime(struct timespec *tsp); +void ffclock_getmicrotime(struct timeval *tvp); + +void ffclock_binuptime(struct bintime *bt); +void ffclock_nanouptime(struct timespec *tsp); +void ffclock_microuptime(struct timeval *tvp); + +void ffclock_getbinuptime(struct bintime *bt); +void ffclock_getnanouptime(struct timespec *tsp); +void ffclock_getmicrouptime(struct timeval *tvp); + +/* + * Wrapper routines to convert a time interval specified in ffcounter units into + * seconds using the current feed-forward clock estimates. + */ +void ffclock_bindifftime(ffcounter ffdelta, struct bintime *bt); +void ffclock_nanodifftime(ffcounter ffdelta, struct timespec *tsp); +void ffclock_microdifftime(ffcounter ffdelta, struct timeval *tvp); + +/* + * When FFCLOCK is enabled in the kernel, [get]{bin,nano,micro}[up]time() become + * wrappers around equivalent feedback or feed-forward functions. Provide access + * outside of kern_tc.c to the feedback clock equivalent functions for + * specialised use i.e. these are not for general consumption. + */ +void fbclock_bintime(struct bintime *bt); +void fbclock_nanotime(struct timespec *tsp); +void fbclock_microtime(struct timeval *tvp); + +void fbclock_getbintime(struct bintime *bt); +void fbclock_getnanotime(struct timespec *tsp); +void fbclock_getmicrotime(struct timeval *tvp); + +void fbclock_binuptime(struct bintime *bt); +void fbclock_nanouptime(struct timespec *tsp); +void fbclock_microuptime(struct timeval *tvp); + +void fbclock_getbinuptime(struct bintime *bt); +void fbclock_getnanouptime(struct timespec *tsp); +void fbclock_getmicrouptime(struct timeval *tvp); + +/* + * Public system clock wrapper API which allows consumers to select which clock + * to obtain time from, independent of the current default system clock. 
These + * wrappers should be used instead of directly calling the underlying fbclock_ + * or ffclock_ functions. + */ +static inline void +bintime_fromclock(struct bintime *bt, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_bintime(bt); + else + fbclock_bintime(bt); +} + +static inline void +nanotime_fromclock(struct timespec *tsp, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_nanotime(tsp); + else + fbclock_nanotime(tsp); +} + +static inline void +microtime_fromclock(struct timeval *tvp, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_microtime(tvp); + else + fbclock_microtime(tvp); +} + +static inline void +getbintime_fromclock(struct bintime *bt, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_getbintime(bt); + else + fbclock_getbintime(bt); +} + +static inline void +getnanotime_fromclock(struct timespec *tsp, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_getnanotime(tsp); + else + fbclock_getnanotime(tsp); +} + +static inline void +getmicrotime_fromclock(struct timeval *tvp, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_getmicrotime(tvp); + else + fbclock_getmicrotime(tvp); +} + +static inline void +binuptime_fromclock(struct bintime *bt, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_binuptime(bt); + else + fbclock_binuptime(bt); +} + +static inline void +nanouptime_fromclock(struct timespec *tsp, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_nanouptime(tsp); + else + fbclock_nanouptime(tsp); +} + +static inline void +microuptime_fromclock(struct timeval *tvp, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_microuptime(tvp); + else + fbclock_microuptime(tvp); +} + +static inline void +getbinuptime_fromclock(struct bintime *bt, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_getbinuptime(bt); + else + fbclock_getbinuptime(bt); +} + +static inline void 
+getnanouptime_fromclock(struct timespec *tsp, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_getnanouptime(tsp); + else + fbclock_getnanouptime(tsp); +} + +static inline void +getmicrouptime_fromclock(struct timeval *tvp, int whichclock) +{ + + if (whichclock == SYSCLOCK_FFWD) + ffclock_getmicrouptime(tvp); + else + fbclock_getmicrouptime(tvp); +} + +#else /* !_KERNEL */ + +/* Feed-Forward Clock system calls. */ +__BEGIN_DECLS +int ffclock_getcounter(ffcounter *ffcount); +int ffclock_getestimate(struct ffclock_estimate *cest); +int ffclock_setestimate(struct ffclock_estimate *cest); +__END_DECLS + +#endif /* _KERNEL */ +#endif /* __BSD_VISIBLE */ +#endif /* _SYS_TIMEFF_H_ */ diff --git a/cpukit/score/include/sys/timepps.h b/cpukit/score/include/sys/timepps.h new file mode 100644 index 0000000000..71d74f54ee --- /dev/null +++ b/cpukit/score/include/sys/timepps.h @@ -0,0 +1,249 @@ +/*- + * ---------------------------------------------------------------------------- + * "THE BEER-WARE LICENSE" (Revision 42): + * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you + * can do whatever you want with this stuff. If we meet some day, and you think + * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp + * ---------------------------------------------------------------------------- + * + * Copyright (c) 2011 The FreeBSD Foundation + * All rights reserved. + * + * Portions of this software were developed by Julien Ridoux at the University + * of Melbourne under sponsorship from the FreeBSD Foundation. + * + * $FreeBSD r277406 2015-01-20T03:54:30Z$ + * + * The is a FreeBSD version of the RFC 2783 API for Pulse Per Second + * timing interfaces. 
+ */ + +#ifndef _SYS_TIMEPPS_H_ +#define _SYS_TIMEPPS_H_ + +#include <sys/_ffcounter.h> +#include <sys/ioccom.h> +#include <sys/time.h> + +#define PPS_API_VERS_1 1 + +typedef int pps_handle_t; + +typedef unsigned pps_seq_t; + +typedef struct ntp_fp { + unsigned int integral; + unsigned int fractional; +} ntp_fp_t; + +typedef union pps_timeu { + struct timespec tspec; + ntp_fp_t ntpfp; + unsigned long longpad[3]; +} pps_timeu_t; + +typedef struct { + pps_seq_t assert_sequence; /* assert event seq # */ + pps_seq_t clear_sequence; /* clear event seq # */ + pps_timeu_t assert_tu; + pps_timeu_t clear_tu; + int current_mode; /* current mode bits */ +} pps_info_t; + +typedef struct { + pps_seq_t assert_sequence; /* assert event seq # */ + pps_seq_t clear_sequence; /* clear event seq # */ + pps_timeu_t assert_tu; + pps_timeu_t clear_tu; + ffcounter assert_ffcount; /* ffcounter on assert event */ + ffcounter clear_ffcount; /* ffcounter on clear event */ + int current_mode; /* current mode bits */ +} pps_info_ffc_t; + +#define assert_timestamp assert_tu.tspec +#define clear_timestamp clear_tu.tspec + +#define assert_timestamp_ntpfp assert_tu.ntpfp +#define clear_timestamp_ntpfp clear_tu.ntpfp + +typedef struct { + int api_version; /* API version # */ + int mode; /* mode bits */ + pps_timeu_t assert_off_tu; + pps_timeu_t clear_off_tu; +} pps_params_t; + +#define assert_offset assert_off_tu.tspec +#define clear_offset clear_off_tu.tspec + +#define assert_offset_ntpfp assert_off_tu.ntpfp +#define clear_offset_ntpfp clear_off_tu.ntpfp + + +#define PPS_CAPTUREASSERT 0x01 +#define PPS_CAPTURECLEAR 0x02 +#define PPS_CAPTUREBOTH 0x03 + +#define PPS_OFFSETASSERT 0x10 +#define PPS_OFFSETCLEAR 0x20 + +#define PPS_ECHOASSERT 0x40 +#define PPS_ECHOCLEAR 0x80 + +#define PPS_CANWAIT 0x100 +#define PPS_CANPOLL 0x200 + +#define PPS_TSFMT_TSPEC 0x1000 +#define PPS_TSFMT_NTPFP 0x2000 + +#define PPS_TSCLK_FBCK 0x10000 +#define PPS_TSCLK_FFWD 0x20000 +#define PPS_TSCLK_MASK 0x30000 + +#define 
PPS_KC_HARDPPS 0 +#define PPS_KC_HARDPPS_PLL 1 +#define PPS_KC_HARDPPS_FLL 2 + +struct pps_fetch_args { + int tsformat; + pps_info_t pps_info_buf; + struct timespec timeout; +}; + +struct pps_fetch_ffc_args { + int tsformat; + pps_info_ffc_t pps_info_buf_ffc; + struct timespec timeout; +}; + +struct pps_kcbind_args { + int kernel_consumer; + int edge; + int tsformat; +}; + +#define PPS_IOC_CREATE _IO('1', 1) +#define PPS_IOC_DESTROY _IO('1', 2) +#define PPS_IOC_SETPARAMS _IOW('1', 3, pps_params_t) +#define PPS_IOC_GETPARAMS _IOR('1', 4, pps_params_t) +#define PPS_IOC_GETCAP _IOR('1', 5, int) +#define PPS_IOC_FETCH _IOWR('1', 6, struct pps_fetch_args) +#define PPS_IOC_KCBIND _IOW('1', 7, struct pps_kcbind_args) +#define PPS_IOC_FETCH_FFCOUNTER _IOWR('1', 8, struct pps_fetch_ffc_args) + +#ifdef _KERNEL + +struct pps_state { + /* Capture information. */ + struct timehands *capth; + struct fftimehands *capffth; + unsigned capgen; + unsigned capcount; + + /* State information. */ + pps_params_t ppsparam; + pps_info_t ppsinfo; + pps_info_ffc_t ppsinfo_ffc; + int kcmode; + int ppscap; + struct timecounter *ppstc; + unsigned ppscount[3]; +}; + +void pps_capture(struct pps_state *pps); +void pps_event(struct pps_state *pps, int event); +void pps_init(struct pps_state *pps); +int pps_ioctl(unsigned long cmd, caddr_t data, struct pps_state *pps); +void hardpps(struct timespec *tsp, long nsec); + +#else /* !_KERNEL */ + +static __inline int +time_pps_create(int filedes, pps_handle_t *handle) +{ + int error; + + *handle = -1; + error = ioctl(filedes, PPS_IOC_CREATE, 0); + if (error < 0) + return (-1); + *handle = filedes; + return (0); +} + +static __inline int +time_pps_destroy(pps_handle_t handle) +{ + return (ioctl(handle, PPS_IOC_DESTROY, 0)); +} + +static __inline int +time_pps_setparams(pps_handle_t handle, const pps_params_t *ppsparams) +{ + return (ioctl(handle, PPS_IOC_SETPARAMS, ppsparams)); +} + +static __inline int +time_pps_getparams(pps_handle_t handle, 
pps_params_t *ppsparams) +{ + return (ioctl(handle, PPS_IOC_GETPARAMS, ppsparams)); +} + +static __inline int +time_pps_getcap(pps_handle_t handle, int *mode) +{ + return (ioctl(handle, PPS_IOC_GETCAP, mode)); +} + +static __inline int +time_pps_fetch(pps_handle_t handle, const int tsformat, + pps_info_t *ppsinfobuf, const struct timespec *timeout) +{ + int error; + struct pps_fetch_args arg; + + arg.tsformat = tsformat; + if (timeout == NULL) { + arg.timeout.tv_sec = -1; + arg.timeout.tv_nsec = -1; + } else + arg.timeout = *timeout; + error = ioctl(handle, PPS_IOC_FETCH, &arg); + *ppsinfobuf = arg.pps_info_buf; + return (error); +} + +static __inline int +time_pps_fetch_ffc(pps_handle_t handle, const int tsformat, + pps_info_ffc_t *ppsinfobuf, const struct timespec *timeout) +{ + struct pps_fetch_ffc_args arg; + int error; + + arg.tsformat = tsformat; + if (timeout == NULL) { + arg.timeout.tv_sec = -1; + arg.timeout.tv_nsec = -1; + } else { + arg.timeout = *timeout; + } + error = ioctl(handle, PPS_IOC_FETCH_FFCOUNTER, &arg); + *ppsinfobuf = arg.pps_info_buf_ffc; + return (error); +} + +static __inline int +time_pps_kcbind(pps_handle_t handle, const int kernel_consumer, + const int edge, const int tsformat) +{ + struct pps_kcbind_args arg; + + arg.kernel_consumer = kernel_consumer; + arg.edge = edge; + arg.tsformat = tsformat; + return (ioctl(handle, PPS_IOC_KCBIND, &arg)); +} + +#endif /* KERNEL */ + +#endif /* !_SYS_TIMEPPS_H_ */ diff --git a/cpukit/score/include/sys/timetc.h b/cpukit/score/include/sys/timetc.h new file mode 100644 index 0000000000..88e90dedac --- /dev/null +++ b/cpukit/score/include/sys/timetc.h @@ -0,0 +1,91 @@ +/*- + * ---------------------------------------------------------------------------- + * "THE BEER-WARE LICENSE" (Revision 42): + * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you + * can do whatever you want with this stuff. 
If we meet some day, and you think + * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp + * ---------------------------------------------------------------------------- + * + * $FreeBSD r277406 2015-01-20T03:54:30Z$ + */ + +#ifndef _SYS_TIMETC_H_ +#define _SYS_TIMETC_H_ + +#ifndef __rtems__ +#ifndef _KERNEL +#error "no user-serviceable parts inside" +#endif +#endif /* __rtems__ */ + +/*- + * `struct timecounter' is the interface between the hardware which implements + * a timecounter and the MI code which uses this to keep track of time. + * + * A timecounter is a binary counter which has two properties: + * * it runs at a fixed, known frequency. + * * it has sufficient bits to not roll over in less than approximately + * max(2 msec, 2/HZ seconds). (The value 2 here is really 1 + delta, + * for some indeterminate value of delta.) + */ + +struct timecounter; +typedef uint32_t timecounter_get_t(struct timecounter *); +typedef void timecounter_pps_t(struct timecounter *); + +struct timecounter { + timecounter_get_t *tc_get_timecount; + /* + * This function reads the counter. It is not required to + * mask any unimplemented bits out, as long as they are + * constant. + */ + timecounter_pps_t *tc_poll_pps; + /* + * This function is optional. It will be called whenever the + * timecounter is rewound, and is intended to check for PPS + * events. Normal hardware does not need it but timecounters + * which latch PPS in hardware (like sys/pci/xrpu.c) do. + */ + uint32_t tc_counter_mask; + /* This mask should mask off any unimplemented bits. */ + uint64_t tc_frequency; + /* Frequency of the counter in Hz. */ + char *tc_name; + /* Name of the timecounter. */ + int tc_quality; + /* + * Used to determine if this timecounter is better than + * another timecounter higher means better. Negative + * means "only use at explicit request". + */ + u_int tc_flags; +#define TC_FLAGS_C2STOP 1 /* Timer dies in C2+. 
*/ +#define TC_FLAGS_SUSPEND_SAFE 2 /* + * Timer functional across + * suspend/resume. + */ + + void *tc_priv; + /* Pointer to the timecounter's private parts. */ + struct timecounter *tc_next; + /* Pointer to the next timecounter. */ +}; + +extern struct timecounter *timecounter; +extern int tc_min_ticktock_freq; /* + * Minimal tc_ticktock() call frequency, + * required to handle counter wraps. + */ + +u_int64_t tc_getfrequency(void); +void tc_init(struct timecounter *tc); +void tc_setclock(struct timespec *ts); +void tc_ticktock(int cnt); +void cpu_tick_calibration(void); + +#ifdef SYSCTL_DECL +SYSCTL_DECL(_kern_timecounter); +#endif + +#endif /* !_SYS_TIMETC_H_ */ diff --git a/cpukit/score/include/sys/timex.h b/cpukit/score/include/sys/timex.h new file mode 100644 index 0000000000..9eb0efb379 --- /dev/null +++ b/cpukit/score/include/sys/timex.h @@ -0,0 +1,171 @@ +/*- + *********************************************************************** + * * + * Copyright (c) David L. Mills 1993-2001 * + * Copyright (c) Poul-Henning Kamp 2000-2001 * + * * + * Permission to use, copy, modify, and distribute this software and * + * its documentation for any purpose and without fee is hereby * + * granted, provided that the above copyright notice appears in all * + * copies and that both the copyright notice and this permission * + * notice appear in supporting documentation, and that the name * + * University of Delaware not be used in advertising or publicity * + * pertaining to distribution of the software without specific, * + * written prior permission. The University of Delaware makes no * + * representations about the suitability this software for any * + * purpose. It is provided "as is" without express or implied * + * warranty. 
* + * * + *********************************************************************** + * + * $FreeBSD r277406 2015-01-20T03:54:30Z$ + * + * This header file defines the Network Time Protocol (NTP) interfaces + * for user and daemon application programs. + * + * This file was originally created 17 Sep 93 by David L. Mills, Professor + * of University of Delaware, building on work which had already been ongoing + * for a decade and a half at that point in time. + * + * In 2000 the APIs got a upgrade from microseconds to nanoseconds, + * a joint work between Poul-Henning Kamp and David L. Mills. + * + */ + +#ifndef _SYS_TIMEX_H_ +#define _SYS_TIMEX_H_ 1 + +#define NTP_API 4 /* NTP API version */ + +#ifdef __FreeBSD__ +#include <sys/_timespec.h> +#endif /* __FreeBSD__ */ + +/* + * The following defines establish the performance envelope of the + * kernel discipline loop. Phase or frequency errors greater than + * NAXPHASE or MAXFREQ are clamped to these maxima. For update intervals + * less than MINSEC, the loop always operates in PLL mode; while, for + * update intervals greater than MAXSEC, the loop always operates in FLL + * mode. Between these two limits the operating mode is selected by the + * STA_FLL bit in the status word. 
+ */ + +#define MAXPHASE 500000000L /* max phase error (ns) */ +#define MAXFREQ 500000L /* max freq error (ns/s) */ +#define MINSEC 256 /* min FLL update interval (s) */ +#define MAXSEC 2048 /* max PLL update interval (s) */ +#define NANOSECOND 1000000000L /* nanoseconds in one second */ +#define SCALE_PPM (65536 / 1000) /* crude ns/s to scaled PPM */ +#define MAXTC 10 /* max time constant */ + +/* + * Control mode codes (timex.modes) + */ +#define MOD_OFFSET 0x0001 /* set time offset */ +#define MOD_FREQUENCY 0x0002 /* set frequency offset */ +#define MOD_MAXERROR 0x0004 /* set maximum time error */ +#define MOD_ESTERROR 0x0008 /* set estimated time error */ +#define MOD_STATUS 0x0010 /* set clock status bits */ +#define MOD_TIMECONST 0x0020 /* set PLL time constant */ +#define MOD_PPSMAX 0x0040 /* set PPS maximum averaging time */ +#define MOD_TAI 0x0080 /* set TAI offset */ +#define MOD_MICRO 0x1000 /* select microsecond resolution */ +#define MOD_NANO 0x2000 /* select nanosecond resolution */ +#define MOD_CLKB 0x4000 /* select clock B */ +#define MOD_CLKA 0x8000 /* select clock A */ + +/* + * Status codes (timex.status) + */ +#define STA_PLL 0x0001 /* enable PLL updates (rw) */ +#define STA_PPSFREQ 0x0002 /* enable PPS freq discipline (rw) */ +#define STA_PPSTIME 0x0004 /* enable PPS time discipline (rw) */ +#define STA_FLL 0x0008 /* enable FLL mode (rw) */ +#define STA_INS 0x0010 /* insert leap (rw) */ +#define STA_DEL 0x0020 /* delete leap (rw) */ +#define STA_UNSYNC 0x0040 /* clock unsynchronized (rw) */ +#define STA_FREQHOLD 0x0080 /* hold frequency (rw) */ +#define STA_PPSSIGNAL 0x0100 /* PPS signal present (ro) */ +#define STA_PPSJITTER 0x0200 /* PPS signal jitter exceeded (ro) */ +#define STA_PPSWANDER 0x0400 /* PPS signal wander exceeded (ro) */ +#define STA_PPSERROR 0x0800 /* PPS signal calibration error (ro) */ +#define STA_CLOCKERR 0x1000 /* clock hardware fault (ro) */ +#define STA_NANO 0x2000 /* resolution (0 = us, 1 = ns) (ro) */ +#define STA_MODE 
0x4000 /* mode (0 = PLL, 1 = FLL) (ro) */ +#define STA_CLK 0x8000 /* clock source (0 = A, 1 = B) (ro) */ + +#define STA_RONLY (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \ + STA_PPSERROR | STA_CLOCKERR | STA_NANO | STA_MODE | STA_CLK) + +/* + * Clock states (ntptimeval.time_state) + */ +#define TIME_OK 0 /* no leap second warning */ +#define TIME_INS 1 /* insert leap second warning */ +#define TIME_DEL 2 /* delete leap second warning */ +#define TIME_OOP 3 /* leap second in progress */ +#define TIME_WAIT 4 /* leap second has occured */ +#define TIME_ERROR 5 /* error (see status word) */ + +/* + * NTP user interface -- ntp_gettime(2) - used to read kernel clock values + */ +struct ntptimeval { + struct timespec time; /* current time (ns) (ro) */ + long maxerror; /* maximum error (us) (ro) */ + long esterror; /* estimated error (us) (ro) */ + long tai; /* TAI offset */ + int time_state; /* time status */ +}; + +/* + * NTP daemon interface -- ntp_adjtime(2) -- used to discipline CPU clock + * oscillator and control/determine status. + * + * Note: The offset, precision and jitter members are in microseconds if + * STA_NANO is zero and nanoseconds if not. + */ +struct timex { + unsigned int modes; /* clock mode bits (wo) */ + long offset; /* time offset (ns/us) (rw) */ + long freq; /* frequency offset (scaled PPM) (rw) */ + long maxerror; /* maximum error (us) (rw) */ + long esterror; /* estimated error (us) (rw) */ + int status; /* clock status bits (rw) */ + long constant; /* poll interval (log2 s) (rw) */ + long precision; /* clock precision (ns/us) (ro) */ + long tolerance; /* clock frequency tolerance (scaled + * PPM) (ro) */ + /* + * The following read-only structure members are implemented + * only if the PPS signal discipline is configured in the + * kernel. They are included in all configurations to insure + * portability. 
+ */ + long ppsfreq; /* PPS frequency (scaled PPM) (ro) */ + long jitter; /* PPS jitter (ns/us) (ro) */ + int shift; /* interval duration (s) (shift) (ro) */ + long stabil; /* PPS stability (scaled PPM) (ro) */ + long jitcnt; /* jitter limit exceeded (ro) */ + long calcnt; /* calibration intervals (ro) */ + long errcnt; /* calibration errors (ro) */ + long stbcnt; /* stability limit exceeded (ro) */ +}; + +#ifdef __FreeBSD__ + +#ifdef _KERNEL +void ntp_update_second(int64_t *adjustment, time_t *newsec); +#else /* !_KERNEL */ +#include <sys/cdefs.h> + +__BEGIN_DECLS +int ntp_adjtime(struct timex *); +int ntp_gettime(struct ntptimeval *); +__END_DECLS +#endif /* _KERNEL */ + +#endif /* __FreeBSD__ */ + +#endif /* !_SYS_TIMEX_H_ */ diff --git a/cpukit/score/preinstall.am b/cpukit/score/preinstall.am index 920c0d9aeb..062fe1b438 100644 --- a/cpukit/score/preinstall.am +++ b/cpukit/score/preinstall.am @@ -13,6 +13,31 @@ all-am: $(PREINSTALL_FILES) PREINSTALL_FILES = CLEANFILES = $(PREINSTALL_FILES) +$(PROJECT_INCLUDE)/sys/$(dirstamp): + @$(MKDIR_P) $(PROJECT_INCLUDE)/sys + @: > $(PROJECT_INCLUDE)/sys/$(dirstamp) +PREINSTALL_DIRS += $(PROJECT_INCLUDE)/sys/$(dirstamp) + +$(PROJECT_INCLUDE)/sys/_ffcounter.h: include/sys/_ffcounter.h $(PROJECT_INCLUDE)/sys/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/sys/_ffcounter.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/sys/_ffcounter.h + +$(PROJECT_INCLUDE)/sys/timeffc.h: include/sys/timeffc.h $(PROJECT_INCLUDE)/sys/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/sys/timeffc.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/sys/timeffc.h + +$(PROJECT_INCLUDE)/sys/timepps.h: include/sys/timepps.h $(PROJECT_INCLUDE)/sys/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/sys/timepps.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/sys/timepps.h + +$(PROJECT_INCLUDE)/sys/timetc.h: include/sys/timetc.h $(PROJECT_INCLUDE)/sys/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/sys/timetc.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/sys/timetc.h + 
+$(PROJECT_INCLUDE)/sys/timex.h: include/sys/timex.h $(PROJECT_INCLUDE)/sys/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/sys/timex.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/sys/timex.h + $(PROJECT_INCLUDE)/rtems/$(dirstamp): @$(MKDIR_P) $(PROJECT_INCLUDE)/rtems @: > $(PROJECT_INCLUDE)/rtems/$(dirstamp) @@ -311,10 +336,6 @@ $(PROJECT_INCLUDE)/rtems/score/threadqimpl.h: include/rtems/score/threadqimpl.h $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/threadqimpl.h PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/threadqimpl.h -$(PROJECT_INCLUDE)/rtems/score/threadsync.h: include/rtems/score/threadsync.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) - $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/threadsync.h -PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/threadsync.h - $(PROJECT_INCLUDE)/rtems/score/timespec.h: include/rtems/score/timespec.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/timespec.h PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/timespec.h @@ -323,9 +344,13 @@ $(PROJECT_INCLUDE)/rtems/score/timestamp.h: include/rtems/score/timestamp.h $(PR $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/timestamp.h PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/timestamp.h -$(PROJECT_INCLUDE)/rtems/score/timestamp64.h: include/rtems/score/timestamp64.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) - $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/timestamp64.h -PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/timestamp64.h +$(PROJECT_INCLUDE)/rtems/score/timecounter.h: include/rtems/score/timecounter.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/timecounter.h +PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/timecounter.h + +$(PROJECT_INCLUDE)/rtems/score/timecounterimpl.h: include/rtems/score/timecounterimpl.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) + $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/timecounterimpl.h +PREINSTALL_FILES += 
$(PROJECT_INCLUDE)/rtems/score/timecounterimpl.h $(PROJECT_INCLUDE)/rtems/score/tls.h: include/rtems/score/tls.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp) $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/tls.h diff --git a/cpukit/score/src/apimutexlock.c b/cpukit/score/src/apimutexlock.c index 45ad0ba9e7..bcac71a9c3 100644 --- a/cpukit/score/src/apimutexlock.c +++ b/cpukit/score/src/apimutexlock.c @@ -30,10 +30,6 @@ void _API_Mutex_Lock( API_Mutex_Control *the_mutex ) previous_thread_life_protection = _Thread_Set_life_protection( true ); - #if defined(RTEMS_SMP) - _Thread_Disable_dispatch(); - #endif - _ISR_lock_ISR_disable( &lock_context ); _CORE_mutex_Seize( @@ -49,8 +45,4 @@ void _API_Mutex_Lock( API_Mutex_Control *the_mutex ) the_mutex->previous_thread_life_protection = previous_thread_life_protection; } - - #if defined(RTEMS_SMP) - _Thread_Enable_dispatch(); - #endif } diff --git a/cpukit/score/src/apimutexunlock.c b/cpukit/score/src/apimutexunlock.c index d0432a6f4a..04657ddcec 100644 --- a/cpukit/score/src/apimutexunlock.c +++ b/cpukit/score/src/apimutexunlock.c @@ -24,18 +24,21 @@ void _API_Mutex_Unlock( API_Mutex_Control *the_mutex ) { - bool previous_thread_life_protection; - bool restore_thread_life_protection; - - _Thread_Disable_dispatch(); + ISR_lock_Context lock_context; + bool previous_thread_life_protection; + bool restore_thread_life_protection; previous_thread_life_protection = the_mutex->previous_thread_life_protection; restore_thread_life_protection = the_mutex->Mutex.nest_count == 1; - _CORE_mutex_Surrender( &the_mutex->Mutex, the_mutex->Object.id, NULL ); - - _Thread_Enable_dispatch(); + _ISR_lock_ISR_disable( &lock_context ); + _CORE_mutex_Surrender( + &the_mutex->Mutex, + the_mutex->Object.id, + NULL, + &lock_context + ); if ( restore_thread_life_protection ) { _Thread_Set_life_protection( previous_thread_life_protection ); diff --git a/cpukit/score/src/corebarrier.c b/cpukit/score/src/corebarrier.c index 2035961b92..eddf901934 100644 --- 
a/cpukit/score/src/corebarrier.c +++ b/cpukit/score/src/corebarrier.c @@ -32,7 +32,6 @@ void _CORE_barrier_Initialize( _Thread_queue_Initialize( &the_barrier->Wait_queue, - THREAD_QUEUE_DISCIPLINE_FIFO, - CORE_BARRIER_TIMEOUT + THREAD_QUEUE_DISCIPLINE_FIFO ); } diff --git a/cpukit/score/src/corebarrierwait.c b/cpukit/score/src/corebarrierwait.c index 6267ae67c0..39687d31da 100644 --- a/cpukit/score/src/corebarrierwait.c +++ b/cpukit/score/src/corebarrierwait.c @@ -32,30 +32,29 @@ void _CORE_barrier_Wait( CORE_barrier_API_mp_support_callout api_barrier_mp_support ) { - ISR_Level level; + ISR_lock_Context lock_context; executing->Wait.return_code = CORE_BARRIER_STATUS_SUCCESSFUL; - _ISR_Disable( level ); + _Thread_queue_Acquire( &the_barrier->Wait_queue, &lock_context ); the_barrier->number_of_waiting_threads++; if ( _CORE_barrier_Is_automatic( &the_barrier->Attributes ) ) { if ( the_barrier->number_of_waiting_threads == the_barrier->Attributes.maximum_count) { executing->Wait.return_code = CORE_BARRIER_STATUS_AUTOMATICALLY_RELEASED; - _ISR_Enable( level ); + _Thread_queue_Release( &the_barrier->Wait_queue, &lock_context ); _CORE_barrier_Release( the_barrier, id, api_barrier_mp_support ); return; } } - _Thread_queue_Enter_critical_section( &the_barrier->Wait_queue ); - executing->Wait.queue = &the_barrier->Wait_queue; - executing->Wait.id = id; - _ISR_Enable( level ); + executing->Wait.id = id; - _Thread_queue_Enqueue( + _Thread_queue_Enqueue_critical( &the_barrier->Wait_queue, executing, STATES_WAITING_FOR_BARRIER, - timeout + timeout, + CORE_BARRIER_TIMEOUT, + &lock_context ); } diff --git a/cpukit/score/src/coremsg.c b/cpukit/score/src/coremsg.c index 0790221eb9..ae2bc753c7 100644 --- a/cpukit/score/src/coremsg.c +++ b/cpukit/score/src/coremsg.c @@ -111,8 +111,7 @@ bool _CORE_message_queue_Initialize( _Thread_queue_Initialize( &the_message_queue->Wait_queue, _CORE_message_queue_Is_priority( the_message_queue_attributes ) ? 
- THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO, - CORE_MESSAGE_QUEUE_STATUS_TIMEOUT + THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO ); return true; diff --git a/cpukit/score/src/coremsgbroadcast.c b/cpukit/score/src/coremsgbroadcast.c index ff9f3ec220..3ee587ccfb 100644 --- a/cpukit/score/src/coremsgbroadcast.c +++ b/cpukit/score/src/coremsgbroadcast.c @@ -20,7 +20,6 @@ #include <rtems/score/coremsgimpl.h> #include <rtems/score/objectimpl.h> -#include <rtems/score/thread.h> CORE_message_queue_Status _CORE_message_queue_Broadcast( CORE_message_queue_Control *the_message_queue, @@ -33,55 +32,45 @@ CORE_message_queue_Status _CORE_message_queue_Broadcast( Objects_Id id __attribute__((unused)), CORE_message_queue_API_mp_support_callout api_message_queue_mp_support __attribute__((unused)), #endif - uint32_t *count + uint32_t *count, + ISR_lock_Context *lock_context ) { - Thread_Control *the_thread; - uint32_t number_broadcasted; - Thread_Wait_information *waitp; + Thread_Control *the_thread; + uint32_t number_broadcasted; if ( size > the_message_queue->maximum_message_size ) { + _ISR_lock_ISR_enable( lock_context ); return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE; } - /* - * If there are pending messages, then there can't be threads - * waiting for us to send them a message. - * - * NOTE: This check is critical because threads can block on - * send and receive and this ensures that we are broadcasting - * the message to threads waiting to receive -- not to send. - */ + number_broadcasted = 0; - if ( the_message_queue->number_of_pending_messages != 0 ) { - *count = 0; - return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL; - } + _CORE_message_queue_Acquire_critical( the_message_queue, lock_context ); - /* - * There must be no pending messages if there is a thread waiting to - * receive a message. 
- */ - number_broadcasted = 0; - while ((the_thread = - _Thread_queue_Dequeue(&the_message_queue->Wait_queue))) { - waitp = &the_thread->Wait; + while ( + ( the_thread = + _CORE_message_queue_Dequeue_receiver( + the_message_queue, + buffer, + size, + 0, + lock_context + ) + ) + ) { number_broadcasted += 1; - _CORE_message_queue_Copy_buffer( - buffer, - waitp->return_argument_second.mutable_object, - size - ); +#if defined(RTEMS_MULTIPROCESSING) + if ( !_Objects_Is_local_id( the_thread->Object.id ) ) + (*api_message_queue_mp_support) ( the_thread, id ); +#endif - *(size_t *) the_thread->Wait.return_argument = size; + _CORE_message_queue_Acquire( the_message_queue, lock_context ); + } - #if defined(RTEMS_MULTIPROCESSING) - if ( !_Objects_Is_local_id( the_thread->Object.id ) ) - (*api_message_queue_mp_support) ( the_thread, id ); - #endif + _CORE_message_queue_Release( the_message_queue, lock_context ); - } *count = number_broadcasted; return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL; } diff --git a/cpukit/score/src/coremsgclose.c b/cpukit/score/src/coremsgclose.c index 987f038cb3..e0d370d512 100644 --- a/cpukit/score/src/coremsgclose.c +++ b/cpukit/score/src/coremsgclose.c @@ -18,11 +18,7 @@ #include "config.h" #endif -#include <rtems/system.h> -#include <rtems/score/chain.h> -#include <rtems/score/isr.h> #include <rtems/score/coremsgimpl.h> -#include <rtems/score/thread.h> #include <rtems/score/wkspace.h> void _CORE_message_queue_Close( @@ -31,6 +27,7 @@ void _CORE_message_queue_Close( uint32_t status ) { + ISR_lock_Context lock_context; /* * This will flush blocked threads whether they were blocked on @@ -49,9 +46,10 @@ void _CORE_message_queue_Close( * the flush satisfying any blocked senders as a side-effect. 
*/ - if ( the_message_queue->number_of_pending_messages != 0 ) - (void) _CORE_message_queue_Flush_support( the_message_queue ); + _ISR_lock_ISR_disable( &lock_context ); + (void) _CORE_message_queue_Flush( the_message_queue, &lock_context ); (void) _Workspace_Free( the_message_queue->message_buffers ); + _Thread_queue_Destroy( &the_message_queue->Wait_queue ); } diff --git a/cpukit/score/src/coremsgflush.c b/cpukit/score/src/coremsgflush.c index 51f6c8db98..f67dcf28eb 100644 --- a/cpukit/score/src/coremsgflush.c +++ b/cpukit/score/src/coremsgflush.c @@ -1,8 +1,9 @@ /** - * @file + * @file * - * @brief Flush Pending Messages - * @ingroup ScoreMessageQueue + * @brief Flush Messages Routine + * + * @ingroup ScoreMessageQueue */ /* @@ -18,19 +19,65 @@ #include "config.h" #endif -#include <rtems/system.h> -#include <rtems/score/chain.h> -#include <rtems/score/isr.h> #include <rtems/score/coremsgimpl.h> -#include <rtems/score/thread.h> -#include <rtems/score/wkspace.h> uint32_t _CORE_message_queue_Flush( - CORE_message_queue_Control *the_message_queue + CORE_message_queue_Control *the_message_queue, + ISR_lock_Context *lock_context ) { - if ( the_message_queue->number_of_pending_messages != 0 ) - return _CORE_message_queue_Flush_support( the_message_queue ); - else - return 0; + Chain_Node *inactive_head; + Chain_Node *inactive_first; + Chain_Node *message_queue_first; + Chain_Node *message_queue_last; + uint32_t count; + + /* + * Currently, RTEMS supports no API that has both flush and blocking + * sends. Thus, this routine assumes that there are no senders + * blocked waiting to send messages. In the event, that an API is + * added that can flush a message queue when threads are blocked + * waiting to send, there are two basic behaviors envisioned: + * + * (1) The thread queue of pending senders is a logical extension + * of the pending message queue. 
In this case, it should be + * flushed using the _Thread_queue_Flush() service with a status + * such as CORE_MESSAGE_QUEUE_SENDER_FLUSHED (which currently does + * not exist). This can be implemented without changing the "big-O" + * of the message flushing part of the routine. + * + * (2) Only the actual messages queued should be purged. In this case, + * the blocked sender threads must be allowed to send their messages. + * In this case, the implementation will be forced to individually + * dequeue the senders and queue their messages. This will force + * this routine to have "big O(n)" where n is the number of blocked + * senders. If there are more messages pending than senders blocked, + * then the existing flush code can be used to dispose of the remaining + * pending messages. + * + * For now, though, we are very happy to have a small routine with + * fixed execution time that only deals with pending messages. + */ + + _CORE_message_queue_Acquire_critical( the_message_queue, lock_context ); + + count = the_message_queue->number_of_pending_messages; + if ( count != 0 ) { + the_message_queue->number_of_pending_messages = 0; + + inactive_head = _Chain_Head( &the_message_queue->Inactive_messages ); + inactive_first = inactive_head->next; + message_queue_first = _Chain_First( &the_message_queue->Pending_messages ); + message_queue_last = _Chain_Last( &the_message_queue->Pending_messages ); + + inactive_head->next = message_queue_first; + message_queue_last->next = inactive_first; + inactive_first->previous = message_queue_last; + message_queue_first->previous = inactive_head; + + _Chain_Initialize_empty( &the_message_queue->Pending_messages ); + } + + _CORE_message_queue_Release( the_message_queue, lock_context ); + return count; } diff --git a/cpukit/score/src/coremsgflushsupp.c b/cpukit/score/src/coremsgflushsupp.c deleted file mode 100644 index 041972fb9c..0000000000 --- a/cpukit/score/src/coremsgflushsupp.c +++ /dev/null @@ -1,84 +0,0 @@ -/** - * @file - * - * 
@brief Flush Messages Support Routine - * - * @ingroup ScoreMessageQueue - */ - -/* - * COPYRIGHT (c) 1989-1999. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/system.h> -#include <rtems/score/chain.h> -#include <rtems/score/isr.h> -#include <rtems/score/coremsgimpl.h> -#include <rtems/score/thread.h> -#include <rtems/score/wkspace.h> - -uint32_t _CORE_message_queue_Flush_support( - CORE_message_queue_Control *the_message_queue -) -{ - ISR_Level level; - Chain_Node *inactive_head; - Chain_Node *inactive_first; - Chain_Node *message_queue_first; - Chain_Node *message_queue_last; - uint32_t count; - - /* - * Currently, RTEMS supports no API that has both flush and blocking - * sends. Thus, this routine assumes that there are no senders - * blocked waiting to send messages. In the event, that an API is - * added that can flush a message queue when threads are blocked - * waiting to send, there are two basic behaviors envisioned: - * - * (1) The thread queue of pending senders is a logical extension - * of the pending message queue. In this case, it should be - * flushed using the _Thread_queue_Flush() service with a status - * such as CORE_MESSAGE_QUEUE_SENDER_FLUSHED (which currently does - * not exist). This can be implemented without changing the "big-O" - * of the message flushing part of the routine. - * - * (2) Only the actual messages queued should be purged. In this case, - * the blocked sender threads must be allowed to send their messages. - * In this case, the implementation will be forced to individually - * dequeue the senders and queue their messages. This will force - * this routine to have "big O(n)" where n is the number of blocked - * senders. 
If there are more messages pending than senders blocked, - * then the existing flush code can be used to dispose of the remaining - * pending messages. - * - * For now, though, we are very happy to have a small routine with - * fixed execution time that only deals with pending messages. - */ - - _ISR_Disable( level ); - inactive_head = _Chain_Head( &the_message_queue->Inactive_messages ); - inactive_first = inactive_head->next; - message_queue_first = _Chain_First( &the_message_queue->Pending_messages ); - message_queue_last = _Chain_Last( &the_message_queue->Pending_messages ); - - inactive_head->next = message_queue_first; - message_queue_last->next = inactive_first; - inactive_first->previous = message_queue_last; - message_queue_first->previous = inactive_head; - - _Chain_Initialize_empty( &the_message_queue->Pending_messages ); - - count = the_message_queue->number_of_pending_messages; - the_message_queue->number_of_pending_messages = 0; - _ISR_Enable( level ); - return count; -} diff --git a/cpukit/score/src/coremsginsert.c b/cpukit/score/src/coremsginsert.c index 28407bae98..0a73af8c71 100644 --- a/cpukit/score/src/coremsginsert.c +++ b/cpukit/score/src/coremsginsert.c @@ -19,7 +19,6 @@ #endif #include <rtems/score/coremsgimpl.h> -#include <rtems/score/isrlevel.h> #if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY) static bool _CORE_message_queue_Order( @@ -45,7 +44,6 @@ void _CORE_message_queue_Insert_message( ) { Chain_Control *pending_messages; - ISR_Level level; #if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION) bool notify; #endif @@ -53,8 +51,6 @@ void _CORE_message_queue_Insert_message( _CORE_message_queue_Set_message_priority( the_message, submit_type ); pending_messages = &the_message_queue->Pending_messages; - _ISR_Disable( level ); - #if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION) notify = ( the_message_queue->number_of_pending_messages == 0 ); #endif @@ -74,8 +70,6 @@ void _CORE_message_queue_Insert_message( 
_Chain_Prepend_unprotected( pending_messages, &the_message->Node ); } - _ISR_Enable( level ); - #if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION) /* * According to POSIX, does this happen before or after the message diff --git a/cpukit/score/src/coremsgseize.c b/cpukit/score/src/coremsgseize.c index db8d558ca6..0d1c36fe47 100644 --- a/cpukit/score/src/coremsgseize.c +++ b/cpukit/score/src/coremsgseize.c @@ -33,18 +33,17 @@ void _CORE_message_queue_Seize( void *buffer, size_t *size_p, bool wait, - Watchdog_Interval timeout + Watchdog_Interval timeout, + ISR_lock_Context *lock_context ) { - ISR_Level level; CORE_message_queue_Buffer_control *the_message; executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL; - _ISR_Disable( level ); + _CORE_message_queue_Acquire_critical( the_message_queue, lock_context ); the_message = _CORE_message_queue_Get_pending_message( the_message_queue ); if ( the_message != NULL ) { the_message_queue->number_of_pending_messages -= 1; - _ISR_Enable( level ); *size_p = the_message->Contents.size; executing->Wait.count = @@ -61,6 +60,7 @@ void _CORE_message_queue_Seize( * So return immediately. */ _CORE_message_queue_Free_message_buffer(the_message_queue, the_message); + _CORE_message_queue_Release( the_message_queue, lock_context ); return; #else { @@ -73,12 +73,15 @@ void _CORE_message_queue_Seize( * NOTE: If we note that the queue was not full before this receive, * then we can avoid this dequeue. 
*/ - the_thread = _Thread_queue_Dequeue( &the_message_queue->Wait_queue ); - if ( !the_thread ) { + the_thread = _Thread_queue_First_locked( + &the_message_queue->Wait_queue + ); + if ( the_thread == NULL ) { _CORE_message_queue_Free_message_buffer( the_message_queue, the_message ); + _CORE_message_queue_Release( the_message_queue, lock_context ); return; } @@ -103,29 +106,39 @@ void _CORE_message_queue_Seize( the_message, _CORE_message_queue_Get_message_priority( the_message ) ); + _Thread_queue_Extract_critical( + &the_message_queue->Wait_queue, + the_thread, + lock_context + ); + #if defined(RTEMS_MULTIPROCESSING) + _Thread_Dispatch_enable( _Per_CPU_Get() ); + #endif return; } #endif } if ( !wait ) { - _ISR_Enable( level ); + _CORE_message_queue_Release( the_message_queue, lock_context ); executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT; return; } - _Thread_queue_Enter_critical_section( &the_message_queue->Wait_queue ); - executing->Wait.queue = &the_message_queue->Wait_queue; executing->Wait.id = id; executing->Wait.return_argument_second.mutable_object = buffer; executing->Wait.return_argument = size_p; /* Wait.count will be filled in with the message priority */ - _ISR_Enable( level ); - _Thread_queue_Enqueue( + _Thread_queue_Enqueue_critical( &the_message_queue->Wait_queue, executing, STATES_WAITING_FOR_MESSAGE, - timeout + timeout, + CORE_MESSAGE_QUEUE_STATUS_TIMEOUT, + lock_context ); + #if defined(RTEMS_MULTIPROCESSING) + _Thread_Dispatch_enable( _Per_CPU_Get() ); + #endif } diff --git a/cpukit/score/src/coremsgsubmit.c b/cpukit/score/src/coremsgsubmit.c index 4437856b17..37f857917c 100644 --- a/cpukit/score/src/coremsgsubmit.c +++ b/cpukit/score/src/coremsgsubmit.c @@ -38,36 +38,39 @@ CORE_message_queue_Status _CORE_message_queue_Submit( #endif CORE_message_queue_Submit_types submit_type, bool wait, - Watchdog_Interval timeout + Watchdog_Interval timeout, + ISR_lock_Context *lock_context ) { - CORE_message_queue_Buffer_control 
*the_message; - Thread_Control *the_thread; + CORE_message_queue_Buffer_control *the_message; + Thread_Control *the_thread; if ( size > the_message_queue->maximum_message_size ) { + _ISR_lock_ISR_enable( lock_context ); return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE; } + _CORE_message_queue_Acquire_critical( the_message_queue, lock_context ); + /* * Is there a thread currently waiting on this message queue? */ - if ( the_message_queue->number_of_pending_messages == 0 ) { - the_thread = _Thread_queue_Dequeue( &the_message_queue->Wait_queue ); - if ( the_thread ) { - _CORE_message_queue_Copy_buffer( - buffer, - the_thread->Wait.return_argument_second.mutable_object, - size - ); - *(size_t *) the_thread->Wait.return_argument = size; - the_thread->Wait.count = (uint32_t) submit_type; - - #if defined(RTEMS_MULTIPROCESSING) - if ( !_Objects_Is_local_id( the_thread->Object.id ) ) - (*api_message_queue_mp_support) ( the_thread, id ); - #endif - return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL; - } + + the_thread = _CORE_message_queue_Dequeue_receiver( + the_message_queue, + buffer, + size, + submit_type, + lock_context + ); + if ( the_thread != NULL ) { + #if defined(RTEMS_MULTIPROCESSING) + if ( !_Objects_Is_local_id( the_thread->Object.id ) ) + (*api_message_queue_mp_support) ( the_thread, id ); + + _Thread_Dispatch_enable( _Per_CPU_Get() ); + #endif + return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL; } /* @@ -77,23 +80,25 @@ CORE_message_queue_Status _CORE_message_queue_Submit( the_message = _CORE_message_queue_Allocate_message_buffer( the_message_queue ); if ( the_message ) { + the_message->Contents.size = size; + _CORE_message_queue_Set_message_priority( the_message, submit_type ); _CORE_message_queue_Copy_buffer( buffer, the_message->Contents.buffer, size ); - the_message->Contents.size = size; - _CORE_message_queue_Set_message_priority( the_message, submit_type ); _CORE_message_queue_Insert_message( the_message_queue, the_message, submit_type ); + _CORE_message_queue_Release( 
the_message_queue, lock_context ); return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL; } #if !defined(RTEMS_SCORE_COREMSG_ENABLE_BLOCKING_SEND) + _CORE_message_queue_Release( the_message_queue, lock_context ); return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY; #else /* @@ -102,6 +107,7 @@ CORE_message_queue_Status _CORE_message_queue_Submit( * on the queue. */ if ( !wait ) { + _CORE_message_queue_Release( the_message_queue, lock_context ); return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY; } @@ -110,6 +116,7 @@ CORE_message_queue_Status _CORE_message_queue_Submit( * deadly to block in an ISR. */ if ( _ISR_Is_in_progress() ) { + _CORE_message_queue_Release( the_message_queue, lock_context ); return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED; } @@ -119,25 +126,22 @@ CORE_message_queue_Status _CORE_message_queue_Submit( * it as a variable. Doing this emphasizes how dangerous it * would be to use this variable prior to here. */ - { - ISR_Level level; - - _ISR_Disable( level ); - _Thread_queue_Enter_critical_section( &the_message_queue->Wait_queue ); - executing->Wait.queue = &the_message_queue->Wait_queue; - executing->Wait.id = id; - executing->Wait.return_argument_second.immutable_object = buffer; - executing->Wait.option = (uint32_t) size; - executing->Wait.count = submit_type; - _ISR_Enable( level ); - - _Thread_queue_Enqueue( - &the_message_queue->Wait_queue, - executing, - STATES_WAITING_FOR_MESSAGE, - timeout - ); - } + executing->Wait.id = id; + executing->Wait.return_argument_second.immutable_object = buffer; + executing->Wait.option = (uint32_t) size; + executing->Wait.count = submit_type; + + _Thread_queue_Enqueue_critical( + &the_message_queue->Wait_queue, + executing, + STATES_WAITING_FOR_MESSAGE, + timeout, + CORE_MESSAGE_QUEUE_STATUS_TIMEOUT, + lock_context + ); + #if defined(RTEMS_MULTIPROCESSING) + _Thread_Dispatch_enable( _Per_CPU_Get() ); + #endif return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_WAIT; #endif diff --git a/cpukit/score/src/coremutex.c b/cpukit/score/src/coremutex.c 
index b5e8a5ed4b..6bb535a8ec 100644 --- a/cpukit/score/src/coremutex.c +++ b/cpukit/score/src/coremutex.c @@ -48,16 +48,22 @@ CORE_mutex_Status _CORE_mutex_Initialize( if ( is_priority_ceiling || _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ) { Priority_Control ceiling = the_mutex->Attributes.priority_ceiling; + Per_CPU_Control *cpu_self; + + /* The mutex initialization is only protected by the allocator lock */ + cpu_self = _Thread_Dispatch_disable(); /* - * The mutex initialization is only protected by the allocator lock in - * general. Disable thread dispatching before the priority check to - * prevent interference with priority inheritance. + * The test to check for a ceiling violation is a bit arbitrary. In case + * this thread is the owner of a priority inheritance mutex, then it may + * get a higher priority later or anytime on SMP configurations. */ - _Thread_Disable_dispatch(); - if ( is_priority_ceiling && executing->current_priority < ceiling ) { - _Thread_Enable_dispatch(); + /* + * There is no need to undo the previous work since this error aborts + * the object creation. + */ + _Thread_Dispatch_enable( cpu_self ); return CORE_MUTEX_STATUS_CEILING_VIOLATED; } @@ -70,10 +76,10 @@ CORE_mutex_Status _CORE_mutex_Initialize( executing->resource_count++; if ( is_priority_ceiling ) { - _Thread_Change_priority( executing, ceiling, false ); + _Thread_Raise_priority( executing, ceiling ); } - _Thread_Enable_dispatch(); + _Thread_Dispatch_enable( cpu_self ); } } else { the_mutex->nest_count = 0; @@ -83,8 +89,7 @@ CORE_mutex_Status _CORE_mutex_Initialize( _Thread_queue_Initialize( &the_mutex->Wait_queue, _CORE_mutex_Is_fifo( the_mutex_attributes ) ? 
- THREAD_QUEUE_DISCIPLINE_FIFO : THREAD_QUEUE_DISCIPLINE_PRIORITY, - CORE_MUTEX_TIMEOUT + THREAD_QUEUE_DISCIPLINE_FIFO : THREAD_QUEUE_DISCIPLINE_PRIORITY ); return CORE_MUTEX_STATUS_SUCCESSFUL; diff --git a/cpukit/score/src/coremutexseize.c b/cpukit/score/src/coremutexseize.c index 3fc3765ef4..d5f85c5fad 100644 --- a/cpukit/score/src/coremutexseize.c +++ b/cpukit/score/src/coremutexseize.c @@ -21,7 +21,6 @@ #include <rtems/system.h> #include <rtems/score/isr.h> #include <rtems/score/coremuteximpl.h> -#include <rtems/score/schedulerimpl.h> #include <rtems/score/statesimpl.h> #include <rtems/score/thread.h> @@ -53,29 +52,47 @@ void _CORE_mutex_Seize_interrupt_blocking( ISR_lock_Context *lock_context ) { - _Thread_queue_Enter_critical_section( &the_mutex->Wait_queue ); - executing->Wait.queue = &the_mutex->Wait_queue; - _Thread_Disable_dispatch(); - _ISR_lock_ISR_enable( lock_context ); +#if !defined(RTEMS_SMP) + /* + * We must disable thread dispatching here since we enable the interrupts for + * priority inheritance mutexes. + */ + _Thread_Dispatch_disable(); +#endif if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ) { Thread_Control *holder = the_mutex->holder; - _Scheduler_Change_priority_if_higher( - _Scheduler_Get( holder ), - holder, - executing->current_priority, - false - ); +#if !defined(RTEMS_SMP) + /* + * To enable interrupts here works only since exactly one executing thread + * exists and only threads are allowed to seize and surrender mutexes with + * the priority inheritance protocol. On SMP configurations more than one + * executing thread may exist, so here we must not release the lock, since + * otherwise the current holder may be no longer the holder of the mutex + * once we released the lock. 
+ */ + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); +#endif + + _Thread_Raise_priority( holder, executing->current_priority ); + +#if !defined(RTEMS_SMP) + _Thread_queue_Acquire( &the_mutex->Wait_queue, lock_context ); +#endif } - _Thread_queue_Enqueue( + _Thread_queue_Enqueue_critical( &the_mutex->Wait_queue, executing, STATES_WAITING_FOR_MUTEX, - timeout + timeout, + CORE_MUTEX_TIMEOUT, + lock_context ); - _Thread_Enable_dispatch(); +#if !defined(RTEMS_SMP) + _Thread_Dispatch_enable( _Per_CPU_Get() ); +#endif } diff --git a/cpukit/score/src/coremutexsurrender.c b/cpukit/score/src/coremutexsurrender.c index 8fba301de9..d5dde1e8e6 100644 --- a/cpukit/score/src/coremutexsurrender.c +++ b/cpukit/score/src/coremutexsurrender.c @@ -89,11 +89,12 @@ CORE_mutex_Status _CORE_mutex_Surrender( CORE_mutex_Control *the_mutex, #if defined(RTEMS_MULTIPROCESSING) Objects_Id id, - CORE_mutex_API_mp_support_callout api_mutex_mp_support + CORE_mutex_API_mp_support_callout api_mutex_mp_support, #else Objects_Id id __attribute__((unused)), - CORE_mutex_API_mp_support_callout api_mutex_mp_support __attribute__((unused)) + CORE_mutex_API_mp_support_callout api_mutex_mp_support __attribute__((unused)), #endif + ISR_lock_Context *lock_context ) { Thread_Control *the_thread; @@ -110,14 +111,20 @@ CORE_mutex_Status _CORE_mutex_Surrender( */ if ( the_mutex->Attributes.only_owner_release ) { - if ( !_Thread_Is_executing( holder ) ) + if ( !_Thread_Is_executing( holder ) ) { + _ISR_lock_ISR_enable( lock_context ); return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE; + } } + _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context ); + /* XXX already unlocked -- not right status */ - if ( !the_mutex->nest_count ) + if ( !the_mutex->nest_count ) { + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return CORE_MUTEX_STATUS_SUCCESSFUL; + } the_mutex->nest_count--; @@ -130,10 +137,12 @@ CORE_mutex_Status _CORE_mutex_Surrender( #if defined(RTEMS_DEBUG) switch ( 
the_mutex->Attributes.lock_nesting_behavior ) { case CORE_MUTEX_NESTING_ACQUIRES: + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return CORE_MUTEX_STATUS_SUCCESSFUL; #if defined(RTEMS_POSIX_API) case CORE_MUTEX_NESTING_IS_ERROR: /* should never occur */ + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED; #endif case CORE_MUTEX_NESTING_BLOCKS: @@ -141,6 +150,7 @@ CORE_mutex_Status _CORE_mutex_Surrender( break; } #else + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */ return CORE_MUTEX_STATUS_SUCCESSFUL; #endif @@ -155,20 +165,12 @@ CORE_mutex_Status _CORE_mutex_Surrender( CORE_mutex_Status pop_status = _CORE_mutex_Pop_priority( the_mutex, holder ); - if ( pop_status != CORE_MUTEX_STATUS_SUCCESSFUL ) + if ( pop_status != CORE_MUTEX_STATUS_SUCCESSFUL ) { + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); return pop_status; + } holder->resource_count--; - - /* - * Whether or not someone is waiting for the mutex, an - * inherited priority must be lowered if this is the last - * mutex (i.e. resource) this task has. - */ - if ( !_Thread_Owns_resources( holder ) && - holder->real_priority != holder->current_priority ) { - _Thread_Change_priority( holder, holder->real_priority, true ); - } } the_mutex->holder = NULL; @@ -176,20 +178,21 @@ CORE_mutex_Status _CORE_mutex_Surrender( * Now we check if another thread was waiting for this mutex. If so, * transfer the mutex to that thread. */ - if ( ( the_thread = _Thread_queue_Dequeue( &the_mutex->Wait_queue ) ) ) { + if ( ( the_thread = _Thread_queue_First_locked( &the_mutex->Wait_queue ) ) ) { + /* + * We must extract the thread now since this will restore its default + * thread lock. This is necessary to avoid a deadlock in the + * _Thread_Change_priority() below due to a recursive thread queue lock + * acquire. 
+ */ + _Thread_queue_Extract_locked( &the_mutex->Wait_queue, the_thread ); #if defined(RTEMS_MULTIPROCESSING) - if ( !_Objects_Is_local_id( the_thread->Object.id ) ) { - - the_mutex->holder = NULL; - the_mutex->nest_count = 1; - - ( *api_mutex_mp_support)( the_thread, id ); + _Thread_Dispatch_disable(); - } else + if ( _Objects_Is_local_id( the_thread->Object.id ) ) #endif { - the_mutex->holder = the_thread; the_mutex->nest_count = 1; @@ -204,17 +207,55 @@ CORE_mutex_Status _CORE_mutex_Surrender( case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING: _CORE_mutex_Push_priority( the_mutex, the_thread ); the_thread->resource_count++; - if (the_mutex->Attributes.priority_ceiling < - the_thread->current_priority){ - _Thread_Change_priority( - the_thread, - the_mutex->Attributes.priority_ceiling, - false - ); - } + _Thread_Raise_priority( + the_thread, + the_mutex->Attributes.priority_ceiling + ); break; } } + + _Thread_queue_Unblock_critical( + &the_mutex->Wait_queue, + the_thread, + lock_context + ); + +#if defined(RTEMS_MULTIPROCESSING) + if ( !_Objects_Is_local_id( the_thread->Object.id ) ) { + + the_mutex->holder = NULL; + the_mutex->nest_count = 1; + + ( *api_mutex_mp_support)( the_thread, id ); + + } + + _Thread_Dispatch_enable( _Per_CPU_Get() ); +#endif + } else { + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + } + + /* + * Whether or not someone is waiting for the mutex, an + * inherited priority must be lowered if this is the last + * mutex (i.e. resource) this task has. + */ + if ( !_Thread_Owns_resources( holder ) ) { + /* + * Ensure that the holder resource count is visible to all other processors + * and that we read the latest priority restore hint. 
+ */ + _Atomic_Fence( ATOMIC_ORDER_ACQ_REL ); + + if ( holder->priority_restore_hint ) { + Per_CPU_Control *cpu_self; + + cpu_self = _Thread_Dispatch_disable(); + _Thread_Restore_priority( holder ); + _Thread_Dispatch_enable( cpu_self ); + } } return CORE_MUTEX_STATUS_SUCCESSFUL; diff --git a/cpukit/score/src/corerwlock.c b/cpukit/score/src/corerwlock.c index 0d47db4b23..8b74c4119c 100644 --- a/cpukit/score/src/corerwlock.c +++ b/cpukit/score/src/corerwlock.c @@ -36,7 +36,6 @@ void _CORE_RWLock_Initialize( _Thread_queue_Initialize( &the_rwlock->Wait_queue, - THREAD_QUEUE_DISCIPLINE_FIFO, - CORE_RWLOCK_TIMEOUT + THREAD_QUEUE_DISCIPLINE_FIFO ); } diff --git a/cpukit/score/src/corerwlockobtainread.c b/cpukit/score/src/corerwlockobtainread.c index 203680f6dc..0dea50f1e8 100644 --- a/cpukit/score/src/corerwlockobtainread.c +++ b/cpukit/score/src/corerwlockobtainread.c @@ -32,7 +32,7 @@ void _CORE_RWLock_Obtain_for_reading( CORE_RWLock_API_mp_support_callout api_rwlock_mp_support ) { - ISR_Level level; + ISR_lock_Context lock_context; /* * If unlocked, then OK to read. @@ -40,21 +40,21 @@ void _CORE_RWLock_Obtain_for_reading( * If any thread is waiting, then we wait. 
*/ - _ISR_Disable( level ); + _Thread_queue_Acquire( &the_rwlock->Wait_queue, &lock_context ); switch ( the_rwlock->current_state ) { case CORE_RWLOCK_UNLOCKED: the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_READING; the_rwlock->number_of_readers += 1; - _ISR_Enable( level ); + _Thread_queue_Release( &the_rwlock->Wait_queue, &lock_context ); executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL; return; case CORE_RWLOCK_LOCKED_FOR_READING: { Thread_Control *waiter; - waiter = _Thread_queue_First( &the_rwlock->Wait_queue ); + waiter = _Thread_queue_First_locked( &the_rwlock->Wait_queue ); if ( !waiter ) { the_rwlock->number_of_readers += 1; - _ISR_Enable( level ); + _Thread_queue_Release( &the_rwlock->Wait_queue, &lock_context ); executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL; return; } @@ -69,7 +69,7 @@ void _CORE_RWLock_Obtain_for_reading( */ if ( !wait ) { - _ISR_Enable( level ); + _Thread_queue_Release( &the_rwlock->Wait_queue, &lock_context ); executing->Wait.return_code = CORE_RWLOCK_UNAVAILABLE; return; } @@ -78,18 +78,17 @@ void _CORE_RWLock_Obtain_for_reading( * We need to wait to enter this critical section */ - _Thread_queue_Enter_critical_section( &the_rwlock->Wait_queue ); - executing->Wait.queue = &the_rwlock->Wait_queue; executing->Wait.id = id; executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_READ; executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL; - _ISR_Enable( level ); - _Thread_queue_Enqueue( + _Thread_queue_Enqueue_critical( &the_rwlock->Wait_queue, executing, STATES_WAITING_FOR_RWLOCK, - timeout + timeout, + CORE_RWLOCK_TIMEOUT, + &lock_context ); /* return to API level so it can dispatch and we block */ diff --git a/cpukit/score/src/corerwlockobtainwrite.c b/cpukit/score/src/corerwlockobtainwrite.c index 3499bcd4c8..76dfae2776 100644 --- a/cpukit/score/src/corerwlockobtainwrite.c +++ b/cpukit/score/src/corerwlockobtainwrite.c @@ -32,7 +32,7 @@ void _CORE_RWLock_Obtain_for_writing( CORE_RWLock_API_mp_support_callout 
api_rwlock_mp_support ) { - ISR_Level level; + ISR_lock_Context lock_context; /* * If unlocked, then OK to read. @@ -41,13 +41,13 @@ void _CORE_RWLock_Obtain_for_writing( * If any thread is waiting, then we wait. */ - _ISR_Disable( level ); + _Thread_queue_Acquire( &the_rwlock->Wait_queue, &lock_context ); switch ( the_rwlock->current_state ) { case CORE_RWLOCK_UNLOCKED: - the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_WRITING; - _ISR_Enable( level ); - executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL; - return; + the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_WRITING; + _Thread_queue_Release( &the_rwlock->Wait_queue, &lock_context ); + executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL; + return; case CORE_RWLOCK_LOCKED_FOR_READING: case CORE_RWLOCK_LOCKED_FOR_WRITING: @@ -59,7 +59,7 @@ void _CORE_RWLock_Obtain_for_writing( */ if ( !wait ) { - _ISR_Enable( level ); + _Thread_queue_Release( &the_rwlock->Wait_queue, &lock_context ); executing->Wait.return_code = CORE_RWLOCK_UNAVAILABLE; return; } @@ -68,18 +68,17 @@ void _CORE_RWLock_Obtain_for_writing( * We need to wait to enter this critical section */ - _Thread_queue_Enter_critical_section( &the_rwlock->Wait_queue ); - executing->Wait.queue = &the_rwlock->Wait_queue; executing->Wait.id = id; executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE; executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL; - _ISR_Enable( level ); - _Thread_queue_Enqueue( + _Thread_queue_Enqueue_critical( &the_rwlock->Wait_queue, executing, STATES_WAITING_FOR_RWLOCK, - timeout + timeout, + CORE_RWLOCK_TIMEOUT, + &lock_context ); /* return to API level so it can dispatch and we block */ diff --git a/cpukit/score/src/corerwlockrelease.c b/cpukit/score/src/corerwlockrelease.c index efaf67d351..bd39213c87 100644 --- a/cpukit/score/src/corerwlockrelease.c +++ b/cpukit/score/src/corerwlockrelease.c @@ -87,7 +87,7 @@ CORE_RWLock_Status _CORE_RWLock_Release( next->Wait.option == CORE_RWLOCK_THREAD_WAITING_FOR_WRITE ) return 
CORE_RWLOCK_SUCCESSFUL; the_rwlock->number_of_readers += 1; - _Thread_queue_Extract( &the_rwlock->Wait_queue, next ); + _Thread_queue_Extract( next ); } } diff --git a/cpukit/score/src/coresem.c b/cpukit/score/src/coresem.c index eb1ba7e283..2475c349a7 100644 --- a/cpukit/score/src/coresem.c +++ b/cpukit/score/src/coresem.c @@ -33,7 +33,6 @@ void _CORE_semaphore_Initialize( _Thread_queue_Initialize( &the_semaphore->Wait_queue, _CORE_semaphore_Is_priority( the_semaphore_attributes ) ? - THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO, - CORE_SEMAPHORE_TIMEOUT + THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO ); } diff --git a/cpukit/score/src/coresemflush.c b/cpukit/score/src/coresemflush.c deleted file mode 100644 index 1db0db926b..0000000000 --- a/cpukit/score/src/coresemflush.c +++ /dev/null @@ -1,39 +0,0 @@ -/** - * @file - * - * @brief Core Semaphore Flush - * @ingroup ScoreSemaphore - */ - -/* - * COPYRIGHT (c) 1989-1999. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/system.h> -#include <rtems/score/isr.h> -#include <rtems/score/coresemimpl.h> -#include <rtems/score/thread.h> - -void _CORE_semaphore_Flush( - CORE_semaphore_Control *the_semaphore, - Thread_queue_Flush_callout remote_extract_callout, - uint32_t status -) -{ - - _Thread_queue_Flush( - &the_semaphore->Wait_queue, - remote_extract_callout, - status - ); - -} diff --git a/cpukit/score/src/coresemseize.c b/cpukit/score/src/coresemseize.c deleted file mode 100644 index 9c0db96204..0000000000 --- a/cpukit/score/src/coresemseize.c +++ /dev/null @@ -1,73 +0,0 @@ -/** - * @file - * - * @brief Core Semaphore Seize - * - * @ingroup ScoreSemaphore - */ - -/* - * COPYRIGHT (c) 1989-2008. 
- * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/system.h> -#include <rtems/score/isr.h> -#include <rtems/score/coresemimpl.h> -#include <rtems/score/thread.h> - -#if defined(RTEMS_SCORE_CORESEM_ENABLE_SEIZE_BODY) - -void _CORE_semaphore_Seize( - CORE_semaphore_Control *the_semaphore, - Thread_Control *executing, - Objects_Id id, - bool wait, - Watchdog_Interval timeout -) -{ - ISR_Level level; - - executing->Wait.return_code = CORE_SEMAPHORE_STATUS_SUCCESSFUL; - _ISR_Disable( level ); - if ( the_semaphore->count != 0 ) { - the_semaphore->count -= 1; - _ISR_Enable( level ); - return; - } - - /* - * If the semaphore was not available and the caller was not willing - * to block, then return immediately with a status indicating that - * the semaphore was not available and the caller never blocked. - */ - if ( !wait ) { - _ISR_Enable( level ); - executing->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT; - return; - } - - /* - * If the semaphore is not available and the caller is willing to - * block, then we now block the caller with optional timeout. - */ - _Thread_queue_Enter_critical_section( &the_semaphore->Wait_queue ); - executing->Wait.queue = &the_semaphore->Wait_queue; - executing->Wait.id = id; - _ISR_Enable( level ); - _Thread_queue_Enqueue( - &the_semaphore->Wait_queue, - executing, - STATES_WAITING_FOR_SEMAPHORE, - timeout - ); -} -#endif diff --git a/cpukit/score/src/coresemsurrender.c b/cpukit/score/src/coresemsurrender.c deleted file mode 100644 index 58ba6a3fdf..0000000000 --- a/cpukit/score/src/coresemsurrender.c +++ /dev/null @@ -1,54 +0,0 @@ -/** - * @file - * - * @brief Surrenders a Unit to a Semaphore - * - * @ingroup ScoreSemaphore - */ - -/* - * COPYRIGHT (c) 1989-1999. 
- * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/coresemimpl.h> -#include <rtems/score/objectimpl.h> - -CORE_semaphore_Status _CORE_semaphore_Surrender( - CORE_semaphore_Control *the_semaphore, - Objects_Id id, - CORE_semaphore_API_mp_support_callout api_semaphore_mp_support -) -{ - Thread_Control *the_thread; - ISR_Level level; - CORE_semaphore_Status status; - - status = CORE_SEMAPHORE_STATUS_SUCCESSFUL; - - if ( (the_thread = _Thread_queue_Dequeue(&the_semaphore->Wait_queue)) ) { - -#if defined(RTEMS_MULTIPROCESSING) - if ( !_Objects_Is_local_id( the_thread->Object.id ) ) - (*api_semaphore_mp_support) ( the_thread, id ); -#endif - - } else { - _ISR_Disable( level ); - if ( the_semaphore->count < the_semaphore->Attributes.maximum_count ) - the_semaphore->count += 1; - else - status = CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED; - _ISR_Enable( level ); - } - - return status; -} diff --git a/cpukit/score/src/coretod.c b/cpukit/score/src/coretod.c index 2deeebaeb3..975ffefc7d 100644 --- a/cpukit/score/src/coretod.c +++ b/cpukit/score/src/coretod.c @@ -20,24 +20,16 @@ #include <rtems/score/todimpl.h> -static uint32_t _TOD_Nanoseconds_since_tick_default_handler( void ) -{ - return 0; -} - void _TOD_Handler_initialization(void) { - TOD_Control *tod = &_TOD; - - _ISR_lock_Initialize( &tod->lock, "TOD" ); - - _Timestamp_Set( &tod->now, TOD_SECONDS_1970_THROUGH_1988, 0 ); + struct timespec ts; - _Timestamp_Set_to_zero( &tod->uptime ); + _Timecounter_Initialize(); - tod->nanoseconds_since_last_tick = - _TOD_Nanoseconds_since_tick_default_handler; + ts.tv_sec = TOD_SECONDS_1970_THROUGH_1988; + ts.tv_nsec = 0; + _Timecounter_Set_clock( &ts ); /* TOD has not been set */ - tod->is_set = false; + _TOD.is_set = false; } diff --git 
a/cpukit/score/src/coretodadjust.c b/cpukit/score/src/coretodadjust.c index 09cf01ad7c..6097e207dc 100644 --- a/cpukit/score/src/coretodadjust.c +++ b/cpukit/score/src/coretodadjust.c @@ -25,8 +25,7 @@ void _TOD_Adjust( const Timestamp_Control delta ) { - Timestamp_Control tod; - Timestamp_Control *tod_ptr; + Timestamp_Control tod; /* * Currently, RTEMS does the adjustment in one movement. @@ -41,11 +40,11 @@ void _TOD_Adjust( */ _Thread_Disable_dispatch(); - tod_ptr = _TOD_Get_with_nanoseconds( &tod, &_TOD.now ); + _TOD_Get( &tod ); - _Timestamp_Add_to( tod_ptr, &delta ); + _Timestamp_Add_to( &tod, &delta ); - _TOD_Set_with_timestamp( tod_ptr ); + _TOD_Set_with_timestamp( &tod ); _Thread_Enable_dispatch(); } diff --git a/cpukit/score/src/coretodget.c b/cpukit/score/src/coretodget.c deleted file mode 100644 index 70eb238a09..0000000000 --- a/cpukit/score/src/coretodget.c +++ /dev/null @@ -1,46 +0,0 @@ -/** - * @file - * - * @brief Returns a Current TOD with Nanosecond Granularity - * @ingroup ScoreTOD - */ - -/* - * COPYRIGHT (c) 1989-2014. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#if HAVE_CONFIG_H - #include "config.h" -#endif - -#include <rtems/score/todimpl.h> -#include <rtems/score/isrlevel.h> - -Timestamp_Control *_TOD_Get_with_nanoseconds( - Timestamp_Control *snapshot, - const Timestamp_Control *clock -) -{ - TOD_Control *tod = &_TOD; - ISR_lock_Context lock_context; - Timestamp_Control offset; - Timestamp_Control now; - uint32_t nanoseconds; - - _TOD_Acquire( tod, &lock_context ); - nanoseconds = ( *tod->nanoseconds_since_last_tick )(); - now = *clock; - _TOD_Release( tod, &lock_context ); - - _Timestamp_Set( &offset, 0, nanoseconds ); - _Timestamp_Add_to( &now, &offset ); - - *snapshot = now; - - return snapshot; -} diff --git a/cpukit/score/src/coretodgetuptimetimespec.c b/cpukit/score/src/coretodgetuptimetimespec.c deleted file mode 100644 index 5980b2f4bd..0000000000 --- a/cpukit/score/src/coretodgetuptimetimespec.c +++ /dev/null @@ -1,32 +0,0 @@ -/** - * @file - * - * @brief Get Uptime as struct timespec - * @ingroup ScoreTOD - */ - -/* - * COPYRIGHT (c) 1989-2014. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/todimpl.h> - -void _TOD_Get_uptime_as_timespec( - struct timespec *uptime -) -{ - Timestamp_Control uptime_ts; - - /* assume time checked for NULL by caller */ - _TOD_Get_uptime( &uptime_ts ); - _Timestamp_To_timespec( &uptime_ts, uptime ); -} diff --git a/cpukit/score/src/coretodsecondssinceepoch.c b/cpukit/score/src/coretodsecondssinceepoch.c deleted file mode 100644 index b7bd2705c9..0000000000 --- a/cpukit/score/src/coretodsecondssinceepoch.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2013 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Dornierstr. 
4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H - #include "config.h" -#endif - -#include <rtems/score/todimpl.h> - -uint32_t _TOD_Seconds_since_epoch( void ) -{ - TOD_Control *tod = &_TOD; - ISR_lock_Context lock_context; - Timestamp_Control now; - - _TOD_Acquire( tod, &lock_context ); - now = tod->now; - _TOD_Release( tod, &lock_context ); - - return _Timestamp_Get_seconds( &now ); -} diff --git a/cpukit/score/src/coretodset.c b/cpukit/score/src/coretodset.c index 3d117589a1..3230179414 100644 --- a/cpukit/score/src/coretodset.c +++ b/cpukit/score/src/coretodset.c @@ -26,20 +26,21 @@ void _TOD_Set_with_timestamp( const Timestamp_Control *tod_as_timestamp ) { - TOD_Control *tod = &_TOD; - uint32_t nanoseconds = _Timestamp_Get_nanoseconds( tod_as_timestamp ); - Watchdog_Interval seconds_next = _Timestamp_Get_seconds( tod_as_timestamp ); + struct timespec ts; + uint32_t nanoseconds; + Watchdog_Interval seconds_next; Watchdog_Interval seconds_now; - ISR_lock_Context lock_context; Watchdog_Header *header; + _Timestamp_To_timespec( tod_as_timestamp, &ts ); + nanoseconds = ts.tv_nsec; + seconds_next = ts.tv_sec; + _Thread_Disable_dispatch(); seconds_now = _TOD_Seconds_since_epoch(); - _TOD_Acquire( tod, &lock_context ); - tod->now = *tod_as_timestamp; - _TOD_Release( tod, &lock_context ); + _Timecounter_Set_clock( &ts ); header = &_Watchdog_Seconds_header; @@ -48,8 +49,8 @@ void _TOD_Set_with_timestamp( else _Watchdog_Adjust_forward( header, seconds_next - seconds_now ); - tod->seconds_trigger = nanoseconds; - tod->is_set = true; + _TOD.seconds_trigger = nanoseconds; + _TOD.is_set = true; _Thread_Enable_dispatch(); } diff --git a/cpukit/score/src/coretodtickle.c b/cpukit/score/src/coretodtickle.c index 9116fc1a3b..3d7c71e1c1 100644 --- 
a/cpukit/score/src/coretodtickle.c +++ b/cpukit/score/src/coretodtickle.c @@ -24,30 +24,10 @@ void _TOD_Tickle_ticks( void ) { - TOD_Control *tod = &_TOD; - ISR_lock_Context lock_context; - Timestamp_Control tick; - uint32_t nanoseconds_per_tick; - - nanoseconds_per_tick = rtems_configuration_get_nanoseconds_per_tick(); - - /* Convert the tick quantum to a timestamp */ - _Timestamp_Set( &tick, 0, nanoseconds_per_tick ); - /* Update the counter of ticks since boot */ _Watchdog_Ticks_since_boot += 1; - _TOD_Acquire( tod, &lock_context ); - - /* Update the uptime */ - _Timestamp_Add_to( &tod->uptime, &tick ); - - /* Update the current TOD */ - _Timestamp_Add_to( &tod->now, &tick ); - - _TOD_Release( tod, &lock_context ); - - _TOD.seconds_trigger += nanoseconds_per_tick; + _TOD.seconds_trigger += rtems_configuration_get_nanoseconds_per_tick(); if ( _TOD.seconds_trigger >= 1000000000UL ) { _TOD.seconds_trigger -= 1000000000UL; _Watchdog_Tickle_seconds(); diff --git a/cpukit/score/src/debugisownerofallocator.c b/cpukit/score/src/debugisownerofallocator.c index 7879902025..57da2ca001 100644 --- a/cpukit/score/src/debugisownerofallocator.c +++ b/cpukit/score/src/debugisownerofallocator.c @@ -18,8 +18,7 @@ #include <rtems/score/assert.h> #include <rtems/score/apimutex.h> -#include <rtems/score/thread.h> -#include <rtems/score/threaddispatch.h> +#include <rtems/score/threadimpl.h> #if defined( RTEMS_DEBUG ) bool _Debug_Is_owner_of_allocator( void ) @@ -27,20 +26,12 @@ API_Mutex_Control *mutex = _RTEMS_Allocator_Mutex; bool owner; - /* - * We have to synchronize with the _CORE_mutex_Surrender() operation, - * otherwise we may observe an outdated mutex holder. 
- */ - _Thread_Disable_dispatch(); - if ( mutex != NULL ) { - owner = mutex->Mutex.holder == _Thread_Executing; + owner = mutex->Mutex.holder == _Thread_Get_executing(); } else { owner = false; } - _Thread_Enable_dispatch(); - return owner; } #endif diff --git a/cpukit/score/src/kern_tc.c b/cpukit/score/src/kern_tc.c new file mode 100644 index 0000000000..54799273b7 --- /dev/null +++ b/cpukit/score/src/kern_tc.c @@ -0,0 +1,2220 @@ +/*- + * ---------------------------------------------------------------------------- + * "THE BEER-WARE LICENSE" (Revision 42): + * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you + * can do whatever you want with this stuff. If we meet some day, and you think + * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp + * ---------------------------------------------------------------------------- + * + * Copyright (c) 2011 The FreeBSD Foundation + * All rights reserved. + * + * Portions of this software were developed by Julien Ridoux at the University + * of Melbourne under sponsorship from the FreeBSD Foundation. 
+ */ + +#ifdef __rtems__ +#define _KERNEL +#define bintime _Timecounter_Bintime +#define binuptime _Timecounter_Binuptime +#define boottimebin _Timecounter_Boottimebin +#define getbintime _Timecounter_Getbintime +#define getbinuptime _Timecounter_Getbinuptime +#define getmicrotime _Timecounter_Getmicrotime +#define getmicrouptime _Timecounter_Getmicrouptime +#define getnanotime _Timecounter_Getnanotime +#define getnanouptime _Timecounter_Getnanouptime +#define microtime _Timecounter_Microtime +#define microuptime _Timecounter_Microuptime +#define nanotime _Timecounter_Nanotime +#define nanouptime _Timecounter_Nanouptime +#define tc_init _Timecounter_Install +#define timecounter _Timecounter +#define time_second _Timecounter_Time_second +#define time_uptime _Timecounter_Time_uptime +#include <rtems/score/timecounterimpl.h> +#include <rtems/score/watchdogimpl.h> +#endif /* __rtems__ */ +#include <sys/cdefs.h> +__FBSDID("$FreeBSD r277406 2015-01-20T03:54:30Z$"); + +#include "opt_compat.h" +#include "opt_ntp.h" +#include "opt_ffclock.h" + +#include <sys/param.h> +#ifndef __rtems__ +#include <sys/kernel.h> +#include <sys/limits.h> +#else /* __rtems__ */ +#include <limits.h> +#endif /* __rtems__ */ +#ifdef FFCLOCK +#include <sys/lock.h> +#include <sys/mutex.h> +#endif +#ifndef __rtems__ +#include <sys/sysctl.h> +#include <sys/syslog.h> +#include <sys/systm.h> +#endif /* __rtems__ */ +#include <sys/timeffc.h> +#include <sys/timepps.h> +#include <sys/timetc.h> +#include <sys/timex.h> +#ifndef __rtems__ +#include <sys/vdso.h> +#endif /* __rtems__ */ +#ifdef __rtems__ +#include <rtems.h> +ISR_LOCK_DEFINE(static, _Timecounter_Lock, "Timecounter"); +#define hz rtems_clock_get_ticks_per_second() +#define printf(...) +#define log(...) +static inline int +fls(int x) +{ + return x ? 
sizeof(x) * 8 - __builtin_clz(x) : 0; +} +/* FIXME: https://devel.rtems.org/ticket/2348 */ +#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0) +#endif /* __rtems__ */ + +/* + * A large step happens on boot. This constant detects such steps. + * It is relatively small so that ntp_update_second gets called enough + * in the typical 'missed a couple of seconds' case, but doesn't loop + * forever when the time step is large. + */ +#define LARGE_STEP 200 + +/* + * Implement a dummy timecounter which we can use until we get a real one + * in the air. This allows the console and other early stuff to use + * time services. + */ + +static uint32_t +dummy_get_timecount(struct timecounter *tc) +{ +#ifndef __rtems__ + static uint32_t now; + + return (++now); +#else /* __rtems__ */ + return 0; +#endif /* __rtems__ */ +} + +static struct timecounter dummy_timecounter = { + dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000 +}; + +struct timehands { + /* These fields must be initialized by the driver. */ + struct timecounter *th_counter; + int64_t th_adjustment; + uint64_t th_scale; + uint32_t th_offset_count; + struct bintime th_offset; + struct timeval th_microtime; + struct timespec th_nanotime; + /* Fields not to be copied in tc_windup start with th_generation. 
*/ + volatile uint32_t th_generation; + struct timehands *th_next; +}; + +#if defined(RTEMS_SMP) +static struct timehands th0; +static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0}; +static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9}; +static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8}; +static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7}; +static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6}; +static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5}; +static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4}; +static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3}; +static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2}; +#endif +static struct timehands th0 = { + &dummy_timecounter, + 0, + (uint64_t)-1 / 1000000, + 0, + {1, 0}, + {0, 0}, + {0, 0}, + 1, +#if defined(RTEMS_SMP) + &th1 +#else + &th0 +#endif +}; + +static struct timehands *volatile timehands = &th0; +struct timecounter *timecounter = &dummy_timecounter; +static struct timecounter *timecounters = &dummy_timecounter; + +#ifndef __rtems__ +int tc_min_ticktock_freq = 1; +#endif /* __rtems__ */ + +volatile time_t time_second = 1; +volatile time_t time_uptime = 1; + +struct bintime boottimebin; +#ifndef __rtems__ +struct timeval boottime; +static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS); +SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD, + NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime"); + +SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); +static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, ""); + +static int timestepwarnings; +SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW, + ×tepwarnings, 0, "Log time steps"); + +struct bintime bt_timethreshold; +struct bintime bt_tickthreshold; 
+sbintime_t sbt_timethreshold; +sbintime_t sbt_tickthreshold; +struct bintime tc_tick_bt; +sbintime_t tc_tick_sbt; +int tc_precexp; +int tc_timepercentage = TC_DEFAULTPERC; +static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS); +SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation, + CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0, + sysctl_kern_timecounter_adjprecision, "I", + "Allowed time interval deviation in percents"); +#endif /* __rtems__ */ + +static void tc_windup(void); +#ifndef __rtems__ +static void cpu_tick_calibrate(int); +#endif /* __rtems__ */ + +void dtrace_getnanotime(struct timespec *tsp); + +#ifndef __rtems__ +static int +sysctl_kern_boottime(SYSCTL_HANDLER_ARGS) +{ +#ifndef __mips__ +#ifdef SCTL_MASK32 + int tv[2]; + + if (req->flags & SCTL_MASK32) { + tv[0] = boottime.tv_sec; + tv[1] = boottime.tv_usec; + return SYSCTL_OUT(req, tv, sizeof(tv)); + } else +#endif +#endif + return SYSCTL_OUT(req, &boottime, sizeof(boottime)); +} + +static int +sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS) +{ + uint32_t ncount; + struct timecounter *tc = arg1; + + ncount = tc->tc_get_timecount(tc); + return sysctl_handle_int(oidp, &ncount, 0, req); +} + +static int +sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS) +{ + uint64_t freq; + struct timecounter *tc = arg1; + + freq = tc->tc_frequency; + return sysctl_handle_64(oidp, &freq, 0, req); +} +#endif /* __rtems__ */ + +/* + * Return the difference between the timehands' counter value now and what + * was when we copied it to the timehands' offset_count. + */ +static __inline uint32_t +tc_delta(struct timehands *th) +{ + struct timecounter *tc; + + tc = th->th_counter; + return ((tc->tc_get_timecount(tc) - th->th_offset_count) & + tc->tc_counter_mask); +} + +/* + * Functions for reading the time. We have to loop until we are sure that + * the timehands that we operated on was not updated under our feet. See + * the comment in <sys/time.h> for a description of these 12 functions. 
+ */ + +#ifdef FFCLOCK +void +fbclock_binuptime(struct bintime *bt) +{ + struct timehands *th; + unsigned int gen; + + do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + bintime_addx(bt, th->th_scale * tc_delta(th)); + } while (gen == 0 || gen != th->th_generation); +} + +void +fbclock_nanouptime(struct timespec *tsp) +{ + struct bintime bt; + + fbclock_binuptime(&bt); + bintime2timespec(&bt, tsp); +} + +void +fbclock_microuptime(struct timeval *tvp) +{ + struct bintime bt; + + fbclock_binuptime(&bt); + bintime2timeval(&bt, tvp); +} + +void +fbclock_bintime(struct bintime *bt) +{ + + fbclock_binuptime(bt); + bintime_add(bt, &boottimebin); +} + +void +fbclock_nanotime(struct timespec *tsp) +{ + struct bintime bt; + + fbclock_bintime(&bt); + bintime2timespec(&bt, tsp); +} + +void +fbclock_microtime(struct timeval *tvp) +{ + struct bintime bt; + + fbclock_bintime(&bt); + bintime2timeval(&bt, tvp); +} + +void +fbclock_getbinuptime(struct bintime *bt) +{ + struct timehands *th; + unsigned int gen; + + do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + } while (gen == 0 || gen != th->th_generation); +} + +void +fbclock_getnanouptime(struct timespec *tsp) +{ + struct timehands *th; + unsigned int gen; + + do { + th = timehands; + gen = th->th_generation; + bintime2timespec(&th->th_offset, tsp); + } while (gen == 0 || gen != th->th_generation); +} + +void +fbclock_getmicrouptime(struct timeval *tvp) +{ + struct timehands *th; + unsigned int gen; + + do { + th = timehands; + gen = th->th_generation; + bintime2timeval(&th->th_offset, tvp); + } while (gen == 0 || gen != th->th_generation); +} + +void +fbclock_getbintime(struct bintime *bt) +{ + struct timehands *th; + unsigned int gen; + + do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + } while (gen == 0 || gen != th->th_generation); + bintime_add(bt, &boottimebin); +} + +void +fbclock_getnanotime(struct timespec *tsp) +{ + struct timehands *th; + 
unsigned int gen; + + do { + th = timehands; + gen = th->th_generation; + *tsp = th->th_nanotime; + } while (gen == 0 || gen != th->th_generation); +} + +void +fbclock_getmicrotime(struct timeval *tvp) +{ + struct timehands *th; + unsigned int gen; + + do { + th = timehands; + gen = th->th_generation; + *tvp = th->th_microtime; + } while (gen == 0 || gen != th->th_generation); +} +#else /* !FFCLOCK */ +void +binuptime(struct bintime *bt) +{ + struct timehands *th; + uint32_t gen; + + do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + bintime_addx(bt, th->th_scale * tc_delta(th)); + } while (gen == 0 || gen != th->th_generation); +} + +void +nanouptime(struct timespec *tsp) +{ + struct bintime bt; + + binuptime(&bt); + bintime2timespec(&bt, tsp); +} + +void +microuptime(struct timeval *tvp) +{ + struct bintime bt; + + binuptime(&bt); + bintime2timeval(&bt, tvp); +} + +void +bintime(struct bintime *bt) +{ + + binuptime(bt); + bintime_add(bt, &boottimebin); +} + +void +nanotime(struct timespec *tsp) +{ + struct bintime bt; + + bintime(&bt); + bintime2timespec(&bt, tsp); +} + +void +microtime(struct timeval *tvp) +{ + struct bintime bt; + + bintime(&bt); + bintime2timeval(&bt, tvp); +} + +void +getbinuptime(struct bintime *bt) +{ + struct timehands *th; + uint32_t gen; + + do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + } while (gen == 0 || gen != th->th_generation); +} + +void +getnanouptime(struct timespec *tsp) +{ + struct timehands *th; + uint32_t gen; + + do { + th = timehands; + gen = th->th_generation; + bintime2timespec(&th->th_offset, tsp); + } while (gen == 0 || gen != th->th_generation); +} + +void +getmicrouptime(struct timeval *tvp) +{ + struct timehands *th; + uint32_t gen; + + do { + th = timehands; + gen = th->th_generation; + bintime2timeval(&th->th_offset, tvp); + } while (gen == 0 || gen != th->th_generation); +} + +void +getbintime(struct bintime *bt) +{ + struct timehands *th; + uint32_t gen; + + 
do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + } while (gen == 0 || gen != th->th_generation); + bintime_add(bt, &boottimebin); +} + +void +getnanotime(struct timespec *tsp) +{ + struct timehands *th; + uint32_t gen; + + do { + th = timehands; + gen = th->th_generation; + *tsp = th->th_nanotime; + } while (gen == 0 || gen != th->th_generation); +} + +void +getmicrotime(struct timeval *tvp) +{ + struct timehands *th; + uint32_t gen; + + do { + th = timehands; + gen = th->th_generation; + *tvp = th->th_microtime; + } while (gen == 0 || gen != th->th_generation); +} +#endif /* FFCLOCK */ + +#ifdef FFCLOCK +/* + * Support for feed-forward synchronization algorithms. This is heavily inspired + * by the timehands mechanism but kept independent from it. *_windup() functions + * have some connection to avoid accessing the timecounter hardware more than + * necessary. + */ + +/* Feed-forward clock estimates kept updated by the synchronization daemon. */ +struct ffclock_estimate ffclock_estimate; +struct bintime ffclock_boottime; /* Feed-forward boot time estimate. */ +uint32_t ffclock_status; /* Feed-forward clock status. */ +int8_t ffclock_updated; /* New estimates are available. */ +struct mtx ffclock_mtx; /* Mutex on ffclock_estimate. 
*/ + +struct fftimehands { + struct ffclock_estimate cest; + struct bintime tick_time; + struct bintime tick_time_lerp; + ffcounter tick_ffcount; + uint64_t period_lerp; + volatile uint8_t gen; + struct fftimehands *next; +}; + +#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x)) + +static struct fftimehands ffth[10]; +static struct fftimehands *volatile fftimehands = ffth; + +static void +ffclock_init(void) +{ + struct fftimehands *cur; + struct fftimehands *last; + + memset(ffth, 0, sizeof(ffth)); + + last = ffth + NUM_ELEMENTS(ffth) - 1; + for (cur = ffth; cur < last; cur++) + cur->next = cur + 1; + last->next = ffth; + + ffclock_updated = 0; + ffclock_status = FFCLOCK_STA_UNSYNC; + mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF); +} + +/* + * Reset the feed-forward clock estimates. Called from inittodr() to get things + * kick started and uses the timecounter nominal frequency as a first period + * estimate. Note: this function may be called several time just after boot. + * Note: this is the only function that sets the value of boot time for the + * monotonic (i.e. uptime) version of the feed-forward clock. 
+ */ +void +ffclock_reset_clock(struct timespec *ts) +{ + struct timecounter *tc; + struct ffclock_estimate cest; + + tc = timehands->th_counter; + memset(&cest, 0, sizeof(struct ffclock_estimate)); + + timespec2bintime(ts, &ffclock_boottime); + timespec2bintime(ts, &(cest.update_time)); + ffclock_read_counter(&cest.update_ffcount); + cest.leapsec_next = 0; + cest.period = ((1ULL << 63) / tc->tc_frequency) << 1; + cest.errb_abs = 0; + cest.errb_rate = 0; + cest.status = FFCLOCK_STA_UNSYNC; + cest.leapsec_total = 0; + cest.leapsec = 0; + + mtx_lock(&ffclock_mtx); + bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate)); + ffclock_updated = INT8_MAX; + mtx_unlock(&ffclock_mtx); + + printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name, + (unsigned long long)tc->tc_frequency, (long)ts->tv_sec, + (unsigned long)ts->tv_nsec); +} + +/* + * Sub-routine to convert a time interval measured in RAW counter units to time + * in seconds stored in bintime format. + * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be + * larger than the max value of u_int (on 32 bit architecture). Loop to consume + * extra cycles. + */ +static void +ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt) +{ + struct bintime bt2; + ffcounter delta, delta_max; + + delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1; + bintime_clear(bt); + do { + if (ffdelta > delta_max) + delta = delta_max; + else + delta = ffdelta; + bt2.sec = 0; + bt2.frac = period; + bintime_mul(&bt2, (unsigned int)delta); + bintime_add(bt, &bt2); + ffdelta -= delta; + } while (ffdelta > 0); +} + +/* + * Update the fftimehands. + * Push the tick ffcount and time(s) forward based on current clock estimate. + * The conversion from ffcounter to bintime relies on the difference clock + * principle, whose accuracy relies on computing small time intervals. 
If a new + * clock estimate has been passed by the synchronisation daemon, make it + * current, and compute the linear interpolation for monotonic time if needed. + */ +static void +ffclock_windup(unsigned int delta) +{ + struct ffclock_estimate *cest; + struct fftimehands *ffth; + struct bintime bt, gap_lerp; + ffcounter ffdelta; + uint64_t frac; + unsigned int polling; + uint8_t forward_jump, ogen; + + /* + * Pick the next timehand, copy current ffclock estimates and move tick + * times and counter forward. + */ + forward_jump = 0; + ffth = fftimehands->next; + ogen = ffth->gen; + ffth->gen = 0; + cest = &ffth->cest; + bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate)); + ffdelta = (ffcounter)delta; + ffth->period_lerp = fftimehands->period_lerp; + + ffth->tick_time = fftimehands->tick_time; + ffclock_convert_delta(ffdelta, cest->period, &bt); + bintime_add(&ffth->tick_time, &bt); + + ffth->tick_time_lerp = fftimehands->tick_time_lerp; + ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt); + bintime_add(&ffth->tick_time_lerp, &bt); + + ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta; + + /* + * Assess the status of the clock, if the last update is too old, it is + * likely the synchronisation daemon is dead and the clock is free + * running. + */ + if (ffclock_updated == 0) { + ffdelta = ffth->tick_ffcount - cest->update_ffcount; + ffclock_convert_delta(ffdelta, cest->period, &bt); + if (bt.sec > 2 * FFCLOCK_SKM_SCALE) + ffclock_status |= FFCLOCK_STA_UNSYNC; + } + + /* + * If available, grab updated clock estimates and make them current. + * Recompute time at this tick using the updated estimates. The clock + * estimates passed the feed-forward synchronisation daemon may result + * in time conversion that is not monotonically increasing (just after + * the update). time_lerp is a particular linear interpolation over the + * synchronisation algo polling period that ensures monotonicity for the + * clock ids requesting it. 
+ */ + if (ffclock_updated > 0) { + bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate)); + ffdelta = ffth->tick_ffcount - cest->update_ffcount; + ffth->tick_time = cest->update_time; + ffclock_convert_delta(ffdelta, cest->period, &bt); + bintime_add(&ffth->tick_time, &bt); + + /* ffclock_reset sets ffclock_updated to INT8_MAX */ + if (ffclock_updated == INT8_MAX) + ffth->tick_time_lerp = ffth->tick_time; + + if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >)) + forward_jump = 1; + else + forward_jump = 0; + + bintime_clear(&gap_lerp); + if (forward_jump) { + gap_lerp = ffth->tick_time; + bintime_sub(&gap_lerp, &ffth->tick_time_lerp); + } else { + gap_lerp = ffth->tick_time_lerp; + bintime_sub(&gap_lerp, &ffth->tick_time); + } + + /* + * The reset from the RTC clock may be far from accurate, and + * reducing the gap between real time and interpolated time + * could take a very long time if the interpolated clock insists + * on strict monotonicity. The clock is reset under very strict + * conditions (kernel time is known to be wrong and + * synchronization daemon has been restarted recently. + * ffclock_boottime absorbs the jump to ensure boot time is + * correct and uptime functions stay consistent. + */ + if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) && + ((cest->status & FFCLOCK_STA_UNSYNC) == 0) && + ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) { + if (forward_jump) + bintime_add(&ffclock_boottime, &gap_lerp); + else + bintime_sub(&ffclock_boottime, &gap_lerp); + ffth->tick_time_lerp = ffth->tick_time; + bintime_clear(&gap_lerp); + } + + ffclock_status = cest->status; + ffth->period_lerp = cest->period; + + /* + * Compute corrected period used for the linear interpolation of + * time. The rate of linear interpolation is capped to 5000PPM + * (5ms/s). 
+ */ + if (bintime_isset(&gap_lerp)) { + ffdelta = cest->update_ffcount; + ffdelta -= fftimehands->cest.update_ffcount; + ffclock_convert_delta(ffdelta, cest->period, &bt); + polling = bt.sec; + bt.sec = 0; + bt.frac = 5000000 * (uint64_t)18446744073LL; + bintime_mul(&bt, polling); + if (bintime_cmp(&gap_lerp, &bt, >)) + gap_lerp = bt; + + /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */ + frac = 0; + if (gap_lerp.sec > 0) { + frac -= 1; + frac /= ffdelta / gap_lerp.sec; + } + frac += gap_lerp.frac / ffdelta; + + if (forward_jump) + ffth->period_lerp += frac; + else + ffth->period_lerp -= frac; + } + + ffclock_updated = 0; + } + if (++ogen == 0) + ogen = 1; + ffth->gen = ogen; + fftimehands = ffth; +} + +/* + * Adjust the fftimehands when the timecounter is changed. Stating the obvious, + * the old and new hardware counter cannot be read simultaneously. tc_windup() + * does read the two counters 'back to back', but a few cycles are effectively + * lost, and not accumulated in tick_ffcount. This is a fairly radical + * operation for a feed-forward synchronization daemon, and it is its job to not + * pushing irrelevant data to the kernel. Because there is no locking here, + * simply force to ignore pending or next update to give daemon a chance to + * realize the counter has changed. 
+ */ +static void +ffclock_change_tc(struct timehands *th) +{ + struct fftimehands *ffth; + struct ffclock_estimate *cest; + struct timecounter *tc; + uint8_t ogen; + + tc = th->th_counter; + ffth = fftimehands->next; + ogen = ffth->gen; + ffth->gen = 0; + + cest = &ffth->cest; + bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate)); + cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1; + cest->errb_abs = 0; + cest->errb_rate = 0; + cest->status |= FFCLOCK_STA_UNSYNC; + + ffth->tick_ffcount = fftimehands->tick_ffcount; + ffth->tick_time_lerp = fftimehands->tick_time_lerp; + ffth->tick_time = fftimehands->tick_time; + ffth->period_lerp = cest->period; + + /* Do not lock but ignore next update from synchronization daemon. */ + ffclock_updated--; + + if (++ogen == 0) + ogen = 1; + ffth->gen = ogen; + fftimehands = ffth; +} + +/* + * Retrieve feed-forward counter and time of last kernel tick. + */ +void +ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags) +{ + struct fftimehands *ffth; + uint8_t gen; + + /* + * No locking but check generation has not changed. Also need to make + * sure ffdelta is positive, i.e. ffcount > tick_ffcount. + */ + do { + ffth = fftimehands; + gen = ffth->gen; + if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) + *bt = ffth->tick_time_lerp; + else + *bt = ffth->tick_time; + *ffcount = ffth->tick_ffcount; + } while (gen == 0 || gen != ffth->gen); +} + +/* + * Absolute clock conversion. Low level function to convert ffcounter to + * bintime. The ffcounter is converted using the current ffclock period estimate + * or the "interpolated period" to ensure monotonicity. + * NOTE: this conversion may have been deferred, and the clock updated since the + * hardware counter has been read. + */ +void +ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags) +{ + struct fftimehands *ffth; + struct bintime bt2; + ffcounter ffdelta; + uint8_t gen; + + /* + * No locking but check generation has not changed. 
Also need to make + * sure ffdelta is positive, i.e. ffcount > tick_ffcount. + */ + do { + ffth = fftimehands; + gen = ffth->gen; + if (ffcount > ffth->tick_ffcount) + ffdelta = ffcount - ffth->tick_ffcount; + else + ffdelta = ffth->tick_ffcount - ffcount; + + if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) { + *bt = ffth->tick_time_lerp; + ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2); + } else { + *bt = ffth->tick_time; + ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2); + } + + if (ffcount > ffth->tick_ffcount) + bintime_add(bt, &bt2); + else + bintime_sub(bt, &bt2); + } while (gen == 0 || gen != ffth->gen); +} + +/* + * Difference clock conversion. + * Low level function to Convert a time interval measured in RAW counter units + * into bintime. The difference clock allows measuring small intervals much more + * reliably than the absolute clock. + */ +void +ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt) +{ + struct fftimehands *ffth; + uint8_t gen; + + /* No locking but check generation has not changed. */ + do { + ffth = fftimehands; + gen = ffth->gen; + ffclock_convert_delta(ffdelta, ffth->cest.period, bt); + } while (gen == 0 || gen != ffth->gen); +} + +/* + * Access to current ffcounter value. + */ +void +ffclock_read_counter(ffcounter *ffcount) +{ + struct timehands *th; + struct fftimehands *ffth; + unsigned int gen, delta; + + /* + * ffclock_windup() called from tc_windup(), safe to rely on + * th->th_generation only, for correct delta and ffcounter. 
+ */
+	do {
+		th = timehands;
+		gen = th->th_generation;
+		ffth = fftimehands;
+		delta = tc_delta(th);
+		*ffcount = ffth->tick_ffcount;
+	} while (gen == 0 || gen != th->th_generation);
+
+	*ffcount += delta;
+}
+
+void
+binuptime(struct bintime *bt)
+{
+
+	binuptime_fromclock(bt, sysclock_active);
+}
+
+void
+nanouptime(struct timespec *tsp)
+{
+
+	nanouptime_fromclock(tsp, sysclock_active);
+}
+
+void
+microuptime(struct timeval *tvp)
+{
+
+	microuptime_fromclock(tvp, sysclock_active);
+}
+
+void
+bintime(struct bintime *bt)
+{
+
+	bintime_fromclock(bt, sysclock_active);
+}
+
+void
+nanotime(struct timespec *tsp)
+{
+
+	nanotime_fromclock(tsp, sysclock_active);
+}
+
+void
+microtime(struct timeval *tvp)
+{
+
+	microtime_fromclock(tvp, sysclock_active);
+}
+
+void
+getbinuptime(struct bintime *bt)
+{
+
+	getbinuptime_fromclock(bt, sysclock_active);
+}
+
+void
+getnanouptime(struct timespec *tsp)
+{
+
+	getnanouptime_fromclock(tsp, sysclock_active);
+}
+
+void
+getmicrouptime(struct timeval *tvp)
+{
+
+	getmicrouptime_fromclock(tvp, sysclock_active);
+}
+
+void
+getbintime(struct bintime *bt)
+{
+
+	getbintime_fromclock(bt, sysclock_active);
+}
+
+void
+getnanotime(struct timespec *tsp)
+{
+
+	getnanotime_fromclock(tsp, sysclock_active);
+}
+
+void
+getmicrotime(struct timeval *tvp)
+{
+
+	getmicrotime_fromclock(tvp, sysclock_active);
+}
+
+#endif /* FFCLOCK */
+
+#ifndef __rtems__
+/*
+ * This is a clone of getnanotime and used for walltimestamps.
+ * The dtrace_ prefix prevents fbt from creating probes for
+ * it so walltimestamp can be safely used in all fbt probes.
+ */
+void
+dtrace_getnanotime(struct timespec *tsp)
+{
+	struct timehands *th;
+	uint32_t gen;
+
+	do {
+		th = timehands;
+		gen = th->th_generation;
+		*tsp = th->th_nanotime;
+	} while (gen == 0 || gen != th->th_generation);
+}
+#endif /* __rtems__ */
+
+#ifdef FFCLOCK
+/*
+ * System clock currently providing time to the system. Modifiable via sysctl
+ * when the FFCLOCK option is defined.
+ */ +int sysclock_active = SYSCLOCK_FBCK; +#endif + +/* Internal NTP status and error estimates. */ +extern int time_status; +extern long time_esterror; + +#ifndef __rtems__ +/* + * Take a snapshot of sysclock data which can be used to compare system clocks + * and generate timestamps after the fact. + */ +void +sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast) +{ + struct fbclock_info *fbi; + struct timehands *th; + struct bintime bt; + unsigned int delta, gen; +#ifdef FFCLOCK + ffcounter ffcount; + struct fftimehands *ffth; + struct ffclock_info *ffi; + struct ffclock_estimate cest; + + ffi = &clock_snap->ff_info; +#endif + + fbi = &clock_snap->fb_info; + delta = 0; + + do { + th = timehands; + gen = th->th_generation; + fbi->th_scale = th->th_scale; + fbi->tick_time = th->th_offset; +#ifdef FFCLOCK + ffth = fftimehands; + ffi->tick_time = ffth->tick_time_lerp; + ffi->tick_time_lerp = ffth->tick_time_lerp; + ffi->period = ffth->cest.period; + ffi->period_lerp = ffth->period_lerp; + clock_snap->ffcount = ffth->tick_ffcount; + cest = ffth->cest; +#endif + if (!fast) + delta = tc_delta(th); + } while (gen == 0 || gen != th->th_generation); + + clock_snap->delta = delta; +#ifdef FFCLOCK + clock_snap->sysclock_active = sysclock_active; +#endif + + /* Record feedback clock status and error. */ + clock_snap->fb_info.status = time_status; + /* XXX: Very crude estimate of feedback clock error. */ + bt.sec = time_esterror / 1000000; + bt.frac = ((time_esterror - bt.sec) * 1000000) * + (uint64_t)18446744073709ULL; + clock_snap->fb_info.error = bt; + +#ifdef FFCLOCK + if (!fast) + clock_snap->ffcount += delta; + + /* Record feed-forward clock leap second adjustment. */ + ffi->leapsec_adjustment = cest.leapsec_total; + if (clock_snap->ffcount > cest.leapsec_next) + ffi->leapsec_adjustment -= cest.leapsec; + + /* Record feed-forward clock status and error. 
*/ + clock_snap->ff_info.status = cest.status; + ffcount = clock_snap->ffcount - cest.update_ffcount; + ffclock_convert_delta(ffcount, cest.period, &bt); + /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */ + bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL); + /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */ + bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL); + clock_snap->ff_info.error = bt; +#endif +} + +/* + * Convert a sysclock snapshot into a struct bintime based on the specified + * clock source and flags. + */ +int +sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt, + int whichclock, uint32_t flags) +{ +#ifdef FFCLOCK + struct bintime bt2; + uint64_t period; +#endif + + switch (whichclock) { + case SYSCLOCK_FBCK: + *bt = cs->fb_info.tick_time; + + /* If snapshot was created with !fast, delta will be >0. */ + if (cs->delta > 0) + bintime_addx(bt, cs->fb_info.th_scale * cs->delta); + + if ((flags & FBCLOCK_UPTIME) == 0) + bintime_add(bt, &boottimebin); + break; +#ifdef FFCLOCK + case SYSCLOCK_FFWD: + if (flags & FFCLOCK_LERP) { + *bt = cs->ff_info.tick_time_lerp; + period = cs->ff_info.period_lerp; + } else { + *bt = cs->ff_info.tick_time; + period = cs->ff_info.period; + } + + /* If snapshot was created with !fast, delta will be >0. */ + if (cs->delta > 0) { + ffclock_convert_delta(cs->delta, period, &bt2); + bintime_add(bt, &bt2); + } + + /* Leap second adjustment. */ + if (flags & FFCLOCK_LEAPSEC) + bt->sec -= cs->ff_info.leapsec_adjustment; + + /* Boot time adjustment, for uptime/monotonic clocks. */ + if (flags & FFCLOCK_UPTIME) + bintime_sub(bt, &ffclock_boottime); + break; +#endif + default: + return (EINVAL); + break; + } + + return (0); +} +#endif /* __rtems__ */ + +/* + * Initialize a new timecounter and possibly use it. 
+ */ +void +tc_init(struct timecounter *tc) +{ +#ifndef __rtems__ + uint32_t u; + struct sysctl_oid *tc_root; + + u = tc->tc_frequency / tc->tc_counter_mask; + /* XXX: We need some margin here, 10% is a guess */ + u *= 11; + u /= 10; + if (u > hz && tc->tc_quality >= 0) { + tc->tc_quality = -2000; + if (bootverbose) { + printf("Timecounter \"%s\" frequency %ju Hz", + tc->tc_name, (uintmax_t)tc->tc_frequency); + printf(" -- Insufficient hz, needs at least %u\n", u); + } + } else if (tc->tc_quality >= 0 || bootverbose) { + printf("Timecounter \"%s\" frequency %ju Hz quality %d\n", + tc->tc_name, (uintmax_t)tc->tc_frequency, + tc->tc_quality); + } +#endif /* __rtems__ */ + + tc->tc_next = timecounters; + timecounters = tc; +#ifndef __rtems__ + /* + * Set up sysctl tree for this counter. + */ + tc_root = SYSCTL_ADD_NODE(NULL, + SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name, + CTLFLAG_RW, 0, "timecounter description"); + SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO, + "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0, + "mask for implemented bits"); + SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO, + "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc), + sysctl_kern_timecounter_get, "IU", "current timecounter value"); + SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO, + "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc), + sysctl_kern_timecounter_freq, "QU", "timecounter frequency"); + SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO, + "quality", CTLFLAG_RD, &(tc->tc_quality), 0, + "goodness of time counter"); + /* + * Never automatically use a timecounter with negative quality. + * Even though we run on the dummy counter, switching here may be + * worse since this timecounter may not be monotonous. 
+ */ + if (tc->tc_quality < 0) + return; + if (tc->tc_quality < timecounter->tc_quality) + return; + if (tc->tc_quality == timecounter->tc_quality && + tc->tc_frequency < timecounter->tc_frequency) + return; +#endif /* __rtems__ */ + (void)tc->tc_get_timecount(tc); + (void)tc->tc_get_timecount(tc); + timecounter = tc; +#ifdef __rtems__ + tc_windup(); +#endif /* __rtems__ */ +} + +#ifndef __rtems__ +/* Report the frequency of the current timecounter. */ +uint64_t +tc_getfrequency(void) +{ + + return (timehands->th_counter->tc_frequency); +} +#endif /* __rtems__ */ + +/* + * Step our concept of UTC. This is done by modifying our estimate of + * when we booted. + * XXX: not locked. + */ +#ifndef __rtems__ +void +tc_setclock(struct timespec *ts) +#else /* __rtems__ */ +void +_Timecounter_Set_clock(const struct timespec *ts) +#endif /* __rtems__ */ +{ +#ifndef __rtems__ + struct timespec tbef, taft; +#endif /* __rtems__ */ + struct bintime bt, bt2; + +#ifndef __rtems__ + cpu_tick_calibrate(1); + nanotime(&tbef); +#endif /* __rtems__ */ + timespec2bintime(ts, &bt); + binuptime(&bt2); + bintime_sub(&bt, &bt2); + bintime_add(&bt2, &boottimebin); + boottimebin = bt; +#ifndef __rtems__ + bintime2timeval(&bt, &boottime); +#endif /* __rtems__ */ + + /* XXX fiddle all the little crinkly bits around the fiords... */ + tc_windup(); +#ifndef __rtems__ + nanotime(&taft); + if (timestepwarnings) { + log(LOG_INFO, + "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n", + (intmax_t)tbef.tv_sec, tbef.tv_nsec, + (intmax_t)taft.tv_sec, taft.tv_nsec, + (intmax_t)ts->tv_sec, ts->tv_nsec); + } + cpu_tick_calibrate(1); +#endif /* __rtems__ */ +} + +/* + * Initialize the next struct timehands in the ring and make + * it the active timehands. Along the way we might switch to a different + * timecounter and/or do seconds processing in NTP. Slightly magic. 
+ */ +static void +tc_windup(void) +{ + struct bintime bt; + struct timehands *th, *tho; + uint64_t scale; + uint32_t delta, ncount, ogen; + int i; + time_t t; +#ifdef __rtems__ + ISR_lock_Context lock_context; + + _ISR_lock_ISR_disable_and_acquire(&_Timecounter_Lock, &lock_context); +#endif /* __rtems__ */ + + /* + * Make the next timehands a copy of the current one, but do not + * overwrite the generation or next pointer. While we update + * the contents, the generation must be zero. + */ + tho = timehands; + th = tho->th_next; + ogen = th->th_generation; + th->th_generation = 0; + bcopy(tho, th, offsetof(struct timehands, th_generation)); + + /* + * Capture a timecounter delta on the current timecounter and if + * changing timecounters, a counter value from the new timecounter. + * Update the offset fields accordingly. + */ + delta = tc_delta(th); + if (th->th_counter != timecounter) + ncount = timecounter->tc_get_timecount(timecounter); + else + ncount = 0; +#ifdef FFCLOCK + ffclock_windup(delta); +#endif + th->th_offset_count += delta; + th->th_offset_count &= th->th_counter->tc_counter_mask; + while (delta > th->th_counter->tc_frequency) { + /* Eat complete unadjusted seconds. */ + delta -= th->th_counter->tc_frequency; + th->th_offset.sec++; + } + if ((delta > th->th_counter->tc_frequency / 2) && + (th->th_scale * delta < ((uint64_t)1 << 63))) { + /* The product th_scale * delta just barely overflows. */ + th->th_offset.sec++; + } + bintime_addx(&th->th_offset, th->th_scale * delta); + + /* + * Hardware latching timecounters may not generate interrupts on + * PPS events, so instead we poll them. There is a finite risk that + * the hardware might capture a count which is later than the one we + * got above, and therefore possibly in the next NTP second which might + * have a different rate than the current NTP second. It doesn't + * matter in practice. 
+ */ + if (tho->th_counter->tc_poll_pps) + tho->th_counter->tc_poll_pps(tho->th_counter); + + /* + * Deal with NTP second processing. The for loop normally + * iterates at most once, but in extreme situations it might + * keep NTP sane if timeouts are not run for several seconds. + * At boot, the time step can be large when the TOD hardware + * has been read, so on really large steps, we call + * ntp_update_second only twice. We need to call it twice in + * case we missed a leap second. + */ + bt = th->th_offset; + bintime_add(&bt, &boottimebin); + i = bt.sec - tho->th_microtime.tv_sec; + if (i > LARGE_STEP) + i = 2; + for (; i > 0; i--) { + t = bt.sec; + ntp_update_second(&th->th_adjustment, &bt.sec); + if (bt.sec != t) + boottimebin.sec += bt.sec - t; + } + /* Update the UTC timestamps used by the get*() functions. */ + /* XXX shouldn't do this here. Should force non-`get' versions. */ + bintime2timeval(&bt, &th->th_microtime); + bintime2timespec(&bt, &th->th_nanotime); + + /* Now is a good time to change timecounters. */ + if (th->th_counter != timecounter) { +#ifndef __rtems__ +#ifndef __arm__ + if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0) + cpu_disable_c2_sleep++; + if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0) + cpu_disable_c2_sleep--; +#endif +#endif /* __rtems__ */ + th->th_counter = timecounter; + th->th_offset_count = ncount; +#ifndef __rtems__ + tc_min_ticktock_freq = max(1, timecounter->tc_frequency / + (((uint64_t)timecounter->tc_counter_mask + 1) / 3)); +#endif /* __rtems__ */ +#ifdef FFCLOCK + ffclock_change_tc(th); +#endif + } + + /*- + * Recalculate the scaling factor. We want the number of 1/2^64 + * fractions of a second per period of the hardware counter, taking + * into account the th_adjustment factor which the NTP PLL/adjtime(2) + * processing provides us with. 
+ * + * The th_adjustment is nanoseconds per second with 32 bit binary + * fraction and we want 64 bit binary fraction of second: + * + * x = a * 2^32 / 10^9 = a * 4.294967296 + * + * The range of th_adjustment is +/- 5000PPM so inside a 64bit int + * we can only multiply by about 850 without overflowing, that + * leaves no suitably precise fractions for multiply before divide. + * + * Divide before multiply with a fraction of 2199/512 results in a + * systematic undercompensation of 10PPM of th_adjustment. On a + * 5000PPM adjustment this is a 0.05PPM error. This is acceptable. + * + * We happily sacrifice the lowest of the 64 bits of our result + * to the goddess of code clarity. + * + */ + scale = (uint64_t)1 << 63; + scale += (th->th_adjustment / 1024) * 2199; + scale /= th->th_counter->tc_frequency; + th->th_scale = scale * 2; + + /* + * Now that the struct timehands is again consistent, set the new + * generation number, making sure to not make it zero. + */ + if (++ogen == 0) + ogen = 1; + th->th_generation = ogen; + + /* Go live with the new struct timehands. */ +#ifdef FFCLOCK + switch (sysclock_active) { + case SYSCLOCK_FBCK: +#endif + time_second = th->th_microtime.tv_sec; + time_uptime = th->th_offset.sec; +#ifdef FFCLOCK + break; + case SYSCLOCK_FFWD: + time_second = fftimehands->tick_time_lerp.sec; + time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec; + break; + } +#endif + + timehands = th; +#ifndef __rtems__ + timekeep_push_vdso(); +#endif /* __rtems__ */ +#ifdef __rtems__ + _ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, &lock_context); +#endif /* __rtems__ */ +} + +#ifndef __rtems__ +/* Report or change the active timecounter hardware. 
*/ +static int +sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS) +{ + char newname[32]; + struct timecounter *newtc, *tc; + int error; + + tc = timecounter; + strlcpy(newname, tc->tc_name, sizeof(newname)); + + error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req); + if (error != 0 || req->newptr == NULL || + strcmp(newname, tc->tc_name) == 0) + return (error); + for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) { + if (strcmp(newname, newtc->tc_name) != 0) + continue; + + /* Warm up new timecounter. */ + (void)newtc->tc_get_timecount(newtc); + (void)newtc->tc_get_timecount(newtc); + + timecounter = newtc; + + /* + * The vdso timehands update is deferred until the next + * 'tc_windup()'. + * + * This is prudent given that 'timekeep_push_vdso()' does not + * use any locking and that it can be called in hard interrupt + * context via 'tc_windup()'. + */ + return (0); + } + return (EINVAL); +} + +SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW, + 0, 0, sysctl_kern_timecounter_hardware, "A", + "Timecounter hardware selected"); + + +/* Report or change the active timecounter hardware. */ +static int +sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS) +{ + char buf[32], *spc; + struct timecounter *tc; + int error; + + spc = ""; + error = 0; + for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) { + sprintf(buf, "%s%s(%d)", + spc, tc->tc_name, tc->tc_quality); + error = SYSCTL_OUT(req, buf, strlen(buf)); + spc = " "; + } + return (error); +} + +SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD, + 0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected"); +#endif /* __rtems__ */ + +#ifndef __rtems__ +/* + * RFC 2783 PPS-API implementation. 
+ */ + +static int +pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps) +{ + int err, timo; + pps_seq_t aseq, cseq; + struct timeval tv; + + if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC) + return (EINVAL); + + /* + * If no timeout is requested, immediately return whatever values were + * most recently captured. If timeout seconds is -1, that's a request + * to block without a timeout. WITNESS won't let us sleep forever + * without a lock (we really don't need a lock), so just repeatedly + * sleep a long time. + */ + if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) { + if (fapi->timeout.tv_sec == -1) + timo = 0x7fffffff; + else { + tv.tv_sec = fapi->timeout.tv_sec; + tv.tv_usec = fapi->timeout.tv_nsec / 1000; + timo = tvtohz(&tv); + } + aseq = pps->ppsinfo.assert_sequence; + cseq = pps->ppsinfo.clear_sequence; + while (aseq == pps->ppsinfo.assert_sequence && + cseq == pps->ppsinfo.clear_sequence) { + err = tsleep(pps, PCATCH, "ppsfch", timo); + if (err == EWOULDBLOCK && fapi->timeout.tv_sec == -1) { + continue; + } else if (err != 0) { + return (err); + } + } + } + + pps->ppsinfo.current_mode = pps->ppsparam.mode; + fapi->pps_info_buf = pps->ppsinfo; + + return (0); +} + +int +pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps) +{ + pps_params_t *app; + struct pps_fetch_args *fapi; +#ifdef FFCLOCK + struct pps_fetch_ffc_args *fapi_ffc; +#endif +#ifdef PPS_SYNC + struct pps_kcbind_args *kapi; +#endif + + KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl")); + switch (cmd) { + case PPS_IOC_CREATE: + return (0); + case PPS_IOC_DESTROY: + return (0); + case PPS_IOC_SETPARAMS: + app = (pps_params_t *)data; + if (app->mode & ~pps->ppscap) + return (EINVAL); +#ifdef FFCLOCK + /* Ensure only a single clock is selected for ffc timestamp. 
*/ + if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK) + return (EINVAL); +#endif + pps->ppsparam = *app; + return (0); + case PPS_IOC_GETPARAMS: + app = (pps_params_t *)data; + *app = pps->ppsparam; + app->api_version = PPS_API_VERS_1; + return (0); + case PPS_IOC_GETCAP: + *(int*)data = pps->ppscap; + return (0); + case PPS_IOC_FETCH: + fapi = (struct pps_fetch_args *)data; + return (pps_fetch(fapi, pps)); +#ifdef FFCLOCK + case PPS_IOC_FETCH_FFCOUNTER: + fapi_ffc = (struct pps_fetch_ffc_args *)data; + if (fapi_ffc->tsformat && fapi_ffc->tsformat != + PPS_TSFMT_TSPEC) + return (EINVAL); + if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec) + return (EOPNOTSUPP); + pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode; + fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc; + /* Overwrite timestamps if feedback clock selected. */ + switch (pps->ppsparam.mode & PPS_TSCLK_MASK) { + case PPS_TSCLK_FBCK: + fapi_ffc->pps_info_buf_ffc.assert_timestamp = + pps->ppsinfo.assert_timestamp; + fapi_ffc->pps_info_buf_ffc.clear_timestamp = + pps->ppsinfo.clear_timestamp; + break; + case PPS_TSCLK_FFWD: + break; + default: + break; + } + return (0); +#endif /* FFCLOCK */ + case PPS_IOC_KCBIND: +#ifdef PPS_SYNC + kapi = (struct pps_kcbind_args *)data; + /* XXX Only root should be able to do this */ + if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC) + return (EINVAL); + if (kapi->kernel_consumer != PPS_KC_HARDPPS) + return (EINVAL); + if (kapi->edge & ~pps->ppscap) + return (EINVAL); + pps->kcmode = kapi->edge; + return (0); +#else + return (EOPNOTSUPP); +#endif + default: + return (ENOIOCTL); + } +} + +void +pps_init(struct pps_state *pps) +{ + pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT; + if (pps->ppscap & PPS_CAPTUREASSERT) + pps->ppscap |= PPS_OFFSETASSERT; + if (pps->ppscap & PPS_CAPTURECLEAR) + pps->ppscap |= PPS_OFFSETCLEAR; +#ifdef FFCLOCK + pps->ppscap |= PPS_TSCLK_MASK; +#endif +} + +void +pps_capture(struct pps_state *pps) +{ + struct timehands *th; + + 
KASSERT(pps != NULL, ("NULL pps pointer in pps_capture")); + th = timehands; + pps->capgen = th->th_generation; + pps->capth = th; +#ifdef FFCLOCK + pps->capffth = fftimehands; +#endif + pps->capcount = th->th_counter->tc_get_timecount(th->th_counter); + if (pps->capgen != th->th_generation) + pps->capgen = 0; +} + +void +pps_event(struct pps_state *pps, int event) +{ + struct bintime bt; + struct timespec ts, *tsp, *osp; + uint32_t tcount, *pcount; + int foff, fhard; + pps_seq_t *pseq; +#ifdef FFCLOCK + struct timespec *tsp_ffc; + pps_seq_t *pseq_ffc; + ffcounter *ffcount; +#endif + + KASSERT(pps != NULL, ("NULL pps pointer in pps_event")); + /* If the timecounter was wound up underneath us, bail out. */ + if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation) + return; + + /* Things would be easier with arrays. */ + if (event == PPS_CAPTUREASSERT) { + tsp = &pps->ppsinfo.assert_timestamp; + osp = &pps->ppsparam.assert_offset; + foff = pps->ppsparam.mode & PPS_OFFSETASSERT; + fhard = pps->kcmode & PPS_CAPTUREASSERT; + pcount = &pps->ppscount[0]; + pseq = &pps->ppsinfo.assert_sequence; +#ifdef FFCLOCK + ffcount = &pps->ppsinfo_ffc.assert_ffcount; + tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp; + pseq_ffc = &pps->ppsinfo_ffc.assert_sequence; +#endif + } else { + tsp = &pps->ppsinfo.clear_timestamp; + osp = &pps->ppsparam.clear_offset; + foff = pps->ppsparam.mode & PPS_OFFSETCLEAR; + fhard = pps->kcmode & PPS_CAPTURECLEAR; + pcount = &pps->ppscount[1]; + pseq = &pps->ppsinfo.clear_sequence; +#ifdef FFCLOCK + ffcount = &pps->ppsinfo_ffc.clear_ffcount; + tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp; + pseq_ffc = &pps->ppsinfo_ffc.clear_sequence; +#endif + } + + /* + * If the timecounter changed, we cannot compare the count values, so + * we have to drop the rest of the PPS-stuff until the next event. 
+ */ + if (pps->ppstc != pps->capth->th_counter) { + pps->ppstc = pps->capth->th_counter; + *pcount = pps->capcount; + pps->ppscount[2] = pps->capcount; + return; + } + + /* Convert the count to a timespec. */ + tcount = pps->capcount - pps->capth->th_offset_count; + tcount &= pps->capth->th_counter->tc_counter_mask; + bt = pps->capth->th_offset; + bintime_addx(&bt, pps->capth->th_scale * tcount); + bintime_add(&bt, &boottimebin); + bintime2timespec(&bt, &ts); + + /* If the timecounter was wound up underneath us, bail out. */ + if (pps->capgen != pps->capth->th_generation) + return; + + *pcount = pps->capcount; + (*pseq)++; + *tsp = ts; + + if (foff) { + timespecadd(tsp, osp); + if (tsp->tv_nsec < 0) { + tsp->tv_nsec += 1000000000; + tsp->tv_sec -= 1; + } + } + +#ifdef FFCLOCK + *ffcount = pps->capffth->tick_ffcount + tcount; + bt = pps->capffth->tick_time; + ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt); + bintime_add(&bt, &pps->capffth->tick_time); + bintime2timespec(&bt, &ts); + (*pseq_ffc)++; + *tsp_ffc = ts; +#endif + +#ifdef PPS_SYNC + if (fhard) { + uint64_t scale; + + /* + * Feed the NTP PLL/FLL. + * The FLL wants to know how many (hardware) nanoseconds + * elapsed since the previous event. + */ + tcount = pps->capcount - pps->ppscount[2]; + pps->ppscount[2] = pps->capcount; + tcount &= pps->capth->th_counter->tc_counter_mask; + scale = (uint64_t)1 << 63; + scale /= pps->capth->th_counter->tc_frequency; + scale *= 2; + bt.sec = 0; + bt.frac = 0; + bintime_addx(&bt, scale * tcount); + bintime2timespec(&bt, &ts); + hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec); + } +#endif + + /* Wakeup anyone sleeping in pps_fetch(). */ + wakeup(pps); +} +#else /* __rtems__ */ +/* FIXME: https://devel.rtems.org/ticket/2349 */ +#endif /* __rtems__ */ + +/* + * Timecounters need to be updated every so often to prevent the hardware + * counter from overflowing. 
Updating also recalculates the cached values + * used by the get*() family of functions, so their precision depends on + * the update frequency. + */ + +#ifndef __rtems__ +static int tc_tick; +SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, + "Approximate number of hardclock ticks in a millisecond"); +#endif /* __rtems__ */ + +#ifndef __rtems__ +void +tc_ticktock(int cnt) +{ + static int count; + + count += cnt; + if (count < tc_tick) + return; + count = 0; +#else /* __rtems__ */ +void +_Timecounter_Tick(void) +{ +#endif /* __rtems__ */ + tc_windup(); +#ifdef __rtems__ + _Watchdog_Tick(); +#endif /* __rtems__ */ +} +#ifdef __rtems__ +void +_Timecounter_Tick_simple(uint32_t delta, uint32_t offset) +{ + struct bintime bt; + struct timehands *th; + uint32_t ogen; + ISR_lock_Context lock_context; + + _ISR_lock_ISR_disable_and_acquire(&_Timecounter_Lock, &lock_context); + + th = timehands; + ogen = th->th_generation; + th->th_offset_count = offset; + bintime_addx(&th->th_offset, th->th_scale * delta); + + bt = th->th_offset; + bintime_add(&bt, &boottimebin); + + /* Update the UTC timestamps used by the get*() functions. */ + /* XXX shouldn't do this here. Should force non-`get' versions. */ + bintime2timeval(&bt, &th->th_microtime); + bintime2timespec(&bt, &th->th_nanotime); + + /* + * Now that the struct timehands is again consistent, set the new + * generation number, making sure to not make it zero. + */ + if (++ogen == 0) + ogen = 1; + th->th_generation = ogen; + + /* Go live with the new struct timehands. 
*/ + time_second = th->th_microtime.tv_sec; + time_uptime = th->th_offset.sec; + + _ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, &lock_context); + + _Watchdog_Tick(); +} +#endif /* __rtems__ */ + +#ifndef __rtems__ +static void __inline +tc_adjprecision(void) +{ + int t; + + if (tc_timepercentage > 0) { + t = (99 + tc_timepercentage) / tc_timepercentage; + tc_precexp = fls(t + (t >> 1)) - 1; + FREQ2BT(hz / tc_tick, &bt_timethreshold); + FREQ2BT(hz, &bt_tickthreshold); + bintime_shift(&bt_timethreshold, tc_precexp); + bintime_shift(&bt_tickthreshold, tc_precexp); + } else { + tc_precexp = 31; + bt_timethreshold.sec = INT_MAX; + bt_timethreshold.frac = ~(uint64_t)0; + bt_tickthreshold = bt_timethreshold; + } + sbt_timethreshold = bttosbt(bt_timethreshold); + sbt_tickthreshold = bttosbt(bt_tickthreshold); +} +#endif /* __rtems__ */ + +#ifndef __rtems__ +static int +sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS) +{ + int error, val; + + val = tc_timepercentage; + error = sysctl_handle_int(oidp, &val, 0, req); + if (error != 0 || req->newptr == NULL) + return (error); + tc_timepercentage = val; + if (cold) + goto done; + tc_adjprecision(); +done: + return (0); +} +#endif /* __rtems__ */ + +#ifndef __rtems__ +static void +inittimecounter(void *dummy) +#else /* __rtems__ */ +void +_Timecounter_Initialize(void) +#endif /* __rtems__ */ +{ +#ifndef __rtems__ + u_int p; + int tick_rate; + + /* + * Set the initial timeout to + * max(1, <approx. number of hardclock ticks in a millisecond>). + * People should probably not use the sysctl to set the timeout + * to smaller than its inital value, since that value is the + * smallest reasonable one. If they want better timestamps they + * should use the non-"get"* functions. 
+ */ + if (hz > 1000) + tc_tick = (hz + 500) / 1000; + else + tc_tick = 1; + tc_adjprecision(); + FREQ2BT(hz, &tick_bt); + tick_sbt = bttosbt(tick_bt); + tick_rate = hz / tc_tick; + FREQ2BT(tick_rate, &tc_tick_bt); + tc_tick_sbt = bttosbt(tc_tick_bt); + p = (tc_tick * 1000000) / hz; + printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000); +#endif /* __rtems__ */ + +#ifdef FFCLOCK + ffclock_init(); +#endif + /* warm up new timecounter (again) and get rolling. */ + (void)timecounter->tc_get_timecount(timecounter); + (void)timecounter->tc_get_timecount(timecounter); + tc_windup(); +} + +#ifndef __rtems__ +SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL); +#endif /* __rtems__ */ + +#ifndef __rtems__ +/* Cpu tick handling -------------------------------------------------*/ + +static int cpu_tick_variable; +static uint64_t cpu_tick_frequency; + +static uint64_t +tc_cpu_ticks(void) +{ + static uint64_t base; + static unsigned last; + unsigned u; + struct timecounter *tc; + + tc = timehands->th_counter; + u = tc->tc_get_timecount(tc) & tc->tc_counter_mask; + if (u < last) + base += (uint64_t)tc->tc_counter_mask + 1; + last = u; + return (u + base); +} + +void +cpu_tick_calibration(void) +{ + static time_t last_calib; + + if (time_uptime != last_calib && !(time_uptime & 0xf)) { + cpu_tick_calibrate(0); + last_calib = time_uptime; + } +} + +/* + * This function gets called every 16 seconds on only one designated + * CPU in the system from hardclock() via cpu_tick_calibration()(). + * + * Whenever the real time clock is stepped we get called with reset=1 + * to make sure we handle suspend/resume and similar events correctly. 
+ */ + +static void +cpu_tick_calibrate(int reset) +{ + static uint64_t c_last; + uint64_t c_this, c_delta; + static struct bintime t_last; + struct bintime t_this, t_delta; + uint32_t divi; + + if (reset) { + /* The clock was stepped, abort & reset */ + t_last.sec = 0; + return; + } + + /* we don't calibrate fixed rate cputicks */ + if (!cpu_tick_variable) + return; + + getbinuptime(&t_this); + c_this = cpu_ticks(); + if (t_last.sec != 0) { + c_delta = c_this - c_last; + t_delta = t_this; + bintime_sub(&t_delta, &t_last); + /* + * Headroom: + * 2^(64-20) / 16[s] = + * 2^(44) / 16[s] = + * 17.592.186.044.416 / 16 = + * 1.099.511.627.776 [Hz] + */ + divi = t_delta.sec << 20; + divi |= t_delta.frac >> (64 - 20); + c_delta <<= 20; + c_delta /= divi; + if (c_delta > cpu_tick_frequency) { + if (0 && bootverbose) + printf("cpu_tick increased to %ju Hz\n", + c_delta); + cpu_tick_frequency = c_delta; + } + } + c_last = c_this; + t_last = t_this; +} + +void +set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var) +{ + + if (func == NULL) { + cpu_ticks = tc_cpu_ticks; + } else { + cpu_tick_frequency = freq; + cpu_tick_variable = var; + cpu_ticks = func; + } +} + +uint64_t +cpu_tickrate(void) +{ + + if (cpu_ticks == tc_cpu_ticks) + return (tc_getfrequency()); + return (cpu_tick_frequency); +} + +/* + * We need to be slightly careful converting cputicks to microseconds. + * There is plenty of margin in 64 bits of microseconds (half a million + * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply + * before divide conversion (to retain precision) we find that the + * margin shrinks to 1.5 hours (one millionth of 146y). + * With a three prong approach we never lose significant bits, no + * matter what the cputick rate and length of timeinterval is. 
+ */ + +uint64_t +cputick2usec(uint64_t tick) +{ + + if (tick > 18446744073709551LL) /* floor(2^64 / 1000) */ + return (tick / (cpu_tickrate() / 1000000LL)); + else if (tick > 18446744073709LL) /* floor(2^64 / 1000000) */ + return ((tick * 1000LL) / (cpu_tickrate() / 1000LL)); + else + return ((tick * 1000000LL) / cpu_tickrate()); +} + +cpu_tick_f *cpu_ticks = tc_cpu_ticks; +#endif /* __rtems__ */ + +#ifndef __rtems__ +static int vdso_th_enable = 1; +static int +sysctl_fast_gettime(SYSCTL_HANDLER_ARGS) +{ + int old_vdso_th_enable, error; + + old_vdso_th_enable = vdso_th_enable; + error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req); + if (error != 0) + return (error); + vdso_th_enable = old_vdso_th_enable; + return (0); +} +SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, + NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day"); + +uint32_t +tc_fill_vdso_timehands(struct vdso_timehands *vdso_th) +{ + struct timehands *th; + uint32_t enabled; + + th = timehands; + vdso_th->th_algo = VDSO_TH_ALGO_1; + vdso_th->th_scale = th->th_scale; + vdso_th->th_offset_count = th->th_offset_count; + vdso_th->th_counter_mask = th->th_counter->tc_counter_mask; + vdso_th->th_offset = th->th_offset; + vdso_th->th_boottime = boottimebin; + enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter); + if (!vdso_th_enable) + enabled = 0; + return (enabled); +} +#endif /* __rtems__ */ + +#ifdef COMPAT_FREEBSD32 +uint32_t +tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32) +{ + struct timehands *th; + uint32_t enabled; + + th = timehands; + vdso_th32->th_algo = VDSO_TH_ALGO_1; + *(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale; + vdso_th32->th_offset_count = th->th_offset_count; + vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask; + vdso_th32->th_offset.sec = th->th_offset.sec; + *(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac; + vdso_th32->th_boottime.sec = boottimebin.sec; + 
*(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac; + enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter); + if (!vdso_th_enable) + enabled = 0; + return (enabled); +} +#endif diff --git a/cpukit/score/src/mpci.c b/cpukit/score/src/mpci.c index 9b623b253f..2b1f6fff44 100644 --- a/cpukit/score/src/mpci.c +++ b/cpukit/score/src/mpci.c @@ -82,8 +82,7 @@ void _MPCI_Handler_initialization( _Thread_queue_Initialize( &_MPCI_Remote_blocked_threads, - THREAD_QUEUE_DISCIPLINE_FIFO, - timeout_status + THREAD_QUEUE_DISCIPLINE_FIFO ); } @@ -188,7 +187,8 @@ void _MPCI_Send_process_packet ( uint32_t _MPCI_Send_request_packet ( uint32_t destination, MP_packet_Prefix *the_packet, - States_Control extra_state + States_Control extra_state, + uint32_t timeout_code ) { Thread_Control *executing = _Thread_Executing; @@ -206,8 +206,6 @@ uint32_t _MPCI_Send_request_packet ( (*_MPCI_table->send_packet)( destination, the_packet ); - _Thread_queue_Enter_critical_section( &_MPCI_Remote_blocked_threads ); - /* * See if we need a default timeout */ @@ -219,7 +217,8 @@ uint32_t _MPCI_Send_request_packet ( &_MPCI_Remote_blocked_threads, executing, STATES_WAITING_FOR_RPC_REPLY | extra_state, - the_packet->timeout + the_packet->timeout, + timeout_code ); _Thread_Enable_dispatch(); @@ -262,7 +261,7 @@ Thread_Control *_MPCI_Process_response ( the_thread = NULL; /* IMPOSSIBLE */ break; case OBJECTS_LOCAL: - _Thread_queue_Extract( &_MPCI_Remote_blocked_threads, the_thread ); + _Thread_queue_Extract( the_thread ); the_thread->Wait.return_code = the_packet->return_code; _Objects_Put_without_thread_dispatch( &the_thread->Object ); break; @@ -284,6 +283,7 @@ Thread _MPCI_Receive_server( MP_packet_Prefix *the_packet; MPCI_Packet_processor the_function; Thread_Control *executing; + ISR_lock_Context lock_context; executing = _Thread_Get_executing(); @@ -291,15 +291,15 @@ Thread _MPCI_Receive_server( executing->receive_packet = NULL; - _Thread_Disable_dispatch(); + 
_ISR_lock_ISR_disable( &lock_context ); _CORE_semaphore_Seize( &_MPCI_Semaphore, executing, 0, true, - WATCHDOG_NO_TIMEOUT + WATCHDOG_NO_TIMEOUT, + &lock_context ); - _Thread_Enable_dispatch(); for ( ; ; ) { the_packet = _MPCI_Receive_packet(); @@ -330,9 +330,10 @@ Thread _MPCI_Receive_server( void _MPCI_Announce ( void ) { - _Thread_Disable_dispatch(); - (void) _CORE_semaphore_Surrender( &_MPCI_Semaphore, 0, 0 ); - _Thread_Enable_dispatch(); + ISR_lock_Context lock_context; + + _ISR_lock_ISR_disable( &lock_context ); + (void) _CORE_semaphore_Surrender( &_MPCI_Semaphore, 0, 0, &lock_context ); } void _MPCI_Internal_packets_Send_process_packet ( diff --git a/cpukit/score/src/opt_compat.h b/cpukit/score/src/opt_compat.h new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/cpukit/score/src/opt_compat.h diff --git a/cpukit/score/src/opt_ffclock.h b/cpukit/score/src/opt_ffclock.h new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/cpukit/score/src/opt_ffclock.h diff --git a/cpukit/score/src/opt_ntp.h b/cpukit/score/src/opt_ntp.h new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/cpukit/score/src/opt_ntp.h diff --git a/cpukit/score/src/schedulercbs.c b/cpukit/score/src/schedulercbs.c index 44221cdbdf..98ec0eb29e 100644 --- a/cpukit/score/src/schedulercbs.c +++ b/cpukit/score/src/schedulercbs.c @@ -27,15 +27,13 @@ void _Scheduler_CBS_Budget_callout( ) { Priority_Control new_priority; + Priority_Control unused; Scheduler_CBS_Node *node; Scheduler_CBS_Server_id server_id; /* Put violating task to background until the end of period. */ new_priority = the_thread->Start.initial_priority; - if ( the_thread->real_priority != new_priority ) - the_thread->real_priority = new_priority; - if ( the_thread->current_priority != new_priority ) - _Thread_Change_priority(the_thread, new_priority, true); + _Thread_Set_priority( the_thread, new_priority, &unused, true ); /* Invoke callback function if any. 
*/ node = _Scheduler_CBS_Thread_get_node( the_thread ); diff --git a/cpukit/score/src/schedulercbsreleasejob.c b/cpukit/score/src/schedulercbsreleasejob.c index 36a31551cf..a9f8e33201 100644 --- a/cpukit/score/src/schedulercbsreleasejob.c +++ b/cpukit/score/src/schedulercbsreleasejob.c @@ -32,6 +32,7 @@ void _Scheduler_CBS_Release_job( Scheduler_CBS_Node *node = _Scheduler_CBS_Thread_get_node( the_thread ); Scheduler_CBS_Server *serv_info = node->cbs_server; Priority_Control new_priority; + Priority_Control unused; if (deadline) { /* Initializing or shifting deadline. */ @@ -51,6 +52,5 @@ void _Scheduler_CBS_Release_job( if (serv_info) the_thread->cpu_time_budget = serv_info->parameters.budget; - the_thread->real_priority = new_priority; - _Thread_Change_priority(the_thread, new_priority, true); + _Thread_Set_priority( the_thread, new_priority, &unused, true ); } diff --git a/cpukit/score/src/schedulerchangeroot.c b/cpukit/score/src/schedulerchangeroot.c index f731117b4c..d036fd8a8b 100644 --- a/cpukit/score/src/schedulerchangeroot.c +++ b/cpukit/score/src/schedulerchangeroot.c @@ -61,9 +61,6 @@ void _Scheduler_Thread_change_resource_root( Thread_Control *offers_help = top; Scheduler_Node *offers_help_node; Thread_Control *offers_help_too; - ISR_Level level; - - _ISR_Disable( level ); offers_help_node = _Scheduler_Thread_get_node( offers_help ); offers_help_too = _Scheduler_Node_get_owner( offers_help_node ); @@ -80,6 +77,4 @@ void _Scheduler_Thread_change_resource_root( if ( ctx.needs_help != NULL ) { _Scheduler_Ask_for_help( ctx.needs_help ); } - - _ISR_Enable( level ); } diff --git a/cpukit/score/src/scheduleredfreleasejob.c b/cpukit/score/src/scheduleredfreleasejob.c index 6c1b642890..2c3db65b64 100644 --- a/cpukit/score/src/scheduleredfreleasejob.c +++ b/cpukit/score/src/scheduleredfreleasejob.c @@ -29,6 +29,7 @@ void _Scheduler_EDF_Release_job( ) { Priority_Control new_priority; + Priority_Control unused; (void) scheduler; @@ -42,6 +43,5 @@ void 
_Scheduler_EDF_Release_job( new_priority = the_thread->Start.initial_priority; } - the_thread->real_priority = new_priority; - _Thread_Change_priority(the_thread, new_priority, true); + _Thread_Set_priority( the_thread, new_priority, &unused, true ); } diff --git a/cpukit/score/src/smplock.c b/cpukit/score/src/smplock.c new file mode 100644 index 0000000000..14400917ef --- /dev/null +++ b/cpukit/score/src/smplock.c @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#if HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/score/smplock.h> + +#if defined(RTEMS_SMP_LOCK_DO_NOT_INLINE) + +void _SMP_lock_Initialize( + SMP_lock_Control *lock, + const char *name +) +{ + _SMP_lock_Initialize_body( lock, name ); +} + +void _SMP_lock_Destroy( SMP_lock_Control *lock ) +{ + _SMP_lock_Destroy_body( lock ); +} + +void _SMP_lock_Acquire( + SMP_lock_Control *lock, + SMP_lock_Context *context +) +{ + _SMP_lock_Acquire_body( lock, context ); +} + +void _SMP_lock_Release( + SMP_lock_Control *lock, + SMP_lock_Context *context +) +{ + _SMP_lock_Release_body( lock, context ); +} + +void _SMP_lock_ISR_disable_and_acquire( + SMP_lock_Control *lock, + SMP_lock_Context *context +) +{ + _SMP_lock_ISR_disable_and_acquire_body( lock, context ); +} + +void _SMP_lock_Release_and_ISR_enable( + SMP_lock_Control *lock, + SMP_lock_Context *context +) +{ + _SMP_lock_Release_and_ISR_enable_body( lock, context ); +} + +#endif /* defined(RTEMS_SMP_LOCK_DO_NOT_INLINE) */ diff --git a/cpukit/score/src/thread.c b/cpukit/score/src/thread.c index 88b3272d0a..ef9788ca41 100644 --- a/cpukit/score/src/thread.c +++ b/cpukit/score/src/thread.c @@ -28,11 +28,11 @@ ) THREAD_OFFSET_ASSERT( Object 
); -THREAD_OFFSET_ASSERT( RBNode ); THREAD_OFFSET_ASSERT( current_state ); THREAD_OFFSET_ASSERT( current_priority ); THREAD_OFFSET_ASSERT( real_priority ); -THREAD_OFFSET_ASSERT( Priority ); +THREAD_OFFSET_ASSERT( priority_generation ); +THREAD_OFFSET_ASSERT( priority_restore_hint ); THREAD_OFFSET_ASSERT( resource_count ); THREAD_OFFSET_ASSERT( Wait ); THREAD_OFFSET_ASSERT( Timer ); diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c index a011a8f5e5..8f5d14f412 100644 --- a/cpukit/score/src/threadchangepriority.c +++ b/cpukit/score/src/threadchangepriority.c @@ -21,12 +21,13 @@ #include <rtems/score/threadimpl.h> #include <rtems/score/schedulerimpl.h> -#include <rtems/score/threadqimpl.h> void _Thread_Change_priority( - Thread_Control *the_thread, - Priority_Control new_priority, - bool prepend_it + Thread_Control *the_thread, + Priority_Control new_priority, + void *arg, + Thread_Change_priority_filter filter, + bool prepend_it ) { ISR_lock_Context lock_context; @@ -35,27 +36,37 @@ void _Thread_Change_priority( lock = _Thread_Lock_acquire( the_thread, &lock_context ); /* + * For simplicity set the priority restore hint unconditionally since this is + * an average case optimization. Otherwise complicated atomic operations + * would be necessary. Synchronize with a potential read of the resource + * count in the filter function. See also _CORE_mutex_Surrender(), + * _Thread_Set_priority_filter() and _Thread_Restore_priority_filter(). + */ + the_thread->priority_restore_hint = true; + _Atomic_Fence( ATOMIC_ORDER_ACQ_REL ); + + /* * Do not bother recomputing all the priority related information if * we are not REALLY changing priority. 
*/ - if ( the_thread->current_priority != new_priority ) { + if ( ( *filter )( the_thread, &new_priority, arg ) ) { uint32_t my_generation; - my_generation = the_thread->Priority.generation + 1; + my_generation = the_thread->priority_generation + 1; the_thread->current_priority = new_priority; - the_thread->Priority.generation = my_generation; + the_thread->priority_generation = my_generation; - (*the_thread->Priority.change_handler)( + ( *the_thread->Wait.operations->priority_change )( the_thread, new_priority, - the_thread->Priority.change_handler_context + the_thread->Wait.queue ); _Thread_Lock_release( lock, &lock_context ); _Scheduler_Acquire( the_thread, &lock_context ); - if ( the_thread->Priority.generation == my_generation ) { + if ( the_thread->priority_generation == my_generation ) { if ( _States_Is_ready( the_thread->current_state ) ) { _Scheduler_Change_priority( the_thread, @@ -72,3 +83,53 @@ void _Thread_Change_priority( _Thread_Lock_release( lock, &lock_context ); } } + +static bool _Thread_Raise_priority_filter( + Thread_Control *the_thread, + Priority_Control *new_priority, + void *arg +) +{ + return _Thread_Priority_less_than( + the_thread->current_priority, + *new_priority + ); +} + +void _Thread_Raise_priority( + Thread_Control *the_thread, + Priority_Control new_priority +) +{ + _Thread_Change_priority( + the_thread, + new_priority, + NULL, + _Thread_Raise_priority_filter, + false + ); +} + +static bool _Thread_Restore_priority_filter( + Thread_Control *the_thread, + Priority_Control *new_priority, + void *arg +) +{ + *new_priority = the_thread->real_priority; + + the_thread->priority_restore_hint = false; + + return *new_priority != the_thread->current_priority; +} + +void _Thread_Restore_priority( Thread_Control *the_thread ) +{ + _Thread_Change_priority( + the_thread, + 0, + NULL, + _Thread_Restore_priority_filter, + true + ); +} diff --git a/cpukit/score/src/threaddelayended.c b/cpukit/score/src/threaddelayended.c deleted file mode 100644 
index 95dae7d0fa..0000000000 --- a/cpukit/score/src/threaddelayended.c +++ /dev/null @@ -1,38 +0,0 @@ -/** - * @file - * - * @brief End the Delay of a Thread - * @ingroup ScoreThread - */ - -/* - * COPYRIGHT (c) 1989-2007. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/threadimpl.h> - -void _Thread_Delay_ended( - Objects_Id id, - void *arg -) -{ - Thread_Control *the_thread = arg; - - (void) id; - - _Thread_Clear_state( - the_thread, - STATES_DELAYING - | STATES_WAITING_FOR_TIME - | STATES_INTERRUPTIBLE_BY_SIGNAL - ); -} diff --git a/cpukit/score/src/threaddispatchdisablelevel.c b/cpukit/score/src/threaddispatchdisablelevel.c index f78eb52016..9170f2eb09 100644 --- a/cpukit/score/src/threaddispatchdisablelevel.c +++ b/cpukit/score/src/threaddispatchdisablelevel.c @@ -113,8 +113,10 @@ uint32_t _Thread_Dispatch_decrement_disable_level( void ) _Giant_Do_release( cpu_self ); _Assert( - ( disable_level == 0 && _Giant.owner_cpu != cpu_self ) - || ( disable_level != 0 && _Giant.owner_cpu == cpu_self ) + ( disable_level == cpu_self->isr_nest_level + && _Giant.owner_cpu != cpu_self ) + || ( disable_level > cpu_self->isr_nest_level + && _Giant.owner_cpu == cpu_self ) ); _Profiling_Thread_dispatch_enable( cpu_self, disable_level ); diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c index 27c3f06af7..2133d7485b 100644 --- a/cpukit/score/src/threadinitialize.c +++ b/cpukit/score/src/threadinitialize.c @@ -29,15 +29,6 @@ #include <rtems/score/cpusetimpl.h> #include <rtems/config.h> -void _Thread_Priority_change_do_nothing( - Thread_Control *the_thread, - Priority_Control new_priority, - void *context -) -{ - /* Do nothing */ -} - bool _Thread_Initialize( Objects_Information *information, 
Thread_Control *the_thread, @@ -208,10 +199,11 @@ bool _Thread_Initialize( the_thread->current_state = STATES_DORMANT; the_thread->Wait.queue = NULL; + the_thread->Wait.operations = &_Thread_queue_Operations_default; the_thread->resource_count = 0; + the_thread->current_priority = priority; the_thread->real_priority = priority; - the_thread->Priority.generation = 0; - the_thread->Priority.change_handler = _Thread_Priority_change_do_nothing; + the_thread->priority_generation = 0; the_thread->Start.initial_priority = priority; _Thread_Wait_flags_set( the_thread, THREAD_WAIT_FLAGS_INITIAL ); @@ -219,7 +211,7 @@ bool _Thread_Initialize( _Scheduler_Node_initialize( scheduler, the_thread ); scheduler_node_initialized = true; - _Thread_Set_priority( the_thread, priority ); + _Scheduler_Update_priority( the_thread, priority ); /* * Initialize the CPU usage statistics diff --git a/cpukit/score/src/threadq.c b/cpukit/score/src/threadq.c index bdd380d188..3b1b47bf3a 100644 --- a/cpukit/score/src/threadq.c +++ b/cpukit/score/src/threadq.c @@ -19,9 +19,7 @@ #endif #include <rtems/score/threadqimpl.h> -#include <rtems/score/chainimpl.h> #include <rtems/score/rbtreeimpl.h> -#include <rtems/score/scheduler.h> #include <rtems/score/threadimpl.h> RBTree_Compare_result _Thread_queue_Compare_priority( @@ -29,35 +27,38 @@ RBTree_Compare_result _Thread_queue_Compare_priority( const RBTree_Node *right ) { - Priority_Control left_priority = - THREAD_RBTREE_NODE_TO_THREAD( left )->current_priority; - Priority_Control right_priority = - THREAD_RBTREE_NODE_TO_THREAD( right )->current_priority; + const Thread_Control *left_thread; + const Thread_Control *right_thread; + Priority_Control left_prio; + Priority_Control right_prio; + + left_thread = THREAD_RBTREE_NODE_TO_THREAD( left ); + right_thread = THREAD_RBTREE_NODE_TO_THREAD( right ); + left_prio = left_thread->current_priority; + right_prio = right_thread->current_priority; /* * SuperCore priorities use lower numbers to indicate greater 
importance. */ - if ( left_priority == right_priority ) - return 0; - if ( left_priority < right_priority ) - return -1; - return 1; + return ( left_prio > right_prio ) - ( left_prio < right_prio ); } void _Thread_queue_Initialize( - Thread_queue_Control *the_thread_queue, - Thread_queue_Disciplines the_discipline, - uint32_t timeout_status + Thread_queue_Control *the_thread_queue, + Thread_queue_Disciplines the_discipline ) { - the_thread_queue->discipline = the_discipline; - the_thread_queue->timeout_status = timeout_status; - the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED; + const Thread_queue_Operations *operations; + + _ISR_lock_Initialize( &the_thread_queue->Lock, "Thread Queue" ); if ( the_discipline == THREAD_QUEUE_DISCIPLINE_PRIORITY ) { - _RBTree_Initialize_empty( &the_thread_queue->Queues.Priority ); - } else { /* must be THREAD_QUEUE_DISCIPLINE_FIFO */ - _Chain_Initialize_empty( &the_thread_queue->Queues.Fifo ); + operations = &_Thread_queue_Operations_priority; + } else { + _Assert( the_discipline == THREAD_QUEUE_DISCIPLINE_FIFO ); + operations = &_Thread_queue_Operations_FIFO; } + the_thread_queue->operations = operations; + ( *operations->initialize )( the_thread_queue ); } diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c index 5c237560f4..f73fa01173 100644 --- a/cpukit/score/src/threadqenqueue.c +++ b/cpukit/score/src/threadqenqueue.c @@ -20,96 +20,55 @@ #include <rtems/score/threadqimpl.h> #include <rtems/score/assert.h> -#include <rtems/score/rbtreeimpl.h> +#include <rtems/score/threaddispatch.h> #include <rtems/score/threadimpl.h> #include <rtems/score/watchdogimpl.h> -ISR_LOCK_DEFINE( static, _Thread_queue_Lock, "Thread Queue" ) +#define THREAD_QUEUE_INTEND_TO_BLOCK \ + (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK) -static void _Thread_queue_Acquire( ISR_lock_Context *lock_context ) -{ - _ISR_lock_ISR_disable_and_acquire( &_Thread_queue_Lock, lock_context ); -} +#define 
THREAD_QUEUE_BLOCKED \ + (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED) -static void _Thread_queue_Release( ISR_lock_Context *lock_context ) -{ - _ISR_lock_Release_and_ISR_enable( &_Thread_queue_Lock, lock_context ); -} +#define THREAD_QUEUE_READY_AGAIN \ + (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN) -/** - * @brief Finalize a blocking operation. - * - * This method is used to finalize a blocking operation that was - * satisfied. It may be used with thread queues or any other synchronization - * object that uses the blocking states and watchdog times for timeout. - * - * This method will restore the previous ISR disable level during the cancel - * operation. Thus it is an implicit _ISR_Enable(). - * - * @param[in] the_thread is the thread whose blocking is canceled - * @param[in] lock_context is the previous ISR disable level - */ -static void _Thread_blocking_operation_Finalize( - Thread_Control *the_thread, - ISR_lock_Context *lock_context -) +static void _Thread_queue_Unblock( Thread_Control *the_thread ) { - /* - * The thread is not waiting on anything after this completes. - */ - the_thread->Wait.queue = NULL; - - /* - * If the sync state is timed out, this is very likely not needed. - * But better safe than sorry when it comes to critical sections. - */ - if ( _Watchdog_Is_active( &the_thread->Timer ) ) { - _Watchdog_Deactivate( &the_thread->Timer ); - _Thread_queue_Release( lock_context ); - (void) _Watchdog_Remove( &the_thread->Timer ); - } else - _Thread_queue_Release( lock_context ); - - /* - * Global objects with thread queue's should not be operated on from an - * ISR. But the sync code still must allow short timeouts to be processed - * correctly. 
- */ - + _Watchdog_Remove_ticks( &the_thread->Timer ); _Thread_Unblock( the_thread ); #if defined(RTEMS_MULTIPROCESSING) - if ( !_Objects_Is_local_id( the_thread->Object.id ) ) + if ( !_Objects_Is_local_id( the_thread->Object.id ) ) { _Thread_MP_Free_proxy( the_thread ); + } #endif } -static void _Thread_queue_Requeue_priority( - Thread_Control *the_thread, - Priority_Control new_priority, - void *context -) -{ - Thread_queue_Control *tq = context; - - _RBTree_Extract( &tq->Queues.Priority, &the_thread->RBNode ); - _RBTree_Insert( - &tq->Queues.Priority, - &the_thread->RBNode, - _Thread_queue_Compare_priority, - false - ); -} - -void _Thread_queue_Enqueue( +void _Thread_queue_Enqueue_critical( Thread_queue_Control *the_thread_queue, Thread_Control *the_thread, States_Control state, - Watchdog_Interval timeout + Watchdog_Interval timeout, + uint32_t timeout_code, + ISR_lock_Context *lock_context ) { - ISR_lock_Context lock_context; - Thread_blocking_operation_States sync_state; + const Thread_queue_Operations *operations; + Per_CPU_Control *cpu_self; + bool success; + + _Thread_Lock_set( the_thread, &the_thread_queue->Lock ); + + operations = the_thread_queue->operations; + _Thread_Wait_set_queue( the_thread, the_thread_queue ); + _Thread_Wait_set_operations( the_thread, operations ); + + ( *operations->enqueue )( the_thread_queue, the_thread ); + + _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK ); + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _Thread_queue_Release( the_thread_queue, lock_context ); #if defined(RTEMS_MULTIPROCESSING) if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) @@ -124,166 +83,117 @@ void _Thread_queue_Enqueue( /* * If the thread wants to timeout, then schedule its timer. 
*/ - if ( timeout ) { - _Watchdog_Initialize( - &the_thread->Timer, - _Thread_queue_Timeout, - the_thread->Object.id, - NULL - ); - + if ( timeout != WATCHDOG_NO_TIMEOUT ) { + _Thread_Wait_set_timeout_code( the_thread, timeout_code ); + _Watchdog_Initialize( &the_thread->Timer, _Thread_Timeout, 0, the_thread ); _Watchdog_Insert_ticks( &the_thread->Timer, timeout ); } - /* - * Now initiate the enqueuing and checking if the blocking operation - * should be completed or the thread has had its blocking condition - * satisfied before we got here. - */ - _Thread_queue_Acquire( &lock_context ); - - sync_state = the_thread_queue->sync_state; - the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED; - - if ( sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED ) { - /* - * Invoke the discipline specific enqueue method. - */ - if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_FIFO ) { - _Chain_Append_unprotected( - &the_thread_queue->Queues.Fifo, - &the_thread->Object.Node - ); - } else { /* must be THREAD_QUEUE_DISCIPLINE_PRIORITY */ - _Thread_Lock_set( the_thread, &_Thread_queue_Lock ); - _Thread_Priority_set_change_handler( - the_thread, - _Thread_queue_Requeue_priority, - the_thread_queue - ); - _RBTree_Insert( - &the_thread_queue->Queues.Priority, - &the_thread->RBNode, - _Thread_queue_Compare_priority, - false - ); - } - - the_thread->Wait.queue = the_thread_queue; - the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED; - _Thread_queue_Release( &lock_context ); - } else { - /* Cancel a blocking operation due to ISR */ + success = _Thread_Wait_flags_try_change( + the_thread, + THREAD_QUEUE_INTEND_TO_BLOCK, + THREAD_QUEUE_BLOCKED + ); + if ( !success ) { + _Thread_queue_Unblock( the_thread ); + } - _Assert( - sync_state == THREAD_BLOCKING_OPERATION_TIMEOUT || - sync_state == THREAD_BLOCKING_OPERATION_SATISFIED - ); + _Thread_Dispatch_enable( cpu_self ); +} - _Thread_blocking_operation_Finalize( the_thread, &lock_context ); - 
} +void _Thread_queue_Extract_locked( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread +) +{ + ( *the_thread_queue->operations->extract )( the_thread_queue, the_thread ); + + _Thread_Wait_set_queue( the_thread, NULL ); + _Thread_Wait_restore_default_operations( the_thread ); + _Thread_Lock_restore_default( the_thread ); } -void _Thread_queue_Extract_with_return_code( +void _Thread_queue_Unblock_critical( Thread_queue_Control *the_thread_queue, Thread_Control *the_thread, - uint32_t return_code + ISR_lock_Context *lock_context ) { - ISR_lock_Context lock_context; - - _Thread_queue_Acquire( &lock_context ); + bool success; + bool unblock; - if ( !_States_Is_waiting_on_thread_queue( the_thread->current_state ) ) { - _Thread_queue_Release( &lock_context ); - return; + success = _Thread_Wait_flags_try_change_critical( + the_thread, + THREAD_QUEUE_INTEND_TO_BLOCK, + THREAD_QUEUE_READY_AGAIN + ); + if ( success ) { + unblock = false; + } else { + _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED ); + _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN ); + unblock = true; } - if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_FIFO ) { - _Chain_Extract_unprotected( &the_thread->Object.Node ); - } else { /* must be THREAD_QUEUE_DISCIPLINE_PRIORITY */ - _RBTree_Extract( - &the_thread->Wait.queue->Queues.Priority, - &the_thread->RBNode - ); - _Thread_Priority_restore_default_change_handler( the_thread ); - _Thread_Lock_restore_default( the_thread ); - } + if ( unblock ) { + Per_CPU_Control *cpu_self; - the_thread->Wait.return_code = return_code; + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _Thread_queue_Release( the_thread_queue, lock_context ); - /* - * We found a thread to unblock. - * - * NOTE: This is invoked with interrupts still disabled. 
- */ - _Thread_blocking_operation_Finalize( the_thread, &lock_context ); + _Thread_queue_Unblock( the_thread ); + + _Thread_Dispatch_enable( cpu_self ); + } else { + _Thread_queue_Release( the_thread_queue, lock_context ); + } } -void _Thread_queue_Extract( +void _Thread_queue_Extract_critical( Thread_queue_Control *the_thread_queue, - Thread_Control *the_thread + Thread_Control *the_thread, + ISR_lock_Context *lock_context ) { - _Thread_queue_Extract_with_return_code( - the_thread_queue, - the_thread, - the_thread->Wait.return_code - ); + _Thread_queue_Extract_locked( the_thread_queue, the_thread ); + _Thread_queue_Unblock_critical( the_thread_queue, the_thread, lock_context ); } -Thread_Control *_Thread_queue_Dequeue( - Thread_queue_Control *the_thread_queue -) +void _Thread_queue_Extract( Thread_Control *the_thread ) { - Thread_Control *the_thread; - ISR_lock_Context lock_context; - Thread_blocking_operation_States sync_state; + ISR_lock_Context lock_context; + ISR_lock_Control *lock; + Thread_queue_Control *the_thread_queue; - the_thread = NULL; - _Thread_queue_Acquire( &lock_context ); + lock = _Thread_Lock_acquire( the_thread, &lock_context ); - /* - * Invoke the discipline specific dequeue method. 
- */ - if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_FIFO ) { - if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) ) { - the_thread = (Thread_Control *) - _Chain_Get_first_unprotected( &the_thread_queue->Queues.Fifo ); - } - } else { /* must be THREAD_QUEUE_DISCIPLINE_PRIORITY */ - RBTree_Node *first; - - first = _RBTree_Get( &the_thread_queue->Queues.Priority, RBT_LEFT ); - if ( first ) { - the_thread = THREAD_RBTREE_NODE_TO_THREAD( first ); - _Thread_Priority_restore_default_change_handler( the_thread ); - _Thread_Lock_restore_default( the_thread ); - } - } + the_thread_queue = the_thread->Wait.queue; + + if ( the_thread_queue != NULL ) { + _SMP_Assert( lock == &the_thread_queue->Lock ); - if ( the_thread == NULL ) { - /* - * We did not find a thread to unblock in the queue. Maybe the executing - * thread is about to block on this thread queue. - */ - sync_state = the_thread_queue->sync_state; - if ( (sync_state == THREAD_BLOCKING_OPERATION_TIMEOUT) || - (sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED) ) { - the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SATISFIED; - the_thread = _Thread_Executing; - } else { - _Thread_queue_Release( &lock_context ); - return NULL; - } + _Thread_queue_Extract_critical( the_thread_queue, the_thread, &lock_context ); + } else { + _Thread_Lock_release( lock, &lock_context ); } +} - /* - * We found a thread to unblock. - * - * NOTE: This is invoked with interrupts still disabled. 
- */ - _Thread_blocking_operation_Finalize( the_thread, &lock_context ); +Thread_Control *_Thread_queue_Dequeue( Thread_queue_Control *the_thread_queue ) +{ + ISR_lock_Context lock_context; + Thread_Control *the_thread; + + _Thread_queue_Acquire( the_thread_queue, &lock_context ); + + the_thread = _Thread_queue_First_locked( the_thread_queue ); + + if ( the_thread != NULL ) { + _SMP_Assert( the_thread->Lock.current == &the_thread_queue->Lock ); + + _Thread_queue_Extract_critical( the_thread_queue, the_thread, &lock_context ); + } else { + _Thread_queue_Release( the_thread_queue, &lock_context ); + } return the_thread; } diff --git a/cpukit/score/src/threadqextractwithproxy.c b/cpukit/score/src/threadqextractwithproxy.c index fb06526e9e..72043a094d 100644 --- a/cpukit/score/src/threadqextractwithproxy.c +++ b/cpukit/score/src/threadqextractwithproxy.c @@ -31,8 +31,6 @@ void _Thread_queue_Extract_with_proxy( Thread_Control *the_thread ) { - Thread_queue_Control *the_thread_queue; - #if defined(RTEMS_MULTIPROCESSING) States_Control state; @@ -50,8 +48,5 @@ void _Thread_queue_Extract_with_proxy( } #endif - the_thread_queue = the_thread->Wait.queue; - if ( the_thread_queue != NULL ) { - _Thread_queue_Extract( the_thread_queue, the_thread ); - } + _Thread_queue_Extract( the_thread ); } diff --git a/cpukit/score/src/threadqfirst.c b/cpukit/score/src/threadqfirst.c index 5d97ae156d..c46f005c3e 100644 --- a/cpukit/score/src/threadqfirst.c +++ b/cpukit/score/src/threadqfirst.c @@ -19,33 +19,17 @@ #endif #include <rtems/score/threadqimpl.h> -#include <rtems/score/chainimpl.h> -#include <rtems/score/isrlevel.h> -#include <rtems/score/threadimpl.h> Thread_Control *_Thread_queue_First( Thread_queue_Control *the_thread_queue ) { - ISR_Level level; - Thread_Control *thread; + Thread_Control *the_thread; + ISR_lock_Context lock_context; - thread = NULL; + _Thread_queue_Acquire( the_thread_queue, &lock_context ); + the_thread = _Thread_queue_First_locked( the_thread_queue ); + 
_Thread_queue_Release( the_thread_queue, &lock_context ); - _ISR_Disable( level ); - - if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_FIFO ) { - if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) ) - thread = (Thread_Control *) _Chain_First(&the_thread_queue->Queues.Fifo); - } else { /* must be THREAD_QUEUE_DISCIPLINE_PRIORITY */ - RBTree_Node *first; - - first = _RBTree_First( &the_thread_queue->Queues.Priority, RBT_LEFT ); - if ( first ) - thread = THREAD_RBTREE_NODE_TO_THREAD( first ); - } - - _ISR_Enable( level ); - - return thread; + return the_thread; } diff --git a/cpukit/score/src/threadqflush.c b/cpukit/score/src/threadqflush.c index 1abe8aca3e..d37b9e8ae7 100644 --- a/cpukit/score/src/threadqflush.c +++ b/cpukit/score/src/threadqflush.c @@ -31,14 +31,30 @@ void _Thread_queue_Flush( uint32_t status ) { - Thread_Control *the_thread; + ISR_lock_Context lock_context; + Thread_Control *the_thread; + + _Thread_queue_Acquire( the_thread_queue, &lock_context ); + + while ( (the_thread = _Thread_queue_First_locked( the_thread_queue ) ) ) { +#if defined(RTEMS_MULTIPROCESSING) + if ( _Objects_Is_local_id( the_thread->Object.id ) ) +#endif + the_thread->Wait.return_code = status; + + _Thread_queue_Extract_critical( + the_thread_queue, + the_thread, + &lock_context + ); - while ( (the_thread = _Thread_queue_Dequeue( the_thread_queue )) ) { #if defined(RTEMS_MULTIPROCESSING) if ( !_Objects_Is_local_id( the_thread->Object.id ) ) ( *remote_extract_callout )( the_thread ); - else #endif - the_thread->Wait.return_code = status; + + _Thread_queue_Acquire( the_thread_queue, &lock_context ); } + + _Thread_queue_Release( the_thread_queue, &lock_context ); } diff --git a/cpukit/score/src/threadqops.c b/cpukit/score/src/threadqops.c new file mode 100644 index 0000000000..2967a0efc3 --- /dev/null +++ b/cpukit/score/src/threadqops.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. 
+ * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#if HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/score/threadimpl.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/rbtreeimpl.h> + +static void _Thread_queue_Do_nothing_priority_change( + Thread_Control *the_thread, + Priority_Control new_priority, + Thread_queue_Control *the_thread_queue +) +{ + /* Do nothing */ +} + +static void _Thread_queue_Do_nothing_extract( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread +) +{ + /* Do nothing */ +} + +static void _Thread_queue_FIFO_initialize( + Thread_queue_Control *the_thread_queue +) +{ + _Chain_Initialize_empty( &the_thread_queue->Queues.Fifo ); +} + +static void _Thread_queue_FIFO_enqueue( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread +) +{ + _Chain_Append_unprotected( + &the_thread_queue->Queues.Fifo, + &the_thread->Wait.Node.Chain + ); +} + +static void _Thread_queue_FIFO_extract( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread +) +{ + _Chain_Extract_unprotected( &the_thread->Wait.Node.Chain ); +} + +static Thread_Control *_Thread_queue_FIFO_first( + Thread_queue_Control *the_thread_queue +) +{ + Chain_Control *fifo = &the_thread_queue->Queues.Fifo; + + return _Chain_Is_empty( fifo ) ? 
+ NULL : THREAD_CHAIN_NODE_TO_THREAD( _Chain_First( fifo ) ); +} + +static void _Thread_queue_Priority_priority_change( + Thread_Control *the_thread, + Priority_Control new_priority, + Thread_queue_Control *the_thread_queue +) +{ + _RBTree_Extract( + &the_thread_queue->Queues.Priority, + &the_thread->Wait.Node.RBTree + ); + _RBTree_Insert( + &the_thread_queue->Queues.Priority, + &the_thread->Wait.Node.RBTree, + _Thread_queue_Compare_priority, + false + ); +} + +static void _Thread_queue_Priority_initialize( + Thread_queue_Control *the_thread_queue +) +{ + _RBTree_Initialize_empty( &the_thread_queue->Queues.Priority ); +} + +static void _Thread_queue_Priority_enqueue( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread +) +{ + _RBTree_Insert( + &the_thread_queue->Queues.Priority, + &the_thread->Wait.Node.RBTree, + _Thread_queue_Compare_priority, + false + ); +} + +static void _Thread_queue_Priority_extract( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread +) +{ + _RBTree_Extract( + &the_thread_queue->Queues.Priority, + &the_thread->Wait.Node.RBTree + ); +} + +static Thread_Control *_Thread_queue_Priority_first( + Thread_queue_Control *the_thread_queue +) +{ + RBTree_Node *first; + + first = _RBTree_First( &the_thread_queue->Queues.Priority, RBT_LEFT ); + + return first != NULL ? THREAD_RBTREE_NODE_TO_THREAD( first ) : NULL; +} + +const Thread_queue_Operations _Thread_queue_Operations_default = { + .priority_change = _Thread_queue_Do_nothing_priority_change, + .extract = _Thread_queue_Do_nothing_extract + /* + * The default operations are only used in _Thread_Change_priority() and + * _Thread_Timeout() and don't have a thread queue associated with them, so + * the enqueue and first operations are superfluous. 
+ */ +}; + +const Thread_queue_Operations _Thread_queue_Operations_FIFO = { + .priority_change = _Thread_queue_Do_nothing_priority_change, + .initialize = _Thread_queue_FIFO_initialize, + .enqueue = _Thread_queue_FIFO_enqueue, + .extract = _Thread_queue_FIFO_extract, + .first = _Thread_queue_FIFO_first +}; + +const Thread_queue_Operations _Thread_queue_Operations_priority = { + .priority_change = _Thread_queue_Priority_priority_change, + .initialize = _Thread_queue_Priority_initialize, + .enqueue = _Thread_queue_Priority_enqueue, + .extract = _Thread_queue_Priority_extract, + .first = _Thread_queue_Priority_first +}; diff --git a/cpukit/score/src/threadqprocesstimeout.c b/cpukit/score/src/threadqprocesstimeout.c deleted file mode 100644 index 616901900d..0000000000 --- a/cpukit/score/src/threadqprocesstimeout.c +++ /dev/null @@ -1,81 +0,0 @@ -/** - * @file - * - * @brief Thread Queue Handler Process Timeout Handler - * @ingroup ScoreThreadQ - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/threadqimpl.h> -#include <rtems/score/threadimpl.h> - -void _Thread_queue_Process_timeout( - Thread_Control *the_thread -) -{ - Thread_queue_Control *the_thread_queue; - ISR_Level level; - - /* - * If the_thread_queue is not synchronized, then it is either - * "nothing happened", "timeout", or "satisfied". If the_thread - * is the executing thread, then it is in the process of blocking - * and it is the thread which is responsible for the synchronization - * process. - * - * If it is not satisfied, then it is "nothing happened" and - * this is the "timeout" transition. After a request is satisfied, - * a timeout is not allowed to occur. 
- */ - - _ISR_Disable( level ); - the_thread_queue = the_thread->Wait.queue; - if ( the_thread_queue != NULL ) { - if ( the_thread_queue->sync_state != THREAD_BLOCKING_OPERATION_SYNCHRONIZED && - _Thread_Is_executing( the_thread ) ) { - if ( the_thread_queue->sync_state != THREAD_BLOCKING_OPERATION_SATISFIED ) { - the_thread->Wait.return_code = the_thread_queue->timeout_status; - the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_TIMEOUT; - } - _ISR_Enable( level ); - } else { - _ISR_Enable( level ); - - /* - * After we enable interrupts here, a lot may happen in the meantime, - * e.g. nested interrupts may release the resource that times out here. - * So we enter _Thread_queue_Extract() speculatively. Inside this - * function we check the actual status under ISR disable protection. - * This ensures that exactly one executing context performs the extract - * operation (other parties may call _Thread_queue_Dequeue()). If this - * context won, then we have a timeout. - * - * We can use the_thread_queue pointer here even if - * the_thread->Wait.queue is already set to NULL since the extract - * operation will only use the thread queue discipline to select the - * right extract operation. The timeout status is set during thread - * queue initialization. - */ - _Thread_queue_Extract_with_return_code( - the_thread_queue, - the_thread, - the_thread_queue->timeout_status - ); - } - } else { - _ISR_Enable( level ); - } -} - diff --git a/cpukit/score/src/threadqtimeout.c b/cpukit/score/src/threadqtimeout.c deleted file mode 100644 index fcacd1c781..0000000000 --- a/cpukit/score/src/threadqtimeout.c +++ /dev/null @@ -1,44 +0,0 @@ -/** - * @file - * - * @brief Thread Queue Timeout - * @ingroup ScoreThreadQ - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/threadqimpl.h> -#include <rtems/score/threadimpl.h> - -void _Thread_queue_Timeout( - Objects_Id id, - void *ignored __attribute__((unused)) -) -{ - Thread_Control *the_thread; - Objects_Locations location; - - the_thread = _Thread_Get( id, &location ); - switch ( location ) { - case OBJECTS_ERROR: -#if defined(RTEMS_MULTIPROCESSING) - case OBJECTS_REMOTE: /* impossible */ -#endif - break; - case OBJECTS_LOCAL: - _Thread_queue_Process_timeout( the_thread ); - _Objects_Put_without_thread_dispatch( &the_thread->Object ); - break; - } -} diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c index e759b5b42b..b98b6388f8 100644 --- a/cpukit/score/src/threadrestart.c +++ b/cpukit/score/src/threadrestart.c @@ -42,6 +42,28 @@ static Thread_Zombie_control _Thread_Zombies = { .Lock = ISR_LOCK_INITIALIZER( "thread zombies" ) }; +static bool _Thread_Raise_real_priority_filter( + Thread_Control *the_thread, + Priority_Control *new_priority_ptr, + void *arg +) +{ + Priority_Control real_priority; + Priority_Control new_priority; + Priority_Control current_priority; + + real_priority = the_thread->real_priority; + new_priority = *new_priority_ptr; + current_priority = the_thread->current_priority; + + new_priority = _Thread_Priority_highest( real_priority, new_priority ); + *new_priority_ptr = new_priority; + + the_thread->real_priority = new_priority; + + return _Thread_Priority_less_than( current_priority, new_priority ); +} + static void _Thread_Make_zombie( Thread_Control *the_thread ) { ISR_lock_Context lock_context; @@ -62,7 +84,7 @@ static void _Thread_Make_zombie( Thread_Control *the_thread ) _Thread_Set_state( the_thread, STATES_ZOMBIE ); _Thread_queue_Extract_with_proxy( the_thread ); - _Watchdog_Remove( &the_thread->Timer ); + _Watchdog_Remove_ticks( &the_thread->Timer ); _ISR_lock_ISR_disable_and_acquire( &zombies->Lock, &lock_context ); _Chain_Append_unprotected( 
&zombies->Chain, &the_thread->Object.Node ); @@ -231,12 +253,17 @@ static void _Thread_Start_life_change( the_thread->is_preemptible = the_thread->Start.is_preemptible; the_thread->budget_algorithm = the_thread->Start.budget_algorithm; the_thread->budget_callout = the_thread->Start.budget_callout; - the_thread->real_priority = priority; _Thread_Set_state( the_thread, STATES_RESTARTING ); _Thread_queue_Extract_with_proxy( the_thread ); - _Watchdog_Remove( &the_thread->Timer ); - _Scheduler_Set_priority_if_higher( scheduler, the_thread, priority ); + _Watchdog_Remove_ticks( &the_thread->Timer ); + _Thread_Change_priority( + the_thread, + priority, + NULL, + _Thread_Raise_real_priority_filter, + false + ); _Thread_Add_post_switch_action( the_thread, &the_thread->Life.Action ); _Thread_Ready( the_thread ); } @@ -260,9 +287,9 @@ static void _Thread_Request_life_change( scheduler = _Scheduler_Get( the_thread ); if ( the_thread == executing ) { - executing->real_priority = priority; + Priority_Control unused; - _Scheduler_Set_priority_if_higher( scheduler, the_thread, priority ); + _Thread_Set_priority( the_thread, priority, &unused, true ); _Thread_Start_life_change_for_executing( executing ); } else if ( previous_life_state == THREAD_LIFE_NORMAL ) { _Thread_Start_life_change( the_thread, scheduler, priority ); @@ -270,16 +297,11 @@ static void _Thread_Request_life_change( _Thread_Clear_state( the_thread, STATES_SUSPENDED ); if ( _Thread_Is_life_terminating( additional_life_state ) ) { - the_thread->real_priority = _Scheduler_Highest_priority_of_two( - scheduler, - the_thread->real_priority, - priority - ); - - _Scheduler_Change_priority_if_higher( - scheduler, + _Thread_Change_priority( the_thread, priority, + NULL, + _Thread_Raise_real_priority_filter, false ); } diff --git a/cpukit/score/src/threadsetpriority.c b/cpukit/score/src/threadsetpriority.c index e1ff118c7e..f6a061a281 100644 --- a/cpukit/score/src/threadsetpriority.c +++ 
b/cpukit/score/src/threadsetpriority.c @@ -19,14 +19,41 @@ #endif #include <rtems/score/threadimpl.h> -#include <rtems/score/schedulerimpl.h> -void _Thread_Set_priority( +static bool _Thread_Set_priority_filter( Thread_Control *the_thread, - Priority_Control new_priority + Priority_Control *new_priority_ptr, + void *arg ) { - the_thread->current_priority = new_priority; + Priority_Control current_priority; + Priority_Control new_priority; + Priority_Control *old_priority_ptr; + + current_priority = the_thread->current_priority; + new_priority = *new_priority_ptr; + + old_priority_ptr = arg; + *old_priority_ptr = current_priority; + + the_thread->real_priority = new_priority; + + return _Thread_Priority_less_than( current_priority, new_priority ) + || !_Thread_Owns_resources( the_thread ); +} - _Scheduler_Update_priority( the_thread, new_priority ); +void _Thread_Set_priority( + Thread_Control *the_thread, + Priority_Control new_priority, + Priority_Control *old_priority, + bool prepend_it +) +{ + _Thread_Change_priority( + the_thread, + new_priority, + old_priority, + _Thread_Set_priority_filter, + prepend_it + ); } diff --git a/cpukit/rtems/src/eventtimeout.c b/cpukit/score/src/threadtimeout.c index 9c091748c0..f69bc35ea2 100644 --- a/cpukit/rtems/src/eventtimeout.c +++ b/cpukit/score/src/threadtimeout.c @@ -1,8 +1,9 @@ /** - * @file + * @file * - * @brief Timeout Event - * @ingroup ClassicEvent + * @brief Thread Wait Timeout + * + * @ingroup ScoreThread */ /* @@ -18,15 +19,24 @@ #include "config.h" #endif -#include <rtems/rtems/eventimpl.h> #include <rtems/score/threadimpl.h> -void _Event_Timeout( - Objects_Id id, - void *arg -) +static void _Thread_Do_timeout( Thread_Control *the_thread ) +{ + the_thread->Wait.return_code = the_thread->Wait.timeout_code; + ( *the_thread->Wait.operations->extract )( + the_thread->Wait.queue, + the_thread + ); + _Thread_Wait_set_queue( the_thread, NULL ); + _Thread_Wait_restore_default_operations( the_thread ); + 
_Thread_Lock_restore_default( the_thread ); +} + +void _Thread_Timeout( Objects_Id id, void *arg ) { Thread_Control *the_thread; + ISR_lock_Control *thread_lock; ISR_lock_Context lock_context; Thread_Wait_flags wait_flags; Thread_Wait_flags wait_class; @@ -36,7 +46,7 @@ void _Event_Timeout( bool unblock; the_thread = arg; - _Thread_Lock_acquire_default( the_thread, &lock_context ); + thread_lock = _Thread_Lock_acquire( the_thread, &lock_context ); wait_flags = _Thread_Wait_flags_get( the_thread ); wait_class = wait_flags & THREAD_WAIT_CLASS_MASK; @@ -45,26 +55,32 @@ void _Event_Timeout( success = _Thread_Wait_flags_try_change_critical( the_thread, intend_to_block, - wait_class | THREAD_WAIT_STATE_INTERRUPT_TIMEOUT + wait_class | THREAD_WAIT_STATE_READY_AGAIN ); if ( success ) { - the_thread->Wait.return_code = RTEMS_TIMEOUT; + _Thread_Do_timeout( the_thread ); unblock = false; } else if ( _Thread_Wait_flags_get( the_thread ) == blocked ) { - the_thread->Wait.return_code = RTEMS_TIMEOUT; _Thread_Wait_flags_set( the_thread, - wait_class | THREAD_WAIT_STATE_TIMEOUT + wait_class | THREAD_WAIT_STATE_READY_AGAIN ); + _Thread_Do_timeout( the_thread ); unblock = true; } else { unblock = false; } - _Thread_Lock_release_default( the_thread, &lock_context ); + _Thread_Lock_release( thread_lock, &lock_context ); if ( unblock ) { _Thread_Unblock( the_thread ); + +#if defined(RTEMS_MULTIPROCESSING) + if ( !_Objects_Is_local_id( the_thread->Object.id ) ) { + _Thread_MP_Free_proxy( the_thread ); + } +#endif } } diff --git a/cpukit/score/src/timespecgetasnanoseconds.c b/cpukit/score/src/timespecgetasnanoseconds.c index 4ef1af4f62..2f8d17f4a4 100644 --- a/cpukit/score/src/timespecgetasnanoseconds.c +++ b/cpukit/score/src/timespecgetasnanoseconds.c @@ -20,10 +20,9 @@ #include <rtems/score/timespec.h> #include <rtems/score/todimpl.h> -uint64_t _Timespec_Get_As_nanoseconds( - const struct timespec *time, - const uint32_t nanoseconds +uint64_t _Timespec_Get_as_nanoseconds( + const 
struct timespec *time ) { - return ( ((uint64_t) time->tv_sec) * 1000000000ULL ) + time->tv_nsec + nanoseconds; + return ( ((uint64_t) time->tv_sec) * 1000000000ULL ) + time->tv_nsec; } diff --git a/cpukit/score/src/ts64addto.c b/cpukit/score/src/ts64addto.c deleted file mode 100644 index a0f4b3cf30..0000000000 --- a/cpukit/score/src/ts64addto.c +++ /dev/null @@ -1,31 +0,0 @@ -/** - * @file score/src/ts64addto.c - * - * @brief Add to a Timestamp - * @ingroup SuperCore - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -void _Timestamp64_Add_to( - Timestamp64_Control *_time, - const Timestamp64_Control *_add -) -{ - _Timestamp64_implementation_Add_to( _time, _add ); -} -#endif diff --git a/cpukit/score/src/ts64divide.c b/cpukit/score/src/ts64divide.c deleted file mode 100644 index d9a5099892..0000000000 --- a/cpukit/score/src/ts64divide.c +++ /dev/null @@ -1,51 +0,0 @@ -/** - * @file - * - * @brief Divide Timestamp - * @ingroup SuperCore - */ - -/* - * COPYRIGHT (c) 1989-2007. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -/* This method is never inlined. 
*/ -#if CPU_TIMESTAMP_USE_INT64 == TRUE || CPU_TIMESTAMP_USE_INT64_INLINE == TRUE -void _Timestamp64_Divide( - const Timestamp64_Control *_lhs, - const Timestamp64_Control *_rhs, - uint32_t *_ival_percentage, - uint32_t *_fval_percentage -) -{ - Timestamp64_Control answer; - - if ( *_rhs == 0 ) { - *_ival_percentage = 0; - *_fval_percentage = 0; - return; - } - - /* - * This looks odd but gives the results the proper precision. - * - * TODO: Rounding on the last digit of the fval. - */ - - answer = (*_lhs * 100000) / *_rhs; - - *_ival_percentage = answer / 1000; - *_fval_percentage = answer % 1000; -} -#endif diff --git a/cpukit/score/src/ts64equalto.c b/cpukit/score/src/ts64equalto.c deleted file mode 100644 index fd07474762..0000000000 --- a/cpukit/score/src/ts64equalto.c +++ /dev/null @@ -1,31 +0,0 @@ -/** - * @file - * - * @brief Timestamp equal to Operator - * @ingroup SuperCore - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -bool _Timestamp64_Equal_to( - const Timestamp64_Control *_lhs, - const Timestamp64_Control *_rhs -) -{ - return _Timestamp64_implementation_Equal_to( _lhs, _rhs ); -} -#endif diff --git a/cpukit/score/src/ts64getnanoseconds.c b/cpukit/score/src/ts64getnanoseconds.c deleted file mode 100644 index a5da43b1f1..0000000000 --- a/cpukit/score/src/ts64getnanoseconds.c +++ /dev/null @@ -1,30 +0,0 @@ -/** - * @file score/src/ts64toticks.c - * - * @brief Get Nanoseconds Portion of Timestamp - * @ingroup SuperCore - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). 
- * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -uint32_t _Timestamp64_Get_nanoseconds( - const Timestamp64_Control *_time -) -{ - return _Timestamp64_implementation_Get_nanoseconds( _time ); -} -#endif diff --git a/cpukit/score/src/ts64getseconds.c b/cpukit/score/src/ts64getseconds.c deleted file mode 100644 index eca0536e0e..0000000000 --- a/cpukit/score/src/ts64getseconds.c +++ /dev/null @@ -1,30 +0,0 @@ -/** - * @file - * - * @brief Get Seconds Portion of Timestamp - * @ingroup SuperCore - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -uint32_t _Timestamp64_Get_seconds( - const Timestamp64_Control *_time -) -{ - return _Timestamp64_implementation_Get_seconds( _time ); -} -#endif diff --git a/cpukit/score/src/ts64lessthan.c b/cpukit/score/src/ts64lessthan.c deleted file mode 100644 index d1478147dc..0000000000 --- a/cpukit/score/src/ts64lessthan.c +++ /dev/null @@ -1,31 +0,0 @@ -/** - * @file - * - * @brief Timestamp Less Than Operator - * @ingroup SuperCore Timestamp64 -*/ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -bool _Timestamp64_Less_than( - const Timestamp64_Control *_lhs, - const Timestamp64_Control *_rhs -) -{ - return _Timestamp64_implementation_Less_than( _lhs, _rhs ); -} -#endif diff --git a/cpukit/score/src/ts64set.c b/cpukit/score/src/ts64set.c deleted file mode 100644 index 22771d6980..0000000000 --- a/cpukit/score/src/ts64set.c +++ /dev/null @@ -1,33 +0,0 @@ -/** - * @file - * - * @brief Set Timestamp to Specified Seconds and Nanoseconds - * - * @ingroup SuperCore - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -void _Timestamp64_Set( - Timestamp64_Control *_time, - Timestamp64_Control _seconds, - Timestamp64_Control _nanoseconds -) -{ - _Timestamp64_implementation_Set( _time, _seconds, _nanoseconds ); -} -#endif diff --git a/cpukit/score/src/ts64settozero.c b/cpukit/score/src/ts64settozero.c deleted file mode 100644 index 7b319df1cf..0000000000 --- a/cpukit/score/src/ts64settozero.c +++ /dev/null @@ -1,31 +0,0 @@ -/** - * @file - * - * @brief Zero a Timestamp64 Instance - * - * @ingroup SuperCore - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -void _Timestamp64_Set_to_zero( - Timestamp64_Control *_time -) -{ - _Timestamp64_implementation_Set_to_zero( _time ); -} -#endif diff --git a/cpukit/score/src/ts64subtract.c b/cpukit/score/src/ts64subtract.c deleted file mode 100644 index 1ee917bb2e..0000000000 --- a/cpukit/score/src/ts64subtract.c +++ /dev/null @@ -1,31 +0,0 @@ -/** - * @file - * - * @brief Subtract Two Timestamps - * @ingroup Timestamp - */ -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -void _Timestamp64_Subtract( - const Timestamp64_Control *_start, - const Timestamp64_Control *_end, - Timestamp64_Control *_result -) -{ - _Timestamp64_implementation_Subtract( _start, _end, _result ); -} -#endif diff --git a/cpukit/score/src/ts64totimespec.c b/cpukit/score/src/ts64totimespec.c deleted file mode 100644 index 7e81da74e3..0000000000 --- a/cpukit/score/src/ts64totimespec.c +++ /dev/null @@ -1,32 +0,0 @@ -/** - * @file - * - * @brief Convert Timestamp to Struct Timespec - * - * @ingroup SuperCore - */ - -/* - * COPYRIGHT (c) 1989-2008. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -void _Timestamp64_To_timespec( - const Timestamp64_Control *_timestamp, - struct timespec *_timespec -) -{ - _Timestamp64_implementation_To_timespec( _timestamp, _timespec ); -} -#endif diff --git a/cpukit/score/src/ts64totimeval.c b/cpukit/score/src/ts64totimeval.c deleted file mode 100644 index 1ac765b1ff..0000000000 --- a/cpukit/score/src/ts64totimeval.c +++ /dev/null @@ -1,37 +0,0 @@ -/** - * @file - * - * @brief Convert 64-bit Timestamp to struct timeval - * - * @ingroup SuperCore - */ - -/* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H - #include "config.h" -#endif - -#include <rtems/score/timestamp.h> - -#if CPU_TIMESTAMP_USE_INT64 == TRUE -void _Timestamp64_To_timeval( - const Timestamp64_Control *_timestamp, - struct timeval *_timeval -) -{ - _Timestamp64_implementation_To_timeval( _timestamp, _timeval ); -} -#endif diff --git a/cpukit/score/src/watchdog.c b/cpukit/score/src/watchdog.c index 0db60efe6b..11d3cf289f 100644 --- a/cpukit/score/src/watchdog.c +++ b/cpukit/score/src/watchdog.c @@ -25,8 +25,6 @@ void _Watchdog_Handler_initialization( void ) { - _Watchdog_Sync_count = 0; - _Watchdog_Sync_level = 0; _Watchdog_Ticks_since_boot = 0; _Watchdog_Header_initialize( &_Watchdog_Ticks_header ); diff --git a/cpukit/score/src/watchdogadjust.c b/cpukit/score/src/watchdogadjust.c index 687f063482..32b5f7990e 100644 --- a/cpukit/score/src/watchdogadjust.c +++ b/cpukit/score/src/watchdogadjust.c @@ -19,34 +19,35 @@ #endif #include <rtems/score/watchdogimpl.h> -#include <rtems/score/chainimpl.h> -#include 
<rtems/score/isrlevel.h> -void _Watchdog_Adjust_backward( +void _Watchdog_Adjust_backward_locked( Watchdog_Header *header, Watchdog_Interval units ) { - ISR_Level level; - - _ISR_Disable( level ); - if ( !_Watchdog_Is_empty( header ) ) { _Watchdog_First( header )->delta_interval += units; } - - _ISR_Enable( level ); } -void _Watchdog_Adjust_forward( +void _Watchdog_Adjust_backward( Watchdog_Header *header, Watchdog_Interval units ) { - ISR_Level level; + ISR_lock_Context lock_context; - _ISR_Disable( level ); + _Watchdog_Acquire( header, &lock_context ); + _Watchdog_Adjust_backward_locked( header, units ); + _Watchdog_Release( header, &lock_context ); +} +void _Watchdog_Adjust_forward_locked( + Watchdog_Header *header, + Watchdog_Interval units, + ISR_lock_Context *lock_context +) +{ while ( !_Watchdog_Is_empty( header ) && units > 0 ) { Watchdog_Control *first = _Watchdog_First( header ); @@ -57,13 +58,23 @@ void _Watchdog_Adjust_forward( units -= first->delta_interval; first->delta_interval = 1; - _ISR_Enable( level ); + _Watchdog_Release( header, lock_context ); _Watchdog_Tickle( header ); - _ISR_Disable( level ); + _Watchdog_Acquire( header, lock_context ); } } +} + +void _Watchdog_Adjust_forward( + Watchdog_Header *header, + Watchdog_Interval units +) +{ + ISR_lock_Context lock_context; - _ISR_Enable( level ); + _Watchdog_Acquire( header, &lock_context ); + _Watchdog_Adjust_forward_locked( header, units, &lock_context ); + _Watchdog_Release( header, &lock_context ); } diff --git a/cpukit/score/src/watchdogadjusttochain.c b/cpukit/score/src/watchdogadjusttochain.c deleted file mode 100644 index 1926656ca3..0000000000 --- a/cpukit/score/src/watchdogadjusttochain.c +++ /dev/null @@ -1,75 +0,0 @@ -/** - * @file - * - * @brief Watchdog Adjust to Chain - * @ingroup ScoreWatchdog - */ - -/* - * COPYRIGHT (c) 1989-2009. - * On-Line Applications Research Corporation (OAR). 
- * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/watchdogimpl.h> -#include <rtems/score/isrlevel.h> - -void _Watchdog_Adjust_to_chain( - Watchdog_Header *header, - Watchdog_Interval units_arg, - Chain_Control *to_fire - -) -{ - Watchdog_Interval units = units_arg; - ISR_Level level; - Watchdog_Control *first; - - _ISR_Disable( level ); - - while ( 1 ) { - if ( _Watchdog_Is_empty( header ) ) { - break; - } - first = _Watchdog_First( header ); - - /* - * If it is longer than "units" until the first element on the chain - * fires, then bump it and quit. - */ - if ( units < first->delta_interval ) { - first->delta_interval -= units; - break; - } - - /* - * The first set happens in less than units, so take all of them - * off the chain and adjust units to reflect this. - */ - units -= first->delta_interval; - first->delta_interval = 0; - - while ( 1 ) { - _Chain_Extract_unprotected( &first->Node ); - _Chain_Append_unprotected( to_fire, &first->Node ); - - _ISR_Flash( level ); - - if ( _Watchdog_Is_empty( header ) ) - break; - first = _Watchdog_First( header ); - if ( first->delta_interval != 0 ) - break; - } - } - - _ISR_Enable( level ); -} - diff --git a/cpukit/score/src/watchdoginsert.c b/cpukit/score/src/watchdoginsert.c index 272cac8db1..6b81c7b872 100644 --- a/cpukit/score/src/watchdoginsert.c +++ b/cpukit/score/src/watchdoginsert.c @@ -19,76 +19,99 @@ #endif #include <rtems/score/watchdogimpl.h> -#include <rtems/score/isrlevel.h> -#include <rtems/score/percpu.h> -void _Watchdog_Insert( - Watchdog_Header *header, - Watchdog_Control *the_watchdog +static void _Watchdog_Insert_fixup( + Watchdog_Header *header, + Watchdog_Control *next_watchdog, + Watchdog_Interval delta ) { - ISR_Level level; - Watchdog_Control *after; - uint32_t insert_isr_nest_level; - Watchdog_Interval 
delta_interval; + const Chain_Node *iterator_tail; + Chain_Node *iterator_node; + + next_watchdog->delta_interval -= delta; + iterator_node = _Chain_First( &header->Iterators ); + iterator_tail = _Chain_Immutable_tail( &header->Iterators ); - insert_isr_nest_level = _ISR_Nest_level; + while ( iterator_node != iterator_tail ) { + Watchdog_Iterator *iterator; - _ISR_Disable( level ); + iterator = (Watchdog_Iterator *) iterator_node; - /* - * Check to see if the watchdog has just been inserted by a - * higher priority interrupt. If so, abandon this insert. - */ + if ( iterator->current == &next_watchdog->Node ) { + iterator->delta_interval -= delta; + } - if ( the_watchdog->state != WATCHDOG_INACTIVE ) { - _ISR_Enable( level ); - return; + iterator_node = _Chain_Next( iterator_node ); } +} - the_watchdog->state = WATCHDOG_BEING_INSERTED; - _Watchdog_Sync_count++; +void _Watchdog_Insert_locked( + Watchdog_Header *header, + Watchdog_Control *the_watchdog, + ISR_lock_Context *lock_context +) +{ + if ( the_watchdog->state == WATCHDOG_INACTIVE ) { + Watchdog_Iterator iterator; + Chain_Node *current; + Chain_Node *next; + Watchdog_Interval delta; -restart: - delta_interval = the_watchdog->initial; + the_watchdog->state = WATCHDOG_BEING_INSERTED; - for ( after = _Watchdog_First( header ) ; - ; - after = _Watchdog_Next( after ) ) { + _Chain_Append_unprotected( &header->Iterators, &iterator.Node ); - if ( delta_interval == 0 || !_Watchdog_Next( after ) ) - break; + delta = the_watchdog->initial; + current = _Chain_Head( &header->Watchdogs ); - if ( delta_interval < after->delta_interval ) { - after->delta_interval -= delta_interval; - break; - } + while ( + ( next = _Chain_Next( current ) ) != _Chain_Tail( &header->Watchdogs ) + ) { + Watchdog_Control *next_watchdog; + Watchdog_Interval delta_next; - delta_interval -= after->delta_interval; + next_watchdog = (Watchdog_Control *) next; + delta_next = next_watchdog->delta_interval; - _ISR_Flash( level ); + if ( delta < 
delta_next ) { + _Watchdog_Insert_fixup( header, next_watchdog, delta ); + break; + } - if ( the_watchdog->state != WATCHDOG_BEING_INSERTED ) { - goto exit_insert; - } + iterator.delta_interval = delta - delta_next; + iterator.current = next; - if ( _Watchdog_Sync_level > insert_isr_nest_level ) { - _Watchdog_Sync_level = insert_isr_nest_level; - goto restart; - } - } + _Watchdog_Flash( header, lock_context ); + + if ( the_watchdog->state != WATCHDOG_BEING_INSERTED ) { + goto abort_insert; + } - _Watchdog_Activate( the_watchdog ); + delta = iterator.delta_interval; + current = iterator.current; + } - the_watchdog->delta_interval = delta_interval; + the_watchdog->delta_interval = delta; + the_watchdog->start_time = _Watchdog_Ticks_since_boot; + _Watchdog_Activate( the_watchdog ); + _Chain_Insert_unprotected( current, &the_watchdog->Node ); - _Chain_Insert_unprotected( after->Node.previous, &the_watchdog->Node ); +abort_insert: - the_watchdog->start_time = _Watchdog_Ticks_since_boot; + _Chain_Extract_unprotected( &iterator.Node ); + } +} + +void _Watchdog_Insert( + Watchdog_Header *header, + Watchdog_Control *the_watchdog +) +{ + ISR_lock_Context lock_context; -exit_insert: - _Watchdog_Sync_level = insert_isr_nest_level; - _Watchdog_Sync_count--; - _ISR_Enable( level ); + _Watchdog_Acquire( header, &lock_context ); + _Watchdog_Insert_locked( header, the_watchdog, &lock_context ); + _Watchdog_Release( header, &lock_context ); } diff --git a/cpukit/score/src/watchdogremove.c b/cpukit/score/src/watchdogremove.c index 34d97b0eae..2ac63fe998 100644 --- a/cpukit/score/src/watchdogremove.c +++ b/cpukit/score/src/watchdogremove.c @@ -18,19 +18,69 @@ #include "config.h" #endif -#include <rtems/system.h> -#include <rtems/score/isr.h> #include <rtems/score/watchdogimpl.h> +#include <rtems/score/assert.h> + +static void _Watchdog_Remove_it( + Watchdog_Header *header, + Watchdog_Control *the_watchdog +) +{ + Chain_Node *next; + Watchdog_Interval delta; + const Chain_Node 
*iterator_tail; + Chain_Node *iterator_node; + + _Assert( + the_watchdog->state == WATCHDOG_ACTIVE + || the_watchdog->state == WATCHDOG_REMOVE_IT + ); + + the_watchdog->state = WATCHDOG_INACTIVE; + the_watchdog->stop_time = _Watchdog_Ticks_since_boot; + + next = _Chain_Next( &the_watchdog->Node ); + delta = the_watchdog->delta_interval; + + if ( next != _Chain_Tail( &header->Watchdogs ) ) { + Watchdog_Control *next_watchdog; + + next_watchdog = (Watchdog_Control *) next; + next_watchdog->delta_interval += delta; + } + + _Chain_Extract_unprotected( &the_watchdog->Node ); + + iterator_node = _Chain_First( &header->Iterators ); + iterator_tail = _Chain_Immutable_tail( &header->Iterators ); + + while ( iterator_node != iterator_tail ) { + Watchdog_Iterator *iterator; + + iterator = (Watchdog_Iterator *) iterator_node; + + if ( iterator->current == next ) { + iterator->delta_interval += delta; + } + + if ( iterator->current == &the_watchdog->Node ) { + iterator->current = _Chain_Previous( &the_watchdog->Node ); + } + + iterator_node = _Chain_Next( iterator_node ); + } +} Watchdog_States _Watchdog_Remove( + Watchdog_Header *header, Watchdog_Control *the_watchdog ) { - ISR_Level level; + ISR_lock_Context lock_context; Watchdog_States previous_state; - Watchdog_Control *next_watchdog; + Watchdog_Interval now; - _ISR_Disable( level ); + _Watchdog_Acquire( header, &lock_context ); previous_state = the_watchdog->state; switch ( previous_state ) { case WATCHDOG_INACTIVE: @@ -43,25 +93,82 @@ Watchdog_States _Watchdog_Remove( * the Insert operation we interrupted will be aborted. 
*/ the_watchdog->state = WATCHDOG_INACTIVE; + now = _Watchdog_Ticks_since_boot; + the_watchdog->start_time = now; + the_watchdog->stop_time = now; break; case WATCHDOG_ACTIVE: case WATCHDOG_REMOVE_IT: + _Watchdog_Remove_it( header, the_watchdog ); + break; + } - the_watchdog->state = WATCHDOG_INACTIVE; - next_watchdog = _Watchdog_Next( the_watchdog ); + _Watchdog_Release( header, &lock_context ); + return( previous_state ); +} + +void _Watchdog_Tickle( + Watchdog_Header *header +) +{ + ISR_lock_Context lock_context; - if ( _Watchdog_Next(next_watchdog) ) - next_watchdog->delta_interval += the_watchdog->delta_interval; + _Watchdog_Acquire( header, &lock_context ); - if ( _Watchdog_Sync_count ) - _Watchdog_Sync_level = _ISR_Nest_level; + if ( !_Watchdog_Is_empty( header ) ) { + Watchdog_Control *first; + Watchdog_Interval delta; - _Chain_Extract_unprotected( &the_watchdog->Node ); - break; + first = _Watchdog_First( header ); + delta = first->delta_interval; + + /* + * Although it is forbidden to insert watchdogs with a delta interval of + * zero it is possible to observe watchdogs with a delta interval of zero + * at this point. For example lets have a watchdog chain of one watchdog + * with a delta interval of one and insert a new one with an initial value + * of one. At the start of the insert procedure it will advance one step + * and reduce its delta interval by one yielding zero. Now a tick happens. + * This will remove the watchdog on the chain and update the insert + * iterator. Now the insert operation continues and will insert the new + * watchdog with a delta interval of zero. 
+ */ + if ( delta > 0 ) { + --delta; + first->delta_interval = delta; + } + + while ( delta == 0 ) { + bool run; + Watchdog_Service_routine_entry routine; + Objects_Id id; + void *user_data; + + run = ( first->state == WATCHDOG_ACTIVE ); + + _Watchdog_Remove_it( header, first ); + + routine = first->routine; + id = first->id; + user_data = first->user_data; + + _Watchdog_Release( header, &lock_context ); + + if ( run ) { + (*routine)( id, user_data ); + } + + _Watchdog_Acquire( header, &lock_context ); + + if ( _Watchdog_Is_empty( header ) ) { + break; + } + + first = _Watchdog_First( header ); + delta = first->delta_interval; + } } - the_watchdog->stop_time = _Watchdog_Ticks_since_boot; - _ISR_Enable( level ); - return( previous_state ); + _Watchdog_Release( header, &lock_context ); } diff --git a/cpukit/score/src/watchdogtick.c b/cpukit/score/src/watchdogtick.c new file mode 100644 index 0000000000..b9bc3f7a9c --- /dev/null +++ b/cpukit/score/src/watchdogtick.c @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#include <rtems/score/schedulerimpl.h> +#include <rtems/score/threadimpl.h> +#include <rtems/score/todimpl.h> +#include <rtems/score/watchdogimpl.h> + +#if HAVE_CONFIG_H +#include "config.h" +#endif + +void _Watchdog_Tick( void ) +{ + _TOD_Tickle_ticks(); + + _Watchdog_Tickle_ticks(); + + _Scheduler_Tick(); + + if ( _Thread_Dispatch_is_enabled() ) + _Thread_Dispatch(); +} diff --git a/cpukit/score/src/watchdogtickle.c b/cpukit/score/src/watchdogtickle.c deleted file mode 100644 index 8c1a3a74b9..0000000000 --- a/cpukit/score/src/watchdogtickle.c +++ /dev/null @@ -1,117 +0,0 @@ -/** - * @file - * - * @ingroup ScoreWatchdog - * @brief Watchdog Tickle - */ - -/* - * COPYRIGHT (c) 1989-1999. - * On-Line Applications Research Corporation (OAR). - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#if HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/score/watchdogimpl.h> -#include <rtems/score/isrlevel.h> - -void _Watchdog_Tickle( - Watchdog_Header *header -) -{ - ISR_Level level; - Watchdog_Control *the_watchdog; - Watchdog_States watchdog_state; - - /* - * See the comment in watchdoginsert.c and watchdogadjust.c - * about why it's safe not to declare header a pointer to - * volatile data - till, 2003/7 - */ - - _ISR_Disable( level ); - - if ( _Watchdog_Is_empty( header ) ) - goto leave; - - the_watchdog = _Watchdog_First( header ); - - /* - * For some reason, on rare occasions the_watchdog->delta_interval - * of the head of the watchdog chain is 0. Before this test was - * added, on these occasions an event (which usually was supposed - * to have a timeout of 1 tick would have a delta_interval of 0, which - * would be decremented to 0xFFFFFFFF by the unprotected - * "the_watchdog->delta_interval--;" operation. 
- * This would mean the event would not timeout, and also the chain would - * be blocked, because a timeout with a very high number would be at the - * head, rather than at the end. - * The test "if (the_watchdog->delta_interval != 0)" - * here prevents this from occuring. - * - * We were not able to categorically identify the situation that causes - * this, but proved it to be true empirically. So this check causes - * correct behaviour in this circumstance. - * - * The belief is that a race condition exists whereby an event at the head - * of the chain is removed (by a pending ISR or higher priority task) - * during the _ISR_Flash( level ); in _Watchdog_Insert, but the watchdog - * to be inserted has already had its delta_interval adjusted to 0, and - * so is added to the head of the chain with a delta_interval of 0. - * - * Steven Johnson - 12/2005 (gcc-3.2.3 -O3 on powerpc) - */ - if (the_watchdog->delta_interval != 0) { - the_watchdog->delta_interval--; - if ( the_watchdog->delta_interval != 0 ) - goto leave; - } - - do { - watchdog_state = _Watchdog_Remove( the_watchdog ); - - _ISR_Enable( level ); - - switch( watchdog_state ) { - case WATCHDOG_ACTIVE: - (*the_watchdog->routine)( - the_watchdog->id, - the_watchdog->user_data - ); - break; - - case WATCHDOG_INACTIVE: - /* - * This state indicates that the watchdog is not on any chain. - * Thus, it is NOT on a chain being tickled. This case should - * never occur. - */ - break; - - case WATCHDOG_BEING_INSERTED: - /* - * This state indicates that the watchdog is in the process of - * BEING inserted on the chain. Thus, it can NOT be on a chain - * being tickled. This case should never occur. 
- */ - break; - - case WATCHDOG_REMOVE_IT: - break; - } - - _ISR_Disable( level ); - - the_watchdog = _Watchdog_First( header ); - } while ( !_Watchdog_Is_empty( header ) && - (the_watchdog->delta_interval == 0) ); - -leave: - _ISR_Enable(level); -} diff --git a/doc/bsp_howto/clock.t b/doc/bsp_howto/clock.t index 396634b067..f58b89850f 100644 --- a/doc/bsp_howto/clock.t +++ b/doc/bsp_howto/clock.t @@ -7,84 +7,282 @@ @section Introduction -The purpose of the clock driver is to provide a steady time -basis to the kernel, so that the RTEMS primitives that need -a clock tick work properly. See the @code{Clock Manager} chapter -of the @b{RTEMS Application C User's Guide} for more details. +The purpose of the clock driver is to provide two services for the operating +system. +@itemize @bullet +@item A steady time basis to the kernel, so that the RTEMS primitives that need +a clock tick work properly. See the @cite{Clock Manager} chapter of the +@cite{RTEMS Application C User's Guide} for more details. +@item An optional time counter to generate timestamps of the uptime and wall +clock time. +@end itemize -The clock driver is located in the @code{clock} directory of the BSP. +The clock driver is usually located in the @file{clock} directory of the BSP. +Clock drivers should use the @dfn{Clock Driver Shell} available via the +@file{clockdrv_shell.h} include file. -@section Clock Driver Global Variables +@section Clock Driver Shell -This section describes the global variables expected to be provided by -this driver. +The @dfn{Clock Driver Shell} include file defines the clock driver functions +declared in @code{#include <rtems/clockdrv.h>} which are used by RTEMS +configuration file @code{#include <rtems/confdefs.h>}. In case the application +configuration defines @code{#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER}, +then the clock driver is registered and should provide its services to the +operating system. 
A hardware specific clock driver must provide some +functions, defines and macros for the @dfn{Clock Driver Shell} which are +explained here step by step. A clock driver file looks in general like this. -@subsection Ticks Counter +@example +/* + * A section with functions, defines and macros to provide hardware specific + * functions for the Clock Driver Shell. + */ + +#include "../../../shared/clockdrv_shell.h" +@end example + +@subsection Initialization + +Depending on the hardware capabilities one out of three clock driver variants +must be selected. +@itemize @bullet +@item The most basic clock driver provides only a periodic interrupt service +routine which calls @code{rtems_clock_tick()}. The interval is determined by +the application configuration via @code{#define +CONFIGURE_MICROSECONDS_PER_TICK} and can be obtained via +@code{rtems_configuration_get_microseconds_per_tick()}. The timestamp +resolution is limited to the clock tick interval. +@item In case the hardware lacks support for a free running counter, then the +module used for the clock tick may provide support for timestamps with a +resolution below the clock tick interval. For this so called simple +timecounters can be used. +@item The desired variant uses a free running counter to provide accurate +timestamps. This variant is mandatory on SMP configurations. +@end itemize -Most of the clock device drivers provide a global variable -that is simply a count of the number of clock driver interrupt service -routines that have occured. This information is valuable when debugging -a system. 
This variable is declared as follows: +@subsubsection Clock Tick Only Variant @example -volatile uint32_t Clock_driver_ticks; +static void some_support_initialize_hardware( void ) +@{ + /* Initialize hardware */ +@} + +#define Clock_driver_support_initialize_hardware() \ + some_support_initialize_hardware() + +/* Indicate that this clock driver lacks a proper timecounter in hardware */ +#define CLOCK_DRIVER_USE_DUMMY_TIMECOUNTER + +#include "../../../shared/clockdrv_shell.h" @end example -@section Initialization +@subsubsection Simple Timecounter Variant + +@example +#include <rtems/timecounter.h> + +static rtems_timecounter_simple some_tc; + +static uint32_t some_tc_get( rtems_timecounter_simple *tc ) +@{ + return some.counter; +@} + +static bool some_tc_is_pending( rtems_timecounter_simple *tc ) +@{ + return some.is_pending; +@} + +static uint32_t some_tc_get_timecount( struct timecounter *tc ) +@{ + return rtems_timecounter_simple_downcounter_get( + tc, + some_tc_get, + some_tc_is_pending + ); +@} + +static void some_tc_tick( void ) +@{ + rtems_timecounter_simple_downcounter_tick( &some_tc, some_tc_get ); +@} + +static void some_support_initialize_hardware( void ) +@{ + uint32_t frequency = 123456; + uint64_t us_per_tick = rtems_configuration_get_microseconds_per_tick(); + uint32_t timecounter_ticks_per_clock_tick = + ( frequency * us_per_tick ) / 1000000; + + /* Initialize hardware */ + + rtems_timecounter_simple_install( + &some_tc, + frequency, + timecounter_ticks_per_clock_tick, + some_tc_get_timecount + ); +@} + +#define Clock_driver_support_initialize_hardware() \ + some_support_initialize_hardware() + +#define Clock_driver_timecounter_tick() \ + some_tc_tick() + +#include "../../../shared/clockdrv_shell.h" +@end example + +@subsubsection Timecounter Variant + +This variant is preferred since it is the most efficient and yields the most +accurate timestamps. It is also mandatory on SMP configurations to obtain +valid timestamps. 
The hardware must provide a periodic interrupt to service +the clock tick and a free running counter for the timecounter. The free +running counter must have a power of two period. The @code{tc_counter_mask} +must be initialized to the free running counter period minus one, e.g. for a +32-bit counter this is 0xffffffff. The @code{tc_get_timecount} function must +return the current counter value (the counter values must increase, so if the +counter counts down, a conversion is necessary). Use +@code{RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER} for the @code{tc_quality}. Set +@code{tc_frequency} to the frequency of the free running counter in Hz. All +other fields of the @code{struct timecounter} must be zero initialized. +Install the initialized timecounter via @code{rtems_timecounter_install()}. -The initialization routine is responsible for -programming the hardware that will periodically -generate an interrupt. A programmable interval timer is commonly -used as the source of the clock tick. +@example +#include <rtems/timecounter.h> + +static struct timecounter some_tc; + +static uint32_t some_tc_get_timecount( struct timecounter *tc ) +@{ + some.free_running_counter; +@} -The device should be programmed such that an interrupt is generated -every @i{m} microseconds, where @i{m} is equal to -@code{rtems_configuration_get_microseconds_per_tick()}. Sometimes -the periodic interval timer can use a prescaler so you have to look -carefully at your user's manual to determine the correct value. +static void some_support_initialize_hardware( void ) +@{ + uint64_t us_per_tick = rtems_configuration_get_microseconds_per_tick(); + uint32_t frequency = 123456; + + /* + * The multiplication must be done in 64-bit arithmetic to avoid an integer + * overflow on targets with a high enough counter frequency. 
+ */ + uint32_t interval = (uint32_t) ( ( frequency * us_per_tick ) / 1000000 ); + + /* + * Initialize hardware and set up a periodic interrupt for the configuration + * based interval. + */ + + some_tc.tc_get_timecount = some_tc_get_timecount; + some_tc.tc_counter_mask = 0xffffffff; + some_tc.tc_frequency = frequency; + some_tc.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER; + rtems_timecounter_install( &some_tc ); +@} -You must use the RTEMS primitive @code{rtems_interrupt_catch} to install -your clock interrupt service routine: +#define Clock_driver_support_initialize_hardware() \ + some_support_initialize_hardware() + +#include "../../../shared/clockdrv_shell.h" +@end example + +@subsection Install Clock Tick Interrupt Service Routine + +The clock driver must provide a function to install the clock tick interrupt +service routine via @code{Clock_driver_support_install_isr()}. @example -rtems_interrupt_catch (Clock_ISR, CLOCK_VECTOR, &old_handler); +#include <bsp/irq.h> +#include <bsp/fatal.h> + +static void some_support_install_isr( rtems_interrupt_handler isr ) +@{ + rtems_status_code sc; + + sc = rtems_interrupt_handler_install( + SOME_IRQ, + "Clock", + RTEMS_INTERRUPT_UNIQUE, + isr, + NULL + ); + if ( sc != RTEMS_SUCCESSFUL ) @{ + bsp_fatal( SOME_FATAL_IRQ_INSTALL ); + @} +@} + +#define Clock_driver_support_install_isr( isr, old ) \ + some_support_install_isr( isr ) + +#include "../../../shared/clockdrv_shell.h" @end example -Since there is currently not a driver entry point invoked at system -shutdown, many clock device drivers use the @code{atexit} routine -to schedule their @code{Clock_exit} routine to execute when the -system is shutdown. +@subsection Support At Tick -By convention, many of the clock drivers do not install the clock -tick if the @code{ticks_per_timeslice} field of the Configuration -Table is 0. +The hardware specific support at tick is specified by +@code{Clock_driver_support_at_tick()}. 
-@section System shutdown +@example +static void some_support_at_tick( void ) +@{ + /* Clear interrupt */ +@} -Many drivers provide the routine @code{Clock_exit} that is scheduled -to be run during system shutdown via the @code{atexit} routine. -The @code{Clock_exit} routine will disable the clock tick source -if it was enabled. This can be used to prevent clock ticks after the -system is shutdown. +#define Clock_driver_support_at_tick() \ + some_support_at_tick() -@section Clock Interrupt Subroutine +#include "../../../shared/clockdrv_shell.h" +@end example -It only has to inform the kernel that a ticker has elapsed, so call : +@subsection System Shutdown Support + +The @dfn{Clock Driver Shell} provides the routine @code{Clock_exit()} that is +scheduled to be run during system shutdown via the @code{atexit()} routine. +The hardware specific shutdown support is specified by +@code{Clock_driver_support_shutdown_hardware()} which is used by +@code{Clock_exit()}. It should disable the clock tick source if it was +enabled. This can be used to prevent clock ticks after the system is shutdown. @example -@group -rtems_isr Clock_isr( rtems_vector_number vector ) +static void some_support_shutdown_hardware( void ) @{ - invoke the rtems_clock_tick() directive to announce the tick - if necessary for this hardware - reload the programmable timer + /* Shutdown hardware */ @} -@end group + +#define Clock_driver_support_shutdown_hardware() \ + some_support_shutdown_hardware() + +#include "../../../shared/clockdrv_shell.h" +@end example + +@subsection Multiple Clock Driver Ticks Per Clock Tick + +In case the hardware needs more than one clock driver tick per clock tick (e.g. +due to a limited range of the hardware timer), then this can be specified with +the optional @code{#define CLOCK_DRIVER_ISRS_PER_TICK} and @code{#define +CLOCK_DRIVER_ISRS_PER_TICK_VALUE} defines. This is currently used only for x86 +and it hopefully remains that way. 
+ +@example +/* Enable multiple clock driver ticks per clock tick */ +#define CLOCK_DRIVER_ISRS_PER_TICK 1 + +/* Specifiy the clock driver ticks per clock tick value */ +#define CLOCK_DRIVER_ISRS_PER_TICK_VALUE 123 + +#include "../../../shared/clockdrv_shell.h" @end example -@section IO Control +@subsection Clock Driver Ticks Counter -Prior to RTEMS 4.9, the Shared Memory MPCI Driver required a special -IOCTL in the Clock Driver. This is no longer required and the Clock -Driver does not have to provide an IOCTL method at all. +The @dfn{Clock Driver Shell} provide a global variable that is simply a count +of the number of clock driver interrupt service routines that have occurred. +This information is valuable when debugging a system. This variable is +declared as follows: +@example +volatile uint32_t Clock_driver_ticks; +@end example diff --git a/doc/cpu_supplement/Makefile.am b/doc/cpu_supplement/Makefile.am index 300ff786cc..06ebf48970 100644 --- a/doc/cpu_supplement/Makefile.am +++ b/doc/cpu_supplement/Makefile.am @@ -15,6 +15,7 @@ GENERATED_FILES += general.texi GENERATED_FILES += arm.texi GENERATED_FILES += avr.texi GENERATED_FILES += bfin.texi +GENERATED_FILES += epiphany.texi GENERATED_FILES += h8300.texi GENERATED_FILES += i386.texi GENERATED_FILES += lm32.texi @@ -62,6 +63,11 @@ bfin.texi: bfin.t -u "Top" \ -n "" < $< > $@ +epiphany.texi: epiphany.t + $(BMENU2) -p "" \ + -u "Top" \ + -n "" < $< > $@ + h8300.texi: h8300.t $(BMENU2) -p "" \ -u "Top" \ diff --git a/doc/cpu_supplement/cpu_supplement.texi b/doc/cpu_supplement/cpu_supplement.texi index 5c484d031f..105a54e9a4 100644 --- a/doc/cpu_supplement/cpu_supplement.texi +++ b/doc/cpu_supplement/cpu_supplement.texi @@ -65,6 +65,7 @@ * ARM Specific Information:: * Atmel AVR Specific Information:: * Blackfin Specific Information:: +* Epiphany Specific Information:: * Renesas H8/300 Specific Information:: * Intel/AMD x86 Specific Information:: * Lattice Mico32 Specific Information:: @@ -89,6 +90,7 @@ @include 
arm.texi @include avr.texi @include bfin.texi +@include epiphany.texi @include h8300.texi @include i386.texi @include lm32.texi diff --git a/doc/cpu_supplement/epiphany.t b/doc/cpu_supplement/epiphany.t new file mode 100644 index 0000000000..a0976cf3b6 --- /dev/null +++ b/doc/cpu_supplement/epiphany.t @@ -0,0 +1,75 @@ +@c +@c Copyright (c) 2015 University of York. +@c Hesham ALMatary <hmka501@york.ac.uk> + +@ifinfo +@end ifinfo +@chapter Epiphany Specific Information + +This chapter discusses the +@uref{http://adapteva.com/docs/epiphany_sdk_ref.pdf, Epiphany Architecture} +dependencies in this port of RTEMS. Epiphany is a chip that can come with 16 and +64 cores, each of which can run RTEMS separately or they can work together to +run a SMP RTEMS application. + +@subheading Architecture Documents + +For information on the Epiphany architecture refer to the +@uref{http://adapteva.com/docs/epiphany_arch_ref.pdf,Epiphany Architecture Reference}. + +@section Calling Conventions + +Please refer to the +@uref{http://adapteva.com/docs/epiphany_sdk_ref.pdf, Epiphany SDK} +Appendix A: Application Binary Interface + +@subsection Floating Point Unit + +A floating point unit is currently not supported. + +@section Memory Model + +A flat 32-bit memory model is supported, no caches. Each core has its own 32 KiB +strictly ordered local memory along with an access to a shared 32 MiB external +DRAM. + +@section Interrupt Processing + +Every Epiphany core has 10 exception types: + +@itemize @bullet + +@item Reset +@item Software Exception +@item Data Page Fault +@item Timer 0 +@item Timer 1 +@item Message Interrupt +@item DMA0 Interrupt +@item DMA1 Interrupt +@item WANT Interrupt +@item User Interrupt + +@end itemize + +@subsection Interrupt Levels + +There are only two levels: interrupts enabled and interrupts disabled. + +@subsection Interrupt Stack + +The Epiphany RTEMS port uses a dedicated software interrupt stack. 
+The stack for interrupts is allocated during interrupt driver initialization. +When an interrupt is entered, the _ISR_Handler routine is responsible for +switching from the interrupted task stack to RTEMS software interrupt stack. + +@section Default Fatal Error Processing + +The default fatal error handler for this architecture performs the +following actions: + +@itemize @bullet +@item disables operating system supported interrupts (IRQ), +@item places the error code in @code{r0}, and +@item executes an infinite loop to simulate a halt processor instruction. +@end itemize diff --git a/doc/cpu_supplement/sparc.t b/doc/cpu_supplement/sparc.t index d21e9feef1..740643a4d3 100644 --- a/doc/cpu_supplement/sparc.t +++ b/doc/cpu_supplement/sparc.t @@ -425,10 +425,15 @@ f4, ... f30) f8, ... f28) @end itemize -The floating point status register (fpsr) specifies +The floating point status register (FSR) specifies the behavior of the floating point unit for rounding, contains its condition codes, version specification, and trap information. +According to the ABI all floating point registers and the floating point status +register (FSR) are volatile. Thus the floating point context of a thread is the +empty set. The rounding direction is a system global state and must not be +modified by threads. + A queue of the floating point instructions which have started execution but not yet completed is maintained. This queue is needed to support the multiple cycle nature of floating diff --git a/doc/user/msg.t b/doc/user/msg.t index f58d677150..eb3cb3561a 100644 --- a/doc/user/msg.t +++ b/doc/user/msg.t @@ -50,7 +50,10 @@ wait for a message to arrive at a queue. Also, a task may poll a queue for the arrival of a message. The maximum length message which can be sent is set -on a per message queue basis. +on a per message queue basis. The message content must be copied in general +to/from an internal buffer of the message queue or directly to a peer in +certain cases. 
This copy operation is performed with interrupts disabled. So +it is advisable to keep the messages as short as possible. @subsection Building a Message Queue Attribute Set diff --git a/testsuites/psxtests/psxualarm/init.c b/testsuites/psxtests/psxualarm/init.c index 03e03b9649..ffe4720b50 100644 --- a/testsuites/psxtests/psxualarm/init.c +++ b/testsuites/psxtests/psxualarm/init.c @@ -86,9 +86,9 @@ void *POSIX_Init( act.sa_handler = Signal_handler; act.sa_flags = 0; sigaction( SIGALRM, &act, NULL ); - puts( "Init: ualarm in 1 us" ); + puts( "Init: ualarm in 100000 us" ); sleep(3); - result = ualarm(1,0); + result = ualarm(100000,0); rtems_test_assert( result == 0 ); status = sleep(10); diff --git a/testsuites/smptests/smpscheduler03/init.c b/testsuites/smptests/smpscheduler03/init.c index d919482c8a..3a068a24b7 100644 --- a/testsuites/smptests/smpscheduler03/init.c +++ b/testsuites/smptests/smpscheduler03/init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * Copyright (c) 2014-2015 embedded brains GmbH. All rights reserved. * * embedded brains GmbH * Dornierstr. 
4 @@ -40,6 +40,30 @@ typedef struct { static test_context test_instance; +static bool change_priority_filter( + Thread_Control *thread, + Priority_Control *new_priority, + void *arg +) +{ + return thread->current_priority != *new_priority; +} + +static void change_priority( + Thread_Control *thread, + Priority_Control new_priority, + bool prepend_it +) +{ + _Thread_Change_priority( + thread, + new_priority, + NULL, + change_priority_filter, + prepend_it + ); +} + static void barrier_wait(test_context *ctx) { rtems_status_code sc; @@ -95,12 +119,16 @@ static void test_case_change_priority( Scheduler_SMP_Node_state new_state ) { + Per_CPU_Control *cpu_self; + + cpu_self = _Thread_Dispatch_disable(); + switch (start_state) { case SCHEDULER_SMP_NODE_SCHEDULED: - _Thread_Change_priority(executing, 1, true); + change_priority(executing, 1, true); break; case SCHEDULER_SMP_NODE_READY: - _Thread_Change_priority(executing, 4, true); + change_priority(executing, 4, true); break; default: rtems_test_assert(0); @@ -108,8 +136,13 @@ static void test_case_change_priority( } rtems_test_assert(node->state == start_state); - _Thread_Change_priority(executing, prio, prepend_it); + change_priority(executing, prio, prepend_it); rtems_test_assert(node->state == new_state); + + change_priority(executing, 1, true); + rtems_test_assert(node->state == SCHEDULER_SMP_NODE_SCHEDULED); + + _Thread_Dispatch_enable( cpu_self ); } static const Scheduler_SMP_Node_state states[2] = { @@ -132,11 +165,8 @@ static void test_change_priority(void) size_t k; task_id = start_task(3); - - _Thread_Disable_dispatch(); - - executing = _Thread_Executing; - node = _Scheduler_SMP_Thread_get_node( executing ); + executing = _Thread_Get_executing(); + node = _Scheduler_SMP_Thread_get_node(executing); for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) { for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) { @@ -153,11 +183,6 @@ static void test_change_priority(void) } } - _Thread_Change_priority(executing, 1, true); - 
rtems_test_assert(node->state == SCHEDULER_SMP_NODE_SCHEDULED); - - _Thread_Enable_dispatch(); - sc = rtems_task_delete(task_id); rtems_test_assert(sc == RTEMS_SUCCESSFUL); } @@ -196,13 +221,16 @@ static void test_case_change_priority_op( ) { Thread_Control *needs_help; + Per_CPU_Control *cpu_self; + + cpu_self = _Thread_Dispatch_disable(); switch (start_state) { case SCHEDULER_SMP_NODE_SCHEDULED: - _Thread_Change_priority(executing, 1, true); + change_priority(executing, 1, true); break; case SCHEDULER_SMP_NODE_READY: - _Thread_Change_priority(executing, 4, true); + change_priority(executing, 4, true); break; default: rtems_test_assert(0); @@ -228,6 +256,11 @@ static void test_case_change_priority_op( } else { rtems_test_assert(needs_help == NULL); } + + change_priority(executing, 1, true); + rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED); + + _Thread_Dispatch_enable( cpu_self ); } static void test_change_priority_op(void) @@ -242,10 +275,7 @@ static void test_change_priority_op(void) size_t k; task_id = start_task(3); - - _Thread_Disable_dispatch(); - - executing = _Thread_Executing; + executing = _Thread_Get_executing(); executing_node = _Scheduler_SMP_Thread_get_node(executing); other = get_thread_by_id(task_id); @@ -266,11 +296,6 @@ static void test_change_priority_op(void) } } - _Thread_Change_priority(executing, 1, true); - rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED); - - _Thread_Enable_dispatch(); - sc = rtems_task_delete(task_id); rtems_test_assert(sc == RTEMS_SUCCESSFUL); } @@ -279,11 +304,11 @@ static Thread_Control *yield_op(Thread_Control *thread) { const Scheduler_Control *scheduler = _Scheduler_Get(thread); Thread_Control *needs_help; - ISR_Level level; + ISR_lock_Context lock_context; - _ISR_Disable( level ); + _Scheduler_Acquire(thread, &lock_context); needs_help = (*scheduler->Operations.yield)(scheduler, thread); - _ISR_Enable( level ); + _Scheduler_Release(thread, &lock_context); return 
needs_help; } @@ -297,20 +322,23 @@ static void test_case_yield_op( ) { Thread_Control *needs_help; + Per_CPU_Control *cpu_self; + + cpu_self = _Thread_Dispatch_disable(); - _Thread_Change_priority(executing, 4, false); - _Thread_Change_priority(other, 4, false); + change_priority(executing, 4, false); + change_priority(other, 4, false); switch (start_state) { case SCHEDULER_SMP_NODE_SCHEDULED: switch (new_state) { case SCHEDULER_SMP_NODE_SCHEDULED: - _Thread_Change_priority(executing, 2, false); - _Thread_Change_priority(other, 3, false); + change_priority(executing, 2, false); + change_priority(other, 3, false); break; case SCHEDULER_SMP_NODE_READY: - _Thread_Change_priority(executing, 2, false); - _Thread_Change_priority(other, 2, false); + change_priority(executing, 2, false); + change_priority(other, 2, false); break; default: rtems_test_assert(0); @@ -323,8 +351,8 @@ static void test_case_yield_op( rtems_test_assert(0); break; case SCHEDULER_SMP_NODE_READY: - _Thread_Change_priority(executing, 3, false); - _Thread_Change_priority(other, 2, false); + change_priority(executing, 3, false); + change_priority(other, 2, false); break; default: rtems_test_assert(0); @@ -355,6 +383,11 @@ static void test_case_yield_op( } else { rtems_test_assert(needs_help == NULL); } + + change_priority(executing, 1, true); + rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED); + + _Thread_Dispatch_enable( cpu_self ); } static void test_yield_op(void) @@ -368,10 +401,7 @@ static void test_yield_op(void) size_t j; task_id = start_task(2); - - _Thread_Disable_dispatch(); - - executing = _Thread_Executing; + executing = _Thread_Get_executing(); executing_node = _Scheduler_SMP_Thread_get_node(executing); other = get_thread_by_id(task_id); @@ -393,11 +423,6 @@ static void test_yield_op(void) } } - _Thread_Change_priority(executing, 1, true); - rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED); - - _Thread_Enable_dispatch(); - sc = 
rtems_task_delete(task_id); rtems_test_assert(sc == RTEMS_SUCCESSFUL); } @@ -433,14 +458,17 @@ static void test_case_unblock_op( ) { Thread_Control *needs_help; + Per_CPU_Control *cpu_self; + + cpu_self = _Thread_Dispatch_disable(); switch (new_state) { case SCHEDULER_SMP_NODE_SCHEDULED: - _Thread_Change_priority(executing, 2, false); + change_priority(executing, 2, false); rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED); break; case SCHEDULER_SMP_NODE_READY: - _Thread_Change_priority(executing, 4, false); + change_priority(executing, 4, false); rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_READY); break; default: @@ -465,6 +493,11 @@ static void test_case_unblock_op( rtems_test_assert(0); break; } + + change_priority(executing, 1, true); + rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED); + + _Thread_Dispatch_enable( cpu_self ); } static void test_unblock_op(void) @@ -477,10 +510,7 @@ static void test_unblock_op(void) size_t i; task_id = start_task(3); - - _Thread_Disable_dispatch(); - - executing = _Thread_Executing; + executing = _Thread_Get_executing(); executing_node = _Scheduler_SMP_Thread_get_node(executing); other = get_thread_by_id(task_id); @@ -494,11 +524,6 @@ static void test_unblock_op(void) ); } - _Thread_Change_priority(executing, 1, true); - rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED); - - _Thread_Enable_dispatch(); - sc = rtems_task_delete(task_id); rtems_test_assert(sc == RTEMS_SUCCESSFUL); } diff --git a/testsuites/sptests/Makefile.am b/testsuites/sptests/Makefile.am index 9025ff3535..c3fc443da0 100644 --- a/testsuites/sptests/Makefile.am +++ b/testsuites/sptests/Makefile.am @@ -38,6 +38,8 @@ else _SUBDIRS += sp29 endif _SUBDIRS += spintrcritical23 +_SUBDIRS += sptimecounter01 +_SUBDIRS += sptimecounter02 _SUBDIRS += spatomic01 _SUBDIRS += spintrcritical22 _SUBDIRS += spsem03 diff --git a/testsuites/sptests/configure.ac b/testsuites/sptests/configure.ac 
index ae3c763848..b8287a4ea7 100644 --- a/testsuites/sptests/configure.ac +++ b/testsuites/sptests/configure.ac @@ -41,6 +41,8 @@ AM_CONDITIONAL(HAS_SMP,test "$rtems_cv_RTEMS_SMP" = "yes") # Explicitly list all Makefiles here AC_CONFIG_FILES([Makefile spintrcritical23/Makefile +sptimecounter01/Makefile +sptimecounter02/Makefile spatomic01/Makefile spglobalcon01/Makefile spintrcritical22/Makefile diff --git a/testsuites/sptests/sp37/init.c b/testsuites/sptests/sp37/init.c index aaaf68426b..647485e555 100644 --- a/testsuites/sptests/sp37/init.c +++ b/testsuites/sptests/sp37/init.c @@ -431,11 +431,7 @@ rtems_timer_service_routine test_unblock_task( _Thread_Disable_dispatch(); status = rtems_task_resume( blocked_task_id ); _Thread_Unnest_dispatch(); -#if defined( RTEMS_SMP ) - directive_failed_with_level( status, "rtems_task_resume", 1 ); -#else directive_failed( status, "rtems_task_resume" ); -#endif } rtems_task Init( diff --git a/testsuites/sptests/spclock_err01/init.c b/testsuites/sptests/spclock_err01/init.c index ab5c00ab1e..087c8d4a05 100644 --- a/testsuites/sptests/spclock_err01/init.c +++ b/testsuites/sptests/spclock_err01/init.c @@ -115,14 +115,6 @@ rtems_task Init( puts( "TA1 - rtems_clock_get_tod_timeval - RTEMS_NOT_DEFINED" ); } - puts( "TA1 - rtems_clock_set_nanoseconds_extension - RTEMS_INVALID_ADDRESS" ); - status = rtems_clock_set_nanoseconds_extension( NULL ); - fatal_directive_status( - status, - RTEMS_INVALID_ADDRESS, - "rtems_clock_set_nanoseconds_extension NULL param" - ); - /* NULL parameter */ status = rtems_clock_set( NULL ); fatal_directive_status( diff --git a/testsuites/sptests/spintrcritical01/init.c b/testsuites/sptests/spintrcritical01/init.c index dc36aee4fd..b7cfee42f8 100644 --- a/testsuites/sptests/spintrcritical01/init.c +++ b/testsuites/sptests/spintrcritical01/init.c @@ -14,12 +14,7 @@ #include <tmacros.h> #include <intrcritical.h> -#include <rtems/rtems/semimpl.h> - -/* forward declarations to avoid warnings */ -rtems_task 
Init(rtems_task_argument argument); -rtems_timer_service_routine test_release_from_isr(rtems_id timer, void *arg); -Thread_blocking_operation_States getState(void); +#include <rtems/score/threadimpl.h> #if defined(FIFO_NO_TIMEOUT) #define TEST_NAME "1" @@ -58,33 +53,28 @@ Thread_blocking_operation_States getState(void); const char rtems_test_name[] = "SPINTRCRITICAL " TEST_NAME; -rtems_id Semaphore; -volatile bool case_hit = false; +static Thread_Control *thread; + +static rtems_id Semaphore; + +static bool case_hit; -Thread_blocking_operation_States getState(void) +static bool interrupts_blocking_op(void) { - Objects_Locations location; - Semaphore_Control *sem; - - sem = (Semaphore_Control *)_Objects_Get( - &_Semaphore_Information, Semaphore, &location ); - if ( location != OBJECTS_LOCAL ) { - puts( "Bad object lookup" ); - rtems_test_exit(0); - } - _Thread_Unnest_dispatch(); + Thread_Wait_flags flags = _Thread_Wait_flags_get( thread ); - return sem->Core_control.semaphore.Wait_queue.sync_state; + return + flags == ( THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK ); } -rtems_timer_service_routine test_release_from_isr( +static rtems_timer_service_routine test_release_from_isr( rtems_id timer, void *arg ) { rtems_status_code status; - if ( getState() == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED ) { + if ( interrupts_blocking_op() ) { case_hit = true; } @@ -109,7 +99,7 @@ static bool test_body( void *arg ) return case_hit; } -rtems_task Init( +static rtems_task Init( rtems_task_argument ignored ) { @@ -117,6 +107,8 @@ rtems_task Init( TEST_BEGIN(); + thread = _Thread_Get_executing(); + puts( "Init - Trying to generate semaphore release from ISR while blocking" ); puts( "Init - Variation is: " TEST_STRING ); status = rtems_semaphore_create( diff --git a/testsuites/sptests/spintrcritical08/init.c b/testsuites/sptests/spintrcritical08/init.c index 13544b24c7..f375cd49f3 100644 --- a/testsuites/sptests/spintrcritical08/init.c +++ 
b/testsuites/sptests/spintrcritical08/init.c @@ -55,7 +55,7 @@ static rtems_timer_service_routine test_release_from_isr( watchdog->delta_interval == 0 && watchdog->routine == _Rate_monotonic_Timeout ) { - Watchdog_States state = _Watchdog_Remove( watchdog ); + Watchdog_States state = _Watchdog_Remove_ticks( watchdog ); rtems_test_assert( state == WATCHDOG_ACTIVE ); (*watchdog->routine)( watchdog->id, watchdog->user_data ); diff --git a/testsuites/sptests/spintrcritical09/init.c b/testsuites/sptests/spintrcritical09/init.c index 2f9caa5ad6..cc119e88c1 100644 --- a/testsuites/sptests/spintrcritical09/init.c +++ b/testsuites/sptests/spintrcritical09/init.c @@ -14,28 +14,22 @@ #include <tmacros.h> #include <intrcritical.h> -#include <rtems/rtems/semimpl.h> +#include <rtems/score/threadimpl.h> #include <rtems/score/watchdogimpl.h> const char rtems_test_name[] = "SPINTRCRITICAL 9"; +static Thread_Control *thread; + static rtems_id Semaphore; -static bool case_hit = false; -static Thread_blocking_operation_States getState(void) +static bool case_hit; + +static bool is_interrupt_timeout(void) { - Objects_Locations location; - Semaphore_Control *sem; - - sem = (Semaphore_Control *)_Objects_Get( - &_Semaphore_Information, Semaphore, &location ); - if ( location != OBJECTS_LOCAL ) { - puts( "Bad object lookup" ); - rtems_test_exit(0); - } - _Thread_Unnest_dispatch(); + Thread_Wait_flags flags = _Thread_Wait_flags_get( thread ); - return sem->Core_control.semaphore.Wait_queue.sync_state; + return flags == ( THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN ); } static rtems_timer_service_routine test_release_from_isr( @@ -50,14 +44,14 @@ static rtems_timer_service_routine test_release_from_isr( if ( watchdog->delta_interval == 0 - && watchdog->routine == _Thread_queue_Timeout + && watchdog->routine == _Thread_Timeout ) { - Watchdog_States state = _Watchdog_Remove( watchdog ); + Watchdog_States state = _Watchdog_Remove_ticks( watchdog ); rtems_test_assert( state == 
WATCHDOG_ACTIVE ); (*watchdog->routine)( watchdog->id, watchdog->user_data ); - if ( getState() == THREAD_BLOCKING_OPERATION_TIMEOUT ) { + if ( is_interrupt_timeout() ) { case_hit = true; } } @@ -81,6 +75,8 @@ static rtems_task Init( TEST_BEGIN(); + thread = _Thread_Get_executing(); + puts( "Init - Test may not be able to detect case is hit reliably" ); puts( "Init - Trying to generate timeout from ISR while blocking" ); sc = rtems_semaphore_create( diff --git a/testsuites/sptests/spintrcritical10/init.c b/testsuites/sptests/spintrcritical10/init.c index 441b161b5b..e4a2a940a6 100644 --- a/testsuites/sptests/spintrcritical10/init.c +++ b/testsuites/sptests/spintrcritical10/init.c @@ -78,7 +78,7 @@ static void any_satisfy_before_timeout(rtems_id timer, void *arg) ); rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL); - _Event_Timeout(0, thread); + _Thread_Timeout(0, thread); rtems_test_assert( *(rtems_event_set *) thread->Wait.return_argument == GREEN @@ -88,7 +88,7 @@ static void any_satisfy_before_timeout(rtems_id timer, void *arg) if (ctx->hit) { rtems_test_assert( _Thread_Wait_flags_get(thread) - == (THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTERRUPT_SATISFIED) + == (THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN) ); } @@ -175,7 +175,7 @@ static void all_satisfy_before_timeout(rtems_id timer, void *arg) ); rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL); - _Event_Timeout(0, thread); + _Thread_Timeout(0, thread); rtems_test_assert( *(rtems_event_set *) thread->Wait.return_argument == EVENTS @@ -185,7 +185,7 @@ static void all_satisfy_before_timeout(rtems_id timer, void *arg) if (ctx->hit) { rtems_test_assert( _Thread_Wait_flags_get(thread) - == (THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTERRUPT_SATISFIED) + == (THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN) ); } @@ -251,7 +251,7 @@ static void timeout_before_satisfied(rtems_id timer, void *arg) ); rtems_test_assert(thread->Wait.return_code == 
RTEMS_SUCCESSFUL); - _Event_Timeout(0, thread); + _Thread_Timeout(0, thread); rtems_test_assert( *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF @@ -269,7 +269,7 @@ static void timeout_before_satisfied(rtems_id timer, void *arg) if (ctx->hit) { rtems_test_assert( _Thread_Wait_flags_get(thread) - == (THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTERRUPT_TIMEOUT) + == (THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN) ); } diff --git a/testsuites/sptests/spintrcritical16/init.c b/testsuites/sptests/spintrcritical16/init.c index 08eeb8b9b4..a094b419b3 100644 --- a/testsuites/sptests/spintrcritical16/init.c +++ b/testsuites/sptests/spintrcritical16/init.c @@ -14,47 +14,36 @@ #include <tmacros.h> #include <intrcritical.h> -#include <rtems/rtems/semimpl.h> +#include <rtems/score/threadimpl.h> const char rtems_test_name[] = "SPINTRCRITICAL 16"; -/* forward declarations to avoid warnings */ -rtems_task Init(rtems_task_argument argument); -rtems_timer_service_routine test_release_from_isr(rtems_id timer, void *arg); -Thread_blocking_operation_States getState(void); +static Thread_Control *Main_TCB; -Thread_Control *Main_TCB; -rtems_id Semaphore; -volatile bool case_hit = false; +static rtems_id Semaphore; -Thread_blocking_operation_States getState(void) +static bool case_hit; + +static bool interrupts_blocking_op(void) { - Objects_Locations location; - Semaphore_Control *sem; - - sem = (Semaphore_Control *)_Objects_Get( - &_Semaphore_Information, Semaphore, &location ); - if ( location != OBJECTS_LOCAL ) { - puts( "Bad object lookup" ); - rtems_test_exit(0); - } - _Thread_Unnest_dispatch(); + Thread_Wait_flags flags = _Thread_Wait_flags_get( Main_TCB ); - return sem->Core_control.semaphore.Wait_queue.sync_state; + return + flags == ( THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK ); } -rtems_timer_service_routine test_release_from_isr( +static rtems_timer_service_routine test_release_from_isr( rtems_id timer, void *arg ) { - if ( 
getState() == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED ) { + if ( interrupts_blocking_op() ) { case_hit = true; (void) rtems_semaphore_release( Semaphore ); } if ( Main_TCB->Wait.queue != NULL ) { - _Thread_queue_Process_timeout( Main_TCB ); + _Thread_Timeout( 0, Main_TCB ); } } @@ -70,7 +59,7 @@ static bool test_body( void *arg ) return case_hit; } -rtems_task Init( +static rtems_task Init( rtems_task_argument ignored ) { diff --git a/testsuites/sptests/spintrcritical17/init.c b/testsuites/sptests/spintrcritical17/init.c index 9dde48a06b..238493e71a 100644 --- a/testsuites/sptests/spintrcritical17/init.c +++ b/testsuites/sptests/spintrcritical17/init.c @@ -1,10 +1,11 @@ /* - * Copyright (c) 2009 - * embedded brains GmbH - * Obere Lagerstr. 30 - * D-82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Copyright (c) 2009-2014 embedded brains GmbH. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -22,141 +23,91 @@ const char rtems_test_name[] = "SPINTRCRITICAL 17"; -/* forward declarations to avoid warnings */ -rtems_task Init(rtems_task_argument argument); - -#define TIMER_COUNT 4 - -#define TIMER_TRIGGER 0 -#define TIMER_RESET 1 -#define TIMER_NEVER_INTERVAL 2 -#define TIMER_NEVER_TOD 3 - -static rtems_id timer [TIMER_COUNT]; +typedef struct { + rtems_id timer1; + rtems_id timer2; + bool done; +} test_context; -static rtems_time_of_day tod; - -static volatile bool case_hit; - -static void never_callback(rtems_id timer, void *arg) -{ - rtems_test_assert(false); -} +static test_context ctx_instance; -static void reset_tod_timer(void) +static void never(rtems_id timer_id, void *arg) { - rtems_status_code sc = RTEMS_SUCCESSFUL; - - sc = rtems_timer_server_fire_when( - timer [TIMER_NEVER_TOD], - &tod, - never_callback, - NULL - ); - directive_failed_with_level(sc, 
"rtems_timer_server_fire_after", -1); + rtems_test_assert(0); } -static void reset_callback(rtems_id timer_id, void *arg) +static void fire(rtems_id timer_id, void *arg) { - rtems_status_code sc = RTEMS_SUCCESSFUL; - - sc = rtems_timer_reset(timer [TIMER_RESET]); - directive_failed_with_level(sc, "rtems_timer_reset", -1); - - sc = rtems_timer_reset(timer [TIMER_NEVER_INTERVAL]); - directive_failed_with_level(sc, "rtems_timer_reset", -1); - - reset_tod_timer(); - - if (!case_hit) { - case_hit = _Timer_server->insert_chain != NULL; + /* The arg is NULL */ + test_context *ctx = &ctx_instance; + rtems_status_code sc; + + if (!ctx->done) { + ctx->done = + _Timer_server->Interval_watchdogs.system_watchdog_helper != NULL; + + if (ctx->done) { + sc = rtems_timer_server_fire_after(ctx->timer2, 100, never, NULL); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); + } } } -static void trigger_callback(rtems_id timer_id, void *arg) +static bool test_body(void *arg) { - rtems_status_code sc = RTEMS_SUCCESSFUL; + test_context *ctx = arg; + rtems_status_code sc; - if (case_hit) { - TEST_END(); + sc = rtems_timer_reset(ctx->timer1); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); - rtems_test_exit(0); - } else if (interrupt_critical_section_test_support_delay()) { - puts("test case not hit, give up"); - - rtems_test_exit(0); - } - - sc = rtems_timer_reset(timer [TIMER_TRIGGER]); - directive_failed(sc, "rtems_timer_reset"); + return ctx->done; } -rtems_task Init( rtems_task_argument ignored ) +static void Init(rtems_task_argument ignored) { - rtems_status_code sc = RTEMS_SUCCESSFUL; - size_t i = 0; + test_context *ctx = &ctx_instance; + rtems_status_code sc; TEST_BEGIN(); - build_time(&tod, 4, 12, 2009, 9, 34, 11, 0); - sc = rtems_clock_set(&tod); - directive_failed(sc, "rtems_clock_set"); - - ++tod.year; + sc = rtems_timer_create( + rtems_build_name('T', 'I', 'M', '1'), + &ctx->timer1 + ); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); - for (i = 0; i < TIMER_COUNT; ++i) { - sc = 
rtems_timer_create( - rtems_build_name('T', 'I', 'M', '0' + i), - &timer [i] - ); - directive_failed(sc, "rtems_timer_create"); - } + sc = rtems_timer_create( + rtems_build_name('T', 'I', 'M', '2'), + &ctx->timer2 + ); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); sc = rtems_timer_initiate_server( RTEMS_MINIMUM_PRIORITY, RTEMS_MINIMUM_STACK_SIZE, RTEMS_DEFAULT_ATTRIBUTES ); - directive_failed(sc, "rtems_timer_initiate_server"); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); - sc = rtems_timer_server_fire_after( - timer [TIMER_NEVER_INTERVAL], - 2, - never_callback, - NULL - ); - directive_failed(sc, "rtems_timer_server_fire_after"); - - reset_tod_timer(); - - sc = rtems_timer_fire_after( - timer [TIMER_RESET], - 1, - reset_callback, - NULL - ); - directive_failed(sc, "rtems_timer_fire_after"); - - sc = rtems_timer_server_fire_after( - timer [TIMER_TRIGGER], - 1, - trigger_callback, - NULL - ); - directive_failed(sc, "rtems_timer_server_fire_after"); + sc = rtems_timer_server_fire_after(ctx->timer1, 1000, never, NULL); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); - interrupt_critical_section_test_support_initialize(NULL); + interrupt_critical_section_test(test_body, ctx, fire); + rtems_test_assert(ctx->done); - rtems_task_delete(RTEMS_SELF); + TEST_END(); + rtems_test_exit(0); } #define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER #define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER -#define CONFIGURE_MICROSECONDS_PER_TICK 2000 +#define CONFIGURE_MICROSECONDS_PER_TICK 1000 #define CONFIGURE_MAXIMUM_TASKS 2 -#define CONFIGURE_MAXIMUM_TIMERS 4 +#define CONFIGURE_MAXIMUM_TIMERS 3 +#define CONFIGURE_MAXIMUM_USER_EXTENSIONS 1 #define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION diff --git a/testsuites/sptests/spintrcritical17/spintrcritical17.doc b/testsuites/sptests/spintrcritical17/spintrcritical17.doc index 3be8e60c52..809a9669d6 100644 --- a/testsuites/sptests/spintrcritical17/spintrcritical17.doc +++ b/testsuites/sptests/spintrcritical17/spintrcritical17.doc @@ 
-1,4 +1,4 @@ -# Copyright (c) 2009 embedded brains GmbH. +# Copyright (c) 2009-2015 embedded brains GmbH. # # The license and distribution terms for this file may be # found in the file LICENSE in this distribution or at @@ -11,9 +11,7 @@ test set name: spintrcritical17 directives: - _Timer_server_Get_watchdogs_that_fire_now - _Timer_server_Schedule_operation_method - _Timer_server_Process_insertions + _Timer_server_Update_system_watchdog concepts: diff --git a/testsuites/sptests/spintrcritical20/init.c b/testsuites/sptests/spintrcritical20/init.c index daa8ac7f7e..7e52211742 100644 --- a/testsuites/sptests/spintrcritical20/init.c +++ b/testsuites/sptests/spintrcritical20/init.c @@ -44,6 +44,10 @@ static void semaphore_task(rtems_task_argument arg) test_context *ctx = (test_context *) arg; ctx->semaphore_task_tcb = _Thread_Get_executing(); + _Thread_Wait_set_timeout_code( + ctx->semaphore_task_tcb, + CORE_SEMAPHORE_TIMEOUT + ); while (true) { rtems_status_code sc = rtems_semaphore_obtain( @@ -87,7 +91,7 @@ static bool test_body(void *arg) ctx->thread_queue_was_null = true; } - _Thread_queue_Process_timeout(ctx->semaphore_task_tcb); + _Thread_Timeout(0, ctx->semaphore_task_tcb); switch (ctx->semaphore_task_tcb->Wait.return_code) { case CORE_SEMAPHORE_STATUS_SUCCESSFUL: @@ -103,7 +107,9 @@ static bool test_body(void *arg) _Thread_Enable_dispatch(); - return false; + return ctx->thread_queue_was_null + && ctx->status_was_successful + && ctx->status_was_timeout; } static void Init(rtems_task_argument ignored) diff --git a/testsuites/sptests/spintrcritical22/init.c b/testsuites/sptests/spintrcritical22/init.c index 93946c39c5..1a377f7838 100644 --- a/testsuites/sptests/spintrcritical22/init.c +++ b/testsuites/sptests/spintrcritical22/init.c @@ -52,14 +52,18 @@ static void release_semaphore(rtems_id timer, void *arg) rtems_status_code sc; CORE_mutex_Control *mtx = &ctx->semaphore_control->Core_control.mutex; - if (mtx->Wait_queue.sync_state == 
THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED) { + if ( + _Thread_Wait_flags_get(ctx->main_task_control) + == (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK) + ) { ctx->done = true; sc = rtems_semaphore_release(ctx->semaphore_id); rtems_test_assert(sc == RTEMS_SUCCESSFUL); rtems_test_assert( - mtx->Wait_queue.sync_state == THREAD_BLOCKING_OPERATION_SATISFIED + _Thread_Wait_flags_get(ctx->main_task_control) + == (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN) ); rtems_test_assert(mtx->nest_count == 1); rtems_test_assert(mtx->holder == ctx->main_task_control); diff --git a/testsuites/sptests/spintrcritical23/init.c b/testsuites/sptests/spintrcritical23/init.c index 8536489f5d..89fea25a12 100644 --- a/testsuites/sptests/spintrcritical23/init.c +++ b/testsuites/sptests/spintrcritical23/init.c @@ -70,7 +70,7 @@ static void change_priority(rtems_id timer, void *arg) rtems_interrupt_lock_acquire(&ctx->lock, &lock_context); if ( - ctx->priority_generation != ctx->tcb->Priority.generation + ctx->priority_generation != ctx->tcb->priority_generation && scheduler_node_unchanged(ctx) ) { rtems_task_priority priority_interrupt; @@ -113,7 +113,7 @@ static bool test_body(void *arg) priority_interrupt = 1 + (priority_task + 1) % 3; ctx->priority_task = priority_task; ctx->priority_interrupt = priority_interrupt; - ctx->priority_generation = ctx->tcb->Priority.generation; + ctx->priority_generation = ctx->tcb->priority_generation; memcpy( &ctx->scheduler_node, ctx->tcb->Scheduler.node, diff --git a/testsuites/sptests/spsize/size.c b/testsuites/sptests/spsize/size.c index f00da0d465..657fa425de 100644 --- a/testsuites/sptests/spsize/size.c +++ b/testsuites/sptests/spsize/size.c @@ -390,18 +390,13 @@ uninitialized = /*timerimpl.h*/ (sizeof _Timer_Information) + -/*tod.h*/ (sizeof _TOD.now) + - (sizeof _TOD.uptime) + - /*tqdata.h*/ 0 + /*types.h*/ 0 + /*userext.h*/ (sizeof _User_extensions_List) + -/*watchdog.h*/ (sizeof _Watchdog_Sync_level) + - (sizeof 
_Watchdog_Sync_count) + - (sizeof _Watchdog_Ticks_since_boot) + +/*watchdog.h*/ (sizeof _Watchdog_Ticks_since_boot) + (sizeof _Watchdog_Ticks_header) + (sizeof _Watchdog_Seconds_header) + diff --git a/testsuites/sptests/spthreadq01/init.c b/testsuites/sptests/spthreadq01/init.c index 240cd1a9e8..ce47760e2d 100644 --- a/testsuites/sptests/spthreadq01/init.c +++ b/testsuites/sptests/spthreadq01/init.c @@ -18,38 +18,31 @@ const char rtems_test_name[] = "SPTHREADQ 1"; -/* forward declarations to avoid warnings */ -rtems_task Init(rtems_task_argument argument); -void threadq_first_empty( - const char *discipline_string, - Thread_queue_Disciplines discipline -); - -void threadq_first_empty( - const char *discipline_string, - Thread_queue_Disciplines discipline +static Thread_queue_Control fifo_queue = + THREAD_QUEUE_FIFO_INITIALIZER( fifo_queue, "FIFO" ); + +static Thread_queue_Control prio_queue = + THREAD_QUEUE_PRIORIY_INITIALIZER( prio_queue, "Prio" ); + +static rtems_task Init( + rtems_task_argument ignored ) { - Thread_queue_Control tq; - - printf( "Init - initialize thread queue for %s\n", discipline_string ); - _Thread_queue_Initialize( &tq, discipline, 3 ); + TEST_BEGIN(); puts( "Init - _Thread_queue_Extract - thread not blocked on a thread queue" ); _Thread_Disable_dispatch(); - _Thread_queue_Extract( &tq, _Thread_Executing ); + _Thread_queue_Extract( _Thread_Executing ); _Thread_Enable_dispatch(); /* is there more to check? 
*/ -} -rtems_task Init( - rtems_task_argument ignored -) -{ - TEST_BEGIN(); + rtems_test_assert( _Chain_Is_empty( &fifo_queue.Queues.Fifo ) ); + rtems_test_assert( fifo_queue.operations == &_Thread_queue_Operations_FIFO ); - threadq_first_empty( "FIFO", THREAD_QUEUE_DISCIPLINE_FIFO ); - threadq_first_empty( "Priority", THREAD_QUEUE_DISCIPLINE_PRIORITY ); + rtems_test_assert( _RBTree_Is_empty( &fifo_queue.Queues.Priority ) ); + rtems_test_assert( + prio_queue.operations == &_Thread_queue_Operations_priority + ); TEST_END(); rtems_test_exit(0); diff --git a/testsuites/sptests/sptimecounter01/Makefile.am b/testsuites/sptests/sptimecounter01/Makefile.am new file mode 100644 index 0000000000..b2310886fc --- /dev/null +++ b/testsuites/sptests/sptimecounter01/Makefile.am @@ -0,0 +1,19 @@ +rtems_tests_PROGRAMS = sptimecounter01 +sptimecounter01_SOURCES = init.c + +dist_rtems_tests_DATA = sptimecounter01.scn sptimecounter01.doc + +include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg +include $(top_srcdir)/../automake/compile.am +include $(top_srcdir)/../automake/leaf.am + +AM_CPPFLAGS += -I$(top_srcdir)/../support/include + +LINK_OBJS = $(sptimecounter01_OBJECTS) +LINK_LIBS = $(sptimecounter01_LDLIBS) + +sptimecounter01$(EXEEXT): $(sptimecounter01_OBJECTS) $(sptimecounter01_DEPENDENCIES) + @rm -f sptimecounter01$(EXEEXT) + $(make-exe) + +include $(top_srcdir)/../automake/local.am diff --git a/testsuites/sptests/sptimecounter01/init.c b/testsuites/sptests/sptimecounter01/init.c new file mode 100644 index 0000000000..47ebb2722c --- /dev/null +++ b/testsuites/sptests/sptimecounter01/init.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#include <assert.h> + +#include <bsp/bootcard.h> + +#include <rtems/test.h> + +#include <rtems/score/timecounterimpl.h> +#include <rtems/score/watchdogimpl.h> +#include <rtems/timecounter.h> +#include <rtems/bsd.h> + +const char rtems_test_name[] = "SPTIMECOUNTER_1"; + +typedef struct { + struct timecounter tc_soft; + u_int tc_soft_counter; +} test_context; + +static test_context test_instance; + +static uint32_t test_get_timecount_soft(struct timecounter *tc) +{ + test_context *ctx = tc->tc_priv; + + ++ctx->tc_soft_counter; + + return ctx->tc_soft_counter; +} + +void boot_card(const char *cmdline) +{ + test_context *ctx = &test_instance; + struct timecounter *tc_soft = &ctx->tc_soft; + uint64_t soft_freq = 1000000; + struct bintime bt; + + rtems_test_begink(); + + _Timecounter_Initialize(); + _Watchdog_Handler_initialization(); + + rtems_bsd_binuptime(&bt); + assert(bt.sec == 1); + assert(bt.frac== 0); + + rtems_bsd_binuptime(&bt); + assert(bt.sec == 1); + assert(bt.frac == 0); + + rtems_timecounter_tick(); + rtems_bsd_binuptime(&bt); + assert(bt.sec == 1); + assert(bt.frac == 0); + + ctx->tc_soft_counter = 0; + tc_soft->tc_get_timecount = test_get_timecount_soft; + tc_soft->tc_counter_mask = 0x0fffffff; + tc_soft->tc_frequency = soft_freq; + tc_soft->tc_quality = 1234; + tc_soft->tc_priv = ctx; + _Timecounter_Install(tc_soft); + assert(ctx->tc_soft_counter == 3); + + rtems_bsd_binuptime(&bt); + assert(ctx->tc_soft_counter == 4); + + assert(bt.sec == 1); + assert(bt.frac == 18446744073708); + + ctx->tc_soft_counter = 0xf0000000 | 3; + rtems_bsd_binuptime(&bt); + assert(ctx->tc_soft_counter == (0xf0000000 | 4)); + + assert(bt.sec == 1); + assert(bt.frac == 18446744073708); + + /* Ensure that the fraction overflows and the second remains constant */ + ctx->tc_soft_counter = (0xf0000000 | 3) + soft_freq; + rtems_bsd_binuptime(&bt); + assert(ctx->tc_soft_counter == (0xf0000000 | 4) + soft_freq); + 
assert(bt.sec == 1); + assert(bt.frac == 18446742522092); + + rtems_test_endk(); + + _Terminate(RTEMS_FATAL_SOURCE_EXIT, false, 0); +} + +#define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER + +#define CONFIGURE_APPLICATION_DISABLE_FILESYSTEM + +#define CONFIGURE_DISABLE_NEWLIB_REENTRANCY + +#define CONFIGURE_SCHEDULER_USER + +#define CONFIGURE_SCHEDULER_CONTEXT + +#define CONFIGURE_SCHEDULER_CONTROLS { } + +#define CONFIGURE_MEMORY_PER_TASK_FOR_SCHEDULER 0 + +#define CONFIGURE_TASK_STACK_ALLOCATOR NULL + +#define CONFIGURE_TASK_STACK_DEALLOCATOR NULL + +#define CONFIGURE_IDLE_TASK_INITIALIZES_APPLICATION + +#define CONFIGURE_IDLE_TASK_BODY NULL + +#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION + +#define CONFIGURE_INIT + +#include <rtems/confdefs.h> diff --git a/testsuites/sptests/sptimecounter01/sptimecounter01.doc b/testsuites/sptests/sptimecounter01/sptimecounter01.doc new file mode 100644 index 0000000000..4a7442c2e6 --- /dev/null +++ b/testsuites/sptests/sptimecounter01/sptimecounter01.doc @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +This file describes the directives and concepts tested by this test set. 
+ +test set name: sptimecounter01 + +directives: + + _Timecounter_Initialize + rtems_timecounter_tick + _Timecounter_Install + rtems_bsd_bintime + +concepts: + + This test checks the correct functioning of the FreeBSD timecounter startup + process diff --git a/testsuites/sptests/sptimecounter01/sptimecounter01.scn b/testsuites/sptests/sptimecounter01/sptimecounter01.scn new file mode 100644 index 0000000000..5fa9c0f281 --- /dev/null +++ b/testsuites/sptests/sptimecounter01/sptimecounter01.scn @@ -0,0 +1,2 @@ +*** BEGIN OF TEST SPTIMECOUNTER_1 *** +*** END OF TEST SPTIMECOUNTER_1 *** diff --git a/testsuites/sptests/sptimecounter02/Makefile.am b/testsuites/sptests/sptimecounter02/Makefile.am new file mode 100644 index 0000000000..badb647a14 --- /dev/null +++ b/testsuites/sptests/sptimecounter02/Makefile.am @@ -0,0 +1,20 @@ +rtems_tests_PROGRAMS = sptimecounter02 +sptimecounter02_SOURCES = init.c +sptimecounter02_SOURCES += ../../support/src/spin.c + +dist_rtems_tests_DATA = sptimecounter02.scn sptimecounter02.doc + +include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg +include $(top_srcdir)/../automake/compile.am +include $(top_srcdir)/../automake/leaf.am + +AM_CPPFLAGS += -I$(top_srcdir)/../support/include + +LINK_OBJS = $(sptimecounter02_OBJECTS) +LINK_LIBS = $(sptimecounter02_LDLIBS) + +sptimecounter02$(EXEEXT): $(sptimecounter02_OBJECTS) $(sptimecounter02_DEPENDENCIES) + @rm -f sptimecounter02$(EXEEXT) + $(make-exe) + +include $(top_srcdir)/../automake/local.am diff --git a/testsuites/sptests/sptimecounter02/init.c b/testsuites/sptests/sptimecounter02/init.c new file mode 100644 index 0000000000..c7d72a8eb7 --- /dev/null +++ b/testsuites/sptests/sptimecounter02/init.c @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 
4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#define _KERNEL + +#include <sys/time.h> +#include <sys/timetc.h> + +#include <stdlib.h> +#include <stdio.h> +#include <inttypes.h> +#include <unistd.h> + +#include <rtems.h> +#include <rtems/counter.h> +#include <rtems/test.h> + +#include <rtems/score/timecounterimpl.h> +#include <rtems/timecounter.h> +#include <rtems/bsd.h> + +#include <test_support.h> + +#include "tmacros.h" + +const char rtems_test_name[] = "SPTIMECOUNTER 2"; + +#define CPU_COUNT 32 + +#define DURATION_IN_SECONDS 1 + +typedef struct { + rtems_test_parallel_context base; + struct timecounter tc_null; + uint32_t binuptime_per_job[CPU_COUNT]; + sbintime_t duration_per_job[CPU_COUNT]; + uint32_t rtemsuptime_per_job[CPU_COUNT]; +} timecounter_context; + +static timecounter_context test_instance; + +static rtems_interval test_duration(void) +{ + return DURATION_IN_SECONDS * rtems_clock_get_ticks_per_second(); +} + +static uint32_t test_get_timecount_null(struct timecounter *tc) +{ + return 0; +} + +static void install_tc_null(timecounter_context *ctx) +{ + struct timecounter *tc_cpu = &ctx->tc_null; + + tc_cpu->tc_get_timecount = test_get_timecount_null; + tc_cpu->tc_counter_mask = 0xffffffff; + tc_cpu->tc_frequency = rtems_counter_nanoseconds_to_ticks(1000000000); + tc_cpu->tc_quality = 2000; + rtems_timecounter_install(tc_cpu); +} + +static rtems_interval test_bintime_init( + rtems_test_parallel_context *base, + void *arg, + size_t active_workers +) +{ + rtems_test_spin_until_next_tick(); + + return test_duration(); +} + +static void test_bintime_body( + rtems_test_parallel_context *base, + void *arg, + size_t active_workers, + size_t worker_index +) +{ + timecounter_context *ctx = (timecounter_context *) base; + 
uint32_t counter = 1; + struct bintime start; + struct bintime end; + + rtems_bsd_binuptime(&start); + + do { + ++counter; + rtems_bsd_binuptime(&end); + } while (!rtems_test_parallel_stop_job(&ctx->base)); + + ctx->binuptime_per_job[worker_index] = counter; + ctx->duration_per_job[worker_index] = bttosbt(end) - bttosbt(start); +} + +static void test_bintime_fini( + rtems_test_parallel_context *base, + void *arg, + size_t active_workers +) +{ + timecounter_context *ctx = (timecounter_context *) base; + size_t i; + + printf(" <BinuptimeTest activeWorker=\"%zu\">\n", active_workers); + + for (i = 0; i < active_workers; ++i) { + sbintime_t error; + + printf( + " <Counter worker=\"%zu\">%" PRIu32 "</Counter>\n" + " <Duration worker=\"%zu\" unit=\"sbintime\">%" PRId64 "</Duration>\n", + i + 1, + ctx->binuptime_per_job[i], + i + 1, + ctx->duration_per_job[i] + ); + + error = DURATION_IN_SECONDS * SBT_1S - ctx->duration_per_job[i]; + rtems_test_assert(error * error < SBT_1MS * SBT_1MS); + } + + printf(" </BinuptimeTest>\n"); +} + +static rtems_interval test_bintime_null_init( + rtems_test_parallel_context *base, + void *arg, + size_t active_workers +) +{ + timecounter_context *ctx = &test_instance; + + install_tc_null(ctx); + + return test_duration(); +} + +static void test_bintime_null_body( + rtems_test_parallel_context *base, + void *arg, + size_t active_workers, + size_t worker_index +) +{ + timecounter_context *ctx = (timecounter_context *) base; + struct bintime bt; + uint32_t counter = 0; + + while (!rtems_test_parallel_stop_job(&ctx->base)) { + ++counter; + rtems_bsd_binuptime(&bt); + } + + ctx->binuptime_per_job[worker_index] = counter; +} + +static void test_bintime_null_fini( + rtems_test_parallel_context *base, + void *arg, + size_t active_workers +) +{ + timecounter_context *ctx = (timecounter_context *) base; + size_t i; + + printf(" <BinuptimeNullTest activeWorker=\"%zu\">\n", active_workers); + + for (i = 0; i < active_workers; ++i) { + printf( + " 
<Counter worker=\"%zu\">%" PRIu32 "</Counter>\n", + i + 1, + ctx->binuptime_per_job[i] + ); + } + + printf(" </BinuptimeNullTest>\n"); +} + +static const rtems_test_parallel_job timecounter_jobs[] = { + { + .init = test_bintime_init, + .body = test_bintime_body, + .fini = test_bintime_fini, + .cascade = true + },{ + .init = test_bintime_null_init, + .body = test_bintime_null_body, + .fini = test_bintime_null_fini, + .cascade = true + } +}; + +static void Init(rtems_task_argument arg) +{ + timecounter_context *ctx = &test_instance; + struct bintime bt; + struct timespec ts; + struct timeval tv; + + TEST_BEGIN(); + + printf("<SPTimecounter01>\n"); + + rtems_test_parallel( + &ctx->base, + NULL, + &timecounter_jobs[0], + RTEMS_ARRAY_SIZE(timecounter_jobs) + ); + + /* Check for all functions available in the bsd.h user space */ + + rtems_bsd_bintime(&bt); + rtems_bsd_microtime(&tv); + rtems_bsd_nanotime(&ts); + rtems_bsd_binuptime(&bt); + rtems_bsd_microuptime(&tv); + rtems_bsd_nanouptime(&ts); + rtems_bsd_getbintime(&bt); + rtems_bsd_getmicrotime(&tv); + rtems_bsd_getnanotime(&ts); + rtems_bsd_getbinuptime(&bt); + rtems_bsd_getmicrouptime(&tv); + rtems_bsd_getnanouptime(&ts); + + printf("</SPTimecounter01>\n"); + + TEST_END(); + rtems_test_exit(0); +} + +#define CONFIGURE_MICROSECONDS_PER_TICK 1000 + +#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER +#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER + +#define CONFIGURE_MAXIMUM_TASKS (2 + CPU_COUNT - 1) +#define CONFIGURE_MAXIMUM_TIMERS 2 +#define CONFIGURE_MAXIMUM_PERIODS 1 + +#define CONFIGURE_SMP_APPLICATION + +#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT + +#define CONFIGURE_RTEMS_INIT_TASKS_TABLE + +#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION + +#define CONFIGURE_INIT + +#include <rtems/confdefs.h> diff --git a/testsuites/sptests/sptimecounter02/sptimecounter02.doc b/testsuites/sptests/sptimecounter02/sptimecounter02.doc new file mode 100644 index 0000000000..9988ad067c --- /dev/null 
+++ b/testsuites/sptests/sptimecounter02/sptimecounter02.doc @@ -0,0 +1,12 @@ +This file describes the directives and concepts tested by this test set. + +test set name: sptimecounter02 + +directives: + + rtems_timecounter_install, rtems_bsd_binuptime. + +concepts: + + measurement of performance tests: how often per timeframe can FreeBSD time + routines be called in comparison to original rtems timing routines. diff --git a/testsuites/sptests/sptimecounter02/sptimecounter02.scn b/testsuites/sptests/sptimecounter02/sptimecounter02.scn new file mode 100644 index 0000000000..004c50e416 --- /dev/null +++ b/testsuites/sptests/sptimecounter02/sptimecounter02.scn @@ -0,0 +1,46 @@ +*** BEGIN OF TEST SPTIMECOUNTER_2 *** +<SPTimecounter01> + <BinuptimeTest activeWorker="1"> + <Counter worker="1">591457</Counter> + <Duration worker="1" unit="sbintime">4294787862</Duration> + <BinuptimeTest activeWorker="2"> + <Counter worker="1">587737</Counter> + <Duration worker="1" unit="sbintime">4294603178</Duration> + <Counter worker="2">587754</Counter> + <Duration worker="2" unit="sbintime">4294596307</Duration> + <BinuptimeTest activeWorker="3"> + <Counter worker="1">583602</Counter> + <Duration worker="1" unit="sbintime">4293605982</Duration> + <Counter worker="2">583643</Counter> + <Duration worker="2" unit="sbintime">4293602260</Duration> + <Counter worker="3">583659</Counter> + <Duration worker="3" unit="sbintime">4293604551</Duration> + <BinuptimeTest activeWorker="4"> + <Counter worker="1">583086</Counter> + <Duration worker="1" unit="sbintime">4291246232</Duration> + <Counter worker="2">583124</Counter> + <Duration worker="2" unit="sbintime">4291243178</Duration> + <Counter worker="3">583139</Counter> + <Duration worker="3" unit="sbintime">4291242796</Duration> + <Counter worker="4">565417</Counter> + <Duration worker="4" unit="sbintime">4291304930</Duration> + <BinuptimeNullTest activeWorker="1"> + <Counter worker="1">615571</Counter> + </BinuptimeNullTest> + 
<BinuptimeNullTest activeWorker="2"> + <Counter worker="1">615857</Counter> + <Counter worker="2">615856</Counter> + </BinuptimeNullTest> + <BinuptimeNullTest activeWorker="3"> + <Counter worker="1">615316</Counter> + <Counter worker="2">615328</Counter> + <Counter worker="3">615337</Counter> + </BinuptimeNullTest> + <BinuptimeNullTest activeWorker="4"> + <Counter worker="1">615495</Counter> + <Counter worker="2">615502</Counter> + <Counter worker="3">615509</Counter> + <Counter worker="4">597117</Counter> + </BinuptimeNullTest> +</SPTimecounter01> +*** END OF TEST SPTIMECOUNTER_2 *** diff --git a/testsuites/sptests/spwatchdog/init.c b/testsuites/sptests/spwatchdog/init.c index 1d3cb2f9dd..283f4c87a0 100644 --- a/testsuites/sptests/spwatchdog/init.c +++ b/testsuites/sptests/spwatchdog/init.c @@ -34,6 +34,119 @@ static void test_watchdog_routine( Objects_Id id, void *arg ) rtems_test_assert( 0 ); } +static void init_watchdogs( + Watchdog_Header *header, + Watchdog_Control watchdogs[3] +) +{ + Watchdog_Control *a = &watchdogs[0]; + Watchdog_Control *b = &watchdogs[1]; + Watchdog_Control *c = &watchdogs[2]; + Watchdog_Control *d = &watchdogs[3]; + + _Watchdog_Header_initialize( header ); + rtems_test_assert( _Watchdog_Is_empty( header ) ); + rtems_test_assert( _Chain_Is_empty( &header->Iterators ) ); + + _Watchdog_Initialize( c, NULL, 0, NULL ); + c->initial = 6; + _Watchdog_Insert( header, c ); + rtems_test_assert( c->delta_interval == 6 ); + + rtems_test_assert( !_Watchdog_Is_empty( header ) ); + rtems_test_assert( _Chain_Is_empty( &header->Iterators ) ); + + _Watchdog_Initialize( a, NULL, 0, NULL ); + a->initial = 2; + _Watchdog_Insert( header, a ); + rtems_test_assert( a->delta_interval == 2 ); + rtems_test_assert( c->delta_interval == 4 ); + + _Watchdog_Initialize( b, NULL, 0, NULL ); + b->initial = 4; + _Watchdog_Insert( header, b ); + rtems_test_assert( a->delta_interval == 2 ); + rtems_test_assert( b->delta_interval == 2 ); + rtems_test_assert( 
c->delta_interval == 2 ); + + _Watchdog_Initialize( d, NULL, 0, NULL ); +} + +static void destroy_watchdogs( + Watchdog_Header *header +) +{ + _ISR_lock_Destroy( &header->Lock ); +} + +static void add_iterator( + Watchdog_Header *header, + Watchdog_Iterator *i, + Watchdog_Control *w +) +{ + _Chain_Append_unprotected( &header->Iterators, &i->Node ); + i->delta_interval = 2; + i->current = &w->Node; +} + +static void test_watchdog_insert_and_remove( void ) +{ + Watchdog_Header header; + Watchdog_Control watchdogs[4]; + Watchdog_Control *a = &watchdogs[0]; + Watchdog_Control *b = &watchdogs[1]; + Watchdog_Control *c = &watchdogs[2]; + Watchdog_Control *d = &watchdogs[3]; + Watchdog_Iterator i; + + init_watchdogs( &header, watchdogs ); + add_iterator( &header, &i, c ); + + /* Remove next watchdog of iterator */ + _Watchdog_Remove( &header, c ); + rtems_test_assert( i.delta_interval == 2 ); + rtems_test_assert( i.current == &b->Node ); + + /* Remove watchdog before the current watchdog of iterator */ + _Watchdog_Remove( &header, a ); + rtems_test_assert( i.delta_interval == 4 ); + rtems_test_assert( i.current == &b->Node ); + + /* Remove current (= last) watchdog of iterator */ + _Watchdog_Remove( &header, b ); + rtems_test_assert( i.delta_interval == 4 ); + rtems_test_assert( i.current == _Chain_Head( &header.Watchdogs ) ); + + /* Insert first watchdog */ + a->initial = 1; + _Watchdog_Insert( &header, a ); + rtems_test_assert( i.delta_interval == 4 ); + rtems_test_assert( i.current == _Chain_Head( &header.Watchdogs ) ); + + destroy_watchdogs( &header ); + init_watchdogs( &header, watchdogs ); + add_iterator( &header, &i, b ); + + /* Insert right before current watchdog of iterator */ + d->initial = 3; + _Watchdog_Insert( &header, d ); + rtems_test_assert( i.delta_interval == 1 ); + rtems_test_assert( i.current == &b->Node ); + + destroy_watchdogs( &header ); + init_watchdogs( &header, watchdogs ); + add_iterator( &header, &i, b ); + + /* Insert right after current 
watchdog of iterator */ + d->initial = 5; + _Watchdog_Insert( &header, d ); + rtems_test_assert( i.delta_interval == 2 ); + rtems_test_assert( i.current == &b->Node ); + + destroy_watchdogs( &header ); +} + static void test_watchdog_static_init( void ) { #if defined(RTEMS_USE_16_BIT_OBJECT) @@ -70,6 +183,7 @@ rtems_task Init( TEST_BEGIN(); test_watchdog_static_init(); + test_watchdog_insert_and_remove(); build_time( &time, 12, 31, 1988, 9, 0, 0, 0 ); |