author    Chris Johns <chrisj@rtems.org>  2017-12-23 18:18:56 +1100
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-01-25 08:45:26 +0100
commit    2afb22b7e1ebcbe40373ff7e0efae7d207c655a9 (patch)
tree      44759efe9374f13200a97e96d91bd9a2b7e5ce2a /cpukit/include/rtems/score
parent    MAINTAINERS: Add myself to Write After Approval. (diff)
download  rtems-2afb22b7e1ebcbe40373ff7e0efae7d207c655a9.tar.bz2
Remove make preinstall
A speciality of the RTEMS build system was the make preinstall step. It
copied header files from arbitrary locations into the build tree. The
header files were included via the -Bsome/build/tree/path GCC command
line option.

This has at least seven problems:

* The make preinstall step itself needs time and disk space.
* Errors in header files show up in the build tree copy. This makes it
  hard for editors to open the right file to fix the error.
* There is no clear relationship between source and build tree header
  files. This makes an audit of the build process difficult.
* The visibility of all header files in the build tree makes it
  difficult to enforce API barriers. For example it is discouraged to
  use BSP-specifics in the cpukit.
* An introduction of a new build system is difficult.
* Include paths specified by the -B option are system headers. This
  may suppress warnings.
* The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header
files are moved to dedicated include directories in the source tree.
Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc,
etc. Let @RTEMS_BSP_FAMILIY@ be a BSP family base directory, e.g.
erc32, imx, qoriq, etc.

The new cpukit include directories are:

* cpukit/include
* cpukit/score/cpu/@RTEMS_CPU@/include
* cpukit/libnetworking

The new BSP include directories are:

* bsps/include
* bsps/@RTEMS_CPU@/include
* bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILIY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, e.g.
it is not possible to override general header files via the include
path order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option
should be used to regenerate the "headers.am" files.

Update #3254.
Diffstat (limited to 'cpukit/include/rtems/score')
-rw-r--r--  cpukit/include/rtems/score/address.h | 200
-rw-r--r--  cpukit/include/rtems/score/apimutex.h | 109
-rw-r--r--  cpukit/include/rtems/score/assert.h | 108
-rw-r--r--  cpukit/include/rtems/score/atomic.h | 156
-rw-r--r--  cpukit/include/rtems/score/basedefs.h | 415
-rw-r--r--  cpukit/include/rtems/score/chain.h | 102
-rw-r--r--  cpukit/include/rtems/score/chainimpl.h | 1123
-rw-r--r--  cpukit/include/rtems/score/context.h | 163
-rw-r--r--  cpukit/include/rtems/score/copyrt.h | 44
-rw-r--r--  cpukit/include/rtems/score/corebarrier.h | 91
-rw-r--r--  cpukit/include/rtems/score/corebarrierimpl.h | 173
-rw-r--r--  cpukit/include/rtems/score/coremsg.h | 185
-rw-r--r--  cpukit/include/rtems/score/coremsgimpl.h | 494
-rw-r--r--  cpukit/include/rtems/score/coremutex.h | 104
-rw-r--r--  cpukit/include/rtems/score/coremuteximpl.h | 447
-rw-r--r--  cpukit/include/rtems/score/corerwlockimpl.h | 182
-rw-r--r--  cpukit/include/rtems/score/coresem.h | 61
-rw-r--r--  cpukit/include/rtems/score/coresemimpl.h | 207
-rw-r--r--  cpukit/include/rtems/score/cpustdatomic.h | 682
-rw-r--r--  cpukit/include/rtems/score/freechain.h | 111
-rw-r--r--  cpukit/include/rtems/score/heap.h | 518
-rw-r--r--  cpukit/include/rtems/score/heapimpl.h | 601
-rw-r--r--  cpukit/include/rtems/score/interr.h | 268
-rw-r--r--  cpukit/include/rtems/score/io.h | 46
-rw-r--r--  cpukit/include/rtems/score/isr.h | 155
-rw-r--r--  cpukit/include/rtems/score/isrlevel.h | 153
-rw-r--r--  cpukit/include/rtems/score/isrlock.h | 439
-rw-r--r--  cpukit/include/rtems/score/mpci.h | 135
-rw-r--r--  cpukit/include/rtems/score/mpciimpl.h | 326
-rw-r--r--  cpukit/include/rtems/score/mppkt.h | 121
-rw-r--r--  cpukit/include/rtems/score/mrsp.h | 79
-rw-r--r--  cpukit/include/rtems/score/mrspimpl.h | 384
-rw-r--r--  cpukit/include/rtems/score/muteximpl.h | 37
-rw-r--r--  cpukit/include/rtems/score/object.h | 469
-rw-r--r--  cpukit/include/rtems/score/objectimpl.h | 1002
-rw-r--r--  cpukit/include/rtems/score/objectmp.h | 197
-rw-r--r--  cpukit/include/rtems/score/onceimpl.h | 52
-rw-r--r--  cpukit/include/rtems/score/percpu.h | 851
-rw-r--r--  cpukit/include/rtems/score/priority.h | 203
-rw-r--r--  cpukit/include/rtems/score/prioritybitmap.h | 79
-rw-r--r--  cpukit/include/rtems/score/prioritybitmapimpl.h | 215
-rw-r--r--  cpukit/include/rtems/score/priorityimpl.h | 435
-rw-r--r--  cpukit/include/rtems/score/processormask.h | 290
-rw-r--r--  cpukit/include/rtems/score/profiling.h | 140
-rw-r--r--  cpukit/include/rtems/score/protectedheap.h | 172
-rw-r--r--  cpukit/include/rtems/score/rbtree.h | 568
-rw-r--r--  cpukit/include/rtems/score/rbtreeimpl.h | 72
-rw-r--r--  cpukit/include/rtems/score/scheduler.h | 556
-rw-r--r--  cpukit/include/rtems/score/schedulercbs.h | 346
-rw-r--r--  cpukit/include/rtems/score/schedulercbsimpl.h | 59
-rw-r--r--  cpukit/include/rtems/score/scheduleredf.h | 197
-rw-r--r--  cpukit/include/rtems/score/scheduleredfimpl.h | 164
-rw-r--r--  cpukit/include/rtems/score/scheduleredfsmp.h | 200
-rw-r--r--  cpukit/include/rtems/score/schedulerimpl.h | 1203
-rw-r--r--  cpukit/include/rtems/score/schedulernode.h | 217
-rw-r--r--  cpukit/include/rtems/score/schedulernodeimpl.h | 146
-rw-r--r--  cpukit/include/rtems/score/schedulerpriority.h | 163
-rw-r--r--  cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h | 181
-rw-r--r--  cpukit/include/rtems/score/schedulerpriorityimpl.h | 241
-rw-r--r--  cpukit/include/rtems/score/schedulerprioritysmp.h | 171
-rw-r--r--  cpukit/include/rtems/score/schedulerprioritysmpimpl.h | 184
-rw-r--r--  cpukit/include/rtems/score/schedulersimple.h | 126
-rw-r--r--  cpukit/include/rtems/score/schedulersimpleimpl.h | 103
-rw-r--r--  cpukit/include/rtems/score/schedulersimplesmp.h | 155
-rw-r--r--  cpukit/include/rtems/score/schedulersmp.h | 127
-rw-r--r--  cpukit/include/rtems/score/schedulersmpimpl.h | 1482
-rw-r--r--  cpukit/include/rtems/score/schedulerstrongapa.h | 171
-rw-r--r--  cpukit/include/rtems/score/semaphoreimpl.h | 73
-rw-r--r--  cpukit/include/rtems/score/smp.h | 64
-rw-r--r--  cpukit/include/rtems/score/smpbarrier.h | 125
-rw-r--r--  cpukit/include/rtems/score/smpimpl.h | 354
-rw-r--r--  cpukit/include/rtems/score/smplock.h | 327
-rw-r--r--  cpukit/include/rtems/score/smplockmcs.h | 262
-rw-r--r--  cpukit/include/rtems/score/smplockseq.h | 176
-rw-r--r--  cpukit/include/rtems/score/smplockstats.h | 277
-rw-r--r--  cpukit/include/rtems/score/smplockticket.h | 187
-rw-r--r--  cpukit/include/rtems/score/stack.h | 69
-rw-r--r--  cpukit/include/rtems/score/stackimpl.h | 99
-rw-r--r--  cpukit/include/rtems/score/states.h | 50
-rw-r--r--  cpukit/include/rtems/score/statesimpl.h | 283
-rw-r--r--  cpukit/include/rtems/score/status.h | 129
-rw-r--r--  cpukit/include/rtems/score/sysstate.h | 119
-rw-r--r--  cpukit/include/rtems/score/thread.h | 935
-rw-r--r--  cpukit/include/rtems/score/threaddispatch.h | 281
-rw-r--r--  cpukit/include/rtems/score/threadimpl.h | 1969
-rw-r--r--  cpukit/include/rtems/score/threadmp.h | 113
-rw-r--r--  cpukit/include/rtems/score/threadq.h | 595
-rw-r--r--  cpukit/include/rtems/score/threadqimpl.h | 1265
-rw-r--r--  cpukit/include/rtems/score/timecounter.h | 244
-rw-r--r--  cpukit/include/rtems/score/timecounterimpl.h | 50
-rw-r--r--  cpukit/include/rtems/score/timespec.h | 272
-rw-r--r--  cpukit/include/rtems/score/timestamp.h | 323
-rw-r--r--  cpukit/include/rtems/score/tls.h | 217
-rw-r--r--  cpukit/include/rtems/score/tod.h | 32
-rw-r--r--  cpukit/include/rtems/score/todimpl.h | 304
-rw-r--r--  cpukit/include/rtems/score/userext.h | 273
-rw-r--r--  cpukit/include/rtems/score/userextimpl.h | 369
-rw-r--r--  cpukit/include/rtems/score/watchdog.h | 166
-rw-r--r--  cpukit/include/rtems/score/watchdogimpl.h | 574
-rw-r--r--  cpukit/include/rtems/score/wkspace.h | 138
100 files changed, 30070 insertions, 0 deletions
diff --git a/cpukit/include/rtems/score/address.h b/cpukit/include/rtems/score/address.h
new file mode 100644
index 0000000000..8f38f7c2dc
--- /dev/null
+++ b/cpukit/include/rtems/score/address.h
@@ -0,0 +1,200 @@
+/**
+ * @file rtems/score/address.h
+ *
+ * @brief Information Required to Manipulate Physical Addresses
+ *
+ * This include file contains the information required to manipulate
+ * physical addresses.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ADDRESS_H
+#define _RTEMS_SCORE_ADDRESS_H
+
+#include <rtems/score/cpu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreAddress Address Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which abstracts address
+ * manipulation in a portable manner.
+ */
+/**@{*/
+
+/**
+ * @brief Add offset to an address.
+ *
+ * This function is used to add an @a offset to a @a base address.
+ * It returns the resulting address. This address is typically
+ * converted to an access type before being used further.
+ *
+ * @param[in] base is the base address.
+ * @param[in] offset is the offset to add to @a base.
+ *
+ * @return This method returns the resulting address.
+ */
+RTEMS_INLINE_ROUTINE void *_Addresses_Add_offset (
+ const void *base,
+ uintptr_t offset
+)
+{
+ return (void *)((uintptr_t)base + offset);
+}
+
+/**
+ * @brief Subtract offset from address.
+ *
+ * This function is used to subtract an @a offset from a @a base
+ * address. It returns the resulting address. This address is
+ * typically converted to an access type before being used further.
+ *
+ * @param[in] base is the base address.
+ * @param[in] offset is the offset to subtract from @a base.
+ *
+ * @return This method returns the resulting address.
+ */
+
+RTEMS_INLINE_ROUTINE void *_Addresses_Subtract_offset (
+ const void *base,
+ uintptr_t offset
+)
+{
+ return (void *)((uintptr_t)base - offset);
+}
+
+/**
+ * @brief Subtract two addresses.
+ *
+ * This function is used to subtract two addresses. It returns the
+ * resulting offset.
+ *
+ * @param[in] left is the address on the left hand side of the subtraction.
+ * @param[in] right is the address on the right hand side of the subtraction.
+ *
+ * @return This method returns the resulting offset.
+ *
+ * @note The cast of the address difference to an int32_t makes this code
+ * dependent on addresses being at most thirty-two bits.
+ */
+RTEMS_INLINE_ROUTINE int32_t _Addresses_Subtract (
+ const void *left,
+ const void *right
+)
+{
+ return (int32_t) ((const char *) left - (const char *) right);
+}
+
+/**
+ * @brief Is address aligned.
+ *
+ * This function returns true if the given address is correctly
+ * aligned for this processor and false otherwise. Proper alignment
+ * is based on correctness and efficiency.
+ *
+ * @param[in] address is the address being checked for alignment.
+ *
+ * @retval true The @a address is aligned.
+ * @retval false The @a address is not aligned.
+ */
+RTEMS_INLINE_ROUTINE bool _Addresses_Is_aligned (
+ const void *address
+)
+{
+#if (CPU_ALIGNMENT == 0)
+ return true;
+#else
+ return (((uintptr_t)address % CPU_ALIGNMENT) == 0);
+#endif
+}
+
+/**
+ * @brief Is address in range.
+ *
+ * This function returns true if the given address is within the
+ * memory range specified and false otherwise. base is the address
+ * of the first byte in the memory range and limit is the address
+ * of the last byte in the memory range. The base address is
+ * assumed to be lower than the limit address.
+ *
+ * @param[in] address is the address to check.
+ * @param[in] base is the lowest address of the range to check against.
+ * @param[in] limit is the highest address of the range to check against.
+ *
+ * @retval true The @a address is within the memory range specified
+ * @retval false The @a address is not within the memory range specified.
+ */
+RTEMS_INLINE_ROUTINE bool _Addresses_Is_in_range (
+ const void *address,
+ const void *base,
+ const void *limit
+)
+{
+ return (address >= base && address <= limit);
+}
+
+/**
+ * @brief Align address to nearest multiple of alignment, rounding up.
+ *
+ * This function returns the given address aligned to the given alignment.
+ * If the address is already aligned, it is returned as is. The returned
+ * address is greater than or equal to the given address.
+ *
+ * @param[in] address is the address to align.
+ * @param[in] alignment is the boundary for alignment and must be a power of 2
+ *
+ * @return Returns the aligned address.
+ */
+RTEMS_INLINE_ROUTINE void *_Addresses_Align_up(
+ void *address,
+ size_t alignment
+)
+{
+ uintptr_t mask = alignment - (uintptr_t)1;
+ return (void*)(((uintptr_t)address + mask) & ~mask);
+}
+
+/**
+ * @brief Align address to nearest multiple of alignment, truncating.
+ *
+ * This function returns the given address aligned to the given alignment.
+ * If the address is already aligned, it is returned as is. The returned
+ * address is less than or equal to the given address.
+ *
+ * @param[in] address is the address to align.
+ * @param[in] alignment is the boundary for alignment and must be a power of 2.
+ *
+ * @return Returns the aligned address.
+ */
+RTEMS_INLINE_ROUTINE void *_Addresses_Align_down(
+ void *address,
+ size_t alignment
+)
+{
+ uintptr_t mask = alignment - (uintptr_t)1;
+ return (void*)((uintptr_t)address & ~mask);
+}
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
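The alignment helpers above are easy to exercise on a host compiler. The
following is a minimal standalone sketch, not part of the patch: align_up()
and align_down() are local stand-ins that mirror _Addresses_Align_up() and
_Addresses_Align_down(), assuming any hosted C99 toolchain.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-ins for the inline routines above. */
static void *align_up( void *address, size_t alignment )
{
  uintptr_t mask = alignment - (uintptr_t) 1;
  return (void *) ( ( (uintptr_t) address + mask ) & ~mask );
}

static void *align_down( void *address, size_t alignment )
{
  uintptr_t mask = alignment - (uintptr_t) 1;
  return (void *) ( (uintptr_t) address & ~mask );
}

int main( void )
{
  char  buffer[ 64 ];
  void *p = &buffer[ 3 ];

  /* Both results land on an 8-byte boundary and bracket the input. */
  assert( (uintptr_t) align_up( p, 8 ) % 8 == 0 );
  assert( (uintptr_t) align_down( p, 8 ) % 8 == 0 );
  assert( (uintptr_t) align_down( p, 8 ) <= (uintptr_t) p );
  assert( (uintptr_t) p <= (uintptr_t) align_up( p, 8 ) );
  return 0;
}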
diff --git a/cpukit/include/rtems/score/apimutex.h b/cpukit/include/rtems/score/apimutex.h
new file mode 100644
index 0000000000..f43edf23f4
--- /dev/null
+++ b/cpukit/include/rtems/score/apimutex.h
@@ -0,0 +1,109 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreAPIMutex
+ *
+ * @brief API Mutex Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2008.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_APIMUTEX_H
+#define _RTEMS_SCORE_APIMUTEX_H
+
+#include <rtems/score/thread.h>
+
+#include <sys/lock.h>
+
+/**
+ * @defgroup ScoreAPIMutex API Mutex Handler
+ *
+ * @ingroup Score
+ *
+ * @brief Provides routines to ensure mutual exclusion on API level.
+ */
+/**@{**/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Control block used to manage each API mutex.
+ */
+typedef struct {
+ /**
+ * A recursive mutex.
+ */
+ struct _Mutex_recursive_Control Mutex;
+
+ /**
+ * @brief The thread life protection state before the outer-most mutex
+ * obtain.
+ */
+ Thread_Life_state previous_thread_life_state;
+} API_Mutex_Control;
+
+/**
+ * @brief Statically initialize an API mutex.
+ */
+#define API_MUTEX_INITIALIZER( name ) \
+ { _MUTEX_RECURSIVE_NAMED_INITIALIZER( name ), 0 }
+
+/**
+ * @brief Acquires the specified API mutex.
+ *
+ * @param[in] mutex The API mutex.
+ */
+void _API_Mutex_Lock( API_Mutex_Control *mutex );
+
+/**
+ * @brief Releases the specified API mutex.
+ *
+ * @param[in] mutex The API mutex.
+ */
+void _API_Mutex_Unlock( API_Mutex_Control *mutex );
+
+/**
+ * @brief Checks if the specified API mutex is owned by the executing thread.
+ *
+ * @param[in] mutex The API mutex.
+ */
+bool _API_Mutex_Is_owner( const API_Mutex_Control *mutex );
+
+/** @} */
+
+/**
+ * @defgroup ScoreAllocatorMutex RTEMS Allocator Mutex
+ *
+ * @ingroup ScoreAPIMutex
+ *
+ * @brief Protection for all memory allocations and deallocations in RTEMS.
+ *
+ * Once all APIs use this for allocation and deallocation protection, it
+ * should possibly be renamed and moved to a higher level in the
+ * hierarchy.
+ */
+/**@{**/
+
+void _RTEMS_Lock_allocator( void );
+
+void _RTEMS_Unlock_allocator( void );
+
+bool _RTEMS_Allocator_is_owner( void );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
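The control block above wraps a recursive mutex, so nested lock/unlock pairs
from the same thread are legal, and the thread life state is saved across the
outer-most obtain. A rough host-side analogue of the recursion semantics,
assuming a C11 toolchain that provides <threads.h> (mtx_t is a stand-in here,
not the RTEMS <sys/lock.h> _Mutex_recursive_Control):

#include <threads.h>

static mtx_t allocator_mutex; /* stand-in for the RTEMS allocator lock */

int main( void )
{
  mtx_init( &allocator_mutex, mtx_plain | mtx_recursive );

  /* Nested acquisition by the same thread must not deadlock. */
  mtx_lock( &allocator_mutex );
  mtx_lock( &allocator_mutex );
  mtx_unlock( &allocator_mutex );
  mtx_unlock( &allocator_mutex );

  mtx_destroy( &allocator_mutex );
  return 0;
}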
diff --git a/cpukit/include/rtems/score/assert.h b/cpukit/include/rtems/score/assert.h
new file mode 100644
index 0000000000..d4432838ce
--- /dev/null
+++ b/cpukit/include/rtems/score/assert.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ASSERT_H
+#define _RTEMS_SCORE_ASSERT_H
+
+#include <rtems/score/basedefs.h>
+
+#if defined( RTEMS_DEBUG )
+ #include <assert.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @brief Assertion similar to assert() controlled via RTEMS_DEBUG instead of
+ * NDEBUG.
+ */
+#if defined( RTEMS_DEBUG )
+
+ /**
+ * @brief Macro with method name used in assert output
+ *
+ * Given the variations in compilers and standards, we have to poke a bit.
+ *
+ * @note This is based on the code in newlib's assert.h.
+ */
+ #ifndef __RTEMS_ASSERT_FUNCTION
+ /* Use g++'s demangled names in C++. */
+ #if defined __cplusplus && defined __GNUC__
+ #define __RTEMS_ASSERT_FUNCTION __PRETTY_FUNCTION__
+
+ /* C99 requires the use of __func__. */
+ #elif __STDC_VERSION__ >= 199901L
+ #define __RTEMS_ASSERT_FUNCTION __func__
+
+ /* Older versions of gcc don't have __func__ but can use __FUNCTION__. */
+ #elif __GNUC__ >= 2
+ #define __RTEMS_ASSERT_FUNCTION __FUNCTION__
+
+ /* failed to detect __func__ support. */
+ #else
+ #define __RTEMS_ASSERT_FUNCTION ((char *) 0)
+ #endif
+ #endif /* !__RTEMS_ASSERT_FUNCTION */
+
+ #if !defined( RTEMS_SCHEDSIM )
+ /* normal build is newlib. */
+
+ void __assert_func(const char *, int, const char *, const char *)
+ RTEMS_NO_RETURN;
+
+ #define _Assert( _e ) \
+ ( ( _e ) ? \
+ ( void ) 0 : \
+ __assert_func( __FILE__, __LINE__, __RTEMS_ASSERT_FUNCTION, #_e ) )
+
+ #elif defined(__linux__)
+    /* The scheduler simulator has only been tested on glibc. */
+ #define _Assert( _e ) \
+ ( ( _e ) ? \
+ ( void ) 0 : \
+ __assert_fail( #_e, __FILE__, __LINE__, __RTEMS_ASSERT_FUNCTION ) )
+ #else
+ #error "Implement RTEMS assert support for this C Library"
+ #endif
+
+#else
+ #define _Assert( _e ) ( ( void ) 0 )
+#endif
+
+/**
+ * @brief Like _Assert(), but only armed if RTEMS_SMP is defined.
+ */
+#if defined( RTEMS_SMP )
+ #define _SMP_Assert( _e ) _Assert( _e )
+#else
+ #define _SMP_Assert( _e ) ( ( void ) 0 )
+#endif
+
+/**
+ * @brief Returns true if thread dispatching is allowed.
+ *
+ * Thread dispatching can be repressed via _Thread_Disable_dispatch() or
+ * _ISR_Local_disable().
+ */
+#if defined( RTEMS_DEBUG )
+ bool _Debug_Is_thread_dispatching_allowed( void );
+#endif
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_ASSERT_H */
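The pattern above gates the assertion on RTEMS_DEBUG rather than NDEBUG, so a
release build compiles _Assert() away entirely. A self-contained sketch of
the same pattern (MY_ASSERT is a hypothetical stand-in; compile with
-DRTEMS_DEBUG to arm it):

#include <stdio.h>
#include <stdlib.h>

#if defined( RTEMS_DEBUG )
  /* Armed: report the failing expression and abort. */
  #define MY_ASSERT( _e ) \
    ( ( _e ) ? ( void ) 0 : \
      ( fprintf( stderr, "%s:%d: assertion \"%s\" failed\n", \
          __FILE__, __LINE__, #_e ), abort() ) )
#else
  /* Disarmed: the expression is not evaluated at all. */
  #define MY_ASSERT( _e ) ( ( void ) 0 )
#endif

int main( void )
{
  MY_ASSERT( 1 + 1 == 2 ); /* Passes, or is a no-op without RTEMS_DEBUG. */
  return 0;
}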
diff --git a/cpukit/include/rtems/score/atomic.h b/cpukit/include/rtems/score/atomic.h
new file mode 100644
index 0000000000..526926926f
--- /dev/null
+++ b/cpukit/include/rtems/score/atomic.h
@@ -0,0 +1,156 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreAtomic
+ *
+ * @brief Atomic Operations API
+ */
+
+/*
+ * COPYRIGHT (c) 2012-2013 Deng Hengyi.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ATOMIC_H
+#define _RTEMS_SCORE_ATOMIC_H
+
+#include <rtems/score/cpuatomic.h>
+
+/**
+ * @defgroup ScoreAtomic Atomic Operations
+ *
+ * @ingroup Score
+ *
+ * @brief Support for atomic operations.
+ *
+ * Atomic operations can be used to implement low-level synchronization
+ * primitives on SMP systems, like spin locks. All atomic operations are
+ * defined in terms of C11 (ISO/IEC 9899:2011) or C++11 (ISO/IEC 14882:2011).
+ * For documentation use the standard documents.
+ *
+ * @{
+ */
+
+typedef CPU_atomic_Uint Atomic_Uint;
+
+typedef CPU_atomic_Ulong Atomic_Ulong;
+
+typedef CPU_atomic_Uintptr Atomic_Uintptr;
+
+typedef CPU_atomic_Flag Atomic_Flag;
+
+typedef CPU_atomic_Order Atomic_Order;
+
+#define ATOMIC_ORDER_RELAXED CPU_ATOMIC_ORDER_RELAXED
+
+#define ATOMIC_ORDER_ACQUIRE CPU_ATOMIC_ORDER_ACQUIRE
+
+#define ATOMIC_ORDER_RELEASE CPU_ATOMIC_ORDER_RELEASE
+
+#define ATOMIC_ORDER_ACQ_REL CPU_ATOMIC_ORDER_ACQ_REL
+
+#define ATOMIC_ORDER_SEQ_CST CPU_ATOMIC_ORDER_SEQ_CST
+
+#define ATOMIC_INITIALIZER_UINT( value ) CPU_ATOMIC_INITIALIZER_UINT( value )
+
+#define ATOMIC_INITIALIZER_ULONG( value ) CPU_ATOMIC_INITIALIZER_ULONG( value )
+
+#define ATOMIC_INITIALIZER_UINTPTR( value ) CPU_ATOMIC_INITIALIZER_UINTPTR( value )
+
+#define ATOMIC_INITIALIZER_FLAG CPU_ATOMIC_INITIALIZER_FLAG
+
+#define _Atomic_Fence( order ) _CPU_atomic_Fence( order )
+
+#define _Atomic_Init_uint( obj, desired ) \
+ _CPU_atomic_Init_uint( obj, desired )
+
+#define _Atomic_Init_ulong( obj, desired ) \
+ _CPU_atomic_Init_ulong( obj, desired )
+
+#define _Atomic_Init_uintptr( obj, desired ) \
+ _CPU_atomic_Init_uintptr( obj, desired )
+
+#define _Atomic_Load_uint( obj, order ) \
+ _CPU_atomic_Load_uint( obj, order )
+
+#define _Atomic_Load_ulong( obj, order ) \
+ _CPU_atomic_Load_ulong( obj, order )
+
+#define _Atomic_Load_uintptr( obj, order ) \
+ _CPU_atomic_Load_uintptr( obj, order )
+
+#define _Atomic_Store_uint( obj, desr, order ) \
+ _CPU_atomic_Store_uint( obj, desr, order )
+
+#define _Atomic_Store_ulong( obj, desr, order ) \
+ _CPU_atomic_Store_ulong( obj, desr, order )
+
+#define _Atomic_Store_uintptr( obj, desr, order ) \
+ _CPU_atomic_Store_uintptr( obj, desr, order )
+
+#define _Atomic_Fetch_add_uint( obj, arg, order ) \
+ _CPU_atomic_Fetch_add_uint( obj, arg, order )
+
+#define _Atomic_Fetch_add_ulong( obj, arg, order ) \
+ _CPU_atomic_Fetch_add_ulong( obj, arg, order )
+
+#define _Atomic_Fetch_add_uintptr( obj, arg, order ) \
+ _CPU_atomic_Fetch_add_uintptr( obj, arg, order )
+
+#define _Atomic_Fetch_sub_uint( obj, arg, order ) \
+ _CPU_atomic_Fetch_sub_uint( obj, arg, order )
+
+#define _Atomic_Fetch_sub_ulong( obj, arg, order ) \
+ _CPU_atomic_Fetch_sub_ulong( obj, arg, order )
+
+#define _Atomic_Fetch_sub_uintptr( obj, arg, order ) \
+ _CPU_atomic_Fetch_sub_uintptr( obj, arg, order )
+
+#define _Atomic_Fetch_or_uint( obj, arg, order ) \
+ _CPU_atomic_Fetch_or_uint( obj, arg, order )
+
+#define _Atomic_Fetch_or_ulong( obj, arg, order ) \
+ _CPU_atomic_Fetch_or_ulong( obj, arg, order )
+
+#define _Atomic_Fetch_or_uintptr( obj, arg, order ) \
+ _CPU_atomic_Fetch_or_uintptr( obj, arg, order )
+
+#define _Atomic_Fetch_and_uint( obj, arg, order ) \
+ _CPU_atomic_Fetch_and_uint( obj, arg, order )
+
+#define _Atomic_Fetch_and_ulong( obj, arg, order ) \
+ _CPU_atomic_Fetch_and_ulong( obj, arg, order )
+
+#define _Atomic_Fetch_and_uintptr( obj, arg, order ) \
+ _CPU_atomic_Fetch_and_uintptr( obj, arg, order )
+
+#define _Atomic_Exchange_uint( obj, desr, order ) \
+ _CPU_atomic_Exchange_uint( obj, desr, order )
+
+#define _Atomic_Exchange_ulong( obj, desr, order ) \
+ _CPU_atomic_Exchange_ulong( obj, desr, order )
+
+#define _Atomic_Exchange_uintptr( obj, desr, order ) \
+ _CPU_atomic_Exchange_uintptr( obj, desr, order )
+
+#define _Atomic_Compare_exchange_uint( obj, expected, desired, succ, fail ) \
+ _CPU_atomic_Compare_exchange_uint( obj, expected, desired, succ, fail )
+
+#define _Atomic_Compare_exchange_ulong( obj, expected, desired, succ, fail ) \
+ _CPU_atomic_Compare_exchange_ulong( obj, expected, desired, succ, fail )
+
+#define _Atomic_Compare_exchange_uintptr( obj, expected, desired, succ, fail ) \
+ _CPU_atomic_Compare_exchange_uintptr( obj, expected, desired, succ, fail )
+
+#define _Atomic_Flag_clear( obj, order ) \
+ _CPU_atomic_Flag_clear( obj, order )
+
+#define _Atomic_Flag_test_and_set( obj, order ) \
+ _CPU_atomic_Flag_test_and_set( obj, order )
+
+/** @} */
+
+#endif /* _RTEMS_SCORE_ATOMIC_H */
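All of the wrappers above delegate to the CPU atomic layer pulled in via
<rtems/score/cpuatomic.h>, which follows the C11 model. The equivalent
standard calls, shown standalone under the assumption of a hosted C11
compiler with <stdatomic.h>:

#include <stdatomic.h>
#include <stdio.h>

int main( void )
{
  atomic_uint  counter = ATOMIC_VAR_INIT( 0 );
  unsigned int expected = 1;

  /* Corresponds to _Atomic_Fetch_add_uint( &counter, 1, ATOMIC_ORDER_RELAXED ). */
  atomic_fetch_add_explicit( &counter, 1, memory_order_relaxed );

  /* Corresponds to _Atomic_Compare_exchange_uint() with acquire-release
     order on success and relaxed order on failure. */
  if ( atomic_compare_exchange_strong_explicit(
         &counter, &expected, 5,
         memory_order_acq_rel, memory_order_relaxed ) ) {
    printf( "counter is now %u\n",
            atomic_load_explicit( &counter, memory_order_relaxed ) );
  }
  return 0;
}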
diff --git a/cpukit/include/rtems/score/basedefs.h b/cpukit/include/rtems/score/basedefs.h
new file mode 100644
index 0000000000..4e48d226e8
--- /dev/null
+++ b/cpukit/include/rtems/score/basedefs.h
@@ -0,0 +1,415 @@
+/**
+ * @file
+ *
+ * @ingroup Score
+ *
+ * @brief Basic Definitions
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2010, 2017 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BASEDEFS_H
+#define _RTEMS_BASEDEFS_H
+
+/**
+ * @defgroup ScoreBaseDefs Basic Definitions
+ *
+ * @ingroup Score
+ */
+/**@{*/
+
+#include <rtems/score/cpuopts.h>
+
+#ifndef ASM
+ #include <stddef.h>
+ #include <stdbool.h>
+ #include <stdint.h>
+#endif
+
+#ifndef TRUE
+ /**
+ * This ensures that RTEMS has TRUE defined in all situations.
+ */
+ #define TRUE 1
+#endif
+
+#ifndef FALSE
+ /**
+ * This ensures that RTEMS has FALSE defined in all situations.
+ */
+ #define FALSE 0
+#endif
+
+#if TRUE == FALSE
+ #error "TRUE equals FALSE"
+#endif
+
+/**
+ * The following (in conjunction with compiler arguments) are used
+ * to choose between the use of static inline functions and macro
+ * functions. The static inline implementation allows better
+ * type checking with no cost in code size or execution speed.
+ */
+#ifdef __GNUC__
+ #define RTEMS_INLINE_ROUTINE static __inline__
+#else
+ #define RTEMS_INLINE_ROUTINE static inline
+#endif
+
+/**
+ * The following macro is a compiler specific way to ensure that memory
+ * writes are not reordered around certain points. This specifically can
+ * impact interrupt disable and thread dispatching critical sections.
+ */
+#ifdef __GNUC__
+ #define RTEMS_COMPILER_MEMORY_BARRIER() __asm__ volatile("" ::: "memory")
+#else
+ #define RTEMS_COMPILER_MEMORY_BARRIER()
+#endif
+
+/**
+ * The following defines a compiler specific attribute which informs
+ * the compiler that the method must not be inlined.
+ */
+#ifdef __GNUC__
+ #define RTEMS_NO_INLINE __attribute__((__noinline__))
+#else
+ #define RTEMS_NO_INLINE
+#endif
+
+/**
+ * The following macro is a compiler specific way to indicate that
+ * the method will NOT return to the caller. This can assist the
+ * compiler in code generation and avoid unreachable paths. This
+ * can impact the code generated following calls to
+ * rtems_fatal_error_occurred and _Terminate.
+ */
+#if defined(RTEMS_SCHEDSIM)
+ #define RTEMS_NO_RETURN
+#elif defined(__GNUC__) && !defined(RTEMS_DEBUG)
+ #define RTEMS_NO_RETURN __attribute__((__noreturn__))
+#else
+ #define RTEMS_NO_RETURN
+#endif
+
+/* Provided for backward compatibility */
+#define RTEMS_COMPILER_NO_RETURN_ATTRIBUTE RTEMS_NO_RETURN
+
+/**
+ * The following defines a compiler specific attribute which informs
+ * the compiler that the method has no effect except the return value
+ * and that the return value depends only on parameters and/or global
+ * variables.
+ */
+#ifdef __GNUC__
+ #define RTEMS_PURE __attribute__((__pure__))
+#else
+ #define RTEMS_PURE
+#endif
+
+/* Provided for backward compatibility */
+#define RTEMS_COMPILER_PURE_ATTRIBUTE RTEMS_PURE
+
+/**
+ * Instructs the compiler to issue a warning whenever a variable or function
+ * with this attribute will be used.
+ */
+#ifdef __GNUC__
+ #define RTEMS_DEPRECATED __attribute__((__deprecated__))
+#else
+ #define RTEMS_DEPRECATED
+#endif
+
+/* Provided for backward compatibility */
+#define RTEMS_COMPILER_DEPRECATED_ATTRIBUTE RTEMS_DEPRECATED
+
+/**
+ * @brief Instructs the compiler to place a specific variable or function in
+ * the specified section.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_SECTION( _section ) __attribute__((__section__(_section)))
+#else
+ #define RTEMS_SECTION( _section )
+#endif
+
+/**
+ * @brief Instructs the compiler that a specific variable or function is used.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_USED __attribute__((__used__))
+#else
+ #define RTEMS_USED
+#endif
+
+/**
+ * Instructs the compiler that a specific variable is deliberately unused.
+ * This can occur when reading volatile device memory or skipping arguments
+ * in a variable argument method.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_UNUSED __attribute__((__unused__))
+#else
+ #define RTEMS_UNUSED
+#endif
+
+/* Provided for backward compatibility */
+#define RTEMS_COMPILER_UNUSED_ATTRIBUTE RTEMS_UNUSED
+
+/**
+ * Instructs the compiler that a specific structure or union members will be
+ * placed so that the least memory is used.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_PACKED __attribute__((__packed__))
+#else
+ #define RTEMS_PACKED
+#endif
+
+/**
+ * @brief Instructs the compiler to generate an alias to the specified target
+ * function.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_ALIAS( _target ) __attribute__((__alias__(#_target)))
+#else
+ #define RTEMS_ALIAS( _target )
+#endif
+
+/**
+ * @brief Instructs the compiler to generate a weak alias to the specified
+ * target function.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_WEAK_ALIAS( _target ) __attribute__((__weak__, __alias__(#_target)))
+#else
+ #define RTEMS_WEAK_ALIAS( _target )
+#endif
+
+/**
+ * @brief Instructs the compiler to enforce the specified alignment.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_ALIGNED( _alignment ) __attribute__((__aligned__(_alignment)))
+#else
+ #define RTEMS_ALIGNED( _alignment )
+#endif
+
+/* Provided for backward compatibility */
+#define RTEMS_COMPILER_PACKED_ATTRIBUTE RTEMS_PACKED
+
+#if defined(RTEMS_DEBUG) && !defined(RTEMS_SCHEDSIM)
+ #define _Assert_Unreachable() _Assert( 0 )
+#else
+ #define _Assert_Unreachable() do { } while ( 0 )
+#endif
+
+/**
+ * @brief Tells the compiler that this program point is unreachable.
+ */
+#if defined(__GNUC__) && !defined(RTEMS_SCHEDSIM)
+ #define RTEMS_UNREACHABLE() \
+ do { \
+ __builtin_unreachable(); \
+ _Assert_Unreachable(); \
+ } while ( 0 )
+#else
+ #define RTEMS_UNREACHABLE() _Assert_Unreachable()
+#endif
+
+/**
+ * @brief Tells the compiler that this function expects printf()-like
+ * arguments.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_PRINTFLIKE( _format_pos, _ap_pos ) \
+ __attribute__((__format__(__printf__, _format_pos, _ap_pos)))
+#else
+ #define RTEMS_PRINTFLIKE( _format_pos, _ap_pos )
+#endif
+
+/**
+ * @brief Obfuscates the variable so that the compiler cannot perform
+ * optimizations based on the variable value.
+ *
+ * The variable must be simple enough to fit into a register.
+ */
+#if defined(__GNUC__)
+ #define RTEMS_OBFUSCATE_VARIABLE( _var ) __asm__("" : "+r" (_var))
+#else
+ #define RTEMS_OBFUSCATE_VARIABLE( _var ) (void) (_var)
+#endif
+
+#if __cplusplus >= 201103L
+ #define RTEMS_STATIC_ASSERT(cond, msg) \
+ static_assert(cond, # msg)
+#elif __STDC_VERSION__ >= 201112L
+ #define RTEMS_STATIC_ASSERT(cond, msg) \
+ _Static_assert(cond, # msg)
+#else
+ #define RTEMS_STATIC_ASSERT(cond, msg) \
+ typedef int rtems_static_assert_ ## msg [(cond) ? 1 : -1]
+#endif
+
+#define RTEMS_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0]))
+
+/*
+ * Zero-length arrays are valid in C99 as flexible array members. C++11
+ * doesn't allow flexible array members. Use the GNU extension which is also
+ * supported by other compilers.
+ */
+#define RTEMS_ZERO_LENGTH_ARRAY 0
+
+/**
+ * @brief Returns a pointer to the container of a specified member pointer.
+ *
+ * @param[in] _m The pointer to a member of the container.
+ * @param[in] _type The type of the container.
+ * @param[in] _member_name The designator name of the container member.
+ */
+#define RTEMS_CONTAINER_OF( _m, _type, _member_name ) \
+ ( (_type *) ( (uintptr_t) ( _m ) - offsetof( _type, _member_name ) ) )
+
+#ifdef __cplusplus
+#define RTEMS_DEQUALIFY_DEPTHX( _ptr_level, _type, _var ) \
+ (const_cast<_type>( _var ))
+#else /* Standard C code */
+
+/* The reference type idea based on libHX by Jan Engelhardt */
+#define RTEMS_TYPEOF_REFX(_ptr_level, _ptr_type) \
+ typeof(_ptr_level(union { int z; typeof(_ptr_type) x; }){0}.x)
+
+#if defined(__GNUC__) && !defined(ASM)
+#if ((__GNUC__ * 1000 + __GNUC_MINOR__) >= 4004)
+extern void* RTEMS_DEQUALIFY_types_not_compatible(void)
+ __attribute__((error ("RTEMS_DEQUALIFY types differ not only by volatile and const")));
+#else
+extern void RTEMS_DEQUALIFY_types_not_compatible(void);
+#endif
+#define RTEMS_DEQUALIFY_DEPTHX( _ptr_level, _type, _var ) ( \
+ __builtin_choose_expr( __builtin_types_compatible_p ( \
+ RTEMS_TYPEOF_REFX( _ptr_level, _var ), \
+ RTEMS_TYPEOF_REFX( _ptr_level, _type ) \
+ ) || __builtin_types_compatible_p ( _type, void * ), \
+ (_type)(_var), \
+ RTEMS_DEQUALIFY_types_not_compatible() \
+ ) \
+)
+#endif /*__GNUC__*/
+#endif /*__cplusplus*/
+
+#ifndef RTEMS_DECONST
+#ifdef RTEMS_DEQUALIFY_DEPTHX
+#define RTEMS_DECONST( _type, _var ) \
+ RTEMS_DEQUALIFY_DEPTHX( *, _type, _var )
+#else /*RTEMS_DEQUALIFY_DEPTHX*/
+/**
+ * @brief Removes the const qualifier from the type of a variable.
+ *
+ * @param[in] _type The target type for the variable.
+ * @param[in] _var The variable.
+ */
+#define RTEMS_DECONST( _type, _var ) \
+ ((_type)(uintptr_t)(const void *) ( _var ))
+
+#endif /*RTEMS_DEQUALIFY_DEPTHX*/
+#endif /*RTEMS_DECONST*/
+
+#ifndef RTEMS_DEVOLATILE
+#ifdef RTEMS_DEQUALIFY_DEPTHX
+#define RTEMS_DEVOLATILE( _type, _var ) \
+ RTEMS_DEQUALIFY_DEPTHX( *, _type, _var )
+#else /*RTEMS_DEQUALIFY_DEPTHX*/
+/**
+ * @brief Removes the volatile qualifier from the type of a variable.
+ *
+ * @param[in] _type The target type for the variable.
+ * @param[in] _var The variable.
+ */
+#define RTEMS_DEVOLATILE( _type, _var ) \
+ ((_type)(uintptr_t)(volatile void *) ( _var ))
+
+#endif /*RTEMS_DEQUALIFY_DEPTHX*/
+#endif /*RTEMS_DEVOLATILE*/
+
+#ifndef RTEMS_DEQUALIFY
+#ifdef RTEMS_DEQUALIFY_DEPTHX
+#define RTEMS_DEQUALIFY( _type, _var ) \
+ RTEMS_DEQUALIFY_DEPTHX( *, _type, _var )
+#else /*RTEMS_DEQUALIFY_DEPTHX*/
+/**
+ * @brief Removes all qualifiers from the type of a variable.
+ *
+ * @param[in] _type The target type for the variable.
+ * @param[in] _var The variable.
+ */
+#define RTEMS_DEQUALIFY( _type, _var ) \
+ ((_type)(uintptr_t)(const volatile void *) ( _var ))
+
+#endif /*RTEMS_DEQUALIFY_DEPTHX*/
+#endif /*RTEMS_DEQUALIFY*/
+
+/**
+ * @brief Evaluates to true if the members of two types have the same type.
+ *
+ * @param[in] _t_lhs Left hand side type.
+ * @param[in] _m_lhs Left hand side member.
+ * @param[in] _t_rhs Right hand side type.
+ * @param[in] _m_rhs Right hand side member.
+ */
+#ifdef __GNUC__
+ #define RTEMS_HAVE_MEMBER_SAME_TYPE( _t_lhs, _m_lhs, _t_rhs, _m_rhs ) \
+ __builtin_types_compatible_p( \
+ __typeof( ( (_t_lhs *) 0 )->_m_lhs ), \
+ __typeof( ( (_t_rhs *) 0 )->_m_rhs ) \
+ )
+#else
+ #define RTEMS_HAVE_MEMBER_SAME_TYPE( _t_lhs, _m_lhs, _t_rhs, _m_rhs ) \
+ true
+#endif
+
+/**
+ * @brief Concatenates _x and _y without expanding.
+ */
+#define RTEMS_CONCAT( _x, _y ) _x##_y
+
+/**
+ * @brief Concatenates expansion of _x and expansion of _y.
+ */
+#define RTEMS_XCONCAT( _x, _y ) RTEMS_CONCAT( _x, _y )
+
+/**
+ * @brief Stringifies _x without expanding.
+ */
+#define RTEMS_STRING( _x ) #_x
+
+/**
+ * @brief Stringifies expansion of _x.
+ */
+#define RTEMS_XSTRING( _x ) RTEMS_STRING( _x )
+
+#ifndef ASM
+ #ifdef RTEMS_DEPRECATED_TYPES
+ typedef bool boolean;
+ typedef float single_precision;
+ typedef double double_precision;
+ #endif
+
+ /**
+ * XXX: Eventually proc_ptr needs to disappear!!!
+ */
+ typedef void * proc_ptr;
+#endif
+
+/**@}*/
+
+#endif /* _RTEMS_BASEDEFS_H */
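RTEMS_CONTAINER_OF() above is the usual container-of idiom: subtract the
member's offset from a member pointer to recover the enclosing structure. A
standalone sketch using a local copy of the macro (Item is a made-up type for
illustration):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CONTAINER_OF( _m, _type, _member_name ) \
  ( (_type *) ( (uintptr_t) ( _m ) - offsetof( _type, _member_name ) ) )

typedef struct {
  int id;
  int member;
} Item;

int main( void )
{
  Item  item = { 7, 0 };
  int  *m = &item.member;

  /* Recover the enclosing Item from the pointer to its member. */
  Item *recovered = CONTAINER_OF( m, Item, member );
  assert( recovered == &item && recovered->id == 7 );
  return 0;
}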
diff --git a/cpukit/include/rtems/score/chain.h b/cpukit/include/rtems/score/chain.h
new file mode 100644
index 0000000000..e358262e6e
--- /dev/null
+++ b/cpukit/include/rtems/score/chain.h
@@ -0,0 +1,102 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreChain
+ *
+ * @brief Chain Handler API
+ */
+
+/*
+ * Copyright (c) 2010 embedded brains GmbH.
+ *
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_CHAIN_H
+#define _RTEMS_SCORE_CHAIN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreChain Chain Handler
+ *
+ * @ingroup Score
+ *
+ * The Chain Handler is used to manage sets of entities. This handler
+ * provides two data structures. The Chain Node data structure is included
+ * as the first part of every data structure that will be placed on
+ * a chain. The second data structure is Chain Control which is used
+ * to manage a set of Chain Nodes.
+ */
+/**@{*/
+
+/**
+ * @typedef Chain_Node
+ *
+ * This type definition promotes the name for the Chain Node used by
+ * all RTEMS code. It is a separate type definition because a forward
+ * reference is required to define it. See @ref Chain_Node_struct for
+ * detailed information.
+ */
+typedef struct Chain_Node_struct Chain_Node;
+
+/**
+ * @struct Chain_Node_struct
+ *
+ * This is used to manage each element (node) which is placed
+ * on a chain.
+ *
+ * @note Typically, a more complicated structure will use the
+ * chain package. The more complicated structure will
+ * include a chain node as the first element in its
+ * control structure. It will then call the chain package
+ * with a pointer to that node element. The node pointer
+ * and the higher level structure start at the same address
+ * so the user can cast the pointers back and forth.
+ *
+ */
+struct Chain_Node_struct {
+ /** This points to the node after this one on this chain. */
+ Chain_Node *next;
+ /** This points to the node immediate prior to this one on this chain. */
+ Chain_Node *previous;
+};
+
+/**
+ * @struct Chain_Control
+ *
+ * This is used to manage a chain. A chain consists of a doubly
+ * linked list of zero or more nodes.
+ *
+ * @note This implementation does not require special checks for
+ * manipulating the first and last elements on the chain.
+ * To accomplish this the @a Chain_Control structure is
+ * treated as two overlapping @ref Chain_Node structures.
+ */
+typedef union {
+ struct {
+ Chain_Node Node;
+ Chain_Node *fill;
+ } Head;
+
+ struct {
+ Chain_Node *fill;
+ Chain_Node Node;
+ } Tail;
+} Chain_Control;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
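The overlapping Head/Tail union above is what lets the Chain Handler avoid
special cases for the first and last nodes: the head's previous pointer and
the tail's next pointer share one word, which is permanently NULL. A
standalone sketch of an empty chain using the structures as declared above;
the initialization mirrors _Chain_Initialize_empty() from chainimpl.h below.

#include <assert.h>
#include <stddef.h>

typedef struct Chain_Node_struct Chain_Node;

struct Chain_Node_struct {
  Chain_Node *next;
  Chain_Node *previous;
};

typedef union {
  struct { Chain_Node Node; Chain_Node *fill; } Head;
  struct { Chain_Node *fill; Chain_Node Node; } Tail;
} Chain_Control;

int main( void )
{
  Chain_Control chain;

  /* Mirrors _Chain_Initialize_empty(): head -> tail, nothing between. */
  chain.Head.Node.next = &chain.Tail.Node;
  chain.Head.Node.previous = NULL;
  chain.Tail.Node.previous = &chain.Head.Node;

  /* Empty chain: the first node after the head is the permanent tail. */
  assert( chain.Head.Node.next == &chain.Tail.Node );

  /* The head's previous and the tail's next alias the same NULL word. */
  assert( chain.Tail.Node.next == NULL );
  return 0;
}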
diff --git a/cpukit/include/rtems/score/chainimpl.h b/cpukit/include/rtems/score/chainimpl.h
new file mode 100644
index 0000000000..c94c051198
--- /dev/null
+++ b/cpukit/include/rtems/score/chainimpl.h
@@ -0,0 +1,1123 @@
+/**
+ * @file
+ *
+ * @brief Chain Handler API
+ */
+
+/*
+ * Copyright (c) 2010 embedded brains GmbH.
+ *
+ * COPYRIGHT (c) 1989-2014.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_CHAINIMPL_H
+#define _RTEMS_SCORE_CHAINIMPL_H
+
+#include <rtems/score/chain.h>
+#include <rtems/score/address.h>
+#include <rtems/score/assert.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreChain
+ */
+/**@{**/
+
+/**
+ * @brief Chain initializer for an empty chain with designator @a name.
+ */
+#define CHAIN_INITIALIZER_EMPTY(name) \
+ { { { &(name).Tail.Node, NULL }, &(name).Head.Node } }
+
+/**
+ * @brief Chain initializer for a chain with one @a node.
+ *
+ * @see CHAIN_NODE_INITIALIZER_ONE_NODE_CHAIN().
+ */
+#define CHAIN_INITIALIZER_ONE_NODE( node ) \
+ { { { (node), NULL }, (node) } }
+
+/**
+ * @brief Chain node initializer for a @a chain containing exactly this node.
+ *
+ * @see CHAIN_INITIALIZER_ONE_NODE().
+ */
+#define CHAIN_NODE_INITIALIZER_ONE_NODE_CHAIN( chain ) \
+ { &(chain)->Tail.Node, &(chain)->Head.Node }
+
+/**
+ * @brief Chain definition for an empty chain with designator @a name.
+ */
+#define CHAIN_DEFINE_EMPTY(name) \
+ Chain_Control name = CHAIN_INITIALIZER_EMPTY(name)
+
+/**
+ * @brief Initialize a chain header.
+ *
+ * This routine initializes @a the_chain structure to manage the
+ * contiguous array of @a number_nodes nodes which starts at
+ * @a starting_address. Each node is of @a node_size bytes.
+ *
+ * @param[in] the_chain specifies the chain to initialize
+ * @param[in] starting_address is the starting address of the array
+ * of elements
+ * @param[in] number_nodes is the number of nodes that will be in the chain
+ * @param[in] node_size is the size of each node
+ */
+void _Chain_Initialize(
+ Chain_Control *the_chain,
+ void *starting_address,
+ size_t number_nodes,
+ size_t node_size
+);
+
+/**
+ * @brief Returns the node count of the chain.
+ *
+ * @param[in] chain The chain.
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity of the
+ * operation.
+ *
+ * @return The node count of the chain.
+ */
+size_t _Chain_Node_count_unprotected( const Chain_Control *chain );
+
+/**
+ * @brief Set off chain.
+ *
+ * This function sets the next field of the @a node to NULL indicating the @a
+ * node is not part of a chain.
+ *
+ * @param[in] node the node to set off chain.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Set_off_chain(
+ Chain_Node *node
+)
+{
+ node->next = NULL;
+#if defined(RTEMS_DEBUG)
+ node->previous = NULL;
+#endif
+}
+
+/**
+ * @brief Initializes a chain node.
+ *
+ * In debug configurations, the node is set off chain. In all other
+ * configurations, this function does nothing.
+ *
+ * @param[in] the_node The chain node to initialize.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Initialize_node( Chain_Node *the_node )
+{
+#if defined(RTEMS_DEBUG)
+ _Chain_Set_off_chain( the_node );
+#else
+ (void) the_node;
+#endif
+}
+
+/**
+ * @brief Is the node off chain.
+ *
+ * This function returns true if the @a node is not on a chain. A @a node is
+ * off chain if the next field is set to NULL.
+ *
+ * @param[in] node is the node to check.
+ *
+ * @retval true The @a node is off chain.
+ * @retval false The @a node is not off chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_node_off_chain(
+ const Chain_Node *node
+)
+{
+ return node->next == NULL;
+}
+
+/**
+ * @brief Are two nodes equal.
+ *
+ * This function returns true if @a left and @a right are equal,
+ * and false otherwise.
+ *
+ * @param[in] left is the node on the left hand side of the comparison.
+ * @param[in] right is the node on the right hand side of the comparison.
+ *
+ * @retval true @a left and @a right are equal.
+ * @retval false @a left and @a right are not equal.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Are_nodes_equal(
+ const Chain_Node *left,
+ const Chain_Node *right
+)
+{
+ return left == right;
+}
+
+/**
+ * @brief Is the chain node pointer NULL.
+ *
+ * This function returns true if the_node is NULL and false otherwise.
+ *
+ * @param[in] the_node is the node pointer to check.
+ *
+ * @retval true @a the_node is @c NULL.
+ * @retval false @a the_node is not @c NULL.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_null_node(
+ const Chain_Node *the_node
+)
+{
+ return (the_node == NULL);
+}
+
+/**
+ * @brief Return pointer to chain head.
+ *
+ * This function returns a pointer to the head node on the chain.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This method returns the permanent head node of the chain.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Head(
+ Chain_Control *the_chain
+)
+{
+ return &the_chain->Head.Node;
+}
+
+/**
+ * @brief Return pointer to immutable chain head.
+ *
+ * This function returns a pointer to the head node on the chain.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This method returns the permanent head node of the chain.
+ */
+RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_head(
+ const Chain_Control *the_chain
+)
+{
+ return &the_chain->Head.Node;
+}
+
+/**
+ * @brief Return pointer to chain tail.
+ *
+ * This function returns a pointer to the tail node on the chain.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This method returns the permanent tail node of the chain.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Tail(
+ Chain_Control *the_chain
+)
+{
+ return &the_chain->Tail.Node;
+}
+
+/**
+ * @brief Return pointer to immutable chain tail.
+ *
+ * This function returns a pointer to the tail node on the chain.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This method returns the permanent tail node of the chain.
+ */
+RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_tail(
+ const Chain_Control *the_chain
+)
+{
+ return &the_chain->Tail.Node;
+}
+
+/**
+ * @brief Return pointer to chain's first node.
+ *
+ * This function returns a pointer to the first node on the chain after the
+ * head.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This method returns the first node of the chain.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_First(
+ const Chain_Control *the_chain
+)
+{
+ return _Chain_Immutable_head( the_chain )->next;
+}
+
+/**
+ * @brief Return pointer to immutable chain's first node.
+ *
+ * This function returns a pointer to the first node on the chain after the
+ * head.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This method returns the first node of the chain.
+ */
+RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_first(
+ const Chain_Control *the_chain
+)
+{
+ return _Chain_Immutable_head( the_chain )->next;
+}
+
+/**
+ * @brief Return pointer to chain's last node.
+ *
+ * This function returns a pointer to the last node on the chain just before
+ * the tail.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This method returns the last node of the chain.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Last(
+ const Chain_Control *the_chain
+)
+{
+ return _Chain_Immutable_tail( the_chain )->previous;
+}
+
+/**
+ * @brief Return pointer to immutable chain's last node.
+ *
+ * This function returns a pointer to the last node on the chain just before
+ * the tail.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This method returns the last node of the chain.
+ */
+RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_last(
+ const Chain_Control *the_chain
+)
+{
+ return _Chain_Immutable_tail( the_chain )->previous;
+}
+
+/**
+ * @brief Return pointer to the next node from this node.
+ *
+ * This function returns a pointer to the next node after this node.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the next node on the chain.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Next(
+ const Chain_Node *the_node
+)
+{
+ return the_node->next;
+}
+
+/**
+ * @brief Return pointer to the immutable next node from this node.
+ *
+ * This function returns a pointer to the next node after this node.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the next node on the chain.
+ */
+RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_next(
+ const Chain_Node *the_node
+)
+{
+ return the_node->next;
+}
+
+/**
+ * @brief Return pointer to the previous node from this node.
+ *
+ * This function returns a pointer to the previous node on this chain.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the previous node on the chain.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Previous(
+ const Chain_Node *the_node
+)
+{
+ return the_node->previous;
+}
+
+/**
+ * @brief Return pointer to the immutable previous node from this node.
+ *
+ * This function returns a pointer to the previous node on this chain.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the previous node on the chain.
+ */
+RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_previous(
+ const Chain_Node *the_node
+)
+{
+ return the_node->previous;
+}
+
+/**
+ * @brief Is the chain empty.
+ *
+ * This function returns true if there are no nodes on @a the_chain and
+ * false otherwise.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @retval true There are no nodes on @a the_chain.
+ * @retval false There are nodes on @a the_chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_empty(
+ const Chain_Control *the_chain
+)
+{
+ return _Chain_Immutable_first( the_chain )
+ == _Chain_Immutable_tail( the_chain );
+}
+
+/**
+ * @brief Is this the first node on the chain.
+ *
+ * This function returns true if the_node is the first node on a chain and
+ * false otherwise.
+ *
+ * @param[in] the_node is the node the caller wants to know if it is
+ * the first node on a chain.
+ *
+ * @retval true @a the_node is the first node on a chain.
+ * @retval false @a the_node is not the first node on a chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_first(
+ const Chain_Node *the_node
+)
+{
+ return (the_node->previous->previous == NULL);
+}
+
+/**
+ * @brief Is this the last node on the chain.
+ *
+ * This function returns true if @a the_node is the last node on a chain and
+ * false otherwise.
+ *
+ * @param[in] the_node is the node to check as the last node.
+ *
+ * @retval true @a the_node is the last node on a chain.
+ * @retval false @a the_node is not the last node on a chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_last(
+ const Chain_Node *the_node
+)
+{
+ return (the_node->next->next == NULL);
+}
+
+/**
+ * @brief Does this chain have only one node.
+ *
+ * This function returns true if there is only one node on @a the_chain and
+ * false otherwise.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @return This function returns true if there is only one node on
+ * @a the_chain and false otherwise.
+ *
+ * @retval true There is only one node on @a the_chain.
+ * @retval false There is more than one node on @a the_chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Has_only_one_node(
+ const Chain_Control *the_chain
+)
+{
+ return _Chain_Immutable_first( the_chain )
+ == _Chain_Immutable_last( the_chain );
+}
+
+/**
+ * @brief Is this node the chain head.
+ *
+ * This function returns true if @a the_node is the head of @a the_chain and
+ * false otherwise.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ * @param[in] the_node is the node to check for being the Chain Head.
+ *
+ * @retval true @a the_node is the head of @a the_chain.
+ * @retval false @a the_node is not the head of @a the_chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_head(
+ const Chain_Control *the_chain,
+ const Chain_Node *the_node
+)
+{
+ return (the_node == _Chain_Immutable_head( the_chain ));
+}
+
+/**
+ * @brief Is this node the chain tail.
+ *
+ * This function returns true if @a the_node is the tail of @a the_chain and
+ * false otherwise.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ * @param[in] the_node is the node to check for being the Chain Tail.
+ *
+ * @retval true @a the_node is the tail of @a the_chain.
+ * @retval false @a the_node is not the tail of @a the_chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_tail(
+ const Chain_Control *the_chain,
+ const Chain_Node *the_node
+)
+{
+ return (the_node == _Chain_Immutable_tail( the_chain ));
+}
+
+/**
+ * @brief Initialize this chain as empty.
+ *
+ * This routine initializes the specified chain to contain zero nodes.
+ *
+ * @param[in] the_chain is the chain to be initialized.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Initialize_empty(
+ Chain_Control *the_chain
+)
+{
+ Chain_Node *head;
+ Chain_Node *tail;
+
+ _Assert( the_chain != NULL );
+
+ head = _Chain_Head( the_chain );
+ tail = _Chain_Tail( the_chain );
+
+ head->next = tail;
+ head->previous = NULL;
+ tail->previous = head;
+}
+
+/**
+ * @brief Initializes this chain to contain exactly the specified node.
+ *
+ * @param[in] the_chain The chain control.
+ * @param[in] the_node The one and only node.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Initialize_one(
+ Chain_Control *the_chain,
+ Chain_Node *the_node
+)
+{
+ Chain_Node *head;
+ Chain_Node *tail;
+
+ _Assert( _Chain_Is_node_off_chain( the_node ) );
+
+ head = _Chain_Head( the_chain );
+ tail = _Chain_Tail( the_chain );
+
+ the_node->next = tail;
+ the_node->previous = head;
+
+ head->next = the_node;
+ head->previous = NULL;
+ tail->previous = the_node;
+}
+
+/**
+ * @brief Extract this node (unprotected).
+ *
+ * This routine extracts the_node from the chain on which it resides.
+ * It does NOT disable interrupts to ensure the atomicity of the
+ * extract operation.
+ *
+ * @param[in] the_node is the node to be extracted.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Extract_unprotected(
+ Chain_Node *the_node
+)
+{
+ Chain_Node *next;
+ Chain_Node *previous;
+
+ next = the_node->next;
+ previous = the_node->previous;
+ next->previous = previous;
+ previous->next = next;
+
+#if defined(RTEMS_DEBUG)
+ _Chain_Set_off_chain( the_node );
+#endif
+}
+
+/**
+ * @brief Get the first node (unprotected).
+ *
+ * This function removes the first node from the_chain and returns
+ * a pointer to that node. It does NOT disable interrupts to ensure
+ * the atomicity of the get operation.
+ *
+ * @param[in] the_chain is the chain to attempt to get the first node from.
+ *
+ * @return This method returns the first node on the chain even if it is
+ * the Chain Tail.
+ *
+ * @note This routine assumes that there is at least one node on the chain
+ * and always returns a node even if it is the Chain Tail.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_first_unprotected(
+ Chain_Control *the_chain
+)
+{
+ Chain_Node *head;
+ Chain_Node *old_first;
+ Chain_Node *new_first;
+
+ _Assert( !_Chain_Is_empty( the_chain ) );
+
+ head = _Chain_Head( the_chain );
+ old_first = head->next;
+ new_first = old_first->next;
+
+ head->next = new_first;
+ new_first->previous = head;
+
+#if defined(RTEMS_DEBUG)
+ _Chain_Set_off_chain( old_first );
+#endif
+
+ return old_first;
+}
+
+/**
+ * @brief Get the first node (unprotected).
+ *
+ * This function removes the first node from the_chain and returns
+ * a pointer to that node. If the_chain is empty, then NULL is returned.
+ *
+ * @param[in] the_chain is the chain to attempt to get the first node from.
+ *
+ * @return This method returns the first node on the chain or NULL if the
+ * chain is empty.
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity of the
+ * get operation.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_unprotected(
+ Chain_Control *the_chain
+)
+{
+ if ( !_Chain_Is_empty(the_chain))
+ return _Chain_Get_first_unprotected(the_chain);
+ else
+ return NULL;
+}
+
+/**
+ * @brief Insert a node (unprotected).
+ *
+ * This routine inserts the_node on a chain immediately following
+ * after_node.
+ *
+ * @param[in] after_node is the node which will precede @a the_node on the
+ * chain.
+ * @param[in] the_node is the node to be inserted.
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity
+ * of the extract operation.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Insert_unprotected(
+ Chain_Node *after_node,
+ Chain_Node *the_node
+)
+{
+ Chain_Node *before_node;
+
+ _Assert( _Chain_Is_node_off_chain( the_node ) );
+
+ the_node->previous = after_node;
+ before_node = after_node->next;
+ after_node->next = the_node;
+ the_node->next = before_node;
+ before_node->previous = the_node;
+}
+
+/**
+ * @brief Append a node (unprotected).
+ *
+ * This routine appends the_node onto the end of the_chain.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ * @param[in] the_node is the node to be appended.
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity of the
+ * append operation.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Append_unprotected(
+ Chain_Control *the_chain,
+ Chain_Node *the_node
+)
+{
+ Chain_Node *tail;
+ Chain_Node *old_last;
+
+ _Assert( _Chain_Is_node_off_chain( the_node ) );
+
+ tail = _Chain_Tail( the_chain );
+ old_last = tail->previous;
+
+ the_node->next = tail;
+ tail->previous = the_node;
+ old_last->next = the_node;
+ the_node->previous = old_last;
+}
+
+/**
+ * @brief Append a node on the end of a chain if the node is in the off chain
+ * state (unprotected).
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity of the
+ * append operation.
+ *
+ * @see _Chain_Append_unprotected() and _Chain_Is_node_off_chain().
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Append_if_is_off_chain_unprotected(
+ Chain_Control *the_chain,
+ Chain_Node *the_node
+)
+{
+ if ( _Chain_Is_node_off_chain( the_node ) ) {
+ _Chain_Append_unprotected( the_chain, the_node );
+ }
+}
+
+/**
+ * @brief Prepend a node (unprotected).
+ *
+ * This routine prepends the_node onto the front of the_chain.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ * @param[in] the_node is the node to be prepended.
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity of the
+ * prepend operation.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Prepend_unprotected(
+ Chain_Control *the_chain,
+ Chain_Node *the_node
+)
+{
+  _Chain_Insert_unprotected( _Chain_Head( the_chain ), the_node );
+}
+
+/**
+ * @brief Append a node and check if the chain was empty before (unprotected).
+ *
+ * This routine appends the_node onto the end of the_chain.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ * @param[in] the_node is the node to be appended.
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity of the
+ * append operation.
+ *
+ * @retval true The chain was empty before.
+ * @retval false The chain contained at least one node before.
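+ *
+ * A sketch of a typical use, issuing a hypothetical wakeup only on the
+ * empty to non-empty transition:
+ * @code
+ * if ( _Chain_Append_with_empty_check_unprotected( the_chain, the_node ) ) {
+ *   wakeup_consumer();
+ * }
+ * @endcode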
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Append_with_empty_check_unprotected(
+ Chain_Control *the_chain,
+ Chain_Node *the_node
+)
+{
+ bool was_empty = _Chain_Is_empty( the_chain );
+
+ _Chain_Append_unprotected( the_chain, the_node );
+
+ return was_empty;
+}
+
+/**
+ * @brief Prepend a node and check if the chain was empty before (unprotected).
+ *
+ * This routine prepends the_node onto the front of the_chain.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ * @param[in] the_node is the node to be prepended.
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity of the
+ * prepend operation.
+ *
+ * @retval true The chain was empty before.
+ * @retval false The chain contained at least one node before.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Prepend_with_empty_check_unprotected(
+ Chain_Control *the_chain,
+ Chain_Node *the_node
+)
+{
+ bool was_empty = _Chain_Is_empty( the_chain );
+
+ _Chain_Prepend_unprotected( the_chain, the_node );
+
+ return was_empty;
+}
+
+/**
+ * @brief Get the first node and check if the chain is empty afterwards
+ * (unprotected).
+ *
+ * This function removes the first node from the_chain and returns
+ * a pointer to that node in @a the_node. If the_chain is empty, then NULL
+ * is returned in @a the_node.
+ *
+ * @param[in] the_chain is the chain to attempt to get the first node from.
+ * @param[out] the_node is the first node on the chain or NULL if the chain is
+ * empty.
+ *
+ * @note It does NOT disable interrupts to ensure the atomicity of the
+ * get operation.
+ *
+ * @retval true The chain is empty now.
+ * @retval false The chain contains at least one node now.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Get_with_empty_check_unprotected(
+ Chain_Control *the_chain,
+ Chain_Node **the_node
+)
+{
+ bool is_empty_now = true;
+ Chain_Node *head = _Chain_Head( the_chain );
+ Chain_Node *tail = _Chain_Tail( the_chain );
+ Chain_Node *old_first = head->next;
+
+ if ( old_first != tail ) {
+ Chain_Node *new_first = old_first->next;
+
+ head->next = new_first;
+ new_first->previous = head;
+
+ *the_node = old_first;
+
+ is_empty_now = new_first == tail;
+  } else {
+    *the_node = NULL;
+  }
+
+ return is_empty_now;
+}
+
+/**
+ * @brief Chain node order.
+ *
+ * @param[in] left The left hand side.
+ * @param[in] right The right hand side.
+ *
+ * @retval true According to the order the left node precedes the right node.
+ * @retval false Otherwise.
+ */
+typedef bool ( *Chain_Node_order )(
+ const void *left,
+ const Chain_Node *right
+);
+
+/**
+ * @brief Inserts a node into the chain according to the order relation.
+ *
+ * After the operation the chain contains the node to insert and the order
+ * relation holds for all nodes from the head up to the inserted node. Nodes
+ * after the inserted node are not moved.
+ *
+ * @param[in] the_chain The chain.
+ * @param[in] to_insert The node to insert.
+ * @param[in] left The left hand side passed to the order relation. It must
+ * correspond to the node to insert. The separate left hand side parameter
+ * may help the compiler to generate better code if it is stored in a local
+ * variable.
+ * @param[in] order The order relation.
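+ *
+ * A minimal sketch, assuming a hypothetical node type with an integer key;
+ * the cast relies on the Chain_Node being the first member:
+ * @code
+ * typedef struct {
+ *   Chain_Node Node;
+ *   int        key;
+ * } Item;
+ *
+ * static bool item_order( const void *left, const Chain_Node *right )
+ * {
+ *   const Item *the_left = left;
+ *   const Item *the_right = (const Item *) right;
+ *
+ *   return the_left->key < the_right->key;
+ * }
+ *
+ * void insert_item( Chain_Control *the_chain, Item *item )
+ * {
+ *   _Chain_Insert_ordered_unprotected(
+ *     the_chain,
+ *     &item->Node,
+ *     item,
+ *     item_order
+ *   );
+ * }
+ * @endcode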
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Insert_ordered_unprotected(
+ Chain_Control *the_chain,
+ Chain_Node *to_insert,
+ const void *left,
+ Chain_Node_order order
+)
+{
+ const Chain_Node *tail = _Chain_Immutable_tail( the_chain );
+ Chain_Node *next = _Chain_First( the_chain );
+
+ while ( next != tail && !( *order )( left, next ) ) {
+ next = _Chain_Next( next );
+ }
+
+ _Chain_Insert_unprotected( _Chain_Previous( next ), to_insert );
+}
+
+/**
+ * @brief The chain iterator direction.
+ */
+typedef enum {
+ /**
+ * @brief Iteration from head to tail.
+ */
+ CHAIN_ITERATOR_FORWARD,
+
+ /**
+ * @brief Iteration from tail to head.
+ */
+ CHAIN_ITERATOR_BACKWARD
+} Chain_Iterator_direction;
+
+/**
+ * @brief A chain iterator which is updated during node extraction if it is
+ * properly registered.
+ *
+ * @see _Chain_Iterator_initialize().
+ */
+typedef struct {
+ /**
+ * @brief Node for registration.
+ *
+ * Used during _Chain_Iterator_initialize() and _Chain_Iterator_destroy().
+ */
+ Chain_Node Registry_node;
+
+ /**
+ * @brief The direction of this iterator.
+ *
+ * Immutable after initialization via _Chain_Iterator_initialize().
+ */
+ Chain_Iterator_direction direction;
+
+ /**
+ * @brief The current position of this iterator.
+ *
+ * The position is initialized via _Chain_Iterator_initialize(). It must be
+ * explicitly set after each valid iteration step, e.g. in case a next node in
+ * the iterator direction exists. It is updated through the registration in
+ * case a node is extracted via _Chain_Iterator_registry_update().
+ */
+ Chain_Node *position;
+} Chain_Iterator;
+
+/**
+ * @brief A registry for chain iterators.
+ *
+ * Should be attached to a chain control to enable safe iteration through a
+ * chain in case of concurrent node extractions.
+ */
+typedef struct {
+ Chain_Control Iterators;
+} Chain_Iterator_registry;
+
+/**
+ * @brief Chain iterator registry initializer for static initialization.
+ *
+ * @param name The designator of the chain iterator registry.
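+ *
+ * For illustration, a static registry definition under an assumed name:
+ * @code
+ * static Chain_Iterator_registry registry =
+ *   CHAIN_ITERATOR_REGISTRY_INITIALIZER( registry );
+ * @endcode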
+ */
+#define CHAIN_ITERATOR_REGISTRY_INITIALIZER( name ) \
+ { CHAIN_INITIALIZER_EMPTY( name.Iterators ) }
+
+/**
+ * @brief Initializes a chain iterator registry.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Iterator_registry_initialize(
+ Chain_Iterator_registry *the_registry
+)
+{
+ _Chain_Initialize_empty( &the_registry->Iterators );
+}
+
+/**
+ * @brief Updates all iterators present in the chain iterator registry in case
+ * of a node extraction.
+ *
+ * Must be called before _Chain_Extract_unprotected().
+ *
+ * @warning This function will look at all registered chain iterators to
+ * determine if an update is necessary.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Iterator_registry_update(
+ Chain_Iterator_registry *the_registry,
+ Chain_Node *the_node_to_extract
+)
+{
+ Chain_Node *iter_node;
+ Chain_Node *iter_tail;
+
+ iter_node = _Chain_Head( &the_registry->Iterators );
+ iter_tail = _Chain_Tail( &the_registry->Iterators );
+
+ while ( ( iter_node = _Chain_Next( iter_node ) ) != iter_tail ) {
+ Chain_Iterator *iter;
+
+ iter = (Chain_Iterator *) iter_node;
+
+ if ( iter->position == the_node_to_extract ) {
+ if ( iter->direction == CHAIN_ITERATOR_FORWARD ) {
+ iter->position = _Chain_Previous( the_node_to_extract );
+ } else {
+ iter->position = _Chain_Next( the_node_to_extract );
+ }
+ }
+ }
+}
+
+/**
+ * @brief Initializes the chain iterator.
+ *
+ * In the following example nodes inserted during the iteration are visited in
+ * case they are inserted after the current position in iteration order.
+ *
+ * @code
+ * #include <rtems/score/chainimpl.h>
+ * #include <rtems/score/isrlock.h>
+ *
+ * typedef struct {
+ * Chain_Control Chain;
+ * Chain_Iterator_registry Iterators;
+ * ISR_LOCK_MEMBER( Lock )
+ * } Some_Control;
+ *
+ * void iterate(
+ * Some_Control *the_some,
+ * void ( *visitor )( Chain_Node * )
+ * )
+ * {
+ * ISR_lock_Context lock_context;
+ * Chain_Iterator iter;
+ * Chain_Node *node;
+ * const Chain_Node *end;
+ *
+ * end = _Chain_Immutable_tail( &the_some->Chain );
+ *
+ * _ISR_lock_ISR_disable_and_acquire( &the_some->Lock, &lock_context );
+ *
+ * _Chain_Iterator_initialize(
+ * &the_some->Chain,
+ * &the_some->Iterators,
+ * &iter,
+ * CHAIN_ITERATOR_FORWARD
+ * );
+ *
+ * while ( ( node = _Chain_Iterator_next( &iter ) ) != end ) {
+ * _Chain_Iterator_set_position( &iter, node );
+ * _ISR_lock_Release_and_ISR_enable( &the_some->Lock, &lock_context );
+ * ( *visitor )( node );
+ * _ISR_lock_ISR_disable_and_acquire( &the_some->Lock, &lock_context );
+ * }
+ *
+ * _Chain_Iterator_destroy( &iter );
+ * _ISR_lock_Release_and_ISR_enable( &the_some->Lock, &lock_context );
+ * }
+ * @endcode
+ *
+ * @param the_chain The chain to iterate.
+ * @param the_registry The registry for the chain iterator.
+ * @param the_iterator The chain iterator to initialize.
+ * @param direction The iteration direction.
+ *
+ * @see _Chain_Iterator_next(), _Chain_Iterator_set_position() and
+ * _Chain_Iterator_destroy().
+ *
+ * @warning Think twice before you use a chain iterator. Its current
+ * implementation is unfit for use in performance-relevant components, due to
+ * the linear time complexity in _Chain_Iterator_registry_update().
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Iterator_initialize(
+ Chain_Control *the_chain,
+ Chain_Iterator_registry *the_registry,
+ Chain_Iterator *the_iterator,
+ Chain_Iterator_direction direction
+)
+{
+ _Chain_Initialize_node( &the_iterator->Registry_node );
+ _Chain_Append_unprotected(
+ &the_registry->Iterators,
+ &the_iterator->Registry_node
+ );
+
+ the_iterator->direction = direction;
+
+ if ( direction == CHAIN_ITERATOR_FORWARD ) {
+ the_iterator->position = _Chain_Head( the_chain );
+ } else {
+ the_iterator->position = _Chain_Tail( the_chain );
+ }
+}
+
+/**
+ * @brief Returns the next node in the iterator direction.
+ *
+ * In case a next node exists, then the iterator should be updated via
+ * _Chain_Iterator_set_position() to continue with the next iteration step.
+ *
+ * @param the_iterator The chain iterator.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Iterator_next(
+ const Chain_Iterator *the_iterator
+)
+{
+ if ( the_iterator->direction == CHAIN_ITERATOR_FORWARD ) {
+ return _Chain_Next( the_iterator->position );
+ } else {
+ return _Chain_Previous( the_iterator->position );
+ }
+}
+
+/**
+ * @brief Sets the iterator position.
+ *
+ * @param the_iterator The chain iterator.
+ * @param the_node The new iterator position.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Iterator_set_position(
+ Chain_Iterator *the_iterator,
+ Chain_Node *the_node
+)
+{
+ the_iterator->position = the_node;
+}
+
+/**
+ * @brief Destroys the iterator.
+ *
+ * Removes the iterator from its registry.
+ *
+ * @param the_iterator The chain iterator.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Iterator_destroy(
+ Chain_Iterator *the_iterator
+)
+{
+ _Chain_Extract_unprotected( &the_iterator->Registry_node );
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/context.h b/cpukit/include/rtems/score/context.h
new file mode 100644
index 0000000000..990a602396
--- /dev/null
+++ b/cpukit/include/rtems/score/context.h
@@ -0,0 +1,163 @@
+/**
+ * @file rtems/score/context.h
+ *
+ * @brief Information About Each Thread's Context
+ *
+ * This include file contains all information about each thread's context.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_CONTEXT_H
+#define _RTEMS_SCORE_CONTEXT_H
+
+/**
+ * @defgroup ScoreContext Context Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which abstracts thread context
+ * management in a portable manner.
+ *
+ * The context switch needed variable is contained in the per-CPU
+ * data structure.
+ */
+/**@{*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/cpu.h>
+
+/**
+ * @brief Size of floating point context area.
+ *
+ * This constant defines the number of bytes required
+ * to store a full floating point context.
+ */
+#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+ #define CONTEXT_FP_SIZE CPU_CONTEXT_FP_SIZE
+#else
+ #define CONTEXT_FP_SIZE 0
+#endif
+
+/**
+ * @brief Initialize context area.
+ *
+ * This routine initializes @a _the_context such that the stack
+ * pointer, interrupt level, and entry point are correct for the
+ * thread's initial state.
+ *
+ * @param[in] _the_context will be initialized
+ * @param[in] _stack is the lowest physical address of the thread's
+ * stack
+ * @param[in] _size is the size in octets of the thread's stack
+ * @param[in] _isr is the ISR enable level for this thread
+ * @param[in] _entry is this thread's entry point
+ * @param[in] _is_fp is set to true if this thread has floating point
+ * enabled
+ * @param[in] _tls_area is the beginning of the thread-local storage
+ * (TLS) area
+ */
+#define _Context_Initialize( _the_context, _stack, _size, _isr, _entry, \
+ _is_fp, _tls_area ) \
+ _CPU_Context_Initialize( _the_context, _stack, _size, _isr, _entry, \
+ _is_fp, _tls_area )
+
+/**
+ * This macro is invoked from _Thread_Handler to do whatever CPU-specific
+ * magic is required that must be done in the context of
+ * the thread when it starts.
+ *
+ * If the CPU architecture does not require any magic, then this
+ * macro is empty.
+ */
+
+#if defined(_CPU_Context_Initialization_at_thread_begin)
+ #define _Context_Initialization_at_thread_begin() \
+ _CPU_Context_Initialization_at_thread_begin()
+#else
+ #define _Context_Initialization_at_thread_begin()
+#endif
+
+/**
+ * @brief Perform context switch.
+ *
+ * This routine saves the current context into the @a _executing
+ * context record and restores the context specified by @a _heir.
+ *
+ * @param[in] _executing is the currently executing thread's context
+ * @param[in] _heir is the context of the thread to be switched to
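+ *
+ * For illustration, a dispatch path might switch contexts as follows (a
+ * sketch which assumes the thread control blocks store their contexts in
+ * a Registers member):
+ * @code
+ * _Context_Switch( &executing->Registers, &heir->Registers );
+ * @endcode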
+ */
+#define _Context_Switch( _executing, _heir ) \
+ _CPU_Context_switch( _executing, _heir )
+
+/**
+ * @brief Restart currently executing thread.
+ *
+ * This routine restarts the calling thread by restoring its initial
+ * stack pointer and returning to the thread's entry point.
+ *
+ * @param[in] _the_context is the context of the thread to restart
+ */
+#define _Context_Restart_self( _the_context ) \
+ _CPU_Context_Restart_self( _the_context )
+
+/**
+ * @brief Initialize floating point context area.
+ *
+ * This routine initializes the floating point context save
+ * area to contain an initial known state.
+ *
+ * @param[in] _fp_area is the base address of the floating point
+ * context save area to initialize.
+ */
+#define _Context_Initialize_fp( _fp_area ) \
+ _CPU_Context_Initialize_fp( _fp_area )
+
+/**
+ * @brief Restore floating point context area.
+ *
+ * This routine restores the floating point context contained
+ * in the @a _fp area. It is assumed that the current
+ * floating point context has been saved by a previous invocation
+ * of @a _Context_Save_fp.
+ *
+ * @param[in] _fp points to the floating point context area to restore.
+ */
+#define _Context_Restore_fp( _fp ) \
+ _CPU_Context_restore_fp( _fp )
+
+/**
+ * @brief Save floating point context area.
+ *
+ * This routine saves the current floating point context
+ * in the @a _fp area.
+ *
+ * @param[in] _fp points to the floating point context area to save.
+ */
+#define _Context_Save_fp( _fp ) \
+ _CPU_Context_save_fp( _fp )
+
+#if defined(_CPU_Context_Destroy)
+ #define _Context_Destroy( _the_thread, _the_context ) \
+ _CPU_Context_Destroy( _the_thread, _the_context )
+#else
+ #define _Context_Destroy( _the_thread, _the_context )
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/copyrt.h b/cpukit/include/rtems/score/copyrt.h
new file mode 100644
index 0000000000..17c925a008
--- /dev/null
+++ b/cpukit/include/rtems/score/copyrt.h
@@ -0,0 +1,44 @@
+/**
+ * @file rtems/score/copyrt.h
+ *
+ * @brief Copyright Notice for RTEMS
+ *
+ * This include file contains the copyright notice for RTEMS
+ * which is included in every binary copy of the executive.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2008.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_COPYRT_H
+#define _RTEMS_SCORE_COPYRT_H
+
+/**
+ * @defgroup SuperCoreCopyright RTEMS Copyright Notice
+ *
+ * @ingroup Score
+ */
+/**@{*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * This is the copyright string for RTEMS.
+ */
+extern const char _Copyright_Notice[];
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/corebarrier.h b/cpukit/include/rtems/score/corebarrier.h
new file mode 100644
index 0000000000..ba706be3e3
--- /dev/null
+++ b/cpukit/include/rtems/score/corebarrier.h
@@ -0,0 +1,91 @@
+/**
+ * @file rtems/score/corebarrier.h
+ *
+ * @brief Constants and Structures Associated with the Barrier Handler
+ *
+ * This include file contains all the constants and structures associated
+ * with the Barrier Handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_COREBARRIER_H
+#define _RTEMS_SCORE_COREBARRIER_H
+
+#include <rtems/score/threadq.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreBarrier Barrier Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which provides the foundation
+ * Barrier services used in all of the APIs supported by RTEMS.
+ */
+/**@{*/
+
+/**
+ * Flavors of barriers.
+ */
+typedef enum {
+  /** This specifies that the barrier will automatically release when
+   * the user-specified number of threads have arrived at the barrier.
+ */
+ CORE_BARRIER_AUTOMATIC_RELEASE,
+ /** This specifies that the user will have to manually release the barrier
+ * in order to release the waiting threads.
+ */
+ CORE_BARRIER_MANUAL_RELEASE
+} CORE_barrier_Disciplines;
+
+/**
+ * The following defines the control block used to manage the
+ * attributes of each barrier.
+ */
+typedef struct {
+ /** This field indicates whether the barrier is automatic or manual.
+ */
+ CORE_barrier_Disciplines discipline;
+ /** This element indicates the number of threads which must arrive at the
+ * barrier to trip the automatic release.
+ */
+ uint32_t maximum_count;
+} CORE_barrier_Attributes;
+
+/**
+ * The following defines the control block used to manage each
+ * barrier.
+ */
+typedef struct {
+ /** This field is the Waiting Queue used to manage the set of tasks
+ * which are blocked waiting for the barrier to be released.
+ */
+ Thread_queue_Control Wait_queue;
+ /** This element is the set of attributes which define this instance's
+ * behavior.
+ */
+ CORE_barrier_Attributes Attributes;
+  /** This element contains the current number of threads waiting for this
+   * barrier to be released. */
+ uint32_t number_of_waiting_threads;
+} CORE_barrier_Control;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/corebarrierimpl.h b/cpukit/include/rtems/score/corebarrierimpl.h
new file mode 100644
index 0000000000..d5d63659d0
--- /dev/null
+++ b/cpukit/include/rtems/score/corebarrierimpl.h
@@ -0,0 +1,173 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines Associated with the SuperCore Barrier
+ *
+ * This include file contains all of the inlined routines associated
+ * with the SuperCore barrier.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_COREBARRIERIMPL_H
+#define _RTEMS_SCORE_COREBARRIERIMPL_H
+
+#include <rtems/score/corebarrier.h>
+#include <rtems/score/status.h>
+#include <rtems/score/threadqimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreBarrier
+ */
+/**@{**/
+
+#define CORE_BARRIER_TQ_OPERATIONS &_Thread_queue_Operations_FIFO
+
+/**
+ * @brief Initialize core barrier.
+ *
+ * This routine initializes the barrier based on the parameters passed.
+ *
+ * @param[in] the_barrier is the barrier to initialize
+ * @param[in] the_barrier_attributes define the behavior of this instance
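+ *
+ * For illustration only, a sketch of setting up an automatic release
+ * barrier which trips once three threads have arrived (all names are
+ * illustrative):
+ * @code
+ * CORE_barrier_Control barrier;
+ * CORE_barrier_Attributes attributes = {
+ *   CORE_BARRIER_AUTOMATIC_RELEASE,
+ *   3
+ * };
+ *
+ * _CORE_barrier_Initialize( &barrier, &attributes );
+ * @endcode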
+ */
+void _CORE_barrier_Initialize(
+ CORE_barrier_Control *the_barrier,
+ CORE_barrier_Attributes *the_barrier_attributes
+);
+
+RTEMS_INLINE_ROUTINE void _CORE_barrier_Destroy(
+ CORE_barrier_Control *the_barrier
+)
+{
+ _Thread_queue_Destroy( &the_barrier->Wait_queue );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_barrier_Acquire_critical(
+ CORE_barrier_Control *the_barrier,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Acquire_critical( &the_barrier->Wait_queue, queue_context );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_barrier_Release(
+ CORE_barrier_Control *the_barrier,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Release( &the_barrier->Wait_queue, queue_context );
+}
+
+/**
+ * @brief Wait for the barrier.
+ *
+ * This routine waits for the barrier to be released. If the barrier
+ * is set to automatic and this is the appropriate thread, then it returns
+ * immediately. Otherwise, the calling thread is blocked until the barrier
+ * is released.
+ *
+ * @param[in] the_barrier is the barrier to wait for
+ * @param[in,out] executing The currently executing thread.
+ * @param[in] wait is true if the calling thread is willing to wait
+ *
+ * @return The method status.
+ */
+Status_Control _CORE_barrier_Seize(
+ CORE_barrier_Control *the_barrier,
+ Thread_Control *executing,
+ bool wait,
+ Thread_queue_Context *queue_context
+);
+
+uint32_t _CORE_barrier_Do_flush(
+ CORE_barrier_Control *the_barrier,
+ Thread_queue_Flush_filter filter,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Manually release the barrier.
+ *
+ * This routine manually releases the barrier. All of the threads waiting
+ * for the barrier will be readied.
+ *
+ * @param[in] the_barrier is the barrier to surrender
+ * @param[in] queue_context is the thread queue context
+ *
+ * @return the number of unblocked threads
+ */
+RTEMS_INLINE_ROUTINE uint32_t _CORE_barrier_Surrender(
+ CORE_barrier_Control *the_barrier,
+ Thread_queue_Context *queue_context
+)
+{
+ return _CORE_barrier_Do_flush(
+ the_barrier,
+ _Thread_queue_Flush_default_filter,
+ queue_context
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_barrier_Flush(
+ CORE_barrier_Control *the_barrier,
+ Thread_queue_Context *queue_context
+)
+{
+ _CORE_barrier_Do_flush(
+ the_barrier,
+ _Thread_queue_Flush_status_object_was_deleted,
+ queue_context
+ );
+}
+
+/**
+ * This function returns true if the automatic release attribute is
+ * enabled in @a the_attribute and false otherwise.
+ *
+ * @param[in] the_attribute is the attribute set to test
+ *
+ * @return true if the automatic release attribute is enabled
+ */
+RTEMS_INLINE_ROUTINE bool _CORE_barrier_Is_automatic(
+ CORE_barrier_Attributes *the_attribute
+)
+{
+  return the_attribute->discipline == CORE_BARRIER_AUTOMATIC_RELEASE;
+}
+
+/**
+ * This routine returns the number of threads currently waiting at the barrier.
+ *
+ * @param[in] the_barrier is the barrier to obtain the number of blocked
+ * threads for
+ * @return the current count of this barrier
+ */
+RTEMS_INLINE_ROUTINE uint32_t _CORE_barrier_Get_number_of_waiting_threads(
+ CORE_barrier_Control *the_barrier
+)
+{
+ return the_barrier->number_of_waiting_threads;
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/coremsg.h b/cpukit/include/rtems/score/coremsg.h
new file mode 100644
index 0000000000..8d25529fdc
--- /dev/null
+++ b/cpukit/include/rtems/score/coremsg.h
@@ -0,0 +1,185 @@
+/**
+ * @file rtems/score/coremsg.h
+ *
+ * @brief Constants and Structures Associated with the Message Queue Handler.
+ *
+ * This include file contains all the constants and structures associated
+ * with the Message queue Handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_COREMSG_H
+#define _RTEMS_SCORE_COREMSG_H
+
+#include <rtems/score/chain.h>
+#include <rtems/score/isrlock.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/watchdog.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreMessageQueue Message Queue Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which provides the foundation
+ * Message Queue services used in all of the APIs supported by RTEMS.
+ */
+/**@{*/
+
+#if defined(RTEMS_POSIX_API)
+ /**
+ * This macro is defined when an API is enabled that requires that the
+ * Message Queue Handler include support for priority based enqueuing
+ * of messages.
+ */
+ #define RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY
+#endif
+
+#if defined(RTEMS_POSIX_API)
+ /**
+ * This macro is defined when an API is enabled that requires that the
+ * Message Queue Handler include support for notification of enqueuing
+ * a message.
+ */
+ #define RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION
+#endif
+
+#if defined(RTEMS_POSIX_API)
+ /**
+ * This macro is defined when an API is enabled that requires the
+ * Message Queue Handler include support for blocking send operations.
+ */
+ #define RTEMS_SCORE_COREMSG_ENABLE_BLOCKING_SEND
+#endif
+
+typedef struct CORE_message_queue_Control CORE_message_queue_Control;
+
+/**
+ * @brief Data types needed to manipulate the contents of message buffers.
+ *
+ * The following defines the data types needed to manipulate
+ * the contents of message buffers.
+ *
+ * @note The buffer field is normally longer than a single uint32_t,
+ * but since messages are variable length the type only declares an
+ * array of one element; the storage actually allocated provides the
+ * required size.
+ */
+typedef struct {
+ /** This field is the size of this message. */
+ size_t size;
+ /** This field contains the actual message. */
+ uint32_t buffer[1];
+} CORE_message_queue_Buffer;
+
+/**
+ * @brief The organization of a message buffer.
+ *
+ * The following records define the organization of a message
+ * buffer.
+ */
+typedef struct {
+ /** This element allows this structure to be placed on chains. */
+ Chain_Node Node;
+ #if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY)
+ /** This field is the priority of this message. */
+ int priority;
+ #endif
+ /** This field points to the contents of the message. */
+ CORE_message_queue_Buffer Contents;
+} CORE_message_queue_Buffer_control;
+
+/**
+ * @brief The possible blocking disciplines for a message queue.
+ *
+ * This enumerated type defines the possible blocking disciplines
+ * for a message queue.
+ */
+typedef enum {
+ /** This value indicates that blocking tasks are in FIFO order. */
+ CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO,
+ /** This value indicates that blocking tasks are in priority order. */
+ CORE_MESSAGE_QUEUE_DISCIPLINES_PRIORITY
+} CORE_message_queue_Disciplines;
+
+#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
+ /**
+ * @brief Type for a notification handler.
+ *
+ * The following defines the type for a Notification handler. A
+ * notification handler is invoked when the message queue makes a
+ * 0->1 transition on pending messages.
+ */
+ typedef void (*CORE_message_queue_Notify_Handler)(
+ CORE_message_queue_Control *,
+ Thread_queue_Context *
+ );
+#endif
+
+/**
+ * @brief Control block used to manage each message queue.
+ *
+ * The following defines the control block used to manage each
+ * Message Queue.
+ */
+struct CORE_message_queue_Control {
+ /** This field is the Waiting Queue used to manage the set of tasks
+ * which are blocked waiting to receive a message from this queue.
+ */
+ Thread_queue_Control Wait_queue;
+
+ /**
+ * @brief The thread queue operations according to the blocking discipline.
+ */
+ const Thread_queue_Operations *operations;
+
+ /** This element is maximum number of messages which may be pending
+ * at any given time.
+ */
+ uint32_t maximum_pending_messages;
+ /** This element is the number of messages which are currently pending.
+ */
+ uint32_t number_of_pending_messages;
+ /** This is the size in bytes of the largest message which may be
+ * sent via this queue.
+ */
+ size_t maximum_message_size;
+ /** This chain is the set of pending messages. It may be ordered by
+ * message priority or in FIFO order.
+ */
+ Chain_Control Pending_messages;
+  /** This is the address of the memory allocated for message buffers.
+   * It is allocated as part of message queue initialization and freed
+   * as part of destroying the message queue.
+ */
+ CORE_message_queue_Buffer *message_buffers;
+ #if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
+ /** This is the routine invoked when the message queue transitions
+ * from zero (0) messages pending to one (1) message pending.
+ */
+ CORE_message_queue_Notify_Handler notify_handler;
+ #endif
+  /** This chain is the set of inactive message buffers. A message
+   * buffer is inactive when it does not hold a pending message.
+ */
+ Chain_Control Inactive_messages;
+};
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/coremsgimpl.h b/cpukit/include/rtems/score/coremsgimpl.h
new file mode 100644
index 0000000000..e33e3308b2
--- /dev/null
+++ b/cpukit/include/rtems/score/coremsgimpl.h
@@ -0,0 +1,494 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines in the Core Message Handler
+ *
+ * This include file contains the static inline implementation of all
+ * inlined routines in the Core Message Handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_COREMSGIMPL_H
+#define _RTEMS_SCORE_COREMSGIMPL_H
+
+#include <rtems/score/coremsg.h>
+#include <rtems/score/status.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/threaddispatch.h>
+#include <rtems/score/threadqimpl.h>
+
+#include <limits.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreMessageQueue
+ */
+/**@{**/
+
+/**
+ * @brief Used when appending messages onto a message queue.
+ *
+ * This is the priority constant used when appending messages onto
+ * a message queue.
+ */
+#define CORE_MESSAGE_QUEUE_SEND_REQUEST INT_MAX
+
+/**
+ * @brief Used when prepending messages onto a message queue.
+ *
+ * This is the priority constant used when prepending messages onto
+ * a message queue.
+ */
+#define CORE_MESSAGE_QUEUE_URGENT_REQUEST INT_MIN
+
+/**
+ * @brief The modes in which a message may be submitted to a message queue.
+ *
+ * The following type details the modes in which a message
+ * may be submitted to a message queue. The message may be posted
+ * in a send or urgent fashion.
+ *
+ * @note All other values are message priorities. Numerically smaller
+ * priorities indicate higher priority messages.
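+ *
+ * For illustration, the three kinds of submit values (a sketch; five is
+ * an arbitrary example priority):
+ * @code
+ * CORE_message_queue_Submit_types append_it = CORE_MESSAGE_QUEUE_SEND_REQUEST;
+ * CORE_message_queue_Submit_types prepend_it = CORE_MESSAGE_QUEUE_URGENT_REQUEST;
+ * CORE_message_queue_Submit_types priority_five = 5;
+ * @endcode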
+ */
+typedef int CORE_message_queue_Submit_types;
+
+/**
+ * @brief Initialize a message queue.
+ *
+ * This package is the implementation of the CORE Message Queue Handler.
+ * This core object provides task synchronization and communication functions
+ * via messages passed to queue objects.
+ *
+ * This routine initializes @a the_message_queue
+ * based on the parameters passed.
+ *
+ * @param[in] the_message_queue points to the message queue to initialize
+ * @param[in] discipline the blocking discipline
+ * @param[in] maximum_pending_messages is the maximum number of messages
+ * that will be allowed to pend at any given time
+ * @param[in] maximum_message_size is the size of largest message that
+ * may be sent to this message queue instance
+ *
+ * @retval true The message queue was initialized.
+ * @retval false The memory for the pending messages could not be
+ * allocated.
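+ *
+ * For illustration only, a sketch of creating a queue of up to ten
+ * 16-byte messages with the FIFO blocking discipline (all names are
+ * illustrative):
+ * @code
+ * CORE_message_queue_Control queue;
+ * bool ok;
+ *
+ * ok = _CORE_message_queue_Initialize(
+ *   &queue,
+ *   CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO,
+ *   10,
+ *   16
+ * );
+ * @endcode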
+ */
+bool _CORE_message_queue_Initialize(
+ CORE_message_queue_Control *the_message_queue,
+ CORE_message_queue_Disciplines discipline,
+ uint32_t maximum_pending_messages,
+ size_t maximum_message_size
+);
+
+/**
+ * @brief Close a message queue.
+ *
+ * This package is the implementation of the CORE Message Queue Handler.
+ * This core object provides task synchronization and communication functions
+ * via messages passed to queue objects.
+ *
+ * This function closes a message queue by returning all allocated space and
+ * flushing @a the_message_queue's task wait queue.
+ *
+ * @param[in] the_message_queue points to the message queue to close
+ * @param[in] queue_context The thread queue context used for
+ * _CORE_message_queue_Acquire() or _CORE_message_queue_Acquire_critical().
+ */
+void _CORE_message_queue_Close(
+ CORE_message_queue_Control *the_message_queue,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Flush pending messages.
+ *
+ * This package is the implementation of the CORE Message Queue Handler.
+ * This core object provides task synchronization and communication functions
+ * via messages passed to queue objects.
+ *
+ * This function flushes @a the_message_queue's pending message queue. The
+ * number of messages flushed from the queue is returned.
+ *
+ * @param[in] the_message_queue points to the message queue to flush
+ * @param[in] queue_context The thread queue context with interrupts disabled.
+ *
+ * @return This method returns the number of pending messages flushed.
+ */
+uint32_t _CORE_message_queue_Flush(
+ CORE_message_queue_Control *the_message_queue,
+ Thread_queue_Context *queue_context
+);
+
+#if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
+/**
+ * @brief Flush waiting threads.
+ *
+ * This function flushes the threads which are blocked on
+ * @a the_message_queue's pending message queue. They are
+ * unblocked whether blocked sending or receiving.
+ *
+ * @param[in] the_message_queue points to the message queue to flush
+ */
+ void _CORE_message_queue_Flush_waiting_threads(
+ CORE_message_queue_Control *the_message_queue
+ );
+#endif
+
+/**
+ * @brief Broadcast a message to the message queue.
+ *
+ * This package is the implementation of the CORE Message Queue Handler.
+ * This core object provides task synchronization and communication functions
+ * via messages passed to queue objects.
+ *
+ * This function sends a message for every thread waiting on the queue and
+ * returns the number of threads made ready by the message.
+ *
+ * @param[in] the_message_queue points to the message queue
+ * @param[in] buffer is the starting address of the message to broadcast
+ * @param[in] size is the size of the message being broadcast
+ * @param[out] count points to the variable that will contain the
+ * number of tasks that are sent this message
+ * @param[in] queue_context The thread queue context used for
+ * _CORE_message_queue_Acquire() or _CORE_message_queue_Acquire_critical().
+ * @retval @a *count will contain the number of messages sent
+ * @return an indication of successful completion or the reason for failure
+ */
+Status_Control _CORE_message_queue_Broadcast(
+ CORE_message_queue_Control *the_message_queue,
+ const void *buffer,
+ size_t size,
+ uint32_t *count,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Submit a message to the message queue.
+ *
+ * This routine implements the send and urgent message functions. It
+ * processes a message that is to be submitted to the designated
+ * message queue. The message will either be processed as a send
+ * message, in which case it will be inserted at the rear of the queue,
+ * or as an urgent message, in which case it will be inserted at the
+ * front of the queue.
+ *
+ * @param[in] the_message_queue points to the message queue
+ * @param[in] buffer is the starting address of the message to send
+ * @param[in] size is the size of the message being sent
+ * @param[in] submit_type determines whether the message is prepended,
+ * appended, or enqueued in priority order.
+ * @param[in] wait indicates whether the calling thread is willing to block
+ * if the message queue is full.
+ * @param[in] queue_context The thread queue context used for
+ * _CORE_message_queue_Acquire() or _CORE_message_queue_Acquire_critical().
+ * @return an indication of successful completion or the reason for failure
+ */
+Status_Control _CORE_message_queue_Submit(
+ CORE_message_queue_Control *the_message_queue,
+ Thread_Control *executing,
+ const void *buffer,
+ size_t size,
+ CORE_message_queue_Submit_types submit_type,
+ bool wait,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Seize a message from the message queue.
+ *
+ * This package is the implementation of the CORE Message Queue Handler.
+ * This core object provides task synchronization and communication functions
+ * via messages passed to queue objects.
+ *
+ * This kernel routine dequeues a message, copies the message buffer to
+ * a given destination buffer, and frees the message buffer to the
+ * inactive message pool. If no messages are available, the thread will be
+ * blocked if @a wait is true, otherwise an error will be returned to it.
+ *
+ * @param[in] the_message_queue points to the message queue
+ * @param[in] buffer is the starting address of the message buffer to
+ * be filled in with a message
+ * @param[in] size_p is a pointer to the size of the @a buffer and
+ * indicates the maximum size message that the caller can receive.
+ * @param[in] wait indicates whether the calling thread is willing to block
+ * if the message queue is empty.
+ * @param[in] queue_context The thread queue context used for
+ * _CORE_message_queue_Acquire() or _CORE_message_queue_Acquire_critical().
+ *
+ * @return an indication of successful completion or the reason for failure.
+ * On success, the location pointed to by @a size_p will contain the
+ * size of the received message.
+ *
+ * @note Returns message priority via return area in TCB.
+ *
+ * - INTERRUPT LATENCY:
+ * + available
+ * + wait
+ */
+Status_Control _CORE_message_queue_Seize(
+ CORE_message_queue_Control *the_message_queue,
+ Thread_Control *executing,
+ void *buffer,
+ size_t *size_p,
+ bool wait,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Insert a message into the message queue.
+ *
+ * Copies the specified content into the message storage space and then
+ * inserts the message into the message queue according to the submit type.
+ *
+ * @param[in] the_message_queue points to the message queue
+ * @param[in] the_message is the message to enqueue
+ * @param[in] content_source the message content source
+ * @param[in] content_size the message content size in bytes
+ * @param[in] submit_type determines whether the message is prepended,
+ * appended, or enqueued in priority order.
+ */
+void _CORE_message_queue_Insert_message(
+ CORE_message_queue_Control *the_message_queue,
+ CORE_message_queue_Buffer_control *the_message,
+ const void *content_source,
+ size_t content_size,
+ CORE_message_queue_Submit_types submit_type
+);
+
+RTEMS_INLINE_ROUTINE Status_Control _CORE_message_queue_Send(
+ CORE_message_queue_Control *the_message_queue,
+ const void *buffer,
+ size_t size,
+ bool wait,
+ Thread_queue_Context *queue_context
+)
+{
+ return _CORE_message_queue_Submit(
+ the_message_queue,
+ _Thread_Executing,
+ buffer,
+ size,
+ CORE_MESSAGE_QUEUE_SEND_REQUEST,
+ wait,
+ queue_context
+ );
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _CORE_message_queue_Urgent(
+ CORE_message_queue_Control *the_message_queue,
+ const void *buffer,
+ size_t size,
+ bool wait,
+ Thread_queue_Context *queue_context
+)
+{
+ return _CORE_message_queue_Submit(
+ the_message_queue,
+ _Thread_Executing,
+ buffer,
+ size,
+ CORE_MESSAGE_QUEUE_URGENT_REQUEST,
+ wait,
+ queue_context
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire(
+ CORE_message_queue_Control *the_message_queue,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Acquire( &the_message_queue->Wait_queue, queue_context );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire_critical(
+ CORE_message_queue_Control *the_message_queue,
+ Thread_queue_Context *queue_context
+)
+{
+  _Thread_queue_Acquire_critical(
+    &the_message_queue->Wait_queue,
+    queue_context
+  );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_message_queue_Release(
+ CORE_message_queue_Control *the_message_queue,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Release( &the_message_queue->Wait_queue, queue_context );
+}
+
+/**
+ * This routine copies the contents of the source message buffer
+ * to the destination message buffer.
+ */
+RTEMS_INLINE_ROUTINE void _CORE_message_queue_Copy_buffer (
+ const void *source,
+ void *destination,
+ size_t size
+)
+{
+ memcpy(destination, source, size);
+}
+
+/**
+ * This function allocates a message buffer from the inactive
+ * message buffer chain.
+ */
+RTEMS_INLINE_ROUTINE CORE_message_queue_Buffer_control *
+_CORE_message_queue_Allocate_message_buffer (
+ CORE_message_queue_Control *the_message_queue
+)
+{
+ return (CORE_message_queue_Buffer_control *)
+ _Chain_Get_unprotected( &the_message_queue->Inactive_messages );
+}
+
+/**
+ * This routine frees a message buffer to the inactive
+ * message buffer chain.
+ */
+RTEMS_INLINE_ROUTINE void _CORE_message_queue_Free_message_buffer (
+ CORE_message_queue_Control *the_message_queue,
+ CORE_message_queue_Buffer_control *the_message
+)
+{
+  _Chain_Append_unprotected(
+    &the_message_queue->Inactive_messages,
+    &the_message->Node
+  );
+}
+
+/**
+ * This function returns the priority of @a the_message.
+ *
+ * @note It encapsulates the optional behavior that message priority is
+ * disabled if no API requires it.
+ */
+RTEMS_INLINE_ROUTINE int _CORE_message_queue_Get_message_priority (
+ const CORE_message_queue_Buffer_control *the_message
+)
+{
+ #if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY)
+ return the_message->priority;
+ #else
+ return 0;
+ #endif
+}
+
+/**
+ * This function removes the first message from the_message_queue
+ * and returns a pointer to it.
+ */
+RTEMS_INLINE_ROUTINE
+ CORE_message_queue_Buffer_control *_CORE_message_queue_Get_pending_message (
+ CORE_message_queue_Control *the_message_queue
+)
+{
+ return (CORE_message_queue_Buffer_control *)
+ _Chain_Get_unprotected( &the_message_queue->Pending_messages );
+}
+
+#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
+ /**
+ * This function returns true if notification is enabled on this message
+ * queue and false otherwise.
+ */
+ RTEMS_INLINE_ROUTINE bool _CORE_message_queue_Is_notify_enabled (
+ CORE_message_queue_Control *the_message_queue
+ )
+ {
+ return (the_message_queue->notify_handler != NULL);
+ }
+#endif
+
+/**
+ * This routine initializes the notification information for
+ * @a the_message_queue.
+ */
+#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
+ RTEMS_INLINE_ROUTINE void _CORE_message_queue_Set_notify (
+ CORE_message_queue_Control *the_message_queue,
+ CORE_message_queue_Notify_Handler the_handler
+ )
+ {
+ the_message_queue->notify_handler = the_handler;
+ }
+#else
+ /* turn it into nothing if not enabled */
+ #define _CORE_message_queue_Set_notify( the_message_queue, the_handler ) \
+ do { } while ( 0 )
+#endif
+
+RTEMS_INLINE_ROUTINE Thread_Control *_CORE_message_queue_Dequeue_receiver(
+ CORE_message_queue_Control *the_message_queue,
+ const void *buffer,
+ size_t size,
+ CORE_message_queue_Submit_types submit_type,
+ Thread_queue_Context *queue_context
+)
+{
+ Thread_Control *the_thread;
+
+ /*
+ * If there are pending messages, then there can't be threads
+ * waiting for us to send them a message.
+ *
+ * NOTE: This check is critical because threads can block on
+ * send and receive and this ensures that we are broadcasting
+ * the message to threads waiting to receive -- not to send.
+ */
+ if ( the_message_queue->number_of_pending_messages != 0 ) {
+ return NULL;
+ }
+
+ /*
+ * There must be no pending messages if there is a thread waiting to
+ * receive a message.
+ */
+ the_thread = _Thread_queue_First_locked(
+ &the_message_queue->Wait_queue,
+ the_message_queue->operations
+ );
+ if ( the_thread == NULL ) {
+ return NULL;
+ }
+
+ *(size_t *) the_thread->Wait.return_argument = size;
+ the_thread->Wait.count = (uint32_t) submit_type;
+
+ _CORE_message_queue_Copy_buffer(
+ buffer,
+ the_thread->Wait.return_argument_second.mutable_object,
+ size
+ );
+
+ _Thread_queue_Extract_critical(
+ &the_message_queue->Wait_queue.Queue,
+ the_message_queue->operations,
+ the_thread,
+ queue_context
+ );
+
+ return the_thread;
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/coremutex.h b/cpukit/include/rtems/score/coremutex.h
new file mode 100644
index 0000000000..fd1f27c697
--- /dev/null
+++ b/cpukit/include/rtems/score/coremutex.h
@@ -0,0 +1,104 @@
+/**
+ * @file
+ *
+ * @brief CORE Mutex API
+ *
+ * This include file contains all the constants and structures associated with
+ * the Mutex Handler. A mutex is an enhanced version of the standard Dijkstra
+ * binary semaphore used to provide synchronization and mutual exclusion
+ * capabilities.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_COREMUTEX_H
+#define _RTEMS_SCORE_COREMUTEX_H
+
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/priority.h>
+#include <rtems/score/watchdog.h>
+#include <rtems/score/interr.h>
+
+struct _Scheduler_Control;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreMutex Mutex Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which provides the foundation
+ * Mutex services used in all of the APIs supported by RTEMS.
+ */
+/**@{*/
+
+/**
+ * @brief Control block used to manage each mutex.
+ *
+ * The following defines the control block used to manage each mutex.
+ */
+typedef struct {
+ /**
+ * @brief The thread queue of this mutex.
+ *
+ * The owner of the thread queue indicates the mutex owner.
+ */
+ Thread_queue_Control Wait_queue;
+} CORE_mutex_Control;
+
+/**
+ * @brief The recursive mutex control.
+ */
+typedef struct {
+ /**
+ * @brief The plain non-recursive mutex.
+ */
+ CORE_mutex_Control Mutex;
+
+ /**
+ * @brief The nest level in case of a recursive seize.
+ */
+ unsigned int nest_level;
+} CORE_recursive_mutex_Control;
+
+/**
+ * @brief The recursive mutex control with priority ceiling protocol support.
+ */
+typedef struct {
+ /**
+ * @brief The plain recursive mutex.
+ */
+ CORE_recursive_mutex_Control Recursive;
+
+ /**
+ * @brief The priority ceiling node for the mutex owner.
+ */
+ Priority_Node Priority_ceiling;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The scheduler instance for this priority ceiling mutex.
+ */
+ const struct _Scheduler_Control *scheduler;
+#endif
+} CORE_ceiling_mutex_Control;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/coremuteximpl.h b/cpukit/include/rtems/score/coremuteximpl.h
new file mode 100644
index 0000000000..78fafca6e1
--- /dev/null
+++ b/cpukit/include/rtems/score/coremuteximpl.h
@@ -0,0 +1,447 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreMutex
+ *
+ * @brief CORE Mutex Implementation
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_COREMUTEXIMPL_H
+#define _RTEMS_SCORE_COREMUTEXIMPL_H
+
+#include <rtems/score/coremutex.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/schedulerimpl.h>
+#include <rtems/score/status.h>
+#include <rtems/score/threadimpl.h>
+#include <rtems/score/threadqimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreMutex
+ */
+/**@{**/
+
+#define CORE_MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority
+
+#define CORE_MUTEX_TQ_PRIORITY_INHERIT_OPERATIONS \
+ &_Thread_queue_Operations_priority_inherit
+
+RTEMS_INLINE_ROUTINE void _CORE_mutex_Initialize(
+ CORE_mutex_Control *the_mutex
+)
+{
+ _Thread_queue_Object_initialize( &the_mutex->Wait_queue );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_mutex_Destroy( CORE_mutex_Control *the_mutex )
+{
+ _Thread_queue_Destroy( &the_mutex->Wait_queue );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_mutex_Acquire_critical(
+ CORE_mutex_Control *the_mutex,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, queue_context );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_mutex_Release(
+ CORE_mutex_Control *the_mutex,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Release( &the_mutex->Wait_queue, queue_context );
+}
+
+RTEMS_INLINE_ROUTINE Thread_Control *_CORE_mutex_Get_owner(
+ const CORE_mutex_Control *the_mutex
+)
+{
+ return the_mutex->Wait_queue.Queue.owner;
+}
+
+/**
+ * @brief Is mutex locked.
+ *
+ * This routine returns true if the mutex specified is locked and false
+ * otherwise.
+ *
+ * @param[in] the_mutex is the mutex to check.
+ *
+ * @retval true The mutex is locked.
+ * @retval false The mutex is not locked.
+ */
+RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_locked(
+ const CORE_mutex_Control *the_mutex
+)
+{
+ return _CORE_mutex_Get_owner( the_mutex ) != NULL;
+}
+
+Status_Control _CORE_mutex_Seize_slow(
+ CORE_mutex_Control *the_mutex,
+ const Thread_queue_Operations *operations,
+ Thread_Control *executing,
+ bool wait,
+ Thread_queue_Context *queue_context
+);
+
+RTEMS_INLINE_ROUTINE void _CORE_mutex_Set_owner(
+ CORE_mutex_Control *the_mutex,
+ Thread_Control *owner
+)
+{
+ the_mutex->Wait_queue.Queue.owner = owner;
+}
+
+RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_owner(
+ const CORE_mutex_Control *the_mutex,
+ const Thread_Control *the_thread
+)
+{
+ return _CORE_mutex_Get_owner( the_mutex ) == the_thread;
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_recursive_mutex_Initialize(
+ CORE_recursive_mutex_Control *the_mutex
+)
+{
+ _CORE_mutex_Initialize( &the_mutex->Mutex );
+ the_mutex->nest_level = 0;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize_nested(
+ CORE_recursive_mutex_Control *the_mutex
+)
+{
+ ++the_mutex->nest_level;
+ return STATUS_SUCCESSFUL;
+}
+
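+/*
+ * For illustration only: the @a nested handler passed to
+ * _CORE_recursive_mutex_Seize() selects the recursion policy. The
+ * _CORE_recursive_mutex_Seize_nested() handler above permits recursive
+ * seizes; a sketch of a handler which rejects them instead (assuming a
+ * status code such as STATUS_NESTING_NOT_ALLOWED is available):
+ *
+ *   static Status_Control deny_nested( CORE_recursive_mutex_Control *m )
+ *   {
+ *     (void) m;
+ *     return STATUS_NESTING_NOT_ALLOWED;
+ *   }
+ */
+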
+RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize(
+ CORE_recursive_mutex_Control *the_mutex,
+ const Thread_queue_Operations *operations,
+ Thread_Control *executing,
+ bool wait,
+ Status_Control ( *nested )( CORE_recursive_mutex_Control * ),
+ Thread_queue_Context *queue_context
+)
+{
+ Thread_Control *owner;
+
+ _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );
+
+ owner = _CORE_mutex_Get_owner( &the_mutex->Mutex );
+
+ if ( owner == NULL ) {
+ _CORE_mutex_Set_owner( &the_mutex->Mutex, executing );
+ _Thread_Resource_count_increment( executing );
+ _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
+ return STATUS_SUCCESSFUL;
+ }
+
+ if ( owner == executing ) {
+ Status_Control status;
+
+ status = ( *nested )( the_mutex );
+ _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
+ return status;
+ }
+
+ return _CORE_mutex_Seize_slow(
+ &the_mutex->Mutex,
+ operations,
+ executing,
+ wait,
+ queue_context
+ );
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender(
+ CORE_recursive_mutex_Control *the_mutex,
+ const Thread_queue_Operations *operations,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
+)
+{
+ unsigned int nest_level;
+ Thread_queue_Heads *heads;
+
+ _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );
+
+ if ( !_CORE_mutex_Is_owner( &the_mutex->Mutex, executing ) ) {
+ _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
+ return STATUS_NOT_OWNER;
+ }
+
+ nest_level = the_mutex->nest_level;
+
+ if ( nest_level > 0 ) {
+ the_mutex->nest_level = nest_level - 1;
+ _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
+ return STATUS_SUCCESSFUL;
+ }
+
+ _Thread_Resource_count_decrement( executing );
+ _CORE_mutex_Set_owner( &the_mutex->Mutex, NULL );
+
+ heads = the_mutex->Mutex.Wait_queue.Queue.heads;
+
+ if ( heads == NULL ) {
+ _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
+ return STATUS_SUCCESSFUL;
+ }
+
+ _Thread_queue_Surrender(
+ &the_mutex->Mutex.Wait_queue.Queue,
+ heads,
+ executing,
+ queue_context,
+ operations
+ );
+ return STATUS_SUCCESSFUL;
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Initialize(
+ CORE_ceiling_mutex_Control *the_mutex,
+ const Scheduler_Control *scheduler,
+ Priority_Control priority_ceiling
+)
+{
+ _CORE_recursive_mutex_Initialize( &the_mutex->Recursive );
+ _Priority_Node_initialize( &the_mutex->Priority_ceiling, priority_ceiling );
+#if defined(RTEMS_SMP)
+ the_mutex->scheduler = scheduler;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE const Scheduler_Control *
+_CORE_ceiling_mutex_Get_scheduler(
+ const CORE_ceiling_mutex_Control *the_mutex
+)
+{
+#if defined(RTEMS_SMP)
+ return the_mutex->scheduler;
+#else
+ return &_Scheduler_Table[ 0 ];
+#endif
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Set_priority(
+ CORE_ceiling_mutex_Control *the_mutex,
+ Priority_Control priority_ceiling,
+ Thread_queue_Context *queue_context
+)
+{
+ Thread_Control *owner;
+
+ owner = _CORE_mutex_Get_owner( &the_mutex->Recursive.Mutex );
+
+ if ( owner != NULL ) {
+ _Thread_Wait_acquire( owner, queue_context );
+ _Thread_Priority_change(
+ owner,
+ &the_mutex->Priority_ceiling,
+ priority_ceiling,
+ false,
+ queue_context
+ );
+ _Thread_Wait_release( owner, queue_context );
+ } else {
+ the_mutex->Priority_ceiling.priority = priority_ceiling;
+ }
+}
+
+RTEMS_INLINE_ROUTINE Priority_Control _CORE_ceiling_mutex_Get_priority(
+ const CORE_ceiling_mutex_Control *the_mutex
+)
+{
+ return the_mutex->Priority_ceiling.priority;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Set_owner(
+ CORE_ceiling_mutex_Control *the_mutex,
+ Thread_Control *owner,
+ Thread_queue_Context *queue_context
+)
+{
+ ISR_lock_Context lock_context;
+ Scheduler_Node *scheduler_node;
+ Per_CPU_Control *cpu_self;
+
+ _Thread_Wait_acquire_default_critical( owner, &lock_context );
+
+ scheduler_node = _Thread_Scheduler_get_home_node( owner );
+
+ if (
+ _Priority_Get_priority( &scheduler_node->Wait.Priority )
+ < the_mutex->Priority_ceiling.priority
+ ) {
+ _Thread_Wait_release_default_critical( owner, &lock_context );
+ _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
+ return STATUS_MUTEX_CEILING_VIOLATED;
+ }
+
+ _CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, owner );
+ _Thread_Resource_count_increment( owner );
+ _Thread_Priority_add(
+ owner,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
+ _Thread_Wait_release_default_critical( owner, &lock_context );
+
+ cpu_self = _Thread_queue_Dispatch_disable( queue_context );
+ _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
+ _Thread_Priority_update( queue_context );
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Seize(
+ CORE_ceiling_mutex_Control *the_mutex,
+ Thread_Control *executing,
+ bool wait,
+ Status_Control ( *nested )( CORE_recursive_mutex_Control * ),
+ Thread_queue_Context *queue_context
+)
+{
+ Thread_Control *owner;
+
+ _CORE_mutex_Acquire_critical( &the_mutex->Recursive.Mutex, queue_context );
+
+ owner = _CORE_mutex_Get_owner( &the_mutex->Recursive.Mutex );
+
+ if ( owner == NULL ) {
+#if defined(RTEMS_SMP)
+ if (
+ _Thread_Scheduler_get_home( executing )
+ != _CORE_ceiling_mutex_Get_scheduler( the_mutex )
+ ) {
+ _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
+ return STATUS_NOT_DEFINED;
+ }
+#endif
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ return _CORE_ceiling_mutex_Set_owner(
+ the_mutex,
+ executing,
+ queue_context
+ );
+ }
+
+ if ( owner == executing ) {
+ Status_Control status;
+
+ status = ( *nested )( &the_mutex->Recursive );
+ _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
+ return status;
+ }
+
+ return _CORE_mutex_Seize_slow(
+ &the_mutex->Recursive.Mutex,
+ CORE_MUTEX_TQ_OPERATIONS,
+ executing,
+ wait,
+ queue_context
+ );
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
+ CORE_ceiling_mutex_Control *the_mutex,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
+)
+{
+ unsigned int nest_level;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *new_owner;
+
+ _CORE_mutex_Acquire_critical( &the_mutex->Recursive.Mutex, queue_context );
+
+ if ( !_CORE_mutex_Is_owner( &the_mutex->Recursive.Mutex, executing ) ) {
+ _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
+ return STATUS_NOT_OWNER;
+ }
+
+ nest_level = the_mutex->Recursive.nest_level;
+
+ if ( nest_level > 0 ) {
+ the_mutex->Recursive.nest_level = nest_level - 1;
+ _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
+ return STATUS_SUCCESSFUL;
+ }
+
+ _Thread_Resource_count_decrement( executing );
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( executing, &lock_context );
+ _Thread_Priority_remove(
+ executing,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
+ _Thread_Wait_release_default_critical( executing, &lock_context );
+
+ new_owner = _Thread_queue_First_locked(
+ &the_mutex->Recursive.Mutex.Wait_queue,
+ CORE_MUTEX_TQ_OPERATIONS
+ );
+ _CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, new_owner );
+
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+
+ if ( new_owner != NULL ) {
+#if defined(RTEMS_MULTIPROCESSING)
+ if ( _Objects_Is_local_id( new_owner->Object.id ) )
+#endif
+ {
+ _Thread_Resource_count_increment( new_owner );
+ _Thread_Priority_add(
+ new_owner,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
+ }
+
+ _Thread_queue_Extract_critical(
+ &the_mutex->Recursive.Mutex.Wait_queue.Queue,
+ CORE_MUTEX_TQ_OPERATIONS,
+ new_owner,
+ queue_context
+ );
+ } else {
+ _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
+ }
+
+ _Thread_Priority_update( queue_context );
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
+}
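+
+/*
+ * Illustrative sketch, not part of the original header: initializing a
+ * ceiling mutex and querying its ceiling priority. The variable names and
+ * the use of the first scheduler table entry are assumptions made for this
+ * example.
+ *
+ * @code
+ * CORE_ceiling_mutex_Control mutex;
+ * Priority_Control           ceiling;
+ *
+ * _CORE_ceiling_mutex_Initialize( &mutex, &_Scheduler_Table[ 0 ], 1 );
+ * ceiling = _CORE_ceiling_mutex_Get_priority( &mutex ); // yields 1
+ * @endcode
+ */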
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/corerwlockimpl.h b/cpukit/include/rtems/score/corerwlockimpl.h
new file mode 100644
index 0000000000..942e8c8d75
--- /dev/null
+++ b/cpukit/include/rtems/score/corerwlockimpl.h
@@ -0,0 +1,182 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines Associated with the SuperCore RWLock
+ *
+ * This include file contains all of the inlined routines associated
+ * with the SuperCore RWLock.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2008.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_CORERWLOCKIMPL_H
+#define _RTEMS_SCORE_CORERWLOCKIMPL_H
+
+#include <rtems/score/percpu.h>
+#include <rtems/score/status.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadqimpl.h>
+#include <rtems/score/watchdog.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreRWLock
+ */
+/**@{**/
+
+#define CORE_RWLOCK_TQ_OPERATIONS &_Thread_queue_Operations_FIFO
+
+/**
+ * This is used to denote that a thread is blocked waiting for
+ * read-only access to the RWLock.
+ */
+#define CORE_RWLOCK_THREAD_WAITING_FOR_READ 0
+
+/**
+ * This is used to denote that a thread is blocked waiting for
+ * write-exclusive access to the RWLock.
+ */
+#define CORE_RWLOCK_THREAD_WAITING_FOR_WRITE 1
+
+/**
+ * RWLock State.
+ */
+typedef enum {
+  /** This indicates that the RWLock is not currently locked.
+   */
+  CORE_RWLOCK_UNLOCKED,
+  /** This indicates that the RWLock is currently locked for reading.
+   */
+  CORE_RWLOCK_LOCKED_FOR_READING,
+  /** This indicates that the RWLock is currently locked for writing.
+   */
+  CORE_RWLOCK_LOCKED_FOR_WRITING
+} CORE_RWLock_States;
+
+/**
+ * The following defines the control block used to manage each
+ * RWLock.
+ */
+typedef struct {
+ /** This field is the Waiting Queue used to manage the set of tasks
+ * which are blocked waiting for the RWLock to be released.
+ */
+ Thread_queue_Syslock_queue Queue;
+
+ /** This element is the current state of the RWLock.
+ */
+ CORE_RWLock_States current_state;
+
+  /** This element contains the current number of threads which hold this
+   *  RWLock for reading. */
+ unsigned int number_of_readers;
+} CORE_RWLock_Control;
+
+/**
+ * @brief Initialize an RWLock.
+ *
+ * This routine initializes the RWLock based on the parameters passed.
+ *
+ * @param[in] the_rwlock is the RWLock to initialize
+ */
+void _CORE_RWLock_Initialize(
+ CORE_RWLock_Control *the_rwlock
+);
+
+RTEMS_INLINE_ROUTINE void _CORE_RWLock_Destroy(
+ CORE_RWLock_Control *the_rwlock
+)
+{
+ (void) the_rwlock;
+}
+
+RTEMS_INLINE_ROUTINE Thread_Control *_CORE_RWLock_Acquire(
+ CORE_RWLock_Control *the_rwlock,
+ Thread_queue_Context *queue_context
+)
+{
+ ISR_Level level;
+ Thread_Control *executing;
+
+ _Thread_queue_Context_ISR_disable( queue_context, level );
+ _Thread_queue_Context_set_ISR_level( queue_context, level );
+ executing = _Thread_Executing;
+ _Thread_queue_Queue_acquire_critical(
+ &the_rwlock->Queue.Queue,
+ &executing->Potpourri_stats,
+ &queue_context->Lock_context.Lock_context
+ );
+
+ return executing;
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_RWLock_Release(
+ CORE_RWLock_Control *the_rwlock,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Queue_release(
+ &the_rwlock->Queue.Queue,
+ &queue_context->Lock_context.Lock_context
+ );
+}
+
+/**
+ * @brief Obtain RWLock for reading.
+ *
+ * This routine attempts to obtain the RWLock for read access.
+ *
+ * @param[in] the_rwlock is the RWLock to wait for
+ * @param[in] wait is true if the calling thread is willing to wait
+ */
+
+Status_Control _CORE_RWLock_Seize_for_reading(
+ CORE_RWLock_Control *the_rwlock,
+ bool wait,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Obtain RWLock for writing.
+ *
+ * This routine attempts to obtain the RWLock for write exclusive access.
+ *
+ * @param[in] the_rwlock is the RWLock to wait for
+ * @param[in] wait is true if the calling thread is willing to wait
+ */
+Status_Control _CORE_RWLock_Seize_for_writing(
+ CORE_RWLock_Control *the_rwlock,
+ bool wait,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Release the RWLock.
+ *
+ * This routine manually releases @a the_rwlock. All of the threads waiting
+ * for the RWLock will be readied.
+ *
+ * @param[in] the_rwlock is the RWLock to surrender
+ *
+ * @return Status is returned to indicate success or failure.
+ */
+Status_Control _CORE_RWLock_Surrender( CORE_RWLock_Control *the_rwlock );
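+
+/*
+ * Illustrative sketch, not part of the original header: the reader side of
+ * the RWLock as an API layer might drive it. The thread queue context
+ * initialization helper and the simplified error handling are assumptions
+ * made for this example.
+ *
+ * @code
+ * Status_Control read_lock( CORE_RWLock_Control *rwlock )
+ * {
+ *   Thread_queue_Context queue_context;
+ *
+ *   _Thread_queue_Context_initialize( &queue_context );
+ *   return _CORE_RWLock_Seize_for_reading( rwlock, true, &queue_context );
+ * }
+ *
+ * Status_Control read_unlock( CORE_RWLock_Control *rwlock )
+ * {
+ *   return _CORE_RWLock_Surrender( rwlock );
+ * }
+ * @endcode
+ */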
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/coresem.h b/cpukit/include/rtems/score/coresem.h
new file mode 100644
index 0000000000..f9d3ac8fd5
--- /dev/null
+++ b/cpukit/include/rtems/score/coresem.h
@@ -0,0 +1,61 @@
+/**
+ * @file rtems/score/coresem.h
+ *
+ * @brief Data Associated with the Counting Semaphore Handler
+ *
+ * This include file contains all the constants and structures associated
+ * with the Counting Semaphore Handler. A counting semaphore is the
+ * standard Dijkstra counting semaphore used to provide synchronization
+ * and mutual exclusion capabilities.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2008.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_CORESEM_H
+#define _RTEMS_SCORE_CORESEM_H
+
+#include <rtems/score/threadq.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSemaphore Semaphore Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which provides the foundation
+ * Semaphore services used in all of the APIs supported by RTEMS.
+ */
+/**@{*/
+
+/**
+ * The following defines the control block used to manage each
+ * counting semaphore.
+ */
+typedef struct {
+ /** This field is the Waiting Queue used to manage the set of tasks
+ * which are blocked waiting to obtain the semaphore.
+ */
+ Thread_queue_Control Wait_queue;
+
+ /** This element contains the current count of this semaphore. */
+ uint32_t count;
+} CORE_semaphore_Control;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/coresemimpl.h b/cpukit/include/rtems/score/coresemimpl.h
new file mode 100644
index 0000000000..00f77e61dd
--- /dev/null
+++ b/cpukit/include/rtems/score/coresemimpl.h
@@ -0,0 +1,207 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines Associated with the SuperCore Semaphore
+ *
+ * This include file contains all of the inlined routines associated
+ * with the SuperCore semaphore.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_CORESEMIMPL_H
+#define _RTEMS_SCORE_CORESEMIMPL_H
+
+#include <rtems/score/coresem.h>
+#include <rtems/score/objectimpl.h>
+#include <rtems/score/threaddispatch.h>
+#include <rtems/score/threadimpl.h>
+#include <rtems/score/threadqimpl.h>
+#include <rtems/score/statesimpl.h>
+#include <rtems/score/status.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreSemaphore
+ */
+/**@{**/
+
+/**
+ * @brief Initialize the semaphore based on the parameters passed.
+ *
+ * This package is the implementation of the CORE Semaphore Handler.
+ * This core object utilizes standard Dijkstra counting semaphores to provide
+ * synchronization and mutual exclusion capabilities.
+ *
+ * This routine initializes the semaphore based on the parameters passed.
+ *
+ * @param[in] the_semaphore is the semaphore to initialize
+ * @param[in] initial_value is the initial count of the semaphore
+ */
+void _CORE_semaphore_Initialize(
+ CORE_semaphore_Control *the_semaphore,
+ uint32_t initial_value
+);
+
+RTEMS_INLINE_ROUTINE void _CORE_semaphore_Acquire_critical(
+ CORE_semaphore_Control *the_semaphore,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Acquire_critical( &the_semaphore->Wait_queue, queue_context );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_semaphore_Release(
+ CORE_semaphore_Control *the_semaphore,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Release( &the_semaphore->Wait_queue, queue_context );
+}
+
+RTEMS_INLINE_ROUTINE void _CORE_semaphore_Destroy(
+ CORE_semaphore_Control *the_semaphore,
+ const Thread_queue_Operations *operations,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Flush_critical(
+ &the_semaphore->Wait_queue.Queue,
+ operations,
+ _Thread_queue_Flush_status_object_was_deleted,
+ queue_context
+ );
+ _Thread_queue_Destroy( &the_semaphore->Wait_queue );
+}
+
+/**
+ * @brief Surrender a unit to a semaphore.
+ *
+ * This routine frees a unit to the semaphore. If a task was blocked waiting
+ * for a unit from this semaphore, then that task will be readied and the unit
+ * given to that task. Otherwise, the unit will be returned to the semaphore.
+ *
+ * @param[in] the_semaphore is the semaphore to surrender
+ * @param[in] operations The thread queue operations.
+ * @param[in] queue_context is a temporary variable used to contain the ISR
+ * disable level cookie
+ *
+ * @return an indication of whether the routine succeeded or failed
+ */
+RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Surrender(
+ CORE_semaphore_Control *the_semaphore,
+ const Thread_queue_Operations *operations,
+ uint32_t maximum_count,
+ Thread_queue_Context *queue_context
+)
+{
+ Thread_Control *the_thread;
+ Status_Control status;
+
+ status = STATUS_SUCCESSFUL;
+
+ _CORE_semaphore_Acquire_critical( the_semaphore, queue_context );
+
+ the_thread = _Thread_queue_First_locked(
+ &the_semaphore->Wait_queue,
+ operations
+ );
+ if ( the_thread != NULL ) {
+ _Thread_queue_Extract_critical(
+ &the_semaphore->Wait_queue.Queue,
+ operations,
+ the_thread,
+ queue_context
+ );
+ } else {
+ if ( the_semaphore->count < maximum_count )
+ the_semaphore->count += 1;
+ else
+ status = STATUS_MAXIMUM_COUNT_EXCEEDED;
+
+ _CORE_semaphore_Release( the_semaphore, queue_context );
+ }
+
+ return status;
+}
+
+/**
+ * This routine returns the current count associated with the semaphore.
+ *
+ * @param[in] the_semaphore is the semaphore to obtain the count of
+ *
+ * @return the current count of this semaphore
+ */
+RTEMS_INLINE_ROUTINE uint32_t _CORE_semaphore_Get_count(
+ const CORE_semaphore_Control *the_semaphore
+)
+{
+ return the_semaphore->count;
+}
+
+/**
+ * This routine attempts to receive a unit from the_semaphore.
+ * If a unit is available or if the wait flag is false, then the routine
+ * returns. Otherwise, the calling task is blocked until a unit becomes
+ * available.
+ *
+ * @param[in] the_semaphore is the semaphore to obtain
+ * @param[in] operations The thread queue operations.
+ * @param[in] executing The currently executing thread.
+ * @param[in] wait is true if the thread is willing to wait
+ * @param[in] queue_context is a temporary variable used to contain the ISR
+ * disable level cookie
+ */
+RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Seize(
+ CORE_semaphore_Control *the_semaphore,
+ const Thread_queue_Operations *operations,
+ Thread_Control *executing,
+ bool wait,
+ Thread_queue_Context *queue_context
+)
+{
+ _Assert( _ISR_Get_level() != 0 );
+
+ _CORE_semaphore_Acquire_critical( the_semaphore, queue_context );
+ if ( the_semaphore->count != 0 ) {
+ the_semaphore->count -= 1;
+ _CORE_semaphore_Release( the_semaphore, queue_context );
+ return STATUS_SUCCESSFUL;
+ }
+
+ if ( !wait ) {
+ _CORE_semaphore_Release( the_semaphore, queue_context );
+ return STATUS_UNSATISFIED;
+ }
+
+ _Thread_queue_Context_set_thread_state(
+ queue_context,
+ STATES_WAITING_FOR_SEMAPHORE
+ );
+ _Thread_queue_Enqueue(
+ &the_semaphore->Wait_queue.Queue,
+ operations,
+ executing,
+ queue_context
+ );
+ return _Thread_Wait_get_status( executing );
+}
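+
+/*
+ * Illustrative sketch, not part of the original header: a non-blocking
+ * "try obtain" built from the routines above. The caller is assumed to run
+ * with interrupts disabled and a prepared thread queue context, as done by
+ * the API layers; the names are assumptions made for this example.
+ *
+ * @code
+ * bool semaphore_try_obtain(
+ *   CORE_semaphore_Control *semaphore,
+ *   Thread_queue_Context   *queue_context
+ * )
+ * {
+ *   Status_Control status;
+ *
+ *   status = _CORE_semaphore_Seize(
+ *     semaphore,
+ *     &_Thread_queue_Operations_FIFO,
+ *     _Thread_Executing,
+ *     false,
+ *     queue_context
+ *   );
+ *   return status == STATUS_SUCCESSFUL;
+ * }
+ * @endcode
+ */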
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/cpustdatomic.h b/cpukit/include/rtems/score/cpustdatomic.h
new file mode 100644
index 0000000000..6c6db8d279
--- /dev/null
+++ b/cpukit/include/rtems/score/cpustdatomic.h
@@ -0,0 +1,682 @@
+/**
+ * @file
+ *
+ * @brief Atomic Operations CPU API
+ */
+
+/*
+ * COPYRIGHT (c) 2013 Deng Hengyi.
+ * Copyright (c) 2015 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_CPUSTDATOMIC_H
+#define _RTEMS_SCORE_CPUSTDATOMIC_H
+
+#include <rtems/score/basedefs.h>
+
+#ifdef RTEMS_SMP
+ #if defined(__cplusplus) \
+ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9))
+ /*
+     * GCC 4.9 ships its own <stdatomic.h> which is not C++ compatible. The
+ * suggested solution was to include <atomic> in case C++ is used. This works
+ * at least with GCC 4.9. See also:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60932
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60940
+ */
+ #include <atomic>
+ #define _RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC
+ #else
+ #include <stdatomic.h>
+ #define _RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC
+ #endif
+#else
+ #include <rtems/score/isrlevel.h>
+#endif
+
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+
+typedef std::atomic_uint CPU_atomic_Uint;
+
+typedef std::atomic_ulong CPU_atomic_Ulong;
+
+typedef std::atomic_uintptr_t CPU_atomic_Uintptr;
+
+typedef std::atomic_flag CPU_atomic_Flag;
+
+typedef std::memory_order CPU_atomic_Order;
+
+#define CPU_ATOMIC_ORDER_RELAXED std::memory_order_relaxed
+
+#define CPU_ATOMIC_ORDER_ACQUIRE std::memory_order_acquire
+
+#define CPU_ATOMIC_ORDER_RELEASE std::memory_order_release
+
+#define CPU_ATOMIC_ORDER_ACQ_REL std::memory_order_acq_rel
+
+#define CPU_ATOMIC_ORDER_SEQ_CST std::memory_order_seq_cst
+
+#define CPU_ATOMIC_INITIALIZER_UINT( value ) ATOMIC_VAR_INIT( value )
+
+#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ATOMIC_VAR_INIT( value )
+
+#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ATOMIC_VAR_INIT( value )
+
+#define CPU_ATOMIC_INITIALIZER_FLAG ATOMIC_FLAG_INIT
+
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+
+typedef atomic_uint CPU_atomic_Uint;
+
+typedef atomic_ulong CPU_atomic_Ulong;
+
+typedef atomic_uintptr_t CPU_atomic_Uintptr;
+
+typedef atomic_flag CPU_atomic_Flag;
+
+typedef memory_order CPU_atomic_Order;
+
+#define CPU_ATOMIC_ORDER_RELAXED memory_order_relaxed
+
+#define CPU_ATOMIC_ORDER_ACQUIRE memory_order_acquire
+
+#define CPU_ATOMIC_ORDER_RELEASE memory_order_release
+
+#define CPU_ATOMIC_ORDER_ACQ_REL memory_order_acq_rel
+
+#define CPU_ATOMIC_ORDER_SEQ_CST memory_order_seq_cst
+
+#define CPU_ATOMIC_INITIALIZER_UINT( value ) ATOMIC_VAR_INIT( value )
+
+#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ATOMIC_VAR_INIT( value )
+
+#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ATOMIC_VAR_INIT( value )
+
+#define CPU_ATOMIC_INITIALIZER_FLAG ATOMIC_FLAG_INIT
+
+#else
+
+typedef unsigned int CPU_atomic_Uint;
+
+typedef unsigned long CPU_atomic_Ulong;
+
+typedef uintptr_t CPU_atomic_Uintptr;
+
+typedef bool CPU_atomic_Flag;
+
+typedef int CPU_atomic_Order;
+
+#define CPU_ATOMIC_ORDER_RELAXED 0
+
+#define CPU_ATOMIC_ORDER_ACQUIRE 2
+
+#define CPU_ATOMIC_ORDER_RELEASE 3
+
+#define CPU_ATOMIC_ORDER_ACQ_REL 4
+
+#define CPU_ATOMIC_ORDER_SEQ_CST 5
+
+#define CPU_ATOMIC_INITIALIZER_UINT( value ) ( value )
+
+#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ( value )
+
+#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ( value )
+
+#define CPU_ATOMIC_INITIALIZER_FLAG false
+
+#endif
+
+static inline void _CPU_atomic_Fence( CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ std::atomic_thread_fence( order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ atomic_thread_fence( order );
+#else
+ (void) order;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+#endif
+}
+
+static inline void _CPU_atomic_Init_uint( CPU_atomic_Uint *obj, unsigned int desired )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ atomic_init( obj, desired );
+#else
+ *obj = desired;
+#endif
+}
+
+static inline void _CPU_atomic_Init_ulong( CPU_atomic_Ulong *obj, unsigned long desired )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ atomic_init( obj, desired );
+#else
+ *obj = desired;
+#endif
+}
+
+static inline void _CPU_atomic_Init_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ atomic_init( obj, desired );
+#else
+ *obj = desired;
+#endif
+}
+
+static inline unsigned int _CPU_atomic_Load_uint( const CPU_atomic_Uint *obj, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->load( order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_load_explicit( obj, order );
+#else
+ unsigned int val;
+
+ (void) order;
+ val = *obj;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ return val;
+#endif
+}
+
+static inline unsigned long _CPU_atomic_Load_ulong( const CPU_atomic_Ulong *obj, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->load( order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_load_explicit( obj, order );
+#else
+ unsigned long val;
+
+ (void) order;
+ val = *obj;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ return val;
+#endif
+}
+
+static inline uintptr_t _CPU_atomic_Load_uintptr( const CPU_atomic_Uintptr *obj, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->load( order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_load_explicit( obj, order );
+#else
+ uintptr_t val;
+
+ (void) order;
+ val = *obj;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ return val;
+#endif
+}
+
+static inline void _CPU_atomic_Store_uint( CPU_atomic_Uint *obj, unsigned int desired, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ atomic_store_explicit( obj, desired, order );
+#else
+ (void) order;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ *obj = desired;
+#endif
+}
+
+static inline void _CPU_atomic_Store_ulong( CPU_atomic_Ulong *obj, unsigned long desired, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ atomic_store_explicit( obj, desired, order );
+#else
+ (void) order;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ *obj = desired;
+#endif
+}
+
+static inline void _CPU_atomic_Store_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ atomic_store_explicit( obj, desired, order );
+#else
+ (void) order;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ *obj = desired;
+#endif
+}
+
+static inline unsigned int _CPU_atomic_Fetch_add_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_add( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_add_explicit( obj, arg, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val + arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned long _CPU_atomic_Fetch_add_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_add( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_add_explicit( obj, arg, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val + arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline uintptr_t _CPU_atomic_Fetch_add_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_add( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_add_explicit( obj, arg, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val + arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned int _CPU_atomic_Fetch_sub_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_sub( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_sub_explicit( obj, arg, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val - arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned long _CPU_atomic_Fetch_sub_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_sub( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_sub_explicit( obj, arg, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val - arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline uintptr_t _CPU_atomic_Fetch_sub_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_sub( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_sub_explicit( obj, arg, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val - arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned int _CPU_atomic_Fetch_or_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_or( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_or_explicit( obj, arg, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val | arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned long _CPU_atomic_Fetch_or_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_or( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_or_explicit( obj, arg, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val | arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline uintptr_t _CPU_atomic_Fetch_or_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_or( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_or_explicit( obj, arg, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val | arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned int _CPU_atomic_Fetch_and_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_and( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_and_explicit( obj, arg, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val & arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned long _CPU_atomic_Fetch_and_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_and( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_and_explicit( obj, arg, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val & arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline uintptr_t _CPU_atomic_Fetch_and_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->fetch_and( arg, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_fetch_and_explicit( obj, arg, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val & arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned int _CPU_atomic_Exchange_uint( CPU_atomic_Uint *obj, unsigned int desired, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->exchange( desired, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_exchange_explicit( obj, desired, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = desired;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline unsigned long _CPU_atomic_Exchange_ulong( CPU_atomic_Ulong *obj, unsigned long desired, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->exchange( desired, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_exchange_explicit( obj, desired, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = desired;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline uintptr_t _CPU_atomic_Exchange_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->exchange( desired, order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_exchange_explicit( obj, desired, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = desired;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+static inline bool _CPU_atomic_Compare_exchange_uint( CPU_atomic_Uint *obj, unsigned int *expected, unsigned int desired, CPU_atomic_Order succ, CPU_atomic_Order fail )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->compare_exchange_strong( *expected, desired, succ, fail );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
+#else
+ bool success;
+ ISR_Level level;
+ unsigned int actual;
+
+ (void) succ;
+ (void) fail;
+ _ISR_Local_disable( level );
+ actual = *obj;
+ success = ( actual == *expected );
+ if ( success ) {
+ *obj = desired;
+ } else {
+ *expected = actual;
+ }
+ _ISR_Local_enable( level );
+
+ return success;
+#endif
+}
+
+static inline bool _CPU_atomic_Compare_exchange_ulong( CPU_atomic_Ulong *obj, unsigned long *expected, unsigned long desired, CPU_atomic_Order succ, CPU_atomic_Order fail )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->compare_exchange_strong( *expected, desired, succ, fail );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
+#else
+ bool success;
+ ISR_Level level;
+ unsigned long actual;
+
+ (void) succ;
+ (void) fail;
+ _ISR_Local_disable( level );
+ actual = *obj;
+ success = ( actual == *expected );
+ if ( success ) {
+ *obj = desired;
+ } else {
+ *expected = actual;
+ }
+ _ISR_Local_enable( level );
+
+ return success;
+#endif
+}
+
+static inline bool _CPU_atomic_Compare_exchange_uintptr( CPU_atomic_Uintptr *obj, uintptr_t *expected, uintptr_t desired, CPU_atomic_Order succ, CPU_atomic_Order fail )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->compare_exchange_strong( *expected, desired, succ, fail );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
+#else
+ bool success;
+ ISR_Level level;
+ uintptr_t actual;
+
+ (void) succ;
+ (void) fail;
+ _ISR_Local_disable( level );
+ actual = *obj;
+ success = ( actual == *expected );
+ if ( success ) {
+ *obj = desired;
+ } else {
+ *expected = actual;
+ }
+ _ISR_Local_enable( level );
+
+ return success;
+#endif
+}
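+
+/*
+ * Illustrative sketch, not part of the original header: a saturating
+ * increment built from a compare-and-exchange loop. The function name and
+ * parameters are assumptions made for this example.
+ *
+ * @code
+ * static unsigned int saturating_increment(
+ *   CPU_atomic_Uint *counter,
+ *   unsigned int     maximum
+ * )
+ * {
+ *   unsigned int expected;
+ *
+ *   expected = _CPU_atomic_Load_uint( counter, CPU_ATOMIC_ORDER_RELAXED );
+ *
+ *   while (
+ *     expected < maximum
+ *       && !_CPU_atomic_Compare_exchange_uint(
+ *         counter,
+ *         &expected,
+ *         expected + 1,
+ *         CPU_ATOMIC_ORDER_RELAXED,
+ *         CPU_ATOMIC_ORDER_RELAXED
+ *       )
+ *   ) {
+ *     // expected was updated to the actual value, retry
+ *   }
+ *
+ *   return expected;
+ * }
+ * @endcode
+ */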
+
+static inline void _CPU_atomic_Flag_clear( CPU_atomic_Flag *obj, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ obj->clear( order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ atomic_flag_clear_explicit( obj, order );
+#else
+ (void) order;
+ *obj = false;
+#endif
+}
+
+static inline bool _CPU_atomic_Flag_test_and_set( CPU_atomic_Flag *obj, CPU_atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
+ return obj->test_and_set( order );
+#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
+ return atomic_flag_test_and_set_explicit( obj, order );
+#else
+ bool flag;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ flag = *obj;
+ *obj = true;
+ _ISR_Local_enable( level );
+
+ return flag;
+#endif
+}
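+
+/*
+ * Illustrative sketch, not part of the original header: a minimal
+ * test-and-set spin lock built from the flag operations. This is an
+ * illustration only; the score uses dedicated SMP lock primitives.
+ *
+ * @code
+ * static CPU_atomic_Flag example_flag = CPU_ATOMIC_INITIALIZER_FLAG;
+ *
+ * static void example_lock( void )
+ * {
+ *   while (
+ *     _CPU_atomic_Flag_test_and_set( &example_flag, CPU_ATOMIC_ORDER_ACQUIRE )
+ *   ) {
+ *     // busy wait until the flag is cleared by the owner
+ *   }
+ * }
+ *
+ * static void example_unlock( void )
+ * {
+ *   _CPU_atomic_Flag_clear( &example_flag, CPU_ATOMIC_ORDER_RELEASE );
+ * }
+ * @endcode
+ */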
+
+#endif /* _RTEMS_SCORE_CPUSTDATOMIC_H */
diff --git a/cpukit/include/rtems/score/freechain.h b/cpukit/include/rtems/score/freechain.h
new file mode 100644
index 0000000000..1540c0e2a1
--- /dev/null
+++ b/cpukit/include/rtems/score/freechain.h
@@ -0,0 +1,111 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreFreechain
+ *
+ * @brief Freechain Handler API
+ */
+/*
+ * Copyright (c) 2013 Gedare Bloom.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_FREECHAIN_H
+#define _RTEMS_SCORE_FREECHAIN_H
+
+#include <rtems/score/basedefs.h>
+#include <rtems/score/chain.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreFreechain Freechain Handler
+ *
+ * @ingroup Score
+ *
+ * The Freechain Handler is used to manage a chain of nodes whose size can
+ * automatically increase when there are no free nodes left. This handler
+ * provides one data structure: Freechain_Control.
+ *
+ * @{
+ */
+
+/**
+ * @brief Allocator function.
+ */
+typedef void *( *Freechain_Allocator )( size_t size );
+
+/**
+ * @brief The freechain control.
+ */
+typedef struct {
+ /**
+ * @brief Chain of free nodes.
+ */
+ Chain_Control Free;
+} Freechain_Control;
+
+/**
+ * @brief Initializes a freechain.
+ *
+ * This routine initializes the freechain control structure to manage a chain
+ * of nodes. The allocator is used to obtain the storage for the initial
+ * number of nodes.
+ *
+ * @param[in] freechain The freechain control to initialize.
+ * @param[in] allocator The allocator function.
+ * @param[in] number_nodes The initial number of nodes.
+ * @param[in] node_size The node size.
+ */
+void _Freechain_Initialize(
+ Freechain_Control *freechain,
+ Freechain_Allocator allocator,
+ size_t number_nodes,
+ size_t node_size
+);
+
+/**
+ * @brief Gets a node from the freechain.
+ *
+ * @param[in] freechain The freechain control.
+ * @param[in] allocator The allocator function.
+ * @param[in] number_nodes_to_extend The number of nodes in case an extend is
+ * necessary due to an empty freechain.
+ * @param[in] node_size The node size.
+ *
+ * @retval NULL The freechain is empty and the extend operation failed.
+ * @retval otherwise Pointer to a node. The node ownership passes to the
+ * caller.
+ */
+void *_Freechain_Get(
+ Freechain_Control *freechain,
+ Freechain_Allocator allocator,
+ size_t number_nodes_to_extend,
+ size_t node_size
+);
+
+/**
+ * @brief Puts a node back onto the freechain.
+ *
+ * @param[in] freechain The freechain control.
+ * @param[in] node The node to put back. The node may be @c NULL, in this case
+ * the function does nothing.
+ */
+void _Freechain_Put(
+ Freechain_Control *freechain,
+ void *node
+);
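+
+/*
+ * Illustrative sketch, not part of the original header: managing a pool of
+ * fixed-size nodes. The node type and the use of malloc() from <stdlib.h>
+ * as the allocator are assumptions made for this example.
+ *
+ * @code
+ * typedef struct {
+ *   Chain_Node Node;
+ *   int        payload;
+ * } Example_Node;
+ *
+ * static Freechain_Control pool;
+ *
+ * void pool_initialize( void )
+ * {
+ *   _Freechain_Initialize( &pool, malloc, 8, sizeof( Example_Node ) );
+ * }
+ *
+ * Example_Node *pool_get( void )
+ * {
+ *   return (Example_Node *)
+ *     _Freechain_Get( &pool, malloc, 4, sizeof( Example_Node ) );
+ * }
+ *
+ * void pool_put( Example_Node *node )
+ * {
+ *   _Freechain_Put( &pool, node );
+ * }
+ * @endcode
+ */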
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/heap.h b/cpukit/include/rtems/score/heap.h
new file mode 100644
index 0000000000..60cb3be99d
--- /dev/null
+++ b/cpukit/include/rtems/score/heap.h
@@ -0,0 +1,518 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
+ *
+ * @brief Heap Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_HEAP_H
+#define _RTEMS_SCORE_HEAP_H
+
+#include <rtems/score/cpu.h>
+#include <rtems/score/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef RTEMS_DEBUG
+ #define HEAP_PROTECTION
+#endif
+
+/**
+ * @defgroup ScoreHeap Heap Handler
+ *
+ * @ingroup Score
+ *
+ * @brief The Heap Handler provides a heap.
+ *
+ * A heap is a doubly linked list of variable size blocks which are allocated
+ * using the first fit method. Garbage collection is performed each time a
+ * block is returned to the heap by coalescing neighbor blocks. Control
+ * information for both allocated and free blocks is contained in the heap
+ * area. A heap control structure contains control information for the heap.
+ *
+ * The alignment routines could be made faster should we require only powers of
+ * two to be supported for page size, alignment and boundary arguments. The
+ * minimum alignment requirement for pages is currently CPU_ALIGNMENT and this
+ * value is only required to be a multiple of two and explicitly not required
+ * to be a power of two.
+ *
+ * There are two kinds of blocks. One sort describes a free block from which
+ * we can allocate memory. The other blocks are used and provide an allocated
+ * memory area. The free blocks are accessible via a list of free blocks.
+ *
+ * Blocks or areas cover a contiguous set of memory addresses. They have a
+ * begin and end address. The end address is not part of the set. The size of
+ * a block or area equals the distance between the begin and end address in
+ * units of bytes.
+ *
+ * Free blocks look like:
+ * <table>
+ * <tr>
+ * <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
+ * previous block is free, <br> otherwise it may contain data used by
+ * the previous block</td>
+ * </tr>
+ * <tr>
+ * <td>block size and a flag which indicates if the previous block is free
+ *     or used, <br> this field always contains valid data regardless of the
+ * block usage</td>
+ * </tr>
+ * <tr><td>pointer to next block (this field is page size aligned)</td></tr>
+ * <tr><td>pointer to previous block</td></tr>
+ * <tr><td colspan=2>free space</td></tr>
+ * </table>
+ *
+ * Used blocks look like:
+ * <table>
+ * <tr>
+ * <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
+ * previous block is free,<br>otherwise it may contain data used by
+ * the previous block</td>
+ * </tr>
+ * <tr>
+ * <td>block size and a flag which indicates if the previous block is free
+ *     or used, <br> this field always contains valid data regardless of the
+ * block usage</td>
+ * </tr>
+ * <tr><td>begin of allocated area (this field is page size aligned)</td></tr>
+ * <tr><td>allocated space</td></tr>
+ * <tr><td colspan=2>allocated space</td></tr>
+ * </table>
+ *
+ * The heap area after initialization contains two blocks and looks like:
+ * <table>
+ * <tr><th>Label</th><th colspan=2>Content</th></tr>
+ * <tr><td>heap->area_begin</td><td colspan=2>heap area begin address</td></tr>
+ * <tr>
+ * <td>first_block->prev_size</td>
+ * <td colspan=2>
+ * subordinate heap area end address (this will be used to maintain a
+ * linked list of scattered heap areas)
+ * </td>
+ * </tr>
+ * <tr>
+ * <td>first_block->size</td>
+ * <td colspan=2>size available for allocation
+ * | @c HEAP_PREV_BLOCK_USED</td>
+ * </tr>
+ * <tr>
+ * <td>first_block->next</td><td>_Heap_Free_list_tail(heap)</td>
+ * <td rowspan=3>memory area available for allocation</td>
+ * </tr>
+ * <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr>
+ * <tr><td>...</td></tr>
+ * <tr>
+ * <td>last_block->prev_size</td><td colspan=2>size of first block</td>
+ * </tr>
+ * <tr>
+ * <td>last_block->size</td>
+ * <td colspan=2>first block begin address - last block begin address</td>
+ * </tr>
+ * <tr><td>heap->area_end</td><td colspan=2>heap area end address</td></tr>
+ * </table>
+ * The next block of the last block is the first block. Since the first
+ * block indicates that the previous block is used, this ensures that the
+ * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
+ * functions.
+ */
+/**@{**/
+
+typedef struct Heap_Control Heap_Control;
+
+typedef struct Heap_Block Heap_Block;
+
+#ifndef HEAP_PROTECTION
+ #define HEAP_PROTECTION_HEADER_SIZE 0
+#else
+ #define HEAP_PROTECTOR_COUNT 2
+
+ #define HEAP_BEGIN_PROTECTOR_0 ((uintptr_t) 0xfd75a98f)
+ #define HEAP_BEGIN_PROTECTOR_1 ((uintptr_t) 0xbfa1f177)
+ #define HEAP_END_PROTECTOR_0 ((uintptr_t) 0xd6b8855e)
+ #define HEAP_END_PROTECTOR_1 ((uintptr_t) 0x13a44a5b)
+
+ #define HEAP_FREE_PATTERN ((uintptr_t) 0xe7093cdf)
+
+ #define HEAP_PROTECTION_OBOLUS ((Heap_Block *) 1)
+
+ typedef void (*_Heap_Protection_handler)(
+ Heap_Control *heap,
+ Heap_Block *block
+ );
+
+ typedef struct {
+ _Heap_Protection_handler block_initialize;
+ _Heap_Protection_handler block_check;
+ _Heap_Protection_handler block_error;
+ void *handler_data;
+ Heap_Block *first_delayed_free_block;
+ Heap_Block *last_delayed_free_block;
+ uintptr_t delayed_free_block_count;
+ uintptr_t delayed_free_fraction;
+ } Heap_Protection;
+
+ typedef struct {
+ uintptr_t protector [HEAP_PROTECTOR_COUNT];
+ Heap_Block *next_delayed_free_block;
+ Thread_Control *task;
+ void *tag;
+ } Heap_Protection_block_begin;
+
+ typedef struct {
+ uintptr_t protector [HEAP_PROTECTOR_COUNT];
+ } Heap_Protection_block_end;
+
+ #define HEAP_PROTECTION_HEADER_SIZE \
+ (sizeof(Heap_Protection_block_begin) + sizeof(Heap_Protection_block_end))
+#endif
+
+/**
+ * @brief The block header consists of the two size fields
+ * (@ref Heap_Block.prev_size and @ref Heap_Block.size_and_flag).
+ */
+#define HEAP_BLOCK_HEADER_SIZE \
+ (2 * sizeof(uintptr_t) + HEAP_PROTECTION_HEADER_SIZE)
+
+/**
+ * @brief Description for free or used blocks.
+ */
+struct Heap_Block {
+ /**
+ * @brief Size of the previous block or part of the allocated area of the
+ * previous block.
+ *
+ * This field is only valid if the previous block is free. This case is
+ * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
+ * @a size_and_flag field of the current block.
+ *
+ * In a used block only the @a size_and_flag field needs to be valid. The
+ * @a prev_size field of the current block is maintained by the previous
+ * block. The current block can use the @a prev_size field in the next block
+ * for allocation.
+ */
+ uintptr_t prev_size;
+
+ #ifdef HEAP_PROTECTION
+ Heap_Protection_block_begin Protection_begin;
+ #endif
+
+ /**
+ * @brief Contains the size of the current block and a flag which indicates
+ * if the previous block is free or used.
+ *
+ * If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is
+ * used, otherwise the previous block is free. A used previous block may
+   * claim the @a prev_size field for allocation. This trick makes it
+   * possible to decrease the overhead in the used blocks by the size of the
+   * @a prev_size field. As sizes are required to be multiples of two, the
+   * least significant bit would always be zero. We use this bit to store
+   * the flag.
+ *
+ * This field is always valid.
+ */
+ uintptr_t size_and_flag;
+
+ #ifdef HEAP_PROTECTION
+ Heap_Protection_block_end Protection_end;
+ #endif
+
+ /**
+ * @brief Pointer to the next free block or part of the allocated area.
+ *
+   * This field is page size aligned and marks the begin of the allocated
+   * area in case the block is used.
+ *
+ * This field is only valid if the block is free and thus part of the free
+ * block list.
+ */
+ Heap_Block *next;
+
+ /**
+ * @brief Pointer to the previous free block or part of the allocated area.
+ *
+ * This field is only valid if the block is free and thus part of the free
+ * block list.
+ */
+ Heap_Block *prev;
+};
+
+/**
+ * @brief Run-time heap statistics.
+ *
+ * The value @a searches / @a allocs gives the mean number of searches per
+ * allocation, while @a max_search gives the maximum number of searches ever
+ * performed on a single allocation call.
+ */
+typedef struct {
+ /**
+ * @brief Lifetime number of bytes allocated from this heap.
+ *
+ * This value is an integral multiple of the page size.
+ */
+ uint64_t lifetime_allocated;
+
+ /**
+ * @brief Lifetime number of bytes freed to this heap.
+ *
+ * This value is an integral multiple of the page size.
+ */
+ uint64_t lifetime_freed;
+
+ /**
+ * @brief Size of the allocatable area in bytes.
+ *
+ * This value is an integral multiple of the page size.
+ */
+ uintptr_t size;
+
+ /**
+ * @brief Current free size in bytes.
+ *
+ * This value is an integral multiple of the page size.
+ */
+ uintptr_t free_size;
+
+ /**
+ * @brief Minimum free size ever in bytes.
+ *
+ * This value is an integral multiple of the page size.
+ */
+ uintptr_t min_free_size;
+
+ /**
+ * @brief Current number of free blocks.
+ */
+ uint32_t free_blocks;
+
+ /**
+ * @brief Maximum number of free blocks ever.
+ */
+ uint32_t max_free_blocks;
+
+ /**
+ * @brief Current number of used blocks.
+ */
+ uint32_t used_blocks;
+
+ /**
+ * @brief Maximum number of blocks searched ever.
+ */
+ uint32_t max_search;
+
+ /**
+ * @brief Total number of searches.
+ */
+ uint32_t searches;
+
+ /**
+ * @brief Total number of successful allocations.
+ */
+ uint32_t allocs;
+
+ /**
+ * @brief Total number of failed allocations.
+ */
+ uint32_t failed_allocs;
+
+ /**
+ * @brief Total number of successful frees.
+ */
+ uint32_t frees;
+
+ /**
+ * @brief Total number of successful resizes.
+ */
+ uint32_t resizes;
+} Heap_Statistics;
+
+/**
+ * @brief Control block used to manage a heap.
+ */
+struct Heap_Control {
+ Heap_Block free_list;
+ uintptr_t page_size;
+ uintptr_t min_block_size;
+ uintptr_t area_begin;
+ uintptr_t area_end;
+ Heap_Block *first_block;
+ Heap_Block *last_block;
+ Heap_Statistics stats;
+ #ifdef HEAP_PROTECTION
+ Heap_Protection Protection;
+ #endif
+};
+
+/**
+ * @brief Information about blocks.
+ */
+typedef struct {
+ /**
+ * @brief Number of blocks of this type.
+ */
+ uintptr_t number;
+
+ /**
+ * @brief Largest block of this type.
+ */
+ uintptr_t largest;
+
+ /**
+ * @brief Total size of the blocks of this type.
+ */
+ uintptr_t total;
+} Heap_Information;
+
+/**
+ * @brief Information block returned by _Heap_Get_information().
+ */
+typedef struct {
+ Heap_Information Free;
+ Heap_Information Used;
+ Heap_Statistics Stats;
+} Heap_Information_block;
+
+/**
+ * @brief Heap area structure for table based heap initialization and
+ * extension.
+ *
+ * @see Heap_Initialization_or_extend_handler.
+ */
+typedef struct {
+ void *begin;
+ uintptr_t size;
+} Heap_Area;
+
+/**
+ * @brief Heap initialization and extend handler type.
+ *
+ * This helps to do a table based heap initialization and extension. Create a
+ * table of Heap_Area elements and iterate through it. Set the handler to
+ * _Heap_Initialize() in the first iteration and then to _Heap_Extend().
+ *
+ * @see Heap_Area, _Heap_Initialize(), _Heap_Extend(), or _Heap_No_extend().
+ */
+typedef uintptr_t (*Heap_Initialization_or_extend_handler)(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size,
+ uintptr_t page_size_or_unused
+);
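+
+/*
+ * Illustrative sketch, not part of the original header, of the table based
+ * initialization described above. The two statically allocated memory
+ * areas are assumptions made for this example; a page size argument of
+ * zero selects the default. Note that extending with disconnected areas
+ * leads to used blocks which cover the gaps, see _Heap_Extend().
+ *
+ * @code
+ * static Heap_Control example_heap;
+ *
+ * static char area_0[ 4096 ];
+ * static char area_1[ 2048 ];
+ *
+ * static const Heap_Area areas[] = {
+ *   { area_0, sizeof( area_0 ) },
+ *   { area_1, sizeof( area_1 ) }
+ * };
+ *
+ * void example_heap_setup( void )
+ * {
+ *   Heap_Initialization_or_extend_handler handler = _Heap_Initialize;
+ *   size_t i;
+ *
+ *   for ( i = 0; i < sizeof( areas ) / sizeof( areas[ 0 ] ); ++i ) {
+ *     (*handler)( &example_heap, areas[ i ].begin, areas[ i ].size, 0 );
+ *     handler = _Heap_Extend;
+ *   }
+ * }
+ * @endcode
+ */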
+
+/**
+ * @brief Extends the memory available for the heap @a heap using the memory
+ * area starting at @a area_begin of size @a area_size bytes.
+ *
+ * There are no alignment requirements for the memory area. The memory area
+ * must be big enough to contain some maintenance blocks. It must not overlap
+ * parts of the current heap memory areas. Disconnected memory areas added to
+ * the heap will lead to used blocks which cover the gaps. Extending with an
+ * inappropriate memory area will corrupt the heap resulting in undefined
+ * behaviour.
+ *
+ * The unused fourth parameter is provided to have the same signature as
+ * _Heap_Initialize().
+ *
+ * Returns the extended space available for allocation, or zero in case of failure.
+ *
+ * @see Heap_Initialization_or_extend_handler.
+ */
+uintptr_t _Heap_Extend(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size,
+ uintptr_t unused
+);
+
+/**
+ * @brief This function always returns zero.
+ *
+ * This function does nothing and always returns zero. It may be used as the
+ * handler in case no heap extension is desired.
+ *
+ * @see Heap_Initialization_or_extend_handler.
+ */
+uintptr_t _Heap_No_extend(
+ Heap_Control *unused_0,
+ void *unused_1,
+ uintptr_t unused_2,
+ uintptr_t unused_3
+);
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
+ uintptr_t value,
+ uintptr_t alignment
+)
+{
+ uintptr_t remainder = value % alignment;
+
+ if ( remainder != 0 ) {
+ return value - remainder + alignment;
+ } else {
+ return value;
+ }
+}
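+
+/*
+ * For example, _Heap_Align_up( 13, 8 ) yields 16, while an already aligned
+ * value such as _Heap_Align_up( 16, 8 ) is returned unchanged.
+ */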
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min_block_size( uintptr_t page_size )
+{
+ return _Heap_Align_up( sizeof( Heap_Block ), page_size );
+}
+
+/**
+ * @brief Returns the worst case overhead to manage a memory area.
+ */
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Area_overhead(
+ uintptr_t page_size
+)
+{
+ if ( page_size != 0 ) {
+ page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
+ } else {
+ page_size = CPU_ALIGNMENT;
+ }
+
+ return 2 * (page_size - 1) + HEAP_BLOCK_HEADER_SIZE;
+}
+
+/**
+ * @brief Returns the size with administration and alignment overhead for one
+ * allocation.
+ */
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Size_with_overhead(
+ uintptr_t page_size,
+ uintptr_t size,
+ uintptr_t alignment
+)
+{
+ if ( page_size != 0 ) {
+ page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
+ } else {
+ page_size = CPU_ALIGNMENT;
+ }
+
+ if ( page_size < alignment ) {
+ page_size = alignment;
+ }
+
+ return HEAP_BLOCK_HEADER_SIZE + page_size - 1 + size;
+}
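+
+/*
+ * For example, with a page size argument of zero the page size defaults to
+ * CPU_ALIGNMENT. Assuming CPU_ALIGNMENT is at most 16, a request with size
+ * 100 and alignment 16 yields HEAP_BLOCK_HEADER_SIZE + 16 - 1 + 100 bytes.
+ */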
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/heapimpl.h b/cpukit/include/rtems/score/heapimpl.h
new file mode 100644
index 0000000000..a8948edd6f
--- /dev/null
+++ b/cpukit/include/rtems/score/heapimpl.h
@@ -0,0 +1,601 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
+ *
+ * @brief Heap Handler Implementation
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2008.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_HEAPIMPL_H
+#define _RTEMS_SCORE_HEAPIMPL_H
+
+#include <rtems/score/heap.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreHeap
+ */
+/**@{**/
+
+/**
+ * @brief See also @ref Heap_Block.size_and_flag.
+ */
+#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)
+
+/**
+ * @brief Size of the part at the block begin which may be used for allocation
+ *   on behalf of the previous block.
+ */
+#define HEAP_ALLOC_BONUS sizeof(uintptr_t)
+
+/**
+ * @brief See _Heap_Resize_block().
+ */
+typedef enum {
+ HEAP_RESIZE_SUCCESSFUL,
+ HEAP_RESIZE_UNSATISFIED,
+ HEAP_RESIZE_FATAL_ERROR
+} Heap_Resize_status;
+
+/**
+ * @brief Gets the first and last block for the heap area with begin
+ * @a heap_area_begin and size @a heap_area_size.
+ *
+ * A page size of @a page_size and minimal block size of @a min_block_size will
+ * be used for calculation.
+ *
+ * Nothing will be written to this area.
+ *
+ * In case of success the pointers to the first and last block will be returned
+ * via @a first_block_ptr and @a last_block_ptr.
+ *
+ * Returns @c true if the area is big enough, and @c false otherwise.
+ */
+bool _Heap_Get_first_and_last_block(
+ uintptr_t heap_area_begin,
+ uintptr_t heap_area_size,
+ uintptr_t page_size,
+ uintptr_t min_block_size,
+ Heap_Block **first_block_ptr,
+ Heap_Block **last_block_ptr
+);
+
+/**
+ * @brief Initializes the heap control block @a heap to manage the area
+ * starting at @a area_begin of size @a area_size bytes.
+ *
+ * Blocks of memory are allocated from the heap in multiples of @a page_size
+ * byte units. If the @a page_size is equal to zero or is not a multiple of
+ * @c CPU_ALIGNMENT, it is aligned up to the nearest @c CPU_ALIGNMENT boundary.
+ *
+ * Returns the maximum memory available, or zero in case of failure.
+ *
+ * @see Heap_Initialization_or_extend_handler.
+ */
+uintptr_t _Heap_Initialize(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size,
+ uintptr_t page_size
+);
+
+/**
+ * @brief Allocates a memory area of size @a size bytes from the heap @a heap.
+ *
+ * If the alignment parameter @a alignment is not equal to zero, the allocated
+ * memory area will begin at an address aligned by this value.
+ *
+ * If the boundary parameter @a boundary is not equal to zero, the allocated
+ * memory area will fulfill a boundary constraint. The boundary value
+ * specifies the set of addresses which are aligned by the boundary value. The
+ * interior of the allocated memory area will not contain an element of this
+ * set. The begin or end address of the area may be a member of the set.
+ *
+ * A size value of zero will return a unique address which may be freed with
+ * _Heap_Free().
+ *
+ * Returns a pointer to the begin of the allocated memory area, or @c NULL if
+ * no memory is available or the parameters are inconsistent.
+ */
+void *_Heap_Allocate_aligned_with_boundary(
+ Heap_Control *heap,
+ uintptr_t size,
+ uintptr_t alignment,
+ uintptr_t boundary
+);
+
+/**
+ * @brief See _Heap_Allocate_aligned_with_boundary() with boundary equals zero.
+ */
+RTEMS_INLINE_ROUTINE void *_Heap_Allocate_aligned(
+ Heap_Control *heap,
+ uintptr_t size,
+ uintptr_t alignment
+)
+{
+ return _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 );
+}
+
+/**
+ * @brief See _Heap_Allocate_aligned_with_boundary() with alignment and
+ * boundary equals zero.
+ */
+RTEMS_INLINE_ROUTINE void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
+{
+ return _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 );
+}
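+
+/*
+ * Illustrative sketch, not part of the original header: initializing a heap
+ * in a static memory area and performing an aligned allocation. The area
+ * size, the requested size and the alignment are assumptions made for this
+ * example.
+ *
+ * @code
+ * static Heap_Control example_heap;
+ * static char         example_area[ 4096 ];
+ *
+ * void example( void )
+ * {
+ *   void *p;
+ *
+ *   if (
+ *     _Heap_Initialize(
+ *       &example_heap,
+ *       example_area,
+ *       sizeof( example_area ),
+ *       0
+ *     ) == 0
+ *   ) {
+ *     return; // initialization failed
+ *   }
+ *
+ *   p = _Heap_Allocate_aligned( &example_heap, 128, 64 );
+ *
+ *   if ( p != NULL ) {
+ *     _Heap_Free( &example_heap, p );
+ *   }
+ * }
+ * @endcode
+ */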
+
+/**
+ * @brief Frees the allocated memory area starting at @a addr in the heap
+ * @a heap.
+ *
+ * Inappropriate values for @a addr may corrupt the heap.
+ *
+ * Returns @c true in case of success, and @c false otherwise.
+ */
+bool _Heap_Free( Heap_Control *heap, void *addr );
+
+/**
+ * @brief Walks the heap @a heap to verify its integrity.
+ *
+ * If @a dump is @c true, then diagnostic messages will be printed to standard
+ * output. In this case @a source is used to mark the output lines.
+ *
+ * Returns @c true if no errors occurred, and @c false if the heap is corrupt.
+ */
+bool _Heap_Walk(
+ Heap_Control *heap,
+ int source,
+ bool dump
+);
+
+/**
+ * @brief Heap block visitor.
+ *
+ * @see _Heap_Iterate().
+ *
+ * @retval true Stop the iteration.
+ * @retval false Continue the iteration.
+ */
+typedef bool (*Heap_Block_visitor)(
+ const Heap_Block *block,
+ uintptr_t block_size,
+ bool block_is_used,
+ void *visitor_arg
+);
+
+/**
+ * @brief Iterates over all blocks of the heap.
+ *
+ * For each block the @a visitor with the argument @a visitor_arg will be
+ * called.
+ */
+void _Heap_Iterate(
+ Heap_Control *heap,
+ Heap_Block_visitor visitor,
+ void *visitor_arg
+);
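+
+/*
+ * Illustrative sketch, not part of the original header: a visitor which
+ * counts the used blocks of a heap. The names are assumptions made for
+ * this example.
+ *
+ * @code
+ * static bool count_used_blocks(
+ *   const Heap_Block *block,
+ *   uintptr_t         block_size,
+ *   bool              block_is_used,
+ *   void             *visitor_arg
+ * )
+ * {
+ *   (void) block;
+ *   (void) block_size;
+ *
+ *   if ( block_is_used ) {
+ *     ++*(uintptr_t *) visitor_arg;
+ *   }
+ *
+ *   return false; // continue the iteration
+ * }
+ *
+ * uintptr_t used_block_count( Heap_Control *heap )
+ * {
+ *   uintptr_t count = 0;
+ *
+ *   _Heap_Iterate( heap, count_used_blocks, &count );
+ *
+ *   return count;
+ * }
+ * @endcode
+ */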
+
+/**
+ * @brief Greedy allocate that empties the heap.
+ *
+ * Afterwards the heap has at most @a block_count allocatable blocks of sizes
+ * specified by @a block_sizes. The @a block_sizes must point to an array with
+ * @a block_count members. All other blocks are used.
+ *
+ * @see _Heap_Greedy_free().
+ */
+Heap_Block *_Heap_Greedy_allocate(
+ Heap_Control *heap,
+ const uintptr_t *block_sizes,
+ size_t block_count
+);
+
+/**
+ * @brief Greedy allocate all blocks except the largest free block.
+ *
+ * Afterwards the heap has at most one allocatable block. This block is the
+ * largest free block if it exists. The allocatable size of this block is
+ * stored in @a allocatable_size. All other blocks are used.
+ *
+ * @see _Heap_Greedy_free().
+ */
+Heap_Block *_Heap_Greedy_allocate_all_except_largest(
+ Heap_Control *heap,
+ uintptr_t *allocatable_size
+);
+
+/**
+ * @brief Frees blocks of a greedy allocation.
+ *
+ * The @a blocks must be the return value of _Heap_Greedy_allocate().
+ */
+void _Heap_Greedy_free(
+ Heap_Control *heap,
+ Heap_Block *blocks
+);
+
+/**
+ * @brief Returns information about used and free blocks for the heap @a heap
+ * in @a info.
+ */
+void _Heap_Get_information(
+ Heap_Control *heap,
+ Heap_Information_block *info
+);
+
+/**
+ * @brief Returns information about free blocks for the heap @a heap in
+ * @a info.
+ */
+void _Heap_Get_free_information(
+ Heap_Control *heap,
+ Heap_Information *info
+);
+
+/**
+ * @brief Returns the size of the allocatable memory area starting at @a addr
+ * in @a size.
+ *
+ * The size value may be greater than the initially requested size in
+ * _Heap_Allocate_aligned_with_boundary().
+ *
+ * Inappropriate values for @a addr will not corrupt the heap, but may yield
+ * invalid size values.
+ *
+ * Returns @c true if successful, and @c false otherwise.
+ */
+bool _Heap_Size_of_alloc_area(
+ Heap_Control *heap,
+ void *addr,
+ uintptr_t *size
+);
+
+/**
+ * @brief Resizes the block of the allocated memory area starting at @a addr.
+ *
+ * The new memory area will have a size of at least @a size bytes. A resize
+ * may be impossible and depends on the current heap usage.
+ *
+ * The size available for allocation in the current block before the resize
+ * will be returned in @a old_size. The size available for allocation in
+ * the resized block will be returned in @a new_size. If the resize was not
+ * successful, then a value of zero will be returned in @a new_size.
+ *
+ * Inappropriate values for @a addr may corrupt the heap.
+ */
+Heap_Resize_status _Heap_Resize_block(
+ Heap_Control *heap,
+ void *addr,
+ uintptr_t size,
+ uintptr_t *old_size,
+ uintptr_t *new_size
+);
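A minimal resize sketch (editorial), assuming the Heap_Resize_status enumerators such as HEAP_RESIZE_SUCCESSFUL declared with the type in heap.h:

/* Editorial sketch: try to grow an allocation in place. */
bool heap_try_grow( Heap_Control *heap, void *addr, uintptr_t new_request )
{
  uintptr_t          old_size;
  uintptr_t          new_size;
  Heap_Resize_status status;

  status = _Heap_Resize_block( heap, addr, new_request, &old_size, &new_size );

  return status == HEAP_RESIZE_SUCCESSFUL;
}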
+
+/**
+ * @brief Allocates the memory area starting at @a alloc_begin of size
+ * @a alloc_size bytes in the block @a block.
+ *
+ * The block may be split up into multiple blocks. The previous and next block
+ * may be used or free. Free block parts which form a valid new block will be
+ * inserted into the free list or merged with an adjacent free block. If the
+ * block is used, they will be inserted after the free list head. If the block
+ * is free, they will be inserted after the previous block in the free list.
+ *
+ * Inappropriate values for @a alloc_begin or @a alloc_size may corrupt the
+ * heap.
+ *
+ * Returns the block containing the allocated memory area.
+ */
+Heap_Block *_Heap_Block_allocate(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_begin,
+ uintptr_t alloc_size
+);
+
+#ifndef HEAP_PROTECTION
+ #define _Heap_Protection_block_initialize( heap, block ) ((void) 0)
+ #define _Heap_Protection_block_check( heap, block ) ((void) 0)
+ #define _Heap_Protection_block_error( heap, block ) ((void) 0)
+ #define _Heap_Protection_free_all_delayed_blocks( heap ) ((void) 0)
+#else
+ static inline void _Heap_Protection_block_initialize(
+ Heap_Control *heap,
+ Heap_Block *block
+ )
+ {
+ (*heap->Protection.block_initialize)( heap, block );
+ }
+
+ static inline void _Heap_Protection_block_check(
+ Heap_Control *heap,
+ Heap_Block *block
+ )
+ {
+ (*heap->Protection.block_check)( heap, block );
+ }
+
+ static inline void _Heap_Protection_block_error(
+ Heap_Control *heap,
+ Heap_Block *block
+ )
+ {
+ (*heap->Protection.block_error)( heap, block );
+ }
+
+ static inline void _Heap_Protection_free_all_delayed_blocks( Heap_Control *heap )
+ {
+ uintptr_t large = 0
+ - (uintptr_t) HEAP_BLOCK_HEADER_SIZE
+ - (uintptr_t) HEAP_ALLOC_BONUS
+ - (uintptr_t) 1;
+ void *p = _Heap_Allocate( heap, large );
+ _Heap_Free( heap, p );
+ }
+#endif
+
+/**
+ * @brief Sets the fraction of delayed free blocks that is actually freed
+ * during memory shortage.
+ *
+ * The default is to free half the delayed free blocks. This is equal to a
+ * fraction value of two.
+ *
+ * @param[in] heap The heap control.
+ * @param[in] fraction The fraction of delayed free blocks actually freed is
+ *   one divided by this value.
+ */
+RTEMS_INLINE_ROUTINE void _Heap_Protection_set_delayed_free_fraction(
+ Heap_Control *heap,
+ uintptr_t fraction
+)
+{
+#ifdef HEAP_PROTECTION
+ heap->Protection.delayed_free_fraction = fraction;
+#else
+ (void) heap;
+ (void) fraction;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_head( Heap_Control *heap )
+{
+ return &heap->free_list;
+}
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
+{
+ return &heap->free_list;
+}
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
+{
+ return _Heap_Free_list_head(heap)->next;
+}
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_last( Heap_Control *heap )
+{
+ return _Heap_Free_list_tail(heap)->prev;
+}
+
+RTEMS_INLINE_ROUTINE void _Heap_Free_list_remove( Heap_Block *block )
+{
+ Heap_Block *next = block->next;
+ Heap_Block *prev = block->prev;
+
+ prev->next = next;
+ next->prev = prev;
+}
+
+RTEMS_INLINE_ROUTINE void _Heap_Free_list_replace(
+ Heap_Block *old_block,
+ Heap_Block *new_block
+)
+{
+ Heap_Block *next = old_block->next;
+ Heap_Block *prev = old_block->prev;
+
+ new_block->next = next;
+ new_block->prev = prev;
+
+ next->prev = new_block;
+ prev->next = new_block;
+}
+
+RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_after(
+ Heap_Block *block_before,
+ Heap_Block *new_block
+)
+{
+ Heap_Block *next = block_before->next;
+
+ new_block->next = next;
+ new_block->prev = block_before;
+ block_before->next = new_block;
+ next->prev = new_block;
+}
+
+RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_before(
+ Heap_Block *block_next,
+ Heap_Block *new_block
+)
+{
+ Heap_Block *prev = block_next->prev;
+
+ new_block->next = block_next;
+ new_block->prev = prev;
+ prev->next = new_block;
+ block_next->prev = new_block;
+}
+
+RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
+ uintptr_t value,
+ uintptr_t alignment
+)
+{
+ return (value % alignment) == 0;
+}
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
+ uintptr_t value,
+ uintptr_t alignment
+)
+{
+ return value - (value % alignment);
+}
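A worked example of the two helpers above (editorial): with an alignment of 8, the value 29 aligns down to 24, which is then reported as aligned:

/* Editorial sketch: alignment arithmetic of the helpers above. */
#include <assert.h>

void alignment_example( void )
{
  assert( _Heap_Align_down( 29, 8 ) == 24 ); /* 29 - (29 % 8) == 24 */
  assert( _Heap_Is_aligned( 24, 8 ) );       /* 24 % 8 == 0 */
}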
+
+/**
+ * @brief Returns the block which is @a offset away from @a block.
+ */
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
+ const Heap_Block *block,
+ uintptr_t offset
+)
+{
+ return (Heap_Block *) ((uintptr_t) block + offset);
+}
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Prev_block(
+ const Heap_Block *block
+)
+{
+ return (Heap_Block *) ((uintptr_t) block - block->prev_size);
+}
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
+ const Heap_Block *block
+)
+{
+ return (uintptr_t) block + HEAP_BLOCK_HEADER_SIZE;
+}
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
+ uintptr_t alloc_begin,
+ uintptr_t page_size
+)
+{
+ return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size )
+ - HEAP_BLOCK_HEADER_SIZE);
+}
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
+{
+ return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
+}
+
+RTEMS_INLINE_ROUTINE void _Heap_Block_set_size(
+ Heap_Block *block,
+ uintptr_t size
+)
+{
+ uintptr_t flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
+
+ block->size_and_flag = size | flag;
+}
+
+RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
+{
+ return block->size_and_flag & HEAP_PREV_BLOCK_USED;
+}
+
+RTEMS_INLINE_ROUTINE bool _Heap_Is_used(
+ const Heap_Block *block
+)
+{
+ const Heap_Block *const next_block =
+ _Heap_Block_at( block, _Heap_Block_size( block ) );
+
+ return _Heap_Is_prev_used( next_block );
+}
+
+RTEMS_INLINE_ROUTINE bool _Heap_Is_free(
+ const Heap_Block *block
+)
+{
+ return !_Heap_Is_used( block );
+}
+
+RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
+ const Heap_Control *heap,
+ const Heap_Block *block
+)
+{
+ return (uintptr_t) block >= (uintptr_t) heap->first_block
+ && (uintptr_t) block <= (uintptr_t) heap->last_block;
+}
+
+/**
+ * @brief Sets the size of the last block for heap @a heap.
+ *
+ * The next block of the last block will be the first block. Since the first
+ * block indicates that the previous block is used, this ensures that the last
+ * block appears as used for the _Heap_Is_used() and _Heap_Is_free()
+ * functions.
+ *
+ * This feature will be used to terminate the scattered heap area list. See
+ * also _Heap_Extend().
+ */
+RTEMS_INLINE_ROUTINE void _Heap_Set_last_block_size( Heap_Control *heap )
+{
+ _Heap_Block_set_size(
+ heap->last_block,
+ (uintptr_t) heap->first_block - (uintptr_t) heap->last_block
+ );
+}
+
+/**
+ * @brief Returns the size of the allocatable area in bytes.
+ *
+ * This value is an integral multiple of the page size.
+ */
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap )
+{
+ return heap->stats.size;
+}
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Max( uintptr_t a, uintptr_t b )
+{
+ return a > b ? a : b;
+}
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min( uintptr_t a, uintptr_t b )
+{
+ return a < b ? a : b;
+}
+
+#ifdef RTEMS_DEBUG
+ #define RTEMS_HEAP_DEBUG
+#endif
+
+#ifdef RTEMS_HEAP_DEBUG
+ #include <assert.h>
+ #define _HAssert( cond ) \
+ do { \
+ if ( !(cond) ) { \
+ __assert( __FILE__, __LINE__, #cond ); \
+ } \
+ } while (0)
+#else
+ #define _HAssert( cond ) ((void) 0)
+#endif
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/interr.h b/cpukit/include/rtems/score/interr.h
new file mode 100644
index 0000000000..3144952716
--- /dev/null
+++ b/cpukit/include/rtems/score/interr.h
@@ -0,0 +1,268 @@
+/**
+ * @file rtems/score/interr.h
+ *
+ * @brief Constants and Prototypes Related to the Internal Error Handler
+ *
+ * This include file contains constants and prototypes related
+ * to the Internal Error Handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_INTERR_H
+#define _RTEMS_SCORE_INTERR_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rtems/system.h>
+
+/**
+ * @defgroup ScoreIntErr Internal Error Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which provides the foundation
+ * internal error reporting services used in all of the APIs supported by
+ * RTEMS.
+ */
+/**@{*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief This type lists the possible sources from which an error
+ * can be reported.
+ */
+typedef enum {
+ /**
+ * @brief Errors of the core system.
+ *
+ * @see Internal_errors_Core_list.
+ */
+ INTERNAL_ERROR_CORE = 0,
+
+ /**
+ * @brief Errors of the RTEMS API.
+ */
+ INTERNAL_ERROR_RTEMS_API = 1,
+
+ /**
+ * @brief Errors of the POSIX API.
+ */
+ INTERNAL_ERROR_POSIX_API = 2,
+
+ /**
+ * @brief Fatal source for the block device cache.
+ *
+ * @see rtems_bdbuf_fatal_code.
+ */
+ RTEMS_FATAL_SOURCE_BDBUF = 3,
+
+ /**
+ * @brief Fatal source for application specific errors.
+ *
+ * The fatal code is application specific.
+ */
+ RTEMS_FATAL_SOURCE_APPLICATION = 4,
+
+ /**
+ * @brief Fatal source of exit().
+ *
+ * The fatal code is the exit() status code.
+ */
+ RTEMS_FATAL_SOURCE_EXIT = 5,
+
+ /**
+ * @brief Fatal source for BSP errors.
+ *
+ * The fatal codes are defined in <bsp/fatal.h>. Examples are interrupt and
+ * exception initialization.
+ *
+ * @see bsp_fatal_code and bsp_fatal().
+ */
+ RTEMS_FATAL_SOURCE_BSP = 6,
+
+ /**
+ * @brief Fatal source of assert().
+ *
+ * The fatal code is the pointer value of the assert context.
+ *
+ * @see rtems_assert_context.
+ */
+ RTEMS_FATAL_SOURCE_ASSERT = 7,
+
+ /**
+ * @brief Fatal source of the stack checker.
+ *
+ * The fatal code is the object name of the executing task.
+ */
+ RTEMS_FATAL_SOURCE_STACK_CHECKER = 8,
+
+ /**
+ * @brief Fatal source of the exceptions.
+ *
+ * The fatal code is the pointer value of the exception frame pointer.
+ *
+ * @see rtems_exception_frame and rtems_exception_frame_print().
+ */
+ RTEMS_FATAL_SOURCE_EXCEPTION = 9,
+
+ /**
+ * @brief Fatal source of SMP domain.
+ *
+ * @see SMP_Fatal_code.
+ */
+ RTEMS_FATAL_SOURCE_SMP = 10,
+
+ /**
+ * @brief Fatal source of rtems_panic().
+ *
+ * @see rtems_panic().
+ */
+ RTEMS_FATAL_SOURCE_PANIC = 11,
+
+ /**
+ * @brief The last available fatal source.
+ *
+ * This enum value ensures that the enum type needs at least 32-bits for
+ * architectures with short enums.
+ */
+ RTEMS_FATAL_SOURCE_LAST = 0xffffffff
+} Internal_errors_Source;
+
+/**
+ * @brief A list of errors which are generated internally by the executive
+ * core.
+ *
+ * Do not re-use the numbers of obsolete error codes. Comment out error
+ * codes which are no longer used.
+ */
+typedef enum {
+ /* INTERNAL_ERROR_NO_CONFIGURATION_TABLE = 0, */
+ /* INTERNAL_ERROR_NO_CPU_TABLE = 1, */
+ INTERNAL_ERROR_TOO_LITTLE_WORKSPACE = 2,
+ INTERNAL_ERROR_WORKSPACE_ALLOCATION = 3,
+ INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL = 4,
+ INTERNAL_ERROR_THREAD_EXITTED = 5,
+ INTERNAL_ERROR_INCONSISTENT_MP_INFORMATION = 6,
+ INTERNAL_ERROR_INVALID_NODE = 7,
+ INTERNAL_ERROR_NO_MPCI = 8,
+ INTERNAL_ERROR_BAD_PACKET = 9,
+ INTERNAL_ERROR_OUT_OF_PACKETS = 10,
+ INTERNAL_ERROR_OUT_OF_GLOBAL_OBJECTS = 11,
+ INTERNAL_ERROR_OUT_OF_PROXIES = 12,
+ INTERNAL_ERROR_INVALID_GLOBAL_ID = 13,
+ INTERNAL_ERROR_BAD_STACK_HOOK = 14,
+ /* INTERNAL_ERROR_BAD_ATTRIBUTES = 15, */
+ /* INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY = 16, */
+ /* INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL = 17, */
+ /* INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE = 18, */
+ INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0 = 19,
+ /* INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP = 20, */
+ INTERNAL_ERROR_GXX_KEY_ADD_FAILED = 21,
+ INTERNAL_ERROR_GXX_MUTEX_INIT_FAILED = 22,
+ INTERNAL_ERROR_NO_MEMORY_FOR_HEAP = 23,
+ INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR = 24,
+ INTERNAL_ERROR_RESOURCE_IN_USE = 25,
+ INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL = 26,
+ /* INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL = 27, */
+ INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK = 28,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE = 29,
+ INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL = 30,
+ INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT = 31,
+ INTERNAL_ERROR_RTEMS_INIT_TASK_CREATE_FAILED = 32,
+ INTERNAL_ERROR_POSIX_INIT_THREAD_CREATE_FAILED = 33,
+ INTERNAL_ERROR_LIBIO_USER_ENV_KEY_CREATE_FAILED = 34,
+ /* INTERNAL_ERROR_LIBIO_SEM_CREATE_FAILED = 35, */
+ INTERNAL_ERROR_LIBIO_STDOUT_FD_OPEN_FAILED = 36,
+ INTERNAL_ERROR_LIBIO_STDERR_FD_OPEN_FAILED = 37,
+ INTERNAL_ERROR_ILLEGAL_USE_OF_FLOATING_POINT_UNIT = 38,
+ INTERNAL_ERROR_ARC4RANDOM_GETENTROPY_FAIL = 39
+} Internal_errors_Core_list;
+
+typedef CPU_Uint32ptr Internal_errors_t;
+
+/**
+ * This type holds the fatal error information.
+ */
+typedef struct {
+ /** This is the source of the error. */
+ Internal_errors_Source the_source;
+ /** This is the error code. */
+ Internal_errors_t the_error;
+} Internal_errors_Information;
+
+/**
+ * When a fatal error occurs, the error information is stored here.
+ */
+extern Internal_errors_Information _Internal_errors_What_happened;
+
+/**
+ * @brief Initiates system termination.
+ *
+ * This routine is invoked when the application or the executive itself
+ * determines that a fatal error has occurred or a final system state is
+ * reached (for example after exit()).
+ *
+ * The first action of this function is to call the fatal handler of the user
+ * extensions. For the initial extensions the following conditions are
+ * required
+ * - a valid stack pointer and enough stack space,
+ * - a valid code memory, and
+ * - valid read-only data.
+ *
+ * For the initial extensions the read-write data (including BSS segment) is
+ * not required on single processor configurations. On SMP configurations
+ * however the read-write data must be initialized since this function must
+ * determine the state of the other processors and request them to shut-down if
+ * necessary.
+ *
+ * Non-initial extensions additionally require valid read-write data. The BSP
+ * may install an initial extension that performs a system reset. In this case
+ * the non-initial extensions will not be called.
+ *
+ * Once all fatal handlers have executed, the error information is stored in
+ * _Internal_errors_What_happened and the system state is set to
+ * SYSTEM_STATE_TERMINATED.
+ *
+ * The final step is to call the CPU specific _CPU_Fatal_halt().
+ *
+ * @param[in] the_source The fatal source indicating the subsystem the fatal
+ * condition originated in.
+ * @param[in] the_error The fatal error code. This value must be interpreted
+ * with respect to the source.
+ *
+ * @see rtems_fatal() and _Internal_error().
+ */
+void _Terminate(
+ Internal_errors_Source the_source,
+ Internal_errors_t the_error
+) RTEMS_NO_RETURN;
+
+/**
+ * @brief Terminates the system with an INTERNAL_ERROR_CORE fatal source and
+ * the specified core error code.
+ *
+ * @param[in] core_error The core error code.
+ *
+ * @see _Terminate().
+ */
+void _Internal_error( Internal_errors_Core_list core_error ) RTEMS_NO_RETURN;
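A hedged usage sketch: terminating on an unrecoverable core condition. The check itself is hypothetical; the error code is taken from the enumeration above:

/* Editorial sketch: report a fatal core error; this call does not return. */
void check_workspace( const void *workspace )
{
  if ( workspace == NULL ) {
    _Internal_error( INTERNAL_ERROR_TOO_LITTLE_WORKSPACE );
  }
}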
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/io.h b/cpukit/include/rtems/score/io.h
new file mode 100644
index 0000000000..ae3c57f031
--- /dev/null
+++ b/cpukit/include/rtems/score/io.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_IO_H
+#define _RTEMS_SCORE_IO_H
+
+#include <rtems/score/basedefs.h>
+
+#include <stdarg.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef void ( *IO_Put_char )(int c, void *arg);
+
+int _IO_Printf(
+ IO_Put_char put_char,
+ void *arg,
+ char const *fmt,
+ ...
+) RTEMS_PRINTFLIKE( 3, 4 );
+
+int _IO_Vprintf(
+ IO_Put_char put_char,
+ void *arg,
+ char const *fmt,
+ va_list ap
+);
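A minimal sketch of the callback protocol (editorial): _IO_Printf() hands each output character to the IO_Put_char callback, so a fixed-size buffer sink could look like the following. The sink type and the overflow policy (drop but keep counting) are assumptions:

/* Editorial sketch: format into a fixed-size buffer via _IO_Printf(). */
#include <stddef.h> /* size_t */

typedef struct {
  char   *buf;
  size_t  pos;
  size_t  size;
} buffer_sink;

static void buffer_put_char( int c, void *arg )
{
  buffer_sink *sink = arg;

  if ( sink->pos < sink->size ) {
    sink->buf[ sink->pos ] = (char) c;
  }

  ++sink->pos; /* count also the dropped characters */
}

int format_to_buffer( char *buf, size_t size, int value )
{
  buffer_sink sink = { buf, 0, size };

  return _IO_Printf( buffer_put_char, &sink, "value = %d\n", value );
}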
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_IO_H */
diff --git a/cpukit/include/rtems/score/isr.h b/cpukit/include/rtems/score/isr.h
new file mode 100644
index 0000000000..d9c03b807f
--- /dev/null
+++ b/cpukit/include/rtems/score/isr.h
@@ -0,0 +1,155 @@
+/**
+ * @file rtems/score/isr.h
+ *
+ * @brief Data Related to the Management of Processor Interrupt Levels
+ *
+ * This include file contains all the constants and structures associated
+ * with the management of processor interrupt levels. This handler
+ * supports interrupt critical sections, vectoring of user interrupt
+ * handlers, nesting of interrupts, and manipulating interrupt levels.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2012.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ISR_H
+#define _RTEMS_SCORE_ISR_H
+
+#include <rtems/score/isrlevel.h>
+
+/**
+ * @defgroup ScoreISR ISR Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which provides the foundation
+ * ISR services used in all of the APIs supported by RTEMS.
+ *
+ * The ISR nest level counter variable is maintained as part of the
+ * per-CPU data structure.
+ */
+/**@{*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * The following type is used to manage the interrupt vectors.
+ */
+typedef uint32_t ISR_Vector_number;
+
+/**
+ * Return type for ISR Handler
+ */
+typedef void ISR_Handler;
+
+#if (CPU_SIMPLE_VECTORED_INTERRUPTS == FALSE)
+
+typedef void * ISR_Handler_entry;
+
+#else
+/**
+ * Pointer to an ISR Handler
+ */
+#if (CPU_ISR_PASSES_FRAME_POINTER == TRUE)
+typedef ISR_Handler ( *ISR_Handler_entry )(
+ ISR_Vector_number,
+ CPU_Interrupt_frame *
+ );
+#else
+typedef ISR_Handler ( *ISR_Handler_entry )(
+ ISR_Vector_number
+ );
+#endif
+
+/**
+ * The following declares the Vector Table. Application
+ * interrupt service routines are vectored by the ISR Handler via this table.
+ */
+extern ISR_Handler_entry _ISR_Vector_table[ CPU_INTERRUPT_NUMBER_OF_VECTORS ];
+#endif
+
+/**
+ * @brief Initialize the ISR handler.
+ *
+ * This routine performs the initialization necessary for the ISR handler.
+ */
+void _ISR_Handler_initialization ( void );
+
+/**
+ * @brief Install interrupt handler vector.
+ *
+ * This routine installs new_handler as the interrupt service routine
+ * for the specified vector. The previous interrupt service routine is
+ * returned as old_handler.
+ *
+ * @param[in] _vector is the vector number
+ * @param[in] _new_handler is ISR handler to install
+ * @param[in] _old_handler is a pointer to a variable which will be set
+ * to the old handler
+ *
+ * @retval *_old_handler will be set to the old ISR handler
+ */
+#define _ISR_Install_vector( _vector, _new_handler, _old_handler ) \
+ _CPU_ISR_install_vector( _vector, _new_handler, _old_handler )
+
+/**
+ * @brief ISR interrupt dispatcher.
+ *
+ * This routine is the interrupt dispatcher. ALL interrupts
+ * are vectored to this routine so that minimal context can be saved
+ * and setup performed before the application's high-level language
+ * interrupt service routine is invoked. After the application's
+ * interrupt service routine returns control to this routine, it
+ * will determine if a thread dispatch is necessary. If so, it will
+ * ensure that the necessary thread scheduling operations are
+ * performed when the outermost interrupt service routine exits.
+ *
+ * @note Typically implemented in assembly language.
+ */
+void _ISR_Handler( void );
+
+/**
+ * @brief ISR wrapper for thread dispatcher.
+ *
+ * This routine provides a wrapper so that the routine
+ * @ref _Thread_Dispatch can be invoked when a reschedule is necessary
+ * at the end of the outermost interrupt service routine. This
+ * wrapper is necessary to establish the processor context needed
+ * by _Thread_Dispatch and to save the processor context which is
+ * corrupted by _Thread_Dispatch. This context typically consists
+ * of registers which are not preserved across routine invocations.
+ *
+ * @note Typically implemented in assembly language.
+ */
+void _ISR_Dispatch( void );
+
+/**
+ * @brief Checks if an ISR is in progress.
+ *
+ * This function returns true if the processor is currently servicing
+ * an interrupt and false otherwise. A return value of true indicates
+ * that the caller is an interrupt service routine, NOT a thread.
+ *
+ * @retval This method returns true when called from an ISR.
+ */
+bool _ISR_Is_in_progress( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/isrlevel.h b/cpukit/include/rtems/score/isrlevel.h
new file mode 100644
index 0000000000..abfb1b67fc
--- /dev/null
+++ b/cpukit/include/rtems/score/isrlevel.h
@@ -0,0 +1,153 @@
+/**
+ * @file rtems/score/isrlevel.h
+ *
+ * @brief ISR Level Type
+ *
+ * This include file defines the ISR Level type. It exists to
+ * simplify include dependencies. It is part of the ISR Handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ISR_LEVEL_h
+#define _RTEMS_SCORE_ISR_LEVEL_h
+
+#include <rtems/score/cpu.h>
+#include <rtems/score/assert.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreISR ISR Handler
+ */
+/**@{*/
+
+/**
+ * The following type defines the control block used to manage
+ * the interrupt level portion of the status register.
+ */
+typedef uint32_t ISR_Level;
+
+/**
+ * @brief Disables interrupts on this processor.
+ *
+ * This macro disables all interrupts on this processor so that a critical
+ * section of code is protected from concurrent access by interrupts of this
+ * processor. Disabling of interrupts disables thread dispatching on the
+ * processor as well.
+ *
+ * On SMP configurations other processors can enter such sections if not
+ * protected by other means.
+ *
+ * @param[out] _level The argument @a _level will contain the previous
+ * interrupt mask level.
+ */
+#define _ISR_Local_disable( _level ) \
+ do { \
+ _CPU_ISR_Disable( _level ); \
+ RTEMS_COMPILER_MEMORY_BARRIER(); \
+ } while (0)
+
+/**
+ * @brief Enables interrupts on this processor.
+ *
+ * This macro restores the interrupt status on the processor with the
+ * interrupt level value obtained by _ISR_Local_disable(). It is used at the end of
+ * a critical section of code to enable interrupts so they can be processed
+ * again.
+ *
+ * @param[in] _level The interrupt level previously obtained by
+ * _ISR_Local_disable().
+ */
+#define _ISR_Local_enable( _level ) \
+ do { \
+ RTEMS_COMPILER_MEMORY_BARRIER(); \
+ _CPU_ISR_Enable( _level ); \
+ } while (0)
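A classic usage sketch of the disable/enable pair above (editorial; the shared counter is hypothetical):

/* Editorial sketch: a local interrupt critical section. */
void increment_shared_counter( volatile uint32_t *counter )
{
  ISR_Level level;

  _ISR_Local_disable( level );
  ++( *counter );
  _ISR_Local_enable( level );
}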
+
+/**
+ * @brief Temporarily enables interrupts on this processor.
+ *
+ * This macro temporarily enables interrupts to the previous
+ * interrupt mask level and then disables all interrupts so that
+ * the caller can continue into the second part of a critical
+ * section.
+ *
+ * This routine is used to temporarily enable interrupts
+ * during a long critical section. It is used in long sections of
+ * critical code when a point is reached at which interrupts can
+ * be temporarily enabled. Deciding where to flash interrupts
+ * in a long critical section is often difficult and the point
+ * must be selected with care to ensure that the critical section
+ * properly protects itself.
+ *
+ * @param[in] _level The interrupt level previously obtained by
+ * _ISR_Local_disable().
+ */
+#define _ISR_Local_flash( _level ) \
+ do { \
+ RTEMS_COMPILER_MEMORY_BARRIER(); \
+ _CPU_ISR_Flash( _level ); \
+ RTEMS_COMPILER_MEMORY_BARRIER(); \
+ } while (0)
+
+/**
+ * @brief Returns true if interrupts are enabled in the specified interrupt
+ * level, otherwise returns false.
+ *
+ * @param[in] _level The ISR level.
+ *
+ * @retval true Interrupts are enabled in the interrupt level.
+ * @retval false Otherwise.
+ */
+#define _ISR_Is_enabled( _level ) \
+ _CPU_ISR_Is_enabled( _level )
+
+/**
+ * @brief Return current interrupt level.
+ *
+ * This routine returns the current interrupt level.
+ *
+ * @retval This method returns the current level.
+ */
+#define _ISR_Get_level() \
+ _CPU_ISR_Get_level()
+
+/**
+ * @brief Set current interrupt level.
+ *
+ * This routine sets the current interrupt level to that specified
+ * by @a _new_level. The new interrupt level is effective when the
+ * routine exits.
+ *
+ * @param[in] _new_level contains the desired interrupt level.
+ */
+#define _ISR_Set_level( _new_level ) \
+ do { \
+ RTEMS_COMPILER_MEMORY_BARRIER(); \
+ _CPU_ISR_Set_level( _new_level ); \
+ RTEMS_COMPILER_MEMORY_BARRIER(); \
+ } while (0)
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/cpukit/include/rtems/score/isrlock.h b/cpukit/include/rtems/score/isrlock.h
new file mode 100644
index 0000000000..7dd2f29000
--- /dev/null
+++ b/cpukit/include/rtems/score/isrlock.h
@@ -0,0 +1,439 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreISRLocks
+ *
+ * @brief ISR Locks
+ */
+
+/*
+ * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ISR_LOCK_H
+#define _RTEMS_SCORE_ISR_LOCK_H
+
+#include <rtems/score/isrlevel.h>
+#include <rtems/score/smplock.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreISRLocks ISR Locks
+ *
+ * @ingroup ScoreISR
+ *
+ * @brief Low-level lock to protect critical sections accessed by threads and
+ * interrupt service routines.
+ *
+ * On single processor configurations the ISR locks degrade to simple ISR
+ * disable/enable sequences. No additional storage or objects are required.
+ *
+ * This synchronization primitive is supported on SMP configurations. Here SMP
+ * locks are used.
+ *
+ * @{
+ */
+
+/**
+ * @brief ISR lock control.
+ *
+ * @warning Empty structures are implementation-defined in C. GCC gives them a
+ * size of zero. In C++ empty structures have a non-zero size.
+ */
+typedef struct {
+#if defined( RTEMS_SMP )
+ SMP_lock_Control Lock;
+#endif
+} ISR_lock_Control;
+
+/**
+ * @brief Local ISR lock context for acquire and release pairs.
+ */
+typedef struct {
+#if defined( RTEMS_SMP )
+ SMP_lock_Context Lock_context;
+#else
+ ISR_Level isr_level;
+#endif
+#if defined( RTEMS_PROFILING )
+ /**
+ * @brief The last interrupt disable instant in CPU counter ticks.
+ */
+ CPU_Counter_ticks ISR_disable_instant;
+#endif
+} ISR_lock_Context;
+
+/**
+ * @brief Defines an ISR lock member.
+ *
+ * Do not add a ';' after this macro.
+ *
+ * @param _designator The designator for the interrupt lock.
+ */
+#if defined( RTEMS_SMP )
+ #define ISR_LOCK_MEMBER( _designator ) ISR_lock_Control _designator;
+#else
+ #define ISR_LOCK_MEMBER( _designator )
+#endif
+
+/**
+ * @brief Declares an ISR lock variable.
+ *
+ * Do not add a ';' after this macro.
+ *
+ * @param _qualifier The qualifier for the interrupt lock, e.g. extern.
+ * @param _designator The designator for the interrupt lock.
+ */
+#if defined( RTEMS_SMP )
+ #define ISR_LOCK_DECLARE( _qualifier, _designator ) \
+ _qualifier ISR_lock_Control _designator;
+#else
+ #define ISR_LOCK_DECLARE( _qualifier, _designator )
+#endif
+
+/**
+ * @brief Defines an ISR lock variable.
+ *
+ * Do not add a ';' after this macro.
+ *
+ * @param _qualifier The qualifier for the interrupt lock, e.g. static.
+ * @param _designator The designator for the interrupt lock.
+ * @param _name The name for the interrupt lock. It must be a string. The
+ * name is only used if profiling is enabled.
+ */
+#if defined( RTEMS_SMP )
+ #define ISR_LOCK_DEFINE( _qualifier, _designator, _name ) \
+ _qualifier ISR_lock_Control _designator = { SMP_LOCK_INITIALIZER( _name ) };
+#else
+ #define ISR_LOCK_DEFINE( _qualifier, _designator, _name )
+#endif
+
+/**
+ * @brief Defines an ISR lock variable reference.
+ *
+ * Do not add a ';' after this macro.
+ *
+ * @param _designator The designator for the interrupt lock reference.
+ * @param _target The target for the interrupt lock reference.
+ */
+#if defined( RTEMS_SMP )
+ #define ISR_LOCK_REFERENCE( _designator, _target ) \
+ ISR_lock_Control *_designator = _target;
+#else
+ #define ISR_LOCK_REFERENCE( _designator, _target )
+#endif
+
+/**
+ * @brief Initializer for static initialization of ISR locks.
+ *
+ * @param _name The name for the interrupt lock. It must be a string. The
+ * name is only used if profiling is enabled.
+ */
+#if defined( RTEMS_SMP )
+ #define ISR_LOCK_INITIALIZER( _name ) \
+ { SMP_LOCK_INITIALIZER( _name ) }
+#else
+ #define ISR_LOCK_INITIALIZER( _name ) \
+ { }
+#endif
+
+/**
+ * @brief Sets the ISR level in the ISR lock context.
+ *
+ * @param[in] context The ISR lock context.
+ * @param[in] level The ISR level.
+ */
+RTEMS_INLINE_ROUTINE void _ISR_lock_Context_set_level(
+ ISR_lock_Context *context,
+ ISR_Level level
+)
+{
+#if defined( RTEMS_SMP )
+ context->Lock_context.isr_level = level;
+#else
+ context->isr_level = level;
+#endif
+}
+
+/**
+ * @brief Initializes an ISR lock.
+ *
+ * Concurrent initialization leads to unpredictable results.
+ *
+ * @param[in] _lock The ISR lock control.
+ * @param[in] _name The name for the ISR lock. This name must be a
+ * string persistent throughout the life time of this lock. The name is only
+ * used if profiling is enabled.
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_Initialize( _lock, _name ) \
+ _SMP_lock_Initialize( &( _lock )->Lock, _name )
+#else
+ #define _ISR_lock_Initialize( _lock, _name )
+#endif
+
+/**
+ * @brief Destroys an ISR lock.
+ *
+ * Concurrent destruction leads to unpredictable results.
+ *
+ * @param[in] _lock The ISR lock control.
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_Destroy( _lock ) \
+ _SMP_lock_Destroy( &( _lock )->Lock )
+#else
+ #define _ISR_lock_Destroy( _lock )
+#endif
+
+/**
+ * @brief Acquires an ISR lock.
+ *
+ * Interrupts will be disabled. On SMP configurations this function acquires
+ * an SMP lock.
+ *
+ * This function can be used in thread and interrupt context.
+ *
+ * @param[in] _lock The ISR lock control.
+ * @param[in] _context The local ISR lock context for an acquire and release
+ * pair.
+ *
+ * @see _ISR_lock_Release_and_ISR_enable().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_ISR_disable_and_acquire( _lock, _context ) \
+ _SMP_lock_ISR_disable_and_acquire( \
+ &( _lock )->Lock, \
+ &( _context )->Lock_context \
+ )
+#else
+ #define _ISR_lock_ISR_disable_and_acquire( _lock, _context ) \
+ _ISR_Local_disable( ( _context )->isr_level )
+#endif
+
+/**
+ * @brief Releases an ISR lock.
+ *
+ * The interrupt status will be restored. On SMP configurations this function
+ * releases an SMP lock.
+ *
+ * This function can be used in thread and interrupt context.
+ *
+ * @param[in] _lock The ISR lock control.
+ * @param[in] _context The local ISR lock context for an acquire and release
+ * pair.
+ *
+ * @see _ISR_lock_ISR_disable_and_acquire().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_Release_and_ISR_enable( _lock, _context ) \
+ _SMP_lock_Release_and_ISR_enable( \
+ &( _lock )->Lock, \
+ &( _context )->Lock_context \
+ )
+#else
+ #define _ISR_lock_Release_and_ISR_enable( _lock, _context ) \
+ _ISR_Local_enable( ( _context )->isr_level )
+#endif
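A hedged sketch of the canonical acquire/release pattern; the lock object and its name string are hypothetical. Note that on single-processor configurations the lock definition compiles away and only the interrupt disable/enable sequence remains:

/* Editorial sketch: define a lock and protect a critical section with it. */
ISR_LOCK_DEFINE( static, example_lock, "Example" )

void protected_operation( void )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &example_lock, &lock_context );
  /* ... critical section, usable in thread and interrupt context ... */
  _ISR_lock_Release_and_ISR_enable( &example_lock, &lock_context );
}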
+
+/**
+ * @brief Acquires an ISR lock inside an ISR disabled section.
+ *
+ * The interrupt status will remain unchanged. On SMP configurations this
+ * function acquires an SMP lock.
+ *
+ * In case the executing context can be interrupted by higher priority
+ * interrupts and these interrupts enter the critical section protected by this
+ * lock, then the result is unpredictable.
+ *
+ * @param[in] _lock The ISR lock control.
+ * @param[in] _context The local ISR lock context for an acquire and release
+ * pair.
+ *
+ * @see _ISR_lock_Release().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_Acquire( _lock, _context ) \
+ _SMP_lock_Acquire( \
+ &( _lock )->Lock, \
+ &( _context )->Lock_context \
+ )
+#else
+ #define _ISR_lock_Acquire( _lock, _context ) \
+ (void) _context;
+#endif
+
+/**
+ * @brief Releases an ISR lock inside an ISR disabled section.
+ *
+ * The interrupt status will remain unchanged. On SMP configurations this
+ * function releases an SMP lock.
+ *
+ * @param[in] _lock The ISR lock control.
+ * @param[in] _context The local ISR lock context for an acquire and release
+ * pair.
+ *
+ * @see _ISR_lock_Acquire().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_Release( _lock, _context ) \
+ _SMP_lock_Release( \
+ &( _lock )->Lock, \
+ &( _context )->Lock_context \
+ )
+#else
+ #define _ISR_lock_Release( _lock, _context ) \
+ (void) _context;
+#endif
+
+/**
+ * @brief Acquires an ISR lock inside an ISR disabled section (inline).
+ *
+ * @see _ISR_lock_Acquire().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_Acquire_inline( _lock, _context ) \
+ _SMP_lock_Acquire_inline( \
+ &( _lock )->Lock, \
+ &( _context )->Lock_context \
+ )
+#else
+ #define _ISR_lock_Acquire_inline( _lock, _context ) \
+ (void) _context;
+#endif
+
+/**
+ * @brief Releases an ISR lock inside an ISR disabled section (inline).
+ *
+ * @see _ISR_lock_Release().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_Release_inline( _lock, _context ) \
+ _SMP_lock_Release_inline( \
+ &( _lock )->Lock, \
+ &( _context )->Lock_context \
+ )
+#else
+ #define _ISR_lock_Release_inline( _lock, _context ) \
+ (void) _context;
+#endif
+
+#if defined( RTEMS_DEBUG )
+ /**
+ * @brief Returns true, if the ISR lock is owned by the current processor,
+ * otherwise false.
+ *
+ * On uni-processor configurations, this function returns true, if interrupts
+ * are disabled, otherwise false.
+ *
+ * @param[in] _lock The ISR lock control.
+ */
+ #if defined( RTEMS_SMP )
+ #define _ISR_lock_Is_owner( _lock ) \
+ _SMP_lock_Is_owner( &( _lock )->Lock )
+ #else
+ #define _ISR_lock_Is_owner( _lock ) \
+ ( _ISR_Get_level() != 0 )
+ #endif
+#endif
+
+/**
+ * @brief Flashes an ISR lock.
+ *
+ * On uni-processor configurations this is a simple _ISR_Local_flash(). On SMP
+ * configurations this function releases an SMP lock, restores the interrupt
+ * status, then disables interrupts and acquires the SMP lock again.
+ *
+ * This function can be used in thread and interrupt context.
+ *
+ * @param[in] _lock The ISR lock control.
+ * @param[in] _context The local ISR lock context for an acquire and release
+ * pair.
+ *
+ * @see _ISR_lock_ISR_disable_and_acquire() and
+ * _ISR_lock_Release_and_ISR_enable().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_Flash( _lock, _context ) \
+ _SMP_lock_Release_and_ISR_enable( \
+ &( _lock )->Lock, \
+ &( _context )->Lock_context \
+ ); \
+ _SMP_lock_ISR_disable_and_acquire( \
+ &( _lock )->Lock, \
+ &( _context )->Lock_context \
+ )
+#else
+ #define _ISR_lock_Flash( _lock, _context ) \
+ _ISR_Local_flash( ( _context )->isr_level )
+#endif
+
+#if defined( RTEMS_PROFILING )
+ #define _ISR_lock_ISR_disable_profile( _context ) \
+ ( _context )->ISR_disable_instant = _CPU_Counter_read();
+#else
+ #define _ISR_lock_ISR_disable_profile( _context )
+#endif
+
+/**
+ * @brief Disables interrupts and saves the previous interrupt state in the ISR
+ * lock context.
+ *
+ * This function can be used in thread and interrupt context.
+ *
+ * @param[in] _context The local ISR lock context to store the interrupt state.
+ *
+ * @see _ISR_lock_ISR_enable().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_ISR_disable( _context ) \
+ do { \
+ _ISR_Local_disable( ( _context )->Lock_context.isr_level ); \
+ _ISR_lock_ISR_disable_profile( _context ) \
+ } while ( 0 )
+#else
+ #define _ISR_lock_ISR_disable( _context ) \
+ do { \
+ _ISR_Local_disable( ( _context )->isr_level ); \
+ _ISR_lock_ISR_disable_profile( _context ) \
+ } while ( 0 )
+#endif
+
+/**
+ * @brief Restores the saved interrupt state of the ISR lock context.
+ *
+ * This function can be used in thread and interrupt context.
+ *
+ * @param[in] _context The local ISR lock context containing the saved
+ * interrupt state.
+ *
+ * @see _ISR_lock_ISR_disable().
+ */
+#if defined( RTEMS_SMP )
+ #define _ISR_lock_ISR_enable( _context ) \
+ _ISR_Local_enable( ( _context )->Lock_context.isr_level )
+#else
+ #define _ISR_lock_ISR_enable( _context ) \
+ _ISR_Local_enable( ( _context )->isr_level )
+#endif
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_ISR_LOCK_H */
diff --git a/cpukit/include/rtems/score/mpci.h b/cpukit/include/rtems/score/mpci.h
new file mode 100644
index 0000000000..c20b45c3e1
--- /dev/null
+++ b/cpukit/include/rtems/score/mpci.h
@@ -0,0 +1,135 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreMPCI
+ *
+ * @brief MPCI Layer API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_MPCI_H
+#define _RTEMS_SCORE_MPCI_H
+
+#include <rtems/score/mppkt.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/watchdog.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreMPCI MPCI Handler
+ *
+ * @ingroup Score
+ *
+ * The MPCI Handler encapsulates functionality which is related to the
+ * generation, receipt, and processing of remote operations in a
+ * multiprocessor system. This handler contains the message passing
+ * support for making remote service calls as well as the server thread
+ * which processes requests from remote nodes.
+ */
+/**@{*/
+
+/**
+ * The following defines the node number used when a broadcast is desired.
+ */
+#define MPCI_ALL_NODES 0
+
+/**
+ * This type is returned by all user provided MPCI routines.
+ */
+typedef void MPCI_Entry;
+
+/**
+ * This type defines the prototype for the initialization entry point
+ * in a Multiprocessor Communications Interface.
+ */
+typedef MPCI_Entry ( *MPCI_initialization_entry )( void );
+
+/**
+ * This type defines the prototype for the get packet entry point
+ * in a Multiprocessor Communications Interface. The single
+ * parameter will point to the packet allocated.
+ */
+typedef MPCI_Entry ( *MPCI_get_packet_entry )(
+ MP_packet_Prefix **
+ );
+
+/**
+ * This type defines the prototype for the return packet entry point
+ * in a Multiprocessor Communications Interface. The single
+ * parameter will point to a packet previously allocated by the
+ * get packet MPCI entry.
+ */
+typedef MPCI_Entry ( *MPCI_return_packet_entry )(
+ MP_packet_Prefix *
+ );
+
+/**
+ * This type defines the prototype for the send packet entry point
+ * in a Multiprocessor Communications Interface. The single
+ * parameter will point to a packet previously allocated by the
+ * get packet entry point that has been filled in by the caller.
+ */
+typedef MPCI_Entry ( *MPCI_send_entry )(
+ uint32_t,
+ MP_packet_Prefix *
+ );
+
+/**
+ * This type defines the prototype for the receive packet entry point
+ * in a Multiprocessor Communications Interface. The single
+ * parameter will point to a packet allocated and filled in by the
+ * receive packet handler. The caller will block until a packet is
+ * received.
+ */
+typedef MPCI_Entry ( *MPCI_receive_entry )(
+ MP_packet_Prefix **
+ );
+
+/**
+ * This type defines the Multiprocessor Communications
+ * Interface (MPCI) Table. This table defines the user-provided
+ * MPCI which is a required part of a multiprocessor system.
+ *
+ * For non-blocking local operations that become remote operations, a
+ * timeout is needed. The default_timeout field provides this per-driver
+ * timeout.
+ */
+typedef struct {
+ /** This field contains the timeout for MPCI operations in ticks. */
+ uint32_t default_timeout;
+ /** This field contains the maximum size of a packet supported by this
+ * MPCI layer. This size places a limit on the size of a message
+ * which can be transmitted over this interface.
+ **/
+ size_t maximum_packet_size;
+ /** This field points to the MPCI initialization entry point. */
+ MPCI_initialization_entry initialization;
+ /** This field points to the MPCI get packet entry point. */
+ MPCI_get_packet_entry get_packet;
+ /** This field points to the MPCI return packet entry point. */
+ MPCI_return_packet_entry return_packet;
+ /** This field points to the MPCI send packet entry point. */
+ MPCI_send_entry send_packet;
+ /** This field points to the MPCI receive packet entry point. */
+ MPCI_receive_entry receive_packet;
+} MPCI_Control;
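A hedged sketch of a user-provided MPCI configuration table; the shm_* entry points and the timeout and packet size values are hypothetical placeholders for a real driver:

/* Editorial sketch: wiring hypothetical driver entry points into the table. */
extern MPCI_Entry shm_initialization( void );
extern MPCI_Entry shm_get_packet( MP_packet_Prefix ** );
extern MPCI_Entry shm_return_packet( MP_packet_Prefix * );
extern MPCI_Entry shm_send_packet( uint32_t, MP_packet_Prefix * );
extern MPCI_Entry shm_receive_packet( MP_packet_Prefix ** );

MPCI_Control example_mpci_table = {
  100,                /* default_timeout in ticks */
  1024,               /* maximum_packet_size in bytes */
  shm_initialization,
  shm_get_packet,
  shm_return_packet,
  shm_send_packet,
  shm_receive_packet
};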
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/mpciimpl.h b/cpukit/include/rtems/score/mpciimpl.h
new file mode 100644
index 0000000000..eb03a1d7b3
--- /dev/null
+++ b/cpukit/include/rtems/score/mpciimpl.h
@@ -0,0 +1,326 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreMPCI
+ *
+ * @brief MPCI Layer Implementation
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_MPCIIMPL_H
+#define _RTEMS_SCORE_MPCIIMPL_H
+
+#include <rtems/score/mpci.h>
+#include <rtems/score/status.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreMPCI
+ *
+ * @{
+ */
+
+/**
+ * For packets associated with requests that don't already have a timeout,
+ * use the one specified by this MPCI driver. The value specified by
+ * the MPCI driver sets an upper limit on how long a remote request
+ * should take to complete.
+ */
+#define MPCI_DEFAULT_TIMEOUT 0xFFFFFFFF
+
+/**
+ * The following defines the type for packet processing routines
+ * invoked by the MPCI Receive server.
+ */
+typedef void (*MPCI_Packet_processor)( MP_packet_Prefix * );
+
+/**
+ * The following enumerated type defines the list of
+ * internal MP operations.
+ */
+typedef enum {
+ MPCI_PACKETS_SYSTEM_VERIFY = 0
+} MPCI_Internal_Remote_operations;
+
+/**
+ * The following data structure defines the packet used to perform
+ * remote event operations.
+ */
+typedef struct {
+ /** This field is the general header for all packets. */
+ MP_packet_Prefix Prefix;
+ /** This value specifies the operation. */
+ MPCI_Internal_Remote_operations operation;
+ /** This is the maximum number of nodes in the system. It must agree
+ * on all nodes.
+ */
+ uint32_t maximum_nodes;
+ /** This field is the maximum number of concurrently existent
+ * globally offered objects.
+ */
+ uint32_t maximum_global_objects;
+} MPCI_Internal_packet;
+
+/**
+ * The following thread queue is used to maintain a list of tasks
+ * which currently have outstanding remote requests.
+ */
+extern Thread_queue_Control _MPCI_Remote_blocked_threads;
+
+/**
+ * The following define the internal pointers to the user's
+ * configuration information.
+ */
+extern MPCI_Control *_MPCI_table;
+
+/**
+ * @brief Pointer to MP thread control block.
+ *
+ * The following is used to determine when the multiprocessing receive
+ * thread is executing so that a proxy can be allocated instead of
+ * blocking the multiprocessing receive thread.
+ */
+extern Thread_Control *_MPCI_Receive_server_tcb;
+
+/**
+ * The following table contains the process packet routines provided
+ * by each object that supports MP operations.
+ */
+extern MPCI_Packet_processor
+_MPCI_Packet_processors[ MP_PACKET_CLASSES_LAST + 1 ];
+
+/**
+ * This routine registers the MPCI packet processor for the
+ * designated object class.
+ *
+ * @param[in] the_class is the class indicator for packets which will
+ * be processed by @a the_packet_processor method.
+ * @param[in] the_packet_processor is a pointer to a method which is
+ * invoked when packets with @a the_class are received.
+ */
+void _MPCI_Register_packet_processor(
+ MP_packet_Classes the_class,
+ MPCI_Packet_processor the_packet_processor
+
+);
+
+/**
+ * This function obtains a packet by invoking the user provided
+ * MPCI get packet callout.
+ *
+ * @retval This method returns a pointer to an MPCI packet which can be
+ * filled in by the caller and used to perform a subsequent
+ * remote operation.
+ */
+MP_packet_Prefix *_MPCI_Get_packet ( void );
+
+/**
+ * @brief Deallocate a packet.
+ *
+ * This routine deallocates a packet by invoking the user provided
+ * MPCI return packet callout.
+ *
+ * @param[in] the_packet is the MP packet to deallocate.
+ */
+void _MPCI_Return_packet (
+ MP_packet_Prefix *the_packet
+);
+
+/**
+ * @brief Send a process packet.
+ *
+ * This routine sends a process packet by invoking the user provided
+ * MPCI send callout.
+ *
+ * @param[in] destination is the node which should receive this packet.
+ * @param[in] the_packet is the packet to be sent.
+ */
+void _MPCI_Send_process_packet (
+ uint32_t destination,
+ MP_packet_Prefix *the_packet
+);
+
+/**
+ * @brief Send a request packet.
+ *
+ * This routine sends a request packet by invoking the user provided
+ * MPCI send callout.
+ *
+ * @param[in] destination is the node which should receive this packet.
+ * @param[in] the_packet is the packet to be sent.
+ * @param[in] extra_state is the extra thread state bits which should be
+ * set in addition to the remote operation pending state. It
+ * may indicate the caller is blocking on a message queue
+ * operation.
+ *
+ * @retval This method returns the operation status from the remote node.
+ */
+Status_Control _MPCI_Send_request_packet(
+ uint32_t destination,
+ MP_packet_Prefix *the_packet,
+ States_Control extra_state
+);
+
+/**
+ * @brief Send a response packet.
+ *
+ * This routine sends a response packet by invoking the user provided
+ * MPCI send callout.
+ *
+ * @param[in] destination is the node which should receive this packet.
+ * @param[in] the_packet is the packet to be sent.
+ */
+void _MPCI_Send_response_packet (
+ uint32_t destination,
+ MP_packet_Prefix *the_packet
+);
+
+/**
+ * @brief Receive a packet.
+ *
+ * This routine receives a packet by invoking the user provided
+ * MPCI receive callout.
+ *
+ * @retval This method returns the packet received.
+ */
+MP_packet_Prefix *_MPCI_Receive_packet ( void );
+
+/**
+ * @brief Pass a packet to the thread.
+ *
+ * This routine is responsible for passing @a the_packet to the thread
+ * waiting on the remote operation to complete. The unblocked thread is
+ * responsible for eventually freeing @a the_packet.
+ *
+ * @param[in] the_packet is the response packet to be processed.
+ *
+ * @retval This method returns a pointer to the thread which was unblocked,
+ * or NULL if the waiting thread no longer exists.
+ */
+Thread_Control *_MPCI_Process_response (
+ MP_packet_Prefix *the_packet
+);
+
+/**
+ * @brief Receive and process all packets.
+ *
+ * This is the server thread which receives and processes all MPCI packets.
+ *
+ * @param[in] ignored is the thread argument. It is not used.
+ */
+void _MPCI_Receive_server(
+ Thread_Entry_numeric_type ignored
+);
+
+/**
+ * @brief Announce the availability of a packet.
+ *
+ * This routine informs RTEMS of the availability of an MPCI packet.
+ */
+void _MPCI_Announce ( void );
+
+/**
+ * @brief Perform a process operation on another node.
+ *
+ * This routine performs a remote procedure call so that a
+ * process operation can be performed on another node.
+ *
+ * @param[in] operation is the remote operation to perform.
+ */
+void _MPCI_Internal_packets_Send_process_packet (
+ MPCI_Internal_Remote_operations operation
+);
+
+/**
+ * _MPCI_Internal_packets_Send_request_packet
+ *
+ * This routine performs a remote procedure call so that a
+ * directive operation can be initiated on another node.
+ *
+ * This routine is not needed since there are no request
+ * packets to be sent by this manager.
+ */
+
+/**
+ * _MPCI_Internal_packets_Send_response_packet
+ *
+ * This routine performs a remote procedure call so that a
+ * directive can be performed on another node.
+ *
+ * This routine is not needed since there are no response
+ * packets to be sent by this manager.
+ */
+
+/**
+ * @brief Perform requested action from another node.
+ *
+ * This routine performs the actions specific to this package for
+ * the request from another node.
+ */
+void _MPCI_Internal_packets_Process_packet (
+ MP_packet_Prefix *the_packet_prefix
+);
+
+/**
+ * _MPCI_Internal_packets_Send_object_was_deleted
+ *
+ * This routine is invoked indirectly by the thread queue
+ * when a proxy has been removed from the thread queue and
+ * the remote node must be informed of this.
+ *
+ * This routine is not needed since there are no objects
+ * deleted by this manager.
+ */
+
+/**
+ * _MPCI_Internal_packets_Send_extract_proxy
+ *
+ * This routine is invoked when a task is deleted and it
+ * has a proxy which must be removed from a thread queue and
+ * the remote node must be informed of this.
+ *
+ * This routine is not needed since there are no objects
+ * deleted by this manager.
+ */
+
+/**
+ * @brief Obtain an internal threads MP packet.
+ *
+ * This routine is used to obtain an MP packet for the internal threads.
+ */
+MPCI_Internal_packet *_MPCI_Internal_packets_Get_packet ( void );
+
+/**
+ * This function returns true if @a the_packet_class is valid,
+ * and false otherwise.
+ *
+ * @note A check against the lower bound (MP_PACKET_CLASSES_FIRST) is
+ *       unnecessary because this enum starts at a lower bound of zero.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Mp_packet_Is_valid_packet_class (
+ MP_packet_Classes the_packet_class
+)
+{
+ return ( the_packet_class <= MP_PACKET_CLASSES_LAST );
+}
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/mppkt.h b/cpukit/include/rtems/score/mppkt.h
new file mode 100644
index 0000000000..573abf574b
--- /dev/null
+++ b/cpukit/include/rtems/score/mppkt.h
@@ -0,0 +1,121 @@
+/**
+ * @file rtems/score/mppkt.h
+ *
+ * @brief Specification for the Packet Handler
+ *
+ * This package is the specification for the Packet Handler.
+ * This handler defines the basic packet and provides
+ * mechanisms to utilize packets based on this prefix.
+ * Packets are the fundamental basis for messages passed between
+ * nodes in an MP system.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_MPPKT_H
+#define _RTEMS_SCORE_MPPKT_H
+
+#include <rtems/score/object.h>
+#include <rtems/score/priority.h>
+#include <rtems/score/watchdog.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreMPPacket MP Packet Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates the primary definition of MPCI packets. This
+ * handler defines the part of the packet that is common to all remote
+ * operations.
+ */
+/**@{*/
+
+/**
+ * The following enumerated type defines the packet classes.
+ *
+ * @note In general, each class corresponds to a manager
+ * which supports global operations. Each manager
+ * defines the set of supported operations.
+ */
+typedef enum {
+ MP_PACKET_MPCI_INTERNAL = 0,
+ MP_PACKET_TASKS = 1,
+ MP_PACKET_MESSAGE_QUEUE = 2,
+ MP_PACKET_SEMAPHORE = 3,
+ MP_PACKET_PARTITION = 4,
+ MP_PACKET_REGION = 5,
+ MP_PACKET_EVENT = 6,
+ MP_PACKET_SIGNAL = 7
+} MP_packet_Classes;
+
+/**
+ * This constant defines the first entry in the MP_packet_Classes enumeration.
+ */
+#define MP_PACKET_CLASSES_FIRST MP_PACKET_MPCI_INTERNAL
+
+/**
+ * This constant defines the last entry in the MP_packet_Classes enumeration.
+ */
+#define MP_PACKET_CLASSES_LAST MP_PACKET_SIGNAL
+
+/**
+ * The following record contains the prefix for every packet
+ * passed between nodes in an MP system.
+ *
+ * @note This structure is padded to ensure that anything following it
+ * is on a 16 byte boundary. This is the most stringent structure
+ * alignment rule encountered yet.
+ */
+typedef struct {
+ /** This field indicates the API class of the operation being performed. */
+ MP_packet_Classes the_class;
+ /** This field is the id of the object to be acted upon. */
+ Objects_Id id;
+ /** This field is the ID of the originating thread. */
+ Objects_Id source_tid;
+ /** This field is the priority of the originating thread. */
+ uint32_t source_priority;
+ /** This field is where the status of the operation will be returned. */
+ uint32_t return_code;
+ /** This field is the length of the data following the prefix. */
+ uint32_t length;
+ /** This field is the length of the data which required network conversion. */
+ uint32_t to_convert;
+ /** This field is the requested timeout for this operation. */
+ Watchdog_Interval timeout;
+} MP_packet_Prefix;
+
+/**
+ * An MPCI must support packets of at least this size.
+ */
+#define MP_PACKET_MINIMUM_PACKET_SIZE 64
+
+/**
+ * The following constant defines the number of uint32_t's
+ * in a packet which must be converted to native format in a
+ * heterogeneous system. In packets longer than
+ * MP_PACKET_MINIMUN_HETERO_CONVERSION uint32_t's, some of the "extra" data
+ * may a user message buffer which is not automatically endian swapped.
+ */
+#define MP_PACKET_MINIMUN_HETERO_CONVERSION \
+ ( sizeof( MP_packet_Prefix ) / sizeof( uint32_t ) )
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/mrsp.h b/cpukit/include/rtems/score/mrsp.h
new file mode 100644
index 0000000000..85cbff784f
--- /dev/null
+++ b/cpukit/include/rtems/score/mrsp.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014, 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_MRSP_H
+#define _RTEMS_SCORE_MRSP_H
+
+#include <rtems/score/cpuopts.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/threadq.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreMRSP Multiprocessor Resource Sharing Protocol Handler
+ *
+ * @ingroup Score
+ *
+ * @brief Multiprocessor Resource Sharing Protocol (MrsP).
+ *
+ * The Multiprocessor Resource Sharing Protocol (MrsP) is defined in A. Burns
+ * and A.J. Wellings, A Schedulability Compatible Multiprocessor Resource
+ * Sharing Protocol - MrsP, Proceedings of the 25th Euromicro Conference on
+ * Real-Time Systems (ECRTS 2013), July 2013. It is a generalization of the
+ * Priority Ceiling Protocol to SMP systems. Each MrsP semaphore uses a
+ * ceiling priority per scheduler instance. A task obtaining or owning a MrsP
+ * semaphore will execute with the ceiling priority for its scheduler instance
+ * as specified by the MrsP semaphore object. Tasks waiting to get ownership
+ * of a MrsP semaphore will not relinquish the processor voluntarily. In case
+ * the owner of a MrsP semaphore gets preempted it can ask all tasks waiting
+ * for this semaphore to help out and temporarily borrow the right to execute
+ * on one of their assigned processors.
+ *
+ * @{
+ */
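+
+/*
+ * For orientation, a MrsP semaphore is usually created via the Classic
+ * API. A minimal sketch (error handling omitted; CEILING is an assumed
+ * application-defined ceiling priority):
+ *
+ * @code
+ * rtems_id id;
+ *
+ * (void) rtems_semaphore_create(
+ *   rtems_build_name( 'M', 'R', 'S', 'P' ),
+ *   1,
+ *   RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY
+ *     | RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ *   CEILING,
+ *   &id
+ * );
+ * @endcode
+ */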
+
+/**
+ * @brief MrsP control block.
+ */
+typedef struct {
+ /**
+ * @brief The thread queue to manage ownership and waiting threads.
+ */
+ Thread_queue_Control Wait_queue;
+
+ /**
+ * @brief The ceiling priority used by the owner thread.
+ */
+ Priority_Node Ceiling_priority;
+
+ /**
+ * @brief One ceiling priority per scheduler instance.
+ */
+ Priority_Control *ceiling_priorities;
+} MRSP_Control;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_MRSP_H */
diff --git a/cpukit/include/rtems/score/mrspimpl.h b/cpukit/include/rtems/score/mrspimpl.h
new file mode 100644
index 0000000000..b9c7441401
--- /dev/null
+++ b/cpukit/include/rtems/score/mrspimpl.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2014, 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_MRSPIMPL_H
+#define _RTEMS_SCORE_MRSPIMPL_H
+
+#include <rtems/score/mrsp.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/assert.h>
+#include <rtems/score/status.h>
+#include <rtems/score/threadqimpl.h>
+#include <rtems/score/watchdogimpl.h>
+#include <rtems/score/wkspace.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreMRSP
+ *
+ * @{
+ */
+
+#define MRSP_TQ_OPERATIONS &_Thread_queue_Operations_priority_inherit
+
+RTEMS_INLINE_ROUTINE void _MRSP_Acquire_critical(
+ MRSP_Control *mrsp,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Acquire_critical( &mrsp->Wait_queue, queue_context );
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Release(
+ MRSP_Control *mrsp,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Release( &mrsp->Wait_queue, queue_context );
+}
+
+RTEMS_INLINE_ROUTINE Thread_Control *_MRSP_Get_owner(
+ const MRSP_Control *mrsp
+)
+{
+ return mrsp->Wait_queue.Queue.owner;
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Set_owner(
+ MRSP_Control *mrsp,
+ Thread_Control *owner
+)
+{
+ mrsp->Wait_queue.Queue.owner = owner;
+}
+
+RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
+ const MRSP_Control *mrsp,
+ const Scheduler_Control *scheduler
+)
+{
+ uint32_t scheduler_index;
+
+ scheduler_index = _Scheduler_Get_index( scheduler );
+ return mrsp->ceiling_priorities[ scheduler_index ];
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
+ MRSP_Control *mrsp,
+ const Scheduler_Control *scheduler,
+ Priority_Control new_priority
+)
+{
+ uint32_t scheduler_index;
+
+ scheduler_index = _Scheduler_Get_index( scheduler );
+ mrsp->ceiling_priorities[ scheduler_index ] = new_priority;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Raise_priority(
+ MRSP_Control *mrsp,
+ Thread_Control *thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ Status_Control status;
+ ISR_lock_Context lock_context;
+ const Scheduler_Control *scheduler;
+ Priority_Control ceiling_priority;
+ Scheduler_Node *scheduler_node;
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( thread, &lock_context );
+
+ scheduler = _Thread_Scheduler_get_home( thread );
+ scheduler_node = _Thread_Scheduler_get_home_node( thread );
+ ceiling_priority = _MRSP_Get_priority( mrsp, scheduler );
+
+ if (
+ ceiling_priority
+ <= _Priority_Get_priority( &scheduler_node->Wait.Priority )
+ ) {
+ _Priority_Node_initialize( priority_node, ceiling_priority );
+ _Thread_Priority_add( thread, priority_node, queue_context );
+ status = STATUS_SUCCESSFUL;
+ } else {
+ status = STATUS_MUTEX_CEILING_VIOLATED;
+ }
+
+ _Thread_Wait_release_default_critical( thread, &lock_context );
+ return status;
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Remove_priority(
+ Thread_Control *thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( thread, &lock_context );
+ _Thread_Priority_remove( thread, priority_node, queue_context );
+ _Thread_Wait_release_default_critical( thread, &lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Replace_priority(
+ MRSP_Control *mrsp,
+ Thread_Control *thread,
+ Priority_Node *ceiling_priority
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_Wait_acquire_default( thread, &lock_context );
+ _Thread_Priority_replace(
+ thread,
+ ceiling_priority,
+ &mrsp->Ceiling_priority
+ );
+ _Thread_Wait_release_default( thread, &lock_context );
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
+ MRSP_Control *mrsp,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
+)
+{
+ Status_Control status;
+ Per_CPU_Control *cpu_self;
+
+ status = _MRSP_Raise_priority(
+ mrsp,
+ executing,
+ &mrsp->Ceiling_priority,
+ queue_context
+ );
+
+ if ( status != STATUS_SUCCESSFUL ) {
+ _MRSP_Release( mrsp, queue_context );
+ return status;
+ }
+
+ _MRSP_Set_owner( mrsp, executing );
+ cpu_self = _Thread_queue_Dispatch_disable( queue_context );
+ _MRSP_Release( mrsp, queue_context );
+ _Thread_Priority_and_sticky_update( executing, 1 );
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
+ MRSP_Control *mrsp,
+ const Scheduler_Control *scheduler,
+ Priority_Control ceiling_priority,
+ Thread_Control *executing,
+ bool initially_locked
+)
+{
+ uint32_t scheduler_count = _Scheduler_Count;
+ uint32_t i;
+
+ if ( initially_locked ) {
+ return STATUS_INVALID_NUMBER;
+ }
+
+ mrsp->ceiling_priorities = _Workspace_Allocate(
+ sizeof( *mrsp->ceiling_priorities ) * scheduler_count
+ );
+ if ( mrsp->ceiling_priorities == NULL ) {
+ return STATUS_NO_MEMORY;
+ }
+
+ for ( i = 0 ; i < scheduler_count ; ++i ) {
+ const Scheduler_Control *scheduler_of_index;
+
+ scheduler_of_index = &_Scheduler_Table[ i ];
+
+ if ( scheduler != scheduler_of_index ) {
+ mrsp->ceiling_priorities[ i ] =
+ _Scheduler_Map_priority( scheduler_of_index, 0 );
+ } else {
+ mrsp->ceiling_priorities[ i ] = ceiling_priority;
+ }
+ }
+
+ _Thread_queue_Object_initialize( &mrsp->Wait_queue );
+ return STATUS_SUCCESSFUL;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
+ MRSP_Control *mrsp,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
+)
+{
+ Status_Control status;
+ Priority_Node ceiling_priority;
+
+ status = _MRSP_Raise_priority(
+ mrsp,
+ executing,
+ &ceiling_priority,
+ queue_context
+ );
+
+ if ( status != STATUS_SUCCESSFUL ) {
+ _MRSP_Release( mrsp, queue_context );
+ return status;
+ }
+
+ _Thread_queue_Context_set_deadlock_callout(
+ queue_context,
+ _Thread_queue_Deadlock_status
+ );
+ status = _Thread_queue_Enqueue_sticky(
+ &mrsp->Wait_queue.Queue,
+ MRSP_TQ_OPERATIONS,
+ executing,
+ queue_context
+ );
+
+ if ( status == STATUS_SUCCESSFUL ) {
+ _MRSP_Replace_priority( mrsp, executing, &ceiling_priority );
+ } else {
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ int sticky_level_change;
+
+ if ( status != STATUS_DEADLOCK ) {
+ sticky_level_change = -1;
+ } else {
+ sticky_level_change = 0;
+ }
+
+ _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
+ _MRSP_Remove_priority( executing, &ceiling_priority, &queue_context );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context.Lock_context.Lock_context
+ );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+ _Thread_Priority_and_sticky_update( executing, sticky_level_change );
+ _Thread_Dispatch_enable( cpu_self );
+ }
+
+ return status;
+}
+
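+/**
+ * @brief Seizes the MrsP semaphore for the executing thread: claims
+ * ownership if the semaphore is free, optionally waits for ownership,
+ * or returns STATUS_UNAVAILABLE.
+ */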
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
+ MRSP_Control *mrsp,
+ Thread_Control *executing,
+ bool wait,
+ Thread_queue_Context *queue_context
+)
+{
+ Status_Control status;
+ Thread_Control *owner;
+
+ _MRSP_Acquire_critical( mrsp, queue_context );
+
+ owner = _MRSP_Get_owner( mrsp );
+
+ if ( owner == NULL ) {
+ status = _MRSP_Claim_ownership( mrsp, executing, queue_context );
+ } else if ( owner == executing ) {
+ _MRSP_Release( mrsp, queue_context );
+ status = STATUS_UNAVAILABLE;
+ } else if ( wait ) {
+ status = _MRSP_Wait_for_ownership( mrsp, executing, queue_context );
+ } else {
+ _MRSP_Release( mrsp, queue_context );
+ status = STATUS_UNAVAILABLE;
+ }
+
+ return status;
+}
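+
+/*
+ * A caller sketch, assuming the executing thread and a timeout in ticks
+ * are already known (the actual Classic API glue differs in detail):
+ *
+ * @code
+ * Thread_queue_Context queue_context;
+ * Status_Control status;
+ *
+ * _Thread_queue_Context_initialize( &queue_context );
+ * _Thread_queue_Context_set_enqueue_timeout_ticks( &queue_context, ticks );
+ * _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
+ * status = _MRSP_Seize( mrsp, executing, true, &queue_context );
+ * @endcode
+ */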
+
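+/**
+ * @brief Surrenders the MrsP semaphore: ownership passes to the first
+ * waiting thread, if any, otherwise the semaphore becomes free.
+ */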
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
+ MRSP_Control *mrsp,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
+)
+{
+ Thread_queue_Heads *heads;
+
+ if ( _MRSP_Get_owner( mrsp ) != executing ) {
+ _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
+ return STATUS_NOT_OWNER;
+ }
+
+ _MRSP_Acquire_critical( mrsp, queue_context );
+
+ _MRSP_Set_owner( mrsp, NULL );
+ _MRSP_Remove_priority( executing, &mrsp->Ceiling_priority, queue_context );
+
+ heads = mrsp->Wait_queue.Queue.heads;
+
+ if ( heads == NULL ) {
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _MRSP_Release( mrsp, queue_context );
+ _Thread_Priority_and_sticky_update( executing, -1 );
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
+ }
+
+ _Thread_queue_Surrender_sticky(
+ &mrsp->Wait_queue.Queue,
+ heads,
+ executing,
+ queue_context,
+ MRSP_TQ_OPERATIONS
+ );
+ return STATUS_SUCCESSFUL;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
+{
+ if ( _MRSP_Get_owner( mrsp ) != NULL ) {
+ return STATUS_RESOURCE_IN_USE;
+ }
+
+ return STATUS_SUCCESSFUL;
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Destroy(
+ MRSP_Control *mrsp,
+ Thread_queue_Context *queue_context
+)
+{
+ _MRSP_Release( mrsp, queue_context );
+ _Thread_queue_Destroy( &mrsp->Wait_queue );
+ _Workspace_Free( mrsp->ceiling_priorities );
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_MRSPIMPL_H */
diff --git a/cpukit/include/rtems/score/muteximpl.h b/cpukit/include/rtems/score/muteximpl.h
new file mode 100644
index 0000000000..5fd4f5e8be
--- /dev/null
+++ b/cpukit/include/rtems/score/muteximpl.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_MUTEXIMPL_H
+#define _RTEMS_SCORE_MUTEXIMPL_H
+
+#include <rtems/score/threadqimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
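+/*
+ * These control blocks implement the self-contained mutexes of Newlib's
+ * <sys/lock.h>. The layouts are assumed to stay in sync with
+ * struct _Mutex_Control and struct _Mutex_recursive_Control there.
+ */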
+typedef struct {
+ Thread_queue_Syslock_queue Queue;
+} Mutex_Control;
+
+typedef struct {
+ Mutex_Control Mutex;
+ unsigned int nest_level;
+} Mutex_recursive_Control;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_MUTEXIMPL_H */
diff --git a/cpukit/include/rtems/score/object.h b/cpukit/include/rtems/score/object.h
new file mode 100644
index 0000000000..6789c61fea
--- /dev/null
+++ b/cpukit/include/rtems/score/object.h
@@ -0,0 +1,469 @@
+/**
+ * @file rtems/score/object.h
+ *
+ * @brief Constants and Structures Associated with the Object Handler
+ *
+ * This include file contains all the constants and structures associated
+ * with the Object Handler. This Handler provides mechanisms which
+ * can be used to initialize and manipulate all objects which have ids.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_OBJECT_H
+#define _RTEMS_SCORE_OBJECT_H
+
+#include <rtems/score/basedefs.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/rbtree.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup Score SuperCore
+ *
+ * @brief Provides services for all APIs.
+ */
+/**@{*/
+
+#if defined(RTEMS_POSIX_API)
+ /**
+ * This macro is defined when an API is enabled that requires the
+ * use of strings for object names. Since the Classic API uses
+ * 32-bit unsigned integers and not strings, this allows us to
+   * disable this in the smallest RTEMS configurations.
+ */
+ #define RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES
+#endif
+
+/**
+ * @defgroup ScoreCPU CPU Architecture Support
+ *
+ * @ingroup Score
+ *
+ * @brief Provides CPU architecture dependent services.
+ */
+/**@{*/
+
+/**
+ * @defgroup ScoreObject Object Handler
+ *
+ * @ingroup Score
+ */
+/**@{*/
+
+/**
+ * The following type defines the control block used to manage
+ * object names.
+ */
+typedef union {
+ #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES)
+ /** This is a pointer to a string name. */
+ const char *name_p;
+ #endif
+ /** This is the actual 32-bit "raw" integer name. */
+ uint32_t name_u32;
+} Objects_Name;
+
+#if defined(RTEMS_USE_16_BIT_OBJECT)
+/**
+ * The following type defines the control block used to manage
+ * object IDs. The format is as follows (0=LSB):
+ *
+ * Bits 0 .. 7 = index (up to 254 objects of a type)
+ * Bits 8 .. 10 = API (up to 7 API classes)
+ * Bits 11 .. 15 = class (up to 31 object types per API)
+ */
+typedef uint16_t Objects_Id;
+
+/**
+ * This type is used to store the maximum number of allowed objects
+ * of each type.
+ */
+typedef uint8_t Objects_Maximum;
+
+#define OBJECTS_INDEX_START_BIT 0U
+#define OBJECTS_API_START_BIT 8U
+#define OBJECTS_CLASS_START_BIT 11U
+
+#define OBJECTS_INDEX_MASK (Objects_Id)0x00ffU
+#define OBJECTS_API_MASK (Objects_Id)0x0700U
+#define OBJECTS_CLASS_MASK (Objects_Id)0xF800U
+
+#define OBJECTS_INDEX_VALID_BITS (Objects_Id)0x00ffU
+#define OBJECTS_API_VALID_BITS (Objects_Id)0x0007U
+/* OBJECTS_NODE_VALID_BITS should not be used with 16 bit Ids */
+#define OBJECTS_CLASS_VALID_BITS (Objects_Id)0x001fU
+
+#define OBJECTS_UNLIMITED_OBJECTS 0x8000U
+
+#define OBJECTS_ID_INITIAL_INDEX (0)
+#define OBJECTS_ID_FINAL_INDEX (0xff)
+
+#else
+/**
+ * The following type defines the control block used to manage
+ * object IDs. The format is as follows (0=LSB):
+ *
+ * Bits 0 .. 15 = index (up to 65535 objects of a type)
+ * Bits 16 .. 23 = node (up to 255 nodes)
+ * Bits 24 .. 26 = API (up to 7 API classes)
+ * Bits 27 .. 31 = class (up to 31 object types per API)
+ */
+typedef uint32_t Objects_Id;
+
+/**
+ * This type is used to store the maximum number of allowed objects
+ * of each type.
+ */
+typedef uint16_t Objects_Maximum;
+
+/**
+ * This is the bit position of the starting bit of the index portion of
+ * the object Id.
+ */
+#define OBJECTS_INDEX_START_BIT 0U
+/**
+ * This is the bit position of the starting bit of the node portion of
+ * the object Id.
+ */
+#define OBJECTS_NODE_START_BIT 16U
+
+/**
+ * This is the bit position of the starting bit of the API portion of
+ * the object Id.
+ */
+#define OBJECTS_API_START_BIT 24U
+
+/**
+ * This is the bit position of the starting bit of the class portion of
+ * the object Id.
+ */
+#define OBJECTS_CLASS_START_BIT 27U
+
+/**
+ * This mask is used to extract the index portion of an object Id.
+ */
+#define OBJECTS_INDEX_MASK (Objects_Id)0x0000ffffU
+
+/**
+ * This mask is used to extract the node portion of an object Id.
+ */
+#define OBJECTS_NODE_MASK (Objects_Id)0x00ff0000U
+
+/**
+ * This mask is used to extract the API portion of an object Id.
+ */
+#define OBJECTS_API_MASK (Objects_Id)0x07000000U
+
+/**
+ * This mask is used to extract the class portion of an object Id.
+ */
+#define OBJECTS_CLASS_MASK (Objects_Id)0xf8000000U
+
+/**
+ * This mask represents the bits that are used to ensure no extra bits
+ * are set after shifting to extract the index portion of an object Id.
+ */
+#define OBJECTS_INDEX_VALID_BITS (Objects_Id)0x0000ffffU
+
+/**
+ * This mask represents the bits that are used to ensure no extra bits
+ * are set after shifting to extract the node portion of an object Id.
+ */
+#define OBJECTS_NODE_VALID_BITS (Objects_Id)0x000000ffU
+
+/**
+ * This mask represents the bits that are used to ensure no extra bits
+ * are set after shifting to extract the API portion of an object Id.
+ */
+#define OBJECTS_API_VALID_BITS (Objects_Id)0x00000007U
+
+/**
+ * This mask represents the bits that are used to ensure no extra bits
+ * are set after shifting to extract the class portion of an object Id.
+ */
+#define OBJECTS_CLASS_VALID_BITS (Objects_Id)0x0000001fU
+
+/**
+ * Mask to enable unlimited objects. This is used in the configuration
+ * table when specifying the number of configured objects.
+ */
+#define OBJECTS_UNLIMITED_OBJECTS 0x80000000U
+
+/**
+ * This is the lowest value for the index portion of an object Id.
+ */
+#define OBJECTS_ID_INITIAL_INDEX (0)
+
+/**
+ * This is the highest value for the index portion of an object Id.
+ */
+#define OBJECTS_ID_FINAL_INDEX (0xffffU)
+#endif
+
+/**
+ * This enumerated type is used in the class field of the object ID.
+ */
+typedef enum {
+ OBJECTS_NO_API = 0,
+ OBJECTS_INTERNAL_API = 1,
+ OBJECTS_CLASSIC_API = 2,
+ OBJECTS_POSIX_API = 3,
+ OBJECTS_FAKE_OBJECTS_API = 7
+} Objects_APIs;
+
+/** This macro is used to generically specify the last API index. */
+#define OBJECTS_APIS_LAST OBJECTS_POSIX_API
+
+/**
+ * The following defines the Object Control Block used to manage
+ * each object local to this node.
+ */
+typedef struct {
+ /** This is the chain node portion of an object. */
+ Chain_Node Node;
+ /** This is the object's ID. */
+ Objects_Id id;
+ /** This is the object's name. */
+ Objects_Name name;
+} Objects_Control;
+
+#if defined( RTEMS_MULTIPROCESSING )
+/**
+ * @brief This defines the Global Object Control Block used to manage objects
+ * resident on other nodes.
+ */
+typedef struct {
+ /**
+ * @brief Nodes to manage active and inactive global objects.
+ */
+ union {
+ /**
+ * @brief Inactive global objects reside on a chain.
+ */
+ Chain_Node Inactive;
+
+ struct {
+ /**
+ * @brief Node to lookup an active global object by identifier.
+ */
+ RBTree_Node Id_lookup;
+
+ /**
+ * @brief Node to lookup an active global object by name.
+ */
+ RBTree_Node Name_lookup;
+ } Active;
+ } Nodes;
+
+ /**
+ * @brief The global object identifier.
+ */
+ Objects_Id id;
+
+ /**
+ * @brief The global object name.
+ *
+   * Using an unsigned thirty-two bit value only works because no
+   * MP-capable API currently uses variable length names; an MP API
+   * with variable length names would break this scheme.
+ */
+ uint32_t name;
+} Objects_MP_Control;
+#endif
+
+/**
+ * No object can have this ID.
+ */
+#define OBJECTS_ID_NONE 0
+
+/**
+ * The following defines the constant which may be used
+ * to manipulate the calling task.
+ */
+#define OBJECTS_ID_OF_SELF ((Objects_Id) 0)
+
+/**
+ * The following constant is used to specify that a name to ID search
+ * should search through all nodes.
+ */
+#define OBJECTS_SEARCH_ALL_NODES 0
+
+/**
+ * The following constant is used to specify that a name to ID search
+ * should search through all nodes except the current node.
+ */
+#define OBJECTS_SEARCH_OTHER_NODES 0x7FFFFFFE
+
+/**
+ * The following constant is used to specify that a name to ID search
+ * should search only on this node.
+ */
+#define OBJECTS_SEARCH_LOCAL_NODE 0x7FFFFFFF
+
+/**
+ * The following constant is used to specify that a name to ID search
+ * is being asked for the ID of the currently executing task.
+ */
+#define OBJECTS_WHO_AM_I 0
+
+/**
+ * This macro calculates the lowest ID for the specified api, class,
+ * and node.
+ */
+#define OBJECTS_ID_INITIAL(_api, _class, _node) \
+ _Objects_Build_id( (_api), (_class), (_node), OBJECTS_ID_INITIAL_INDEX )
+
+/**
+ * This macro specifies the highest object ID value.
+ */
+#define OBJECTS_ID_FINAL ((Objects_Id)~0)
+
+/**
+ * This macro is used to build a thirty-two bit style name from
+ * four characters. The most significant byte will be the
+ * character @a _C1.
+ *
+ * @param[in] _C1 is the first character of the name
+ * @param[in] _C2 is the second character of the name
+ * @param[in] _C3 is the third character of the name
+ * @param[in] _C4 is the fourth character of the name
+ */
+#define _Objects_Build_name( _C1, _C2, _C3, _C4 ) \
+ ( (uint32_t)(_C1) << 24 | \
+ (uint32_t)(_C2) << 16 | \
+ (uint32_t)(_C3) << 8 | \
+ (uint32_t)(_C4) )
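+
+/*
+ * For example, _Objects_Build_name( 'T', 'A', 'S', 'K' ) yields
+ * 0x5441534bU, since 'T' is 0x54, 'A' is 0x41, 'S' is 0x53, and
+ * 'K' is 0x4b.
+ */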
+
+/**
+ * This function returns the API portion of the ID.
+ *
+ * @param[in] id is the object Id to be processed.
+ *
+ * @return This method returns the API portion of the specified object ID.
+ */
+RTEMS_INLINE_ROUTINE Objects_APIs _Objects_Get_API(
+ Objects_Id id
+)
+{
+ return (Objects_APIs) ((id >> OBJECTS_API_START_BIT) & OBJECTS_API_VALID_BITS);
+}
+
+/**
+ * This function returns the class portion of the ID.
+ *
+ * @param[in] id is the object Id to be processed
+ */
+RTEMS_INLINE_ROUTINE uint32_t _Objects_Get_class(
+ Objects_Id id
+)
+{
+ return (uint32_t)
+ ((id >> OBJECTS_CLASS_START_BIT) & OBJECTS_CLASS_VALID_BITS);
+}
+
+/**
+ * This function returns the node portion of the ID.
+ *
+ * @param[in] id is the object Id to be processed
+ *
+ * @return This method returns the node portion of an object ID.
+ */
+RTEMS_INLINE_ROUTINE uint32_t _Objects_Get_node(
+ Objects_Id id
+)
+{
+ /*
+ * If using 16-bit Ids, then there is no node field and it MUST
+ * be a single processor system.
+ */
+ #if defined(RTEMS_USE_16_BIT_OBJECT)
+ return 1;
+ #else
+ return (id >> OBJECTS_NODE_START_BIT) & OBJECTS_NODE_VALID_BITS;
+ #endif
+}
+
+/**
+ * This function returns the index portion of the ID.
+ *
+ * @param[in] id is the Id to be processed
+ *
+ * @return This method returns the index portion of the specified object ID.
+ */
+RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Get_index(
+ Objects_Id id
+)
+{
+ return
+ (Objects_Maximum)((id >> OBJECTS_INDEX_START_BIT) &
+ OBJECTS_INDEX_VALID_BITS);
+}
+
+/**
+ * This function builds an object's id from the API, class, processor
+ * node and index values specified.
+ *
+ * @param[in] the_api indicates the API associated with this Id.
+ * @param[in] the_class indicates the class of object.
+ * It is specific to @a the_api.
+ * @param[in] node is the node where this object resides.
+ * @param[in] index is the instance number of this object.
+ *
+ * @return This method returns an object Id constructed from the arguments.
+ */
+RTEMS_INLINE_ROUTINE Objects_Id _Objects_Build_id(
+ Objects_APIs the_api,
+ uint16_t the_class,
+ uint8_t node,
+ uint16_t index
+)
+{
+ return (( (Objects_Id) the_api ) << OBJECTS_API_START_BIT) |
+ (( (Objects_Id) the_class ) << OBJECTS_CLASS_START_BIT) |
+ #if !defined(RTEMS_USE_16_BIT_OBJECT)
+ (( (Objects_Id) node ) << OBJECTS_NODE_START_BIT) |
+ #endif
+ (( (Objects_Id) index ) << OBJECTS_INDEX_START_BIT);
+}
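+
+/*
+ * A worked example for the default 32-bit Ids: the first Classic API
+ * task on node 1 has
+ *
+ *   _Objects_Build_id( OBJECTS_CLASSIC_API, 1, 1, 1 )
+ *
+ * which is (1 << 27) | (2 << 24) | (1 << 16) | 1, i.e. 0x0a010001.
+ */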
+
+/**
+ * Returns if the object maximum specifies unlimited objects.
+ *
+ * @param[in] maximum The object maximum specification.
+ *
+ * @retval true Unlimited objects are available.
+ * @retval false The object count is fixed.
+ */
+RTEMS_INLINE_ROUTINE bool _Objects_Is_unlimited( uint32_t maximum )
+{
+ return (maximum & OBJECTS_UNLIMITED_OBJECTS) != 0;
+}
+
+/*
+ * We cannot use an inline function for this since it may be evaluated at
+ * compile time.
+ */
+#define _Objects_Maximum_per_allocation( maximum ) \
+ ((Objects_Maximum) ((maximum) & ~OBJECTS_UNLIMITED_OBJECTS))
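+
+/*
+ * For example, a configured maximum of (OBJECTS_UNLIMITED_OBJECTS | 5)
+ * requests unlimited objects allocated in blocks of five:
+ * _Objects_Is_unlimited() returns true and
+ * _Objects_Maximum_per_allocation() returns 5.
+ */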
+
+/**@}*/
+/**@}*/
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/objectimpl.h b/cpukit/include/rtems/score/objectimpl.h
new file mode 100644
index 0000000000..cc5820785c
--- /dev/null
+++ b/cpukit/include/rtems/score/objectimpl.h
@@ -0,0 +1,1002 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines in the Object Handler
+ *
+ * This include file contains the static inline implementation of all
+ * of the inlined routines in the Object Handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_OBJECTIMPL_H
+#define _RTEMS_SCORE_OBJECTIMPL_H
+
+#include <rtems/score/object.h>
+#include <rtems/score/apimutex.h>
+#include <rtems/score/isrlock.h>
+#include <rtems/score/threaddispatch.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreObject
+ *
+ * @{
+ */
+
+/**
+ * Functions which compare names are prototyped like this.
+ */
+typedef bool (*Objects_Name_comparators)(
+ void * /* name_1 */,
+ void * /* name_2 */,
+ uint16_t /* length */
+);
+
+/**
+ * This enumerated type is used in the class field of the object ID
+ * for RTEMS internal object classes.
+ */
+typedef enum {
+ OBJECTS_INTERNAL_NO_CLASS = 0,
+ OBJECTS_INTERNAL_THREADS = 1
+} Objects_Internal_API;
+
+/** This macro is used to generically specify the last API index. */
+#define OBJECTS_INTERNAL_CLASSES_LAST OBJECTS_INTERNAL_THREADS
+
+/**
+ * This enumerated type is used in the class field of the object ID
+ * for the RTEMS Classic API.
+ */
+typedef enum {
+ OBJECTS_CLASSIC_NO_CLASS = 0,
+ OBJECTS_RTEMS_TASKS = 1,
+ OBJECTS_RTEMS_TIMERS = 2,
+ OBJECTS_RTEMS_SEMAPHORES = 3,
+ OBJECTS_RTEMS_MESSAGE_QUEUES = 4,
+ OBJECTS_RTEMS_PARTITIONS = 5,
+ OBJECTS_RTEMS_REGIONS = 6,
+ OBJECTS_RTEMS_PORTS = 7,
+ OBJECTS_RTEMS_PERIODS = 8,
+ OBJECTS_RTEMS_EXTENSIONS = 9,
+ OBJECTS_RTEMS_BARRIERS = 10
+} Objects_Classic_API;
+
+/** This macro is used to generically specify the last API index. */
+#define OBJECTS_RTEMS_CLASSES_LAST OBJECTS_RTEMS_BARRIERS
+
+/**
+ * This enumerated type is used in the class field of the object ID
+ * for the POSIX API.
+ */
+typedef enum {
+ OBJECTS_POSIX_NO_CLASS = 0,
+ OBJECTS_POSIX_THREADS = 1,
+ OBJECTS_POSIX_KEYS = 2,
+ OBJECTS_POSIX_INTERRUPTS = 3,
+ OBJECTS_POSIX_MESSAGE_QUEUES = 5,
+ OBJECTS_POSIX_SEMAPHORES = 7,
+ OBJECTS_POSIX_TIMERS = 9,
+ OBJECTS_POSIX_SHMS = 12
+} Objects_POSIX_API;
+
+/** This macro is used to generically specify the last API index. */
+#define OBJECTS_POSIX_CLASSES_LAST OBJECTS_POSIX_SHMS
+
+/*
+ * For fake objects, which have an object identifier, but no object
+ * information block.
+ */
+typedef enum {
+ OBJECTS_FAKE_OBJECTS_NO_CLASS = 0,
+ OBJECTS_FAKE_OBJECTS_SCHEDULERS = 1
+} Objects_Fake_objects_API;
+
+#if defined(RTEMS_MULTIPROCESSING)
+/**
+ * The following type defines the callout used when a local task
+ * is extracted from a remote thread queue (i.e. its proxy must be
+ * extracted from the remote queue).
+ */
+typedef void ( *Objects_Thread_queue_Extract_callout )(
+ Thread_Control *,
+ Objects_Id
+);
+#endif
+
+/**
+ * The following defines the structure for the information used to
+ * manage each class of objects.
+ */
+typedef struct {
+ /** This field indicates the API of this object class. */
+ Objects_APIs the_api;
+ /** This is the class of this object set. */
+ uint16_t the_class;
+ /** This is the minimum valid id of this object class. */
+ Objects_Id minimum_id;
+ /** This is the maximum valid id of this object class. */
+ Objects_Id maximum_id;
+ /** This is the maximum number of objects in this class. */
+ Objects_Maximum maximum;
+  /** This is true if unlimited objects are enabled in this class. */
+ bool auto_extend;
+ /** This is the number of objects in a block. */
+ Objects_Maximum allocation_size;
+ /** This is the size in bytes of each object instance. */
+ size_t size;
+ /** This points to the table of local objects. */
+ Objects_Control **local_table;
+ /** This is the chain of inactive control blocks. */
+ Chain_Control Inactive;
+ /** This is the number of objects on the Inactive list. */
+ Objects_Maximum inactive;
+ /** This is the number of inactive objects per block. */
+ uint32_t *inactive_per_block;
+  /** This is a table of pointers to the object memory blocks. */
+ void **object_blocks;
+ #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES)
+ /** This is true if names are strings. */
+ bool is_string;
+ #endif
+ /** This is the maximum length of names. */
+ uint16_t name_length;
+ #if defined(RTEMS_MULTIPROCESSING)
+  /** This is the object class' method called when extracting a thread. */
+ Objects_Thread_queue_Extract_callout extract;
+
+ /**
+ * @brief The global objects of this object information sorted by object
+ * identifier.
+ */
+ RBTree_Control Global_by_id;
+
+ /**
+ * @brief The global objects of this object information sorted by object
+ * name.
+ *
+ * Objects with the same name are sorted according to their identifier.
+ */
+ RBTree_Control Global_by_name;
+ #endif
+} Objects_Information;
+
+/**
+ * The following refers to the node number of the local node.
+ */
+#if defined(RTEMS_MULTIPROCESSING)
+extern uint16_t _Objects_Local_node;
+#else
+#define _Objects_Local_node ((uint16_t)1)
+#endif
+
+/**
+ * The following refers to the number of nodes in the system.
+ */
+#if defined(RTEMS_MULTIPROCESSING)
+extern uint16_t _Objects_Maximum_nodes;
+#else
+#define _Objects_Maximum_nodes 1
+#endif
+
+/**
+ * The following is the list of information blocks per API for each object
+ * class. From the ID, we can go to one of these information blocks,
+ * and obtain a pointer to the appropriate object control block.
+ */
+extern Objects_Information ** const
+_Objects_Information_table[ OBJECTS_APIS_LAST + 1 ];
+
+/**
+ * This function extends an object class information record.
+ *
+ * @param[in] information points to an object class information block.
+ */
+void _Objects_Extend_information(
+ Objects_Information *information
+);
+
+/**
+ * @brief Shrink an object class information record
+ *
+ * This function shrinks an object class information record.
+ * The object's name and object space are released. The local_table
+ * block does not shrink. The Inactive chain needs to be scanned
+ * to find and remove the objects.
+ *
+ * @param[in] information points to an object class information block.
+ */
+void _Objects_Shrink_information(
+ Objects_Information *information
+);
+
+void _Objects_Do_initialize_information(
+ Objects_Information *information,
+ Objects_APIs the_api,
+ uint16_t the_class,
+ uint32_t maximum,
+ uint16_t size,
+ bool is_string,
+ uint32_t maximum_name_length
+#if defined(RTEMS_MULTIPROCESSING)
+ ,
+ Objects_Thread_queue_Extract_callout extract
+#endif
+);
+
+/**
+ * @brief Initialize object Information
+ *
+ * This function initializes an object class information record.
+ * Maximum indicates the number of objects required in this class
+ * and size indicates the size in bytes of each control block for
+ * this object class. The name length and string designator are
+ * also set.
+ *
+ * @param[in] information points to an object class information block.
+ * @param[in] the_api indicates the API associated with this information block.
+ * @param[in] the_class indicates the class of object being managed
+ * by this information block. It is specific to @a the_api.
+ * @param[in] maximum is the maximum number of instances of this object
+ * class which may be concurrently active.
+ * @param[in] size is the size of the data structure for this class.
+ * @param[in] is_string is true if this object uses string style names.
+ * @param[in] maximum_name_length is the maximum length of object names.
+ */
+#if defined(RTEMS_MULTIPROCESSING)
+ #define _Objects_Initialize_information( \
+ information, \
+ the_api, \
+ the_class, \
+ maximum, \
+ size, \
+ is_string, \
+ maximum_name_length, \
+ extract \
+ ) \
+ _Objects_Do_initialize_information( \
+ information, \
+ the_api, \
+ the_class, \
+ maximum, \
+ size, \
+ is_string, \
+ maximum_name_length, \
+ extract \
+ )
+#else
+ #define _Objects_Initialize_information( \
+ information, \
+ the_api, \
+ the_class, \
+ maximum, \
+ size, \
+ is_string, \
+ maximum_name_length, \
+ extract \
+ ) \
+ _Objects_Do_initialize_information( \
+ information, \
+ the_api, \
+ the_class, \
+ maximum, \
+ size, \
+ is_string, \
+ maximum_name_length \
+ )
+#endif
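+
+/*
+ * An initialization sketch for a hypothetical object class; the
+ * information block _Some_Information and control type Some_Control
+ * are illustrative only:
+ *
+ * @code
+ * _Objects_Initialize_information(
+ *   &_Some_Information,
+ *   OBJECTS_CLASSIC_API,
+ *   OBJECTS_RTEMS_TASKS,
+ *   10,
+ *   sizeof( Some_Control ),
+ *   false,
+ *   4,
+ *   NULL
+ * );
+ * @endcode
+ */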
+
+/**
+ * @brief Object API Maximum Class
+ *
+ * This function returns the highest numeric value of a valid
+ * API for the specified @a api.
+ *
+ * @param[in] api is the API of interest
+ *
+ * @retval A positive integer on success and 0 otherwise.
+ */
+unsigned int _Objects_API_maximum_class(
+ uint32_t api
+);
+
+/**
+ * @brief Allocates an object without locking the allocator mutex.
+ *
+ * This function can be called in two contexts
+ * - the executing thread is the owner of the object allocator mutex, or
+ * - in case the system state is not up, e.g. during sequential system
+ * initialization.
+ *
+ * @param[in] information The object information block.
+ *
+ * @retval NULL No object available.
+ * @retval object The allocated object.
+ *
+ * @see _Objects_Allocate() and _Objects_Free().
+ */
+Objects_Control *_Objects_Allocate_unprotected(
+ Objects_Information *information
+);
+
+/**
+ * @brief Allocates an object.
+ *
+ * This function locks the object allocator mutex via
+ * _Objects_Allocator_lock(). The caller must later unlock the object
+ * allocator mutex via _Objects_Allocator_unlock(). The caller must unlock the
+ * mutex in any case, even if the allocation failed due to resource shortage.
+ *
+ * A typical object allocation code looks like this:
+ * @code
+ * rtems_status_code some_create( rtems_id *id )
+ * {
+ * rtems_status_code sc;
+ * Some_Control *some;
+ *
+ * // The object allocator mutex protects the executing thread from
+ * // asynchronous thread restart and deletion.
+ * some = (Some_Control *) _Objects_Allocate( &_Some_Information );
+ *
+ * if ( some != NULL ) {
+ * _Some_Initialize( some );
+ * sc = RTEMS_SUCCESSFUL;
+ * } else {
+ * sc = RTEMS_TOO_MANY;
+ * }
+ *
+ * _Objects_Allocator_unlock();
+ *
+ * return sc;
+ * }
+ * @endcode
+ *
+ * @param[in] information The object information block.
+ *
+ * @retval NULL No object available.
+ * @retval object The allocated object.
+ *
+ * @see _Objects_Free().
+ */
+Objects_Control *_Objects_Allocate( Objects_Information *information );
+
+/**
+ * @brief Frees an object.
+ *
+ * Appends the object to the chain of inactive objects.
+ *
+ * @param[in] information The object information block.
+ * @param[in] the_object The object to free.
+ *
+ * @see _Objects_Allocate().
+ *
+ * A typical object deletion code looks like this:
+ * @code
+ * rtems_status_code some_delete( rtems_id id )
+ * {
+ * Some_Control *some;
+ *
+ * // The object allocator mutex protects the executing thread from
+ * // asynchronous thread restart and deletion.
+ * _Objects_Allocator_lock();
+ *
+ * // Get the object under protection of the object allocator mutex.
+ * some = (Semaphore_Control *)
+ * _Objects_Get_no_protection( id, &_Some_Information );
+ *
+ * if ( some == NULL ) {
+ * _Objects_Allocator_unlock();
+ * return RTEMS_INVALID_ID;
+ * }
+ *
+ * // After the object close an object get with this identifier will
+ * // fail.
+ * _Objects_Close( &_Some_Information, &some->Object );
+ *
+ * _Some_Delete( some );
+ *
+ * // Thread dispatching is enabled. The object free is only protected
+ * // by the object allocator mutex.
+ * _Objects_Free( &_Some_Information, &some->Object );
+ *
+ * _Objects_Allocator_unlock();
+ * return RTEMS_SUCCESSFUL;
+ * }
+ * @endcode
+ */
+void _Objects_Free(
+ Objects_Information *information,
+ Objects_Control *the_object
+);
+
+/**
+ * The following enumerated type lists the outcomes of the common
+ * portion of the object identification directives. These directives
+ * return the object id associated with a name. If more than one
+ * object of a class has the same name, then the object to which the
+ * returned id belongs is arbitrary. A node argument indicates the
+ * extent of the search: if the object class supports global objects,
+ * then the search can be limited to a particular node or allowed to
+ * encompass all nodes.
+ */
+typedef enum {
+ OBJECTS_NAME_OR_ID_LOOKUP_SUCCESSFUL,
+ OBJECTS_INVALID_NAME,
+ OBJECTS_INVALID_ADDRESS,
+ OBJECTS_INVALID_ID,
+ OBJECTS_INVALID_NODE
+} Objects_Name_or_id_lookup_errors;
+
+/**
+ * This macro defines the first entry in the
+ * @ref Objects_Name_or_id_lookup_errors enumerated list.
+ */
+#define OBJECTS_NAME_ERRORS_FIRST OBJECTS_NAME_OR_ID_LOOKUP_SUCCESSFUL
+
+/**
+ * This macro defines the last entry in the
+ * @ref Objects_Name_or_id_lookup_errors enumerated list.
+ */
+#define OBJECTS_NAME_ERRORS_LAST OBJECTS_INVALID_NODE
+
+/**
+ * @brief Converts an object name to an Id.
+ *
+ * This method converts an object name to an Id. It performs a look up
+ * using the object information block for this object class.
+ *
+ * @param[in] information points to an object class information block.
+ * @param[in] name is the name of the object to find.
+ * @param[in] node is the set of nodes to search.
+ * @param[in] id will contain the Id if the search is successful.
+ *
+ * @retval This method returns one of the values from the
+ * @ref Objects_Name_or_id_lookup_errors enumeration to indicate
+ * success or failure. On success @a id will contain the Id of
+ * the requested object.
+ */
+Objects_Name_or_id_lookup_errors _Objects_Name_to_id_u32(
+ Objects_Information *information,
+ uint32_t name,
+ uint32_t node,
+ Objects_Id *id
+);
+
+typedef enum {
+ OBJECTS_GET_BY_NAME_INVALID_NAME,
+ OBJECTS_GET_BY_NAME_NAME_TOO_LONG,
+ OBJECTS_GET_BY_NAME_NO_OBJECT
+} Objects_Get_by_name_error;
+
+/**
+ * @brief Gets an object control block identified by its name.
+ *
+ * The object information must use string names.
+ *
+ * @param information The object information. Must not be NULL.
+ * @param name The object name.
+ * @param name_length_p Optional parameter to return the name length.
+ * @param error The error indication in case of failure. Must not be NULL.
+ *
+ * @retval NULL No object exists for this name or invalid parameters.
+ * @retval other The first object according to object index associated with
+ * this name.
+ */
+Objects_Control *_Objects_Get_by_name(
+ const Objects_Information *information,
+ const char *name,
+ size_t *name_length_p,
+ Objects_Get_by_name_error *error
+);
+
+/**
+ * @brief Implements the common portion of the object Id to name directives.
+ *
+ * This function implements the common portion of the object Id
+ * to name directives. This function returns the name
+ * associated with object id.
+ *
+ * @param[in] id is the Id of the object whose name we are locating.
+ * @param[in] name will contain the name of the object, if found.
+ *
+ * @retval This method returns one of the values from the
+ * @ref Objects_Name_or_id_lookup_errors enumeration to indicate
+ * success or failure. On success @a name will contain the name of
+ * the requested object.
+ *
+ * @note This function currently does not support string names.
+ */
+Objects_Name_or_id_lookup_errors _Objects_Id_to_name (
+ Objects_Id id,
+ Objects_Name *name
+);
+
+/**
+ * @brief Maps the specified object identifier to the associated local object
+ * control block.
+ *
+ * In this function interrupts are disabled during the object lookup. In case
+ * an associated object exists, then interrupts remain disabled, otherwise the
+ * previous interrupt state is restored.
+ *
+ * @param id The object identifier. This is the first parameter since usual
+ *   callers get the object identifier as the first parameter themselves.
+ * @param lock_context The interrupt lock context. This is the second
+ * parameter since usual callers get the interrupt lock context as the second
+ *   parameter themselves.
+ * @param information The object class information block.
+ *
+ * @retval NULL No associated object exists.
+ * @retval other The pointer to the associated object control block.
+ * Interrupts are now disabled and must be restored using the specified lock
+ * context via _ISR_lock_ISR_enable() or _ISR_lock_Release_and_ISR_enable().
+ */
+Objects_Control *_Objects_Get(
+ Objects_Id id,
+ ISR_lock_Context *lock_context,
+ const Objects_Information *information
+);
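+
+/*
+ * A minimal lookup sketch; Some_Control and _Some_Information are
+ * placeholders for a concrete object class:
+ *
+ * @code
+ * ISR_lock_Context lock_context;
+ * Some_Control *some;
+ *
+ * some = (Some_Control *)
+ *   _Objects_Get( id, &lock_context, &_Some_Information );
+ *
+ * if ( some == NULL ) {
+ *   return RTEMS_INVALID_ID;
+ * }
+ *
+ * // use the object while interrupts are still disabled
+ *
+ * _ISR_lock_ISR_enable( &lock_context );
+ * @endcode
+ */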
+
+/**
+ * @brief Maps object ids to object control blocks.
+ *
+ * This function maps object ids to object control blocks without
+ * any protection. If the id corresponds to a local object, then the
+ * object control pointer which maps to the id is returned. Otherwise,
+ * NULL is returned.
+ *
+ * @param[in] id is the Id of the object whose name we are locating.
+ * This is the first parameter since usual callers get the object identifier
+ *   as the first parameter themselves.
+ * @param[in] information points to an object class information block.
+ *
+ * @retval NULL No associated local object exists.
+ * @retval other The pointer to the associated object control block.
+ */
+Objects_Control *_Objects_Get_no_protection(
+ Objects_Id id,
+ const Objects_Information *information
+);
+
+/**
+ * Gets the next open object after the specified object identifier.
+ *
+ * Locks the object allocator mutex in case a next object exists.
+ *
+ * @param[in] id is the Id of the object whose name we are locating.
+ * This is the first parameter since usual callers get the object identifier
+ *   as the first parameter themselves.
+ * @param[in] information points to an object class information block.
+ * @param[in] next_id_p will contain the Id of the next object to look at.
+ *
+ * @retval This method returns the pointer to the object located or
+ * NULL on error.
+ */
+Objects_Control *_Objects_Get_next(
+ Objects_Id id,
+ const Objects_Information *information,
+ Objects_Id *next_id_p
+);
+
+/**
+ * @brief Get object information.
+ *
+ * This function returns the information structure given
+ * the API and class. This can be done independent of
+ * the existence of any objects created by the API.
+ *
+ * @param[in] the_api indicates the API for the information we want
+ * @param[in] the_class indicates the Class for the information we want
+ *
+ * @retval This method returns a pointer to the Object Information Table
+ * for the class of objects which corresponds to this object ID.
+ */
+Objects_Information *_Objects_Get_information(
+ Objects_APIs the_api,
+ uint16_t the_class
+);
+
+/**
+ * @brief Get information of an object from an ID.
+ *
+ * This function returns the information structure given
+ * the @a id of an object.
+ *
+ * @param[in] id is the object ID to get the information from
+ *
+ * @retval This method returns a pointer to the Object Information Table
+ * for the class of objects which corresponds to this object ID.
+ */
+Objects_Information *_Objects_Get_information_id(
+ Objects_Id id
+);
+
+/**
+ * @brief Gets object name in the form of a C string.
+ *
+ * This method obtains the name of an object and returns it
+ * in the form of a C string. It attempts to be careful about
+ * overflowing the user's string and about returning unprintable characters.
+ *
+ * @param[in] id is the object to obtain the name of
+ * @param[in] length indicates the length of the caller's buffer
+ * @param[in] name points to a string which will be filled in.
+ *
+ * @retval This method returns @a name or NULL on error. @a *name will
+ * contain the name if successful.
+ */
+char *_Objects_Get_name_as_string(
+ Objects_Id id,
+ size_t length,
+ char *name
+);
+
+/**
+ * @brief Converts the specified object name to a text representation.
+ *
+ * Non-printable characters according to isprint() are converted to '*'.
+ *
+ * @param[in] name The object name.
+ * @param[in] is_string Indicates if the object name is a string or a four
+ * character array (32-bit unsigned integer).
+ * @param[in] buffer The string buffer for the text representation.
+ * @param[in] buffer_size The buffer size in characters.
+ *
+ * @retval The length of the text representation. May be greater than or equal
+ * to the buffer size if truncation occurred.
+ */
+size_t _Objects_Name_to_string(
+ Objects_Name name,
+ bool is_string,
+ char *buffer,
+ size_t buffer_size
+);
+
+/**
+ * @brief Set objects name.
+ *
+ * This method sets the object name to either a copy of a string
+ * or up to the first four characters of the string based upon
+ * whether this object class uses strings for names.
+ *
+ * @param[in] information points to the object information structure
+ * @param[in] the_object is the object to operate upon
+ * @param[in] name is a pointer to the name to use
+ *
+ * @retval If successful, true is returned. Otherwise false is returned.
+ */
+bool _Objects_Set_name(
+ Objects_Information *information,
+ Objects_Control *the_object,
+ const char *name
+);
+
+/**
+ * @brief Removes object from namespace.
+ *
+ * This function removes @a the_object from the namespace.
+ *
+ * @param[in] information points to an Object Information Table.
+ * @param[in] the_object is a pointer to an object.
+ */
+void _Objects_Namespace_remove(
+ Objects_Information *information,
+ Objects_Control *the_object
+);
+
+/**
+ * @brief Close object.
+ *
+ * This function removes the_object control pointer and object name
+ * in the Local Pointer and Local Name Tables.
+ *
+ * @param[in] information points to an Object Information Table
+ * @param[in] the_object is a pointer to an object
+ */
+void _Objects_Close(
+ Objects_Information *information,
+ Objects_Control *the_object
+);
+
+/**
+ * @brief Returns the count of active objects.
+ *
+ * @param[in] information The object information table.
+ *
+ * @retval The count of active objects.
+ */
+Objects_Maximum _Objects_Active_count(
+ const Objects_Information *information
+);
+
+RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Extend_size(
+ const Objects_Information *information
+)
+{
+ return information->auto_extend ? information->allocation_size : 0;
+}
+
+/**
+ * This function returns true if the api is valid.
+ *
+ * @param[in] the_api is the api portion of an object ID.
+ *
+ * @return This method returns true if the specified api value is valid
+ * and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Objects_Is_api_valid(
+ uint32_t the_api
+)
+{
+ if ( !the_api || the_api > OBJECTS_APIS_LAST )
+ return false;
+ return true;
+}
+
+/**
+ * This function returns true if the node is of the local object, and
+ * false otherwise.
+ *
+ * @param[in] node is the node number and corresponds to the node number
+ * portion of an object ID.
+ *
+ * @return This method returns true if the specified node is the local node
+ * and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Objects_Is_local_node(
+ uint32_t node
+)
+{
+ return ( node == _Objects_Local_node );
+}
+
+/**
+ * This function returns true if the id is of a local object, and
+ * false otherwise.
+ *
+ * @param[in] id is an object ID
+ *
+ * @return This method returns true if the specified object Id is local
+ * and false otherwise.
+ *
+ * @note On a single processor configuration, this always returns true.
+ */
+RTEMS_INLINE_ROUTINE bool _Objects_Is_local_id(
+#if defined(RTEMS_MULTIPROCESSING)
+ Objects_Id id
+#else
+ Objects_Id id RTEMS_UNUSED
+#endif
+)
+{
+#if defined(RTEMS_MULTIPROCESSING)
+ return _Objects_Is_local_node( _Objects_Get_node(id) );
+#else
+ return true;
+#endif
+}
+
+/**
+ * This function returns true if left and right are equal,
+ * and false otherwise.
+ *
+ * @param[in] left is the Id on the left hand side of the comparison
+ * @param[in] right is the Id on the right hand side of the comparison
+ *
+ * @return This method returns true if the specified object IDs are equal
+ * and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Objects_Are_ids_equal(
+ Objects_Id left,
+ Objects_Id right
+)
+{
+ return ( left == right );
+}
+
+/**
+ * This function sets the pointer to the local_table object
+ * referenced by the index.
+ *
+ * @param[in] information points to an Object Information Table
+ * @param[in] index is the index of the object the caller wants to access
+ * @param[in] the_object is the local object pointer
+ *
+ * @note This routine is ONLY to be called in places where the
+ * index portion of the Id is known to be good. This is
+ * OK since it is normally called from object create/init
+ * or delete/destroy operations.
+ */
+
+RTEMS_INLINE_ROUTINE void _Objects_Set_local_object(
+ Objects_Information *information,
+ uint32_t index,
+ Objects_Control *the_object
+)
+{
+ /*
+ * This routine is ONLY to be called from places in the code
+ * where the Id is known to be good. Therefore, this should NOT
+ * occur in normal situations.
+ */
+ #if defined(RTEMS_DEBUG)
+ if ( index > information->maximum )
+ return;
+ #endif
+
+ information->local_table[ index ] = the_object;
+}
+
+/**
+ * This function sets the pointer to the local_table object
+ * referenced by the index to a NULL so the object Id is invalid
+ * after this call.
+ *
+ * @param[in] information points to an Object Information Table
+ * @param[in] the_object is the local object pointer
+ *
+ * @note This routine is ONLY to be called in places where the
+ * index portion of the Id is known to be good. This is
+ * OK since it is normally called from object create/init
+ * or delete/destroy operations.
+ */
+
+RTEMS_INLINE_ROUTINE void _Objects_Invalidate_Id(
+ Objects_Information *information,
+ Objects_Control *the_object
+)
+{
+ _Assert( information != NULL );
+ _Assert( the_object != NULL );
+
+ _Objects_Set_local_object(
+ information,
+ _Objects_Get_index( the_object->id ),
+ NULL
+ );
+}
+
+/**
+ * This function places the_object control pointer and object name
+ * in the Local Pointer and Local Name Tables, respectively.
+ *
+ * @param[in] information points to an Object Information Table
+ * @param[in] the_object is a pointer to an object
+ * @param[in] name is the name of the object to make accessible
+ */
+RTEMS_INLINE_ROUTINE void _Objects_Open(
+ Objects_Information *information,
+ Objects_Control *the_object,
+ Objects_Name name
+)
+{
+ _Assert( information != NULL );
+ _Assert( the_object != NULL );
+
+ the_object->name = name;
+
+ _Objects_Set_local_object(
+ information,
+ _Objects_Get_index( the_object->id ),
+ the_object
+ );
+}
+
+/**
+ * This function places the_object control pointer and object name
+ * in the Local Pointer and Local Name Tables, respectively.
+ *
+ * @param[in] information points to an Object Information Table
+ * @param[in] the_object is a pointer to an object
+ * @param[in] name is the name of the object to make accessible
+ */
+RTEMS_INLINE_ROUTINE void _Objects_Open_u32(
+ Objects_Information *information,
+ Objects_Control *the_object,
+ uint32_t name
+)
+{
+ /* ASSERT: information->is_string == false */
+ the_object->name.name_u32 = name;
+
+ _Objects_Set_local_object(
+ information,
+ _Objects_Get_index( the_object->id ),
+ the_object
+ );
+}
+
+/**
+ * This function places the_object control pointer and object name
+ * in the Local Pointer and Local Name Tables, respectively.
+ *
+ * @param[in] information points to an Object Information Table
+ * @param[in] the_object is a pointer to an object
+ * @param[in] name is the name of the object to make accessible
+ */
+RTEMS_INLINE_ROUTINE void _Objects_Open_string(
+ Objects_Information *information,
+ Objects_Control *the_object,
+ const char *name
+)
+{
+ #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES)
+ /* ASSERT: information->is_string */
+ the_object->name.name_p = name;
+ #endif
+
+ _Objects_Set_local_object(
+ information,
+ _Objects_Get_index( the_object->id ),
+ the_object
+ );
+}
+
+/**
+ * @brief Locks the object allocator mutex.
+ *
+ * While holding the allocator mutex the executing thread is protected from
+ * asynchronous thread restart and deletion.
+ *
+ * The usage of the object allocator mutex with the thread life protection
+ * makes it possible to allocate and free objects without thread dispatching
+ * disabled. The usage of a unified workspace and unlimited objects may lead
+ * to heap fragmentation. Thus the execution time of the _Objects_Allocate()
+ * function may increase during system run-time.
+ *
+ * @see _Objects_Allocator_unlock() and _Objects_Allocate().
+ */
+RTEMS_INLINE_ROUTINE void _Objects_Allocator_lock( void )
+{
+ _RTEMS_Lock_allocator();
+}
+
+/**
+ * @brief Unlocks the object allocator mutex.
+ *
+ * In case the mutex is fully unlocked, then this function restores the
+ * previous thread life protection state and thus may not return if the
+ * executing thread was restarted or deleted in the mean-time.
+ */
+RTEMS_INLINE_ROUTINE void _Objects_Allocator_unlock( void )
+{
+ _RTEMS_Unlock_allocator();
+}
+
+RTEMS_INLINE_ROUTINE bool _Objects_Allocator_is_owner( void )
+{
+ return _RTEMS_Allocator_is_owner();
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#if defined(RTEMS_MULTIPROCESSING)
+#include <rtems/score/objectmp.h>
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/objectmp.h b/cpukit/include/rtems/score/objectmp.h
new file mode 100644
index 0000000000..5c9f4f74e3
--- /dev/null
+++ b/cpukit/include/rtems/score/objectmp.h
@@ -0,0 +1,197 @@
+/**
+ * @file rtems/score/objectmp.h
+ *
+ * @brief Data Associated with the Manipulation of Global RTEMS Objects
+ *
+ * This include file contains all the constants and structures associated
+ * with the manipulation of Global RTEMS Objects.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_OBJECTMP_H
+#define _RTEMS_SCORE_OBJECTMP_H
+
+#ifndef _RTEMS_SCORE_OBJECTIMPL_H
+# error "Never use <rtems/score/objectmp.h> directly; include <rtems/score/objectimpl.h> instead."
+#endif
+
+#include <rtems/score/chainimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreObjectMP Object Handler Multiprocessing Support
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which is used to manage
+ * objects which have been declared to be globally visible. This handler
+ * knows objects from all of the nodes in the system.
+ */
+/**@{*/
+
+/**
+ * @brief Initializes the inactive global object chain
+ * based on the maximum number of global objects configured.
+ *
+ * This routine initializes the inactive global object chain
+ * based on the maximum number of global objects configured.
+ */
+void _Objects_MP_Handler_initialization(void);
+
+/**
+ * @brief Initializes the global object node number
+ * used in the ID field of all objects.
+ *
+ * This routine initializes the global object node number
+ * used in the ID field of all objects.
+ */
+void _Objects_MP_Handler_early_initialization(void);
+
+/**
+ * @brief Places the specified global object in the
+ * specified information table.
+ *
+ * This routine places the specified global object in the
+ * specified information table.
+ *
+ * @param[in] information points to the object information table for this
+ * object class.
+ * @param[in] the_global_object points to the object being opened.
+ * @param[in] the_name is the name of the object being opened.
+ * @param[in] the_id is the Id of the object being opened.
+ *
+ * @todo This method only works for object types with 4 byte object names.
+ * It does not support variable length object names.
+ */
+void _Objects_MP_Open (
+ Objects_Information *information,
+ Objects_MP_Control *the_global_object,
+ uint32_t the_name,
+ Objects_Id the_id
+);
+
+/**
+ * @brief Allocates a global object control block
+ * and places it in the specified information table.
+ *
+ * This routine allocates a global object control block
+ * and places it in the specified information table. If the
+ * allocation fails, then is_fatal_error determines the
+ * error processing actions taken.
+ *
+ * @param[in] information points to the object information table for this
+ * object class.
+ * @param[in] the_name is the name of the object being opened.
+ * @param[in] the_id is the Id of the object being opened.
+ * @param[in] is_fatal_error is true if not being able to allocate the
+ * object is considered a fatal error.
+ *
+ * @todo This method only works for object types with 4 byte object names.
+ * It does not support variable length object names.
+ */
+bool _Objects_MP_Allocate_and_open (
+ Objects_Information *information,
+ uint32_t the_name,
+ Objects_Id the_id,
+ bool is_fatal_error
+);
+
+/**
+ * @brief Removes a global object from the specified information table.
+ *
+ * This routine removes a global object from the specified
+ * information table and deallocates the global object control block.
+ */
+void _Objects_MP_Close (
+ Objects_Information *information,
+ Objects_Id the_id
+);
+
+/**
+ * @brief Looks for the object with the_name in the global
+ * object tables indicated by information.
+ *
+ * This routine looks for the object with the_name in the global
+ * object tables indicated by information. It returns the ID of the
+ * object with that name if one is found.
+ *
+ * @param[in] information points to the object information table for this
+ * object class.
+ * @param[in] the_name is the name of the object being searched for.
+ * @param[in] nodes_to_search indicates the set of nodes to search.
+ * @param[out] the_id will contain the Id of the object if found.
+ *
+ * @return This method returns one of the
+ *         @ref Objects_Name_or_id_lookup_errors. If successful, @a the_id
+ *         will contain the Id of the object.
+ */
+Objects_Name_or_id_lookup_errors _Objects_MP_Global_name_search (
+ Objects_Information *information,
+ Objects_Name the_name,
+ uint32_t nodes_to_search,
+ Objects_Id *the_id
+);
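+
+/*
+ * A minimal lookup sketch (the information table pointer and object name
+ * are assumed to be provided by the caller; OBJECTS_SEARCH_ALL_NODES is
+ * declared in <rtems/score/objectimpl.h>):
+ *
+ *   Objects_Id id;
+ *   Objects_Name_or_id_lookup_errors status;
+ *
+ *   status = _Objects_MP_Global_name_search(
+ *     information,
+ *     name,
+ *     OBJECTS_SEARCH_ALL_NODES,
+ *     &id
+ *   );
+ *   if ( status == OBJECTS_NAME_OR_ID_LOOKUP_SUCCESSFUL ) {
+ *     ... id is valid here ...
+ *   }
+ */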
+
+/**
+ * @brief Returns true, if the object identifier is in the global object
+ * identifier cache of the specified object information, otherwise false.
+ *
+ * @param id The object identifier.
+ * @param information The object information.
+ *
+ * @retval true A remote object with this object identifier exists in the
+ *   global object identifier cache of the specified information.
+ * @retval false Otherwise.
+ */
+bool _Objects_MP_Is_remote(
+ Objects_Id id,
+ const Objects_Information *information
+);
+
+/**
+ * This is the maximum number of global objects configured.
+ */
+extern uint32_t _Objects_MP_Maximum_global_objects;
+
+/**
+ * This function allocates a Global Object control block.
+ */
+
+Objects_MP_Control *_Objects_MP_Allocate_global_object( void );
+
+/**
+ * This routine deallocates a Global Object control block.
+ */
+
+void _Objects_MP_Free_global_object( Objects_MP_Control *the_object );
+
+/**
+ * This function returns whether the global object is NULL or not.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Objects_MP_Is_null_global_object (
+ Objects_MP_Control *the_object
+)
+{
+ return( the_object == NULL );
+}
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/onceimpl.h b/cpukit/include/rtems/score/onceimpl.h
new file mode 100644
index 0000000000..60f1378506
--- /dev/null
+++ b/cpukit/include/rtems/score/onceimpl.h
@@ -0,0 +1,52 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreOnce
+ *
+ * @brief Once API
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_ONCE_H
+#define _RTEMS_ONCE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreOnce Once Functions.
+ *
+ * @ingroup Score
+ *
+ * @brief The _Once() function for pthread_once() and rtems_gxx_once().
+ *
+ * @{
+ */
+
+int _Once( unsigned char *once_state, void (*init_routine)(void) );
+
+void _Once_Lock( void );
+
+void _Once_Unlock( void );
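+
+/*
+ * A minimal usage sketch in the style of pthread_once(); the state
+ * variable and initialization routine names are illustrative only:
+ *
+ *   static unsigned char _My_once_state;
+ *
+ *   static void _My_init( void ) { ... }
+ *
+ *   int eno = _Once( &_My_once_state, _My_init );
+ */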
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_ONCE_H */
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
new file mode 100644
index 0000000000..00528b5ce3
--- /dev/null
+++ b/cpukit/include/rtems/score/percpu.h
@@ -0,0 +1,851 @@
+/**
+ * @file rtems/score/percpu.h
+ *
+ * This include file defines the per CPU information required
+ * by RTEMS.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2012, 2016 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_PERCPU_H
+#define _RTEMS_PERCPU_H
+
+#include <rtems/score/cpuimpl.h>
+
+#if defined( ASM )
+ #include <rtems/asm.h>
+#else
+ #include <rtems/score/assert.h>
+ #include <rtems/score/chain.h>
+ #include <rtems/score/isrlock.h>
+ #include <rtems/score/smp.h>
+ #include <rtems/score/smplock.h>
+ #include <rtems/score/timestamp.h>
+ #include <rtems/score/watchdog.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(RTEMS_SMP)
+ #if defined(RTEMS_PROFILING)
+ #define PER_CPU_CONTROL_SIZE_APPROX ( 512 + CPU_INTERRUPT_FRAME_SIZE )
+ #elif defined(RTEMS_DEBUG)
+ #define PER_CPU_CONTROL_SIZE_APPROX ( 256 + CPU_INTERRUPT_FRAME_SIZE )
+ #else
+ #define PER_CPU_CONTROL_SIZE_APPROX ( 128 + CPU_INTERRUPT_FRAME_SIZE )
+ #endif
+
+ /*
+ * This ensures that on SMP configurations the individual per-CPU controls
+ * are on different cache lines to prevent false sharing. This define can be
+ * used in assembler code to easily get the per-CPU control for a particular
+ * processor.
+ */
+ #if PER_CPU_CONTROL_SIZE_APPROX > 1024
+ #define PER_CPU_CONTROL_SIZE_LOG2 11
+ #elif PER_CPU_CONTROL_SIZE_APPROX > 512
+ #define PER_CPU_CONTROL_SIZE_LOG2 10
+ #elif PER_CPU_CONTROL_SIZE_APPROX > 256
+ #define PER_CPU_CONTROL_SIZE_LOG2 9
+ #elif PER_CPU_CONTROL_SIZE_APPROX > 128
+ #define PER_CPU_CONTROL_SIZE_LOG2 8
+ #else
+ #define PER_CPU_CONTROL_SIZE_LOG2 7
+ #endif
+
+ #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
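+
+  /*
+   * A sketch of the address computation this enables in assembler code
+   * (pseudo code; the variable names are illustrative):
+   *
+   *   per_cpu = _Per_CPU_Information + (cpu_index << PER_CPU_CONTROL_SIZE_LOG2)
+   */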
+#endif
+
+#if !defined( ASM )
+
+struct _Thread_Control;
+
+struct Scheduler_Context;
+
+/**
+ * @defgroup PerCPU RTEMS Per CPU Information
+ *
+ * @ingroup Score
+ *
+ * This defines the per CPU state information required by RTEMS
+ * and the BSP. In an SMP configuration, there will be multiple
+ * instances of this data structure -- one per CPU -- and the
+ * current CPU number will be used as the index.
+ */
+
+/**@{*/
+
+#if defined( RTEMS_SMP )
+
+/**
+ * @brief State of a processor.
+ *
+ * The processor state controls the life cycle of processors at the lowest
+ * level. No multi-threading or other high-level concepts matter here.
+ *
+ * State changes must be initiated via _Per_CPU_State_change(). This function
+ * may not return in case someone requested a shutdown. The
+ * _SMP_Send_message() function will be used to notify other processors about
+ * state changes if the other processor is in the up state.
+ *
+ * Due to the sequential nature of the basic system initialization, one
+ * processor has a special role. It is the processor executing the boot_card()
+ * function. This processor is called the boot processor. All other
+ * processors are called secondary.
+ *
+ * @dot
+ * digraph states {
+ * i [label="PER_CPU_STATE_INITIAL"];
+ * rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
+ * reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
+ * u [label="PER_CPU_STATE_UP"];
+ * s [label="PER_CPU_STATE_SHUTDOWN"];
+ * i -> rdy [label="processor\ncompleted initialization"];
+ * rdy -> reqsm [label="boot processor\ncompleted initialization"];
+ * reqsm -> u [label="processor\nstarts multitasking"];
+ * i -> s;
+ * rdy -> s;
+ * reqsm -> s;
+ * u -> s;
+ * }
+ * @enddot
+ */
+typedef enum {
+ /**
+ * @brief The per CPU controls are initialized to zero.
+ *
+ * The boot processor executes the sequential boot code in this state. The
+ * secondary processors should perform their basic initialization now and
+ * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
+ * is complete.
+ */
+ PER_CPU_STATE_INITIAL,
+
+ /**
+ * @brief Processor is ready to start multitasking.
+ *
+ * The secondary processor performed its basic initialization and is ready to
+ * receive inter-processor interrupts. Interrupt delivery must be disabled
+ * in this state, but requested inter-processor interrupts must be recorded
+ * and must be delivered once the secondary processor enables interrupts for
+ * the first time. The boot processor will wait for all secondary processors
+ * to change into this state. In case a secondary processor does not reach
+ * this state the system will not start. The secondary processors wait now
+ * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
+ * by the boot processor once all secondary processors reached the
+ * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
+ */
+ PER_CPU_STATE_READY_TO_START_MULTITASKING,
+
+ /**
+ * @brief Multitasking start of processor is requested.
+ *
+ * The boot processor completed system initialization and is about to perform
+ * a context switch to its heir thread. Secondary processors should now
+ * issue a context switch to the heir thread. This normally enables
+ * interrupts on the processor for the first time.
+ */
+ PER_CPU_STATE_REQUEST_START_MULTITASKING,
+
+ /**
+ * @brief Normal multitasking state.
+ */
+ PER_CPU_STATE_UP,
+
+ /**
+ * @brief This is the terminal state.
+ */
+ PER_CPU_STATE_SHUTDOWN
+} Per_CPU_State;
+
+#endif /* defined( RTEMS_SMP ) */
+
+/**
+ * @brief Per-CPU statistics.
+ */
+typedef struct {
+#if defined( RTEMS_PROFILING )
+ /**
+ * @brief The thread dispatch disabled begin instant in CPU counter ticks.
+ *
+ * This value is used to measure the time of disabled thread dispatching.
+ */
+ CPU_Counter_ticks thread_dispatch_disabled_instant;
+
+ /**
+ * @brief The maximum time of disabled thread dispatching in CPU counter
+ * ticks.
+ */
+ CPU_Counter_ticks max_thread_dispatch_disabled_time;
+
+ /**
+ * @brief The maximum time spent to process a single sequence of nested
+ * interrupts in CPU counter ticks.
+ *
+ * This is the time interval between the change of the interrupt nest level
+ * from zero to one and the change back from one to zero.
+ */
+ CPU_Counter_ticks max_interrupt_time;
+
+ /**
+ * @brief The maximum interrupt delay in CPU counter ticks if supported by
+ * the hardware.
+ */
+ CPU_Counter_ticks max_interrupt_delay;
+
+ /**
+ * @brief Count of times when the thread dispatch disable level changes from
+ * zero to one in thread context.
+ *
+ * This value may overflow.
+ */
+ uint64_t thread_dispatch_disabled_count;
+
+ /**
+ * @brief Total time of disabled thread dispatching in CPU counter ticks.
+ *
+ * The average time of disabled thread dispatching is the total time of
+ * disabled thread dispatching divided by the thread dispatch disabled
+ * count.
+ *
+ * This value may overflow.
+ */
+ uint64_t total_thread_dispatch_disabled_time;
+
+ /**
+ * @brief Count of times when the interrupt nest level changes from zero to
+ * one.
+ *
+ * This value may overflow.
+ */
+ uint64_t interrupt_count;
+
+ /**
+ * @brief Total time of interrupt processing in CPU counter ticks.
+ *
+ * The average time of interrupt processing is the total time of interrupt
+ * processing divided by the interrupt count.
+ *
+ * This value may overflow.
+ */
+ uint64_t total_interrupt_time;
+#endif /* defined( RTEMS_PROFILING ) */
+} Per_CPU_Stats;
+
+/**
+ * @brief Per-CPU watchdog header index.
+ */
+typedef enum {
+ /**
+ * @brief Index for monotonic clock per-CPU watchdog header.
+ *
+ * The reference time point for the monotonic clock is the system start. The
+ * clock resolution is one system clock tick. It is used for the system
+ * clock tick based time services and the POSIX services using
+ * CLOCK_MONOTONIC.
+ */
+ PER_CPU_WATCHDOG_MONOTONIC,
+
+ /**
+ * @brief Index for realtime clock per-CPU watchdog header.
+ *
+ * The reference time point for the realtime clock is the POSIX Epoch. The
+ * clock resolution is one nanosecond. It is used for the time of day
+ * services and the POSIX services using CLOCK_REALTIME.
+ */
+ PER_CPU_WATCHDOG_REALTIME,
+
+ /**
+ * @brief Count of per-CPU watchdog headers.
+ */
+ PER_CPU_WATCHDOG_COUNT
+} Per_CPU_Watchdog_index;
+
+/**
+ * @brief Per CPU Core Structure
+ *
+ * This structure is used to hold per core state information.
+ */
+typedef struct Per_CPU_Control {
+ #if CPU_PER_CPU_CONTROL_SIZE > 0
+ /**
+ * @brief CPU port specific control.
+ */
+ CPU_Per_CPU_control cpu_per_cpu;
+ #endif
+
+ #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
+ (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
+ /**
+ * This contains a pointer to the lower range of the interrupt stack for
+ * this CPU. This is the address allocated and freed.
+ */
+ void *interrupt_stack_low;
+
+ /**
+ * This contains a pointer to the interrupt stack pointer for this CPU.
+     * It will be loaded at the beginning of an ISR.
+ */
+ void *interrupt_stack_high;
+ #endif
+
+ /**
+ * This contains the current interrupt nesting level on this
+ * CPU.
+ */
+ uint32_t isr_nest_level;
+
+ /**
+   * @brief Indicates if an ISR thread dispatch is disabled.
+ *
+ * This flag is context switched with each thread. It indicates that this
+ * thread has an interrupt stack frame on its stack. By using this flag, we
+ * can avoid nesting more interrupt dispatching attempts on a previously
+ * interrupted thread's stack.
+ */
+ uint32_t isr_dispatch_disable;
+
+ /**
+ * @brief The thread dispatch critical section nesting counter which is used
+ * to prevent context switches at inopportune moments.
+ */
+ volatile uint32_t thread_dispatch_disable_level;
+
+ /**
+ * @brief This is set to true when this processor needs to run the thread
+ * dispatcher.
+ *
+ * It is volatile since interrupts may alter this flag.
+ *
+ * This field is not protected by a lock and must be accessed only by this
+ * processor. Code (e.g. scheduler and post-switch action requests) running
+   * on other processors must use an inter-processor interrupt to set the
+ * thread dispatch necessary indicator to true.
+ *
+ * @see _Thread_Get_heir_and_make_it_executing().
+ */
+ volatile bool dispatch_necessary;
+
+ /*
+ * Ensure that the executing member is at least 4-byte aligned, see
+ * PER_CPU_OFFSET_EXECUTING. This is necessary on CPU ports with relaxed
+ * alignment restrictions, e.g. type alignment is less than the type size.
+ */
+ bool reserved_for_executing_alignment[ 3 ];
+
+ /**
+ * @brief This is the thread executing on this processor.
+ *
+ * This field is not protected by a lock. The only writer is this processor.
+ *
+ * On SMP configurations a thread may be registered as executing on more than
+ * one processor in case a thread migration is in progress. On SMP
+ * configurations use _Thread_Is_executing_on_a_processor() to figure out if
+ * a thread context is executing on a processor.
+ */
+ struct _Thread_Control *executing;
+
+ /**
+ * @brief This is the heir thread for this processor.
+ *
+ * This field is not protected by a lock. The only writer after multitasking
+ * start is the scheduler owning this processor. It is assumed that stores
+ * to pointers are atomic on all supported SMP architectures. The CPU port
+ * specific code (inter-processor interrupt handling and
+ * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
+ * last value written.
+ *
+   * A thread can be an heir on at most one processor in the system.
+ *
+ * @see _Thread_Get_heir_and_make_it_executing().
+ */
+ struct _Thread_Control *heir;
+
+#if defined(RTEMS_SMP)
+ CPU_Interrupt_frame Interrupt_frame;
+#endif
+
+ /**
+ * @brief The CPU usage timestamp contains the time point of the last heir
+ * thread change or last CPU usage update of the executing thread of this
+ * processor.
+ *
+ * Protected by the scheduler lock.
+ *
+ * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
+ * _Thread_Get_CPU_time_used().
+ */
+ Timestamp_Control cpu_usage_timestamp;
+
+ /**
+ * @brief Watchdog state for this processor.
+ */
+ struct {
+ /**
+ * @brief Protects all watchdog operations on this processor.
+ */
+ ISR_LOCK_MEMBER( Lock )
+
+ /**
+ * @brief Watchdog ticks on this processor used for monotonic clock
+ * watchdogs.
+ */
+ uint64_t ticks;
+
+ /**
+ * @brief Header for watchdogs.
+ *
+ * @see Per_CPU_Watchdog_index.
+ */
+ Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
+ } Watchdog;
+
+ #if defined( RTEMS_SMP )
+ /**
+ * @brief This lock protects some parts of the low-level thread dispatching.
+ *
+ * We must use a ticket lock here since we cannot transport a local context
+ * through the context switch.
+ *
+ * @see _Thread_Dispatch().
+ */
+ SMP_ticket_lock_Control Lock;
+
+ #if defined( RTEMS_PROFILING )
+ /**
+ * @brief Lock statistics for the per-CPU lock.
+ */
+ SMP_lock_Stats Lock_stats;
+
+ /**
+ * @brief Lock statistics context for the per-CPU lock.
+ */
+ SMP_lock_Stats_context Lock_stats_context;
+ #endif
+
+ /**
+ * @brief Chain of threads in need for help.
+ *
+ * This field is protected by the Per_CPU_Control::Lock lock.
+ */
+ Chain_Control Threads_in_need_for_help;
+
+ /**
+ * @brief Bit field for SMP messages.
+ *
+   * This bit field is not protected by locks.  Atomic operations are used to
+ * set and get the message bits.
+ */
+ Atomic_Ulong message;
+
+ struct {
+ /**
+ * @brief The scheduler control of the scheduler owning this processor.
+ *
+ * This pointer is NULL in case this processor is currently not used by a
+ * scheduler instance.
+ */
+ const struct _Scheduler_Control *control;
+
+ /**
+ * @brief The scheduler context of the scheduler owning this processor.
+ *
+ * This pointer is NULL in case this processor is currently not used by a
+ * scheduler instance.
+ */
+ const struct Scheduler_Context *context;
+
+ /**
+ * @brief The idle thread for this processor in case it is online and
+ * currently not used by a scheduler instance.
+ */
+ struct _Thread_Control *idle_if_online_and_unused;
+ } Scheduler;
+
+ /**
+ * @brief Indicates the current state of the CPU.
+ *
+ * This field is protected by the _Per_CPU_State_lock lock.
+ *
+ * @see _Per_CPU_State_change().
+ */
+ Per_CPU_State state;
+
+ /**
+ * @brief Action to be executed by this processor in the
+ * SYSTEM_STATE_BEFORE_MULTITASKING state on behalf of the boot processor.
+ *
+ * @see _SMP_Before_multitasking_action().
+ */
+ Atomic_Uintptr before_multitasking_action;
+
+ /**
+ * @brief Indicates if the processor has been successfully started via
+ * _CPU_SMP_Start_processor().
+ */
+ bool online;
+
+ /**
+ * @brief Indicates if the processor is the one that performed the initial
+ * system initialization.
+ */
+ bool boot;
+ #endif
+
+ Per_CPU_Stats Stats;
+} Per_CPU_Control;
+
+#if defined( RTEMS_SMP )
+typedef struct {
+ Per_CPU_Control per_cpu;
+ char unused_space_for_cache_line_alignment
+ [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
+} Per_CPU_Control_envelope;
+#else
+typedef struct {
+ Per_CPU_Control per_cpu;
+} Per_CPU_Control_envelope;
+#endif
+
+/**
+ * @brief Set of Per CPU Core Information
+ *
+ * This is an array of per CPU core information.
+ */
+extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
+
+#if defined( RTEMS_SMP )
+#define _Per_CPU_Acquire( cpu ) \
+ _SMP_ticket_lock_Acquire( \
+ &( cpu )->Lock, \
+ &( cpu )->Lock_stats, \
+ &( cpu )->Lock_stats_context \
+ )
+#else
+#define _Per_CPU_Acquire( cpu ) \
+ do { \
+ (void) ( cpu ); \
+ } while ( 0 )
+#endif
+
+#if defined( RTEMS_SMP )
+#define _Per_CPU_Release( cpu ) \
+ _SMP_ticket_lock_Release( \
+ &( cpu )->Lock, \
+ &( cpu )->Lock_stats_context \
+ )
+#else
+#define _Per_CPU_Release( cpu ) \
+ do { \
+ (void) ( cpu ); \
+ } while ( 0 )
+#endif
+
+#if defined( RTEMS_SMP )
+#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
+ do { \
+ _ISR_Local_disable( isr_cookie ); \
+ _Per_CPU_Acquire( cpu ); \
+ } while ( 0 )
+#else
+#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
+ do { \
+ _ISR_Local_disable( isr_cookie ); \
+ (void) ( cpu ); \
+ } while ( 0 )
+#endif
+
+#if defined( RTEMS_SMP )
+#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
+ do { \
+ _Per_CPU_Release( cpu ); \
+ _ISR_Local_enable( isr_cookie ); \
+ } while ( 0 )
+#else
+#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
+ do { \
+ (void) ( cpu ); \
+ _ISR_Local_enable( isr_cookie ); \
+ } while ( 0 )
+#endif
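+
+/*
+ * A minimal sketch of the intended pairing, assuming the caller already
+ * holds a Per_CPU_Control pointer (e.g. obtained via
+ * _Per_CPU_Get_by_index()):
+ *
+ *   ISR_Level level;
+ *
+ *   _Per_CPU_ISR_disable_and_acquire( cpu, level );
+ *   ... access the per-CPU state of this processor ...
+ *   _Per_CPU_Release_and_ISR_enable( cpu, level );
+ */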
+
+#if defined( RTEMS_SMP )
+#define _Per_CPU_Acquire_all( isr_cookie ) \
+ do { \
+ uint32_t ncpus = _SMP_Get_processor_count(); \
+ uint32_t cpu; \
+ _ISR_Local_disable( isr_cookie ); \
+ for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
+ _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
+ } \
+ } while ( 0 )
+#else
+#define _Per_CPU_Acquire_all( isr_cookie ) \
+ _ISR_Local_disable( isr_cookie )
+#endif
+
+#if defined( RTEMS_SMP )
+#define _Per_CPU_Release_all( isr_cookie ) \
+ do { \
+ uint32_t ncpus = _SMP_Get_processor_count(); \
+ uint32_t cpu; \
+ for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
+ _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
+ } \
+ _ISR_Local_enable( isr_cookie ); \
+ } while ( 0 )
+#else
+#define _Per_CPU_Release_all( isr_cookie ) \
+ _ISR_Local_enable( isr_cookie )
+#endif
+
+/*
+ * If we get the current processor index in a context which allows thread
+ * dispatching, then we may already run on another processor right after the
+ * read instruction. There are very few cases in which this makes sense (here
+ * we can use _Per_CPU_Get_snapshot()). All other places must use
+ * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
+ */
+#if defined( _CPU_Get_current_per_CPU_control )
+ #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
+#else
+ #define _Per_CPU_Get_snapshot() \
+ ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
+#endif
+
+#if defined( RTEMS_SMP )
+static inline Per_CPU_Control *_Per_CPU_Get( void )
+{
+ Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
+
+ _Assert(
+ cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
+ );
+
+ return cpu_self;
+}
+#else
+#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
+#endif
+
+static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
+{
+ return &_Per_CPU_Information[ index ].per_cpu;
+}
+
+static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
+{
+ const Per_CPU_Control_envelope *per_cpu_envelope =
+ ( const Per_CPU_Control_envelope * ) cpu;
+
+ return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
+}
+
+static inline struct _Thread_Control *_Per_CPU_Get_executing(
+ const Per_CPU_Control *cpu
+)
+{
+ return cpu->executing;
+}
+
+static inline bool _Per_CPU_Is_processor_online(
+ const Per_CPU_Control *cpu
+)
+{
+#if defined( RTEMS_SMP )
+ return cpu->online;
+#else
+ (void) cpu;
+
+ return true;
+#endif
+}
+
+static inline bool _Per_CPU_Is_boot_processor(
+ const Per_CPU_Control *cpu
+)
+{
+#if defined( RTEMS_SMP )
+ return cpu->boot;
+#else
+ (void) cpu;
+
+ return true;
+#endif
+}
+
+#if defined( RTEMS_SMP )
+
+/**
+ * @brief Allocate and Initialize Per CPU Structures
+ *
+ * This method allocates and initializes the per CPU structures.
+ */
+void _Per_CPU_Initialize(void);
+
+void _Per_CPU_State_change(
+ Per_CPU_Control *cpu,
+ Per_CPU_State new_state
+);
+
+/**
+ * @brief Waits for a processor to change into a non-initial state.
+ *
+ * This function should be called only in _CPU_SMP_Start_processor() if
+ * required by the CPU port or BSP.
+ *
+ * @code
+ * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
+ * {
+ * uint32_t timeout = 123456;
+ *
+ * start_the_processor(cpu_index);
+ *
+ * return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
+ * }
+ * @endcode
+ *
+ * @param[in] cpu_index The processor index.
+ * @param[in] timeout_in_ns The timeout in nanoseconds. Use a value of zero to
+ * wait forever if necessary.
+ *
+ * @retval true The processor is in a non-initial state.
+ * @retval false The timeout expired before the processor reached a non-initial
+ * state.
+ */
+bool _Per_CPU_State_wait_for_non_initial_state(
+ uint32_t cpu_index,
+ uint32_t timeout_in_ns
+);
+
+#endif /* defined( RTEMS_SMP ) */
+
+/*
+ * On a non-SMP system, _SMP_Get_current_processor() is defined to 0.
+ * Thus when built for non-SMP, there should be no performance penalty.
+ */
+#define _Thread_Dispatch_disable_level \
+ _Per_CPU_Get()->thread_dispatch_disable_level
+#define _Thread_Heir \
+ _Per_CPU_Get()->heir
+
+#if defined(_CPU_Get_thread_executing)
+#define _Thread_Executing \
+ _CPU_Get_thread_executing()
+#else
+#define _Thread_Executing \
+ _Per_CPU_Get_executing( _Per_CPU_Get() )
+#endif
+
+#define _ISR_Nest_level \
+ _Per_CPU_Get()->isr_nest_level
+#define _CPU_Interrupt_stack_low \
+ _Per_CPU_Get()->interrupt_stack_low
+#define _CPU_Interrupt_stack_high \
+ _Per_CPU_Get()->interrupt_stack_high
+#define _Thread_Dispatch_necessary \
+ _Per_CPU_Get()->dispatch_necessary
+
+/**
+ * @brief Returns the thread control block of the executing thread.
+ *
+ * This function can be called in any thread context. On SMP configurations,
+ * interrupts are disabled to ensure that the processor index is used
+ * consistently if no CPU port specific method is available to get the
+ * executing thread.
+ *
+ * @return The thread control block of the executing thread.
+ */
+RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
+{
+ struct _Thread_Control *executing;
+
+ #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
+ ISR_Level level;
+
+ _ISR_Local_disable( level );
+ #endif
+
+ executing = _Thread_Executing;
+
+ #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
+ _ISR_Local_enable( level );
+ #endif
+
+ return executing;
+}
+
+/**@}*/
+
+#endif /* !defined( ASM ) */
+
+#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )
+
+#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
+ (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
+ /*
+   * If this CPU target lets RTEMS allocate the interrupt stack, then
+   * we need places in the per CPU table to hold the stack pointers.
+ */
+ #define PER_CPU_INTERRUPT_STACK_LOW \
+ CPU_PER_CPU_CONTROL_SIZE
+ #define PER_CPU_INTERRUPT_STACK_HIGH \
+ PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
+ #define PER_CPU_END_STACK \
+ PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
+
+ #define INTERRUPT_STACK_LOW \
+ (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
+ #define INTERRUPT_STACK_HIGH \
+ (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
+#else
+ #define PER_CPU_END_STACK \
+ CPU_PER_CPU_CONTROL_SIZE
+#endif
+
+/*
+ * These are the offsets of the required elements in the per CPU table.
+ */
+#define PER_CPU_ISR_NEST_LEVEL \
+ PER_CPU_END_STACK
+#define PER_CPU_ISR_DISPATCH_DISABLE \
+ PER_CPU_ISR_NEST_LEVEL + 4
+#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
+ PER_CPU_ISR_DISPATCH_DISABLE + 4
+#define PER_CPU_DISPATCH_NEEDED \
+ PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
+#define PER_CPU_OFFSET_EXECUTING \
+ PER_CPU_DISPATCH_NEEDED + 4
+#define PER_CPU_OFFSET_HEIR \
+ PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
+#if defined(RTEMS_SMP)
+#define PER_CPU_INTERRUPT_FRAME_AREA \
+ PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
+#endif
+
+#define THREAD_DISPATCH_DISABLE_LEVEL \
+ (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
+#define ISR_NEST_LEVEL \
+ (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
+#define DISPATCH_NEEDED \
+ (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
+
+#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/priority.h b/cpukit/include/rtems/score/priority.h
new file mode 100644
index 0000000000..7a8ddba763
--- /dev/null
+++ b/cpukit/include/rtems/score/priority.h
@@ -0,0 +1,203 @@
+/**
+ * @file
+ *
+ * @brief Priority Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2016, 2017 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PRIORITY_H
+#define _RTEMS_SCORE_PRIORITY_H
+
+#include <rtems/score/chain.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/rbtree.h>
+
+struct _Scheduler_Control;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScorePriority Priority Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which is used to manage thread
+ * priorities. The actual priority of a thread is an aggregation of priority
+ * nodes. The thread priority aggregation for the home scheduler instance of a
+ * thread consists of at least one priority node, which is normally the real
+ * priority of the thread. The locking protocols (e.g. priority ceiling and
+ * priority inheritance), rate-monotonic period objects and the POSIX sporadic
+ * server add, change and remove priority nodes.
+ *
+ * @{
+ */
+
+/**
+ * @brief The thread priority control.
+ *
+ * Lower values represent higher priorities. So, a priority value of zero
+ * represents the highest priority thread. This value is reserved for internal
+ * threads and the priority ceiling protocol.
+ *
+ * The format of the thread priority control depends on the context. A thread
+ * priority control may contain a user visible priority for API import/export.
+ * It may also contain a scheduler internal priority value. Values are
+ * translated via the scheduler map/unmap priority operations. The format of
+ * scheduler internal values depends on the particular scheduler implementation.
+ * It may for example encode a deadline in case of the EDF scheduler.
+ *
+ * The thread priority control value contained in the scheduler node
+ * (Scheduler_Node::Priority::value) uses the least-significant bit to indicate
+ * if the thread should be appended or prepended to its priority group, see
+ * SCHEDULER_PRIORITY_APPEND().
+ */
+typedef uint64_t Priority_Control;
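+
+/*
+ * A sketch of the least-significant bit encoding used in scheduler nodes;
+ * the SCHEDULER_PRIORITY_* helpers live in the scheduler implementation
+ * headers and are shown here for illustration only:
+ *
+ *   value    = SCHEDULER_PRIORITY_MAP( priority );    // priority << 1
+ *   append   = SCHEDULER_PRIORITY_APPEND( value );    // low bit set
+ *   purified = SCHEDULER_PRIORITY_PURIFY( append );   // low bit cleared
+ */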
+
+/**
+ * @brief The highest (most important) thread priority value.
+ */
+#define PRIORITY_MINIMUM 0
+
+/**
+ * @brief The priority value of pseudo-ISR threads.
+ *
+ * Examples are the MPCI and timer server threads.
+ */
+#define PRIORITY_PSEUDO_ISR PRIORITY_MINIMUM
+
+/**
+ * @brief The default lowest (least important) thread priority value.
+ *
+ * This value is CPU port dependent.
+ */
+#if defined (CPU_PRIORITY_MAXIMUM)
+ #define PRIORITY_DEFAULT_MAXIMUM CPU_PRIORITY_MAXIMUM
+#else
+ #define PRIORITY_DEFAULT_MAXIMUM 255
+#endif
+
+/**
+ * @brief The priority node to build up a priority aggregation.
+ */
+typedef struct {
+ /**
+ * @brief Node component for a chain or red-black tree.
+ */
+ union {
+ Chain_Node Chain;
+ RBTree_Node RBTree;
+ } Node;
+
+ /**
+ * @brief The priority value of this node.
+ */
+ Priority_Control priority;
+} Priority_Node;
+
+/**
+ * @brief The priority action type.
+ */
+typedef enum {
+ PRIORITY_ACTION_ADD,
+ PRIORITY_ACTION_CHANGE,
+ PRIORITY_ACTION_REMOVE,
+ PRIORITY_ACTION_INVALID
+} Priority_Action_type;
+
+typedef struct Priority_Aggregation Priority_Aggregation;
+
+/**
+ * @brief The priority aggregation.
+ *
+ * This structure serves two purposes. Firstly, it provides a place to
+ * register priority nodes and reflects the overall priority of its
+ * contributors. Secondly, it provides an action block to signal addition,
+ * change and removal of a priority node.
+ */
+struct Priority_Aggregation {
+ /**
+ * @brief This priority node reflects the overall priority of the aggregation.
+ *
+ * The overall priority of the aggregation is the minimum priority of the
+ * priority nodes in the contributors tree.
+ *
+ * This priority node may be used to add this aggregation to another
+ * aggregation to build up a recursive priority scheme.
+ *
+   * In case priority nodes of the contributors tree are added, changed or
+   * removed, the priority of this node may change.  To signal such changes
+   * to a priority aggregation, the action block may be used.
+ */
+ Priority_Node Node;
+
+ /**
+ * @brief A red-black tree to contain priority nodes contributing to the
+ * overall priority of this priority aggregation.
+ */
+ RBTree_Control Contributors;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The scheduler instance of this priority aggregation.
+ */
+ const struct _Scheduler_Control *scheduler;
+#endif
+
+ /**
+ * @brief A priority action block to manage priority node additions, changes
+ * and removals.
+ */
+ struct {
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The next priority aggregation in the action list.
+ */
+ Priority_Aggregation *next;
+#endif
+
+ /**
+ * @brief The priority node of the action.
+ */
+ Priority_Node *node;
+
+ /**
+ * @brief The type of the action.
+ */
+ Priority_Action_type type;
+ } Action;
+};
+
+/**
+ * @brief A list of priority actions.
+ *
+ * Actions are only added to the list. The action lists reside on the stack
+ * and have a short life-time. They are moved, processed or destroyed as a
+ * whole.
+ */
+typedef struct {
+ /**
+ * @brief The first action of a priority action list.
+ */
+ Priority_Aggregation *actions;
+} Priority_Actions;
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/prioritybitmap.h b/cpukit/include/rtems/score/prioritybitmap.h
new file mode 100644
index 0000000000..40638dd628
--- /dev/null
+++ b/cpukit/include/rtems/score/prioritybitmap.h
@@ -0,0 +1,79 @@
+/**
+ * @file rtems/score/prioritybitmap.h
+ *
+ * @brief Manipulation Routines for the Bitmap Priority Queue Implementation
+ *
+ * This include file contains all thread priority manipulation routines for
+ * the bit map priority queue implementation.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2010.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PRIORITYBITMAP_H
+#define _RTEMS_SCORE_PRIORITYBITMAP_H
+
+#include <rtems/score/cpu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScorePriorityBitmap Bitmap Priority Thread Routines
+ *
+ * @ingroup Score
+ */
+/**@{*/
+
+typedef uint16_t Priority_bit_map_Word;
+
+typedef struct {
+ /**
+   * @brief Each of the sixteen bits in this word is associated with one of
+   * the sixteen entries in the bit map array.
+ */
+ Priority_bit_map_Word major_bit_map;
+
+ /**
+ * @brief Each bit in the bit map indicates whether or not there are threads
+ * ready at a particular priority.
+ *
+ * The mapping of individual priority levels to particular bits is processor
+ * dependent as is the value of each bit used to indicate that threads are
+ * ready at that priority.
+ */
+ Priority_bit_map_Word bit_map[ 16 ];
+} Priority_bit_map_Control;
+
+/**
+ * The following record defines the information associated with
+ * each thread to manage its interaction with the priority bit maps.
+ */
+typedef struct {
+  /** This is the address of the minor bit map slot. */
+  Priority_bit_map_Word *minor;
+  /** This is the priority bit map ready mask for the major portion. */
+  Priority_bit_map_Word ready_major;
+  /** This is the priority bit map ready mask for the minor portion. */
+  Priority_bit_map_Word ready_minor;
+  /** This is the priority bit map block mask for the major portion. */
+  Priority_bit_map_Word block_major;
+  /** This is the priority bit map block mask for the minor portion. */
+  Priority_bit_map_Word block_minor;
+} Priority_bit_map_Information;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/prioritybitmapimpl.h b/cpukit/include/rtems/score/prioritybitmapimpl.h
new file mode 100644
index 0000000000..82c92eb5d6
--- /dev/null
+++ b/cpukit/include/rtems/score/prioritybitmapimpl.h
@@ -0,0 +1,215 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines in the Priority Handler Bit Map Implementation
+ *
+ * This file contains the static inline implementation of all inlined
+ * routines in the Priority Handler bit map implementation.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2010.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PRIORITYBITMAPIMPL_H
+#define _RTEMS_SCORE_PRIORITYBITMAPIMPL_H
+
+#include <rtems/score/prioritybitmap.h>
+
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScorePriority
+ */
+/**@{**/
+
+/**
+ * This table is used by the generic bitfield routines to perform
+ * a highly optimized bit scan without the use of special CPU
+ * instructions.
+ */
+extern const unsigned char _Bitfield_Leading_zeros[256];
+
+/**
+ * @brief Returns the bit number of the first bit set in the specified value.
+ *
+ * The correspondence between the bit number and actual bit position is CPU
+ * architecture dependent. The search for the first bit set may run from most
+ * to least significant bit or vice-versa.
+ *
+ * @param value The value to bit scan.
+ *
+ * @return The bit number of the first bit set.
+ *
+ * @see _Priority_Bits_index() and _Priority_Mask().
+ */
+RTEMS_INLINE_ROUTINE unsigned int _Bitfield_Find_first_bit(
+ unsigned int value
+)
+{
+ unsigned int bit_number;
+
+#if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE )
+ _CPU_Bitfield_Find_first_bit( value, bit_number );
+#elif defined(__GNUC__)
+ bit_number = (unsigned int) __builtin_clz( value )
+ - __SIZEOF_INT__ * __CHAR_BIT__ + 16;
+#else
+ if ( value < 0x100 ) {
+ bit_number = _Bitfield_Leading_zeros[ value ] + 8;
+  } else {
+ bit_number = _Bitfield_Leading_zeros[ value >> 8 ];
+ }
+#endif
+
+ return bit_number;
+}
+
+/**
+ * @brief Returns the priority bit mask for the specified major or minor bit
+ * number.
+ *
+ * @param bit_number The bit number for which we need a mask.
+ *
+ * @return The priority bit mask.
+ */
+RTEMS_INLINE_ROUTINE Priority_bit_map_Word _Priority_Mask(
+ unsigned int bit_number
+)
+{
+#if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE )
+ return _CPU_Priority_Mask( bit_number );
+#else
+ return (Priority_bit_map_Word) ( 0x8000u >> bit_number );
+#endif
+}
+
+/**
+ * @brief Returns the bit index position for the specified major or minor bit
+ * number.
+ *
+ * @param bit_number The bit number for which we need an index.
+ *
+ * @return The corresponding array index into the priority bit map.
+ */
+RTEMS_INLINE_ROUTINE unsigned int _Priority_Bits_index(
+ unsigned int bit_number
+)
+{
+#if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE )
+ return _CPU_Priority_bits_index( bit_number );
+#else
+ return bit_number;
+#endif
+}
+
+/**
+ * This function returns the major portion of the_priority.
+ */
+
+RTEMS_INLINE_ROUTINE unsigned int _Priority_Major( unsigned int the_priority )
+{
+ return the_priority / 16;
+}
+
+/**
+ * This function returns the minor portion of the_priority.
+ */
+
+RTEMS_INLINE_ROUTINE unsigned int _Priority_Minor( unsigned int the_priority )
+{
+ return the_priority % 16;
+}
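+
+/*
+ * For example, a priority value of 67 has major portion 67 / 16 = 4 and
+ * minor portion 67 % 16 = 3.
+ */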
+
+RTEMS_INLINE_ROUTINE void _Priority_bit_map_Initialize(
+ Priority_bit_map_Control *bit_map
+)
+{
+ memset( bit_map, 0, sizeof( *bit_map ) );
+}
+
+/**
+ * Priority Queue implemented by bit map
+ */
+
+RTEMS_INLINE_ROUTINE void _Priority_bit_map_Add (
+ Priority_bit_map_Control *bit_map,
+ Priority_bit_map_Information *bit_map_info
+)
+{
+ *bit_map_info->minor |= bit_map_info->ready_minor;
+ bit_map->major_bit_map |= bit_map_info->ready_major;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_bit_map_Remove (
+ Priority_bit_map_Control *bit_map,
+ Priority_bit_map_Information *bit_map_info
+)
+{
+ *bit_map_info->minor &= bit_map_info->block_minor;
+ if ( *bit_map_info->minor == 0 )
+ bit_map->major_bit_map &= bit_map_info->block_major;
+}
+
+RTEMS_INLINE_ROUTINE unsigned int _Priority_bit_map_Get_highest(
+ const Priority_bit_map_Control *bit_map
+)
+{
+ unsigned int minor;
+ unsigned int major;
+
+ major = _Bitfield_Find_first_bit( bit_map->major_bit_map );
+ minor = _Bitfield_Find_first_bit( bit_map->bit_map[ major ] );
+
+ return (_Priority_Bits_index( major ) << 4) +
+ _Priority_Bits_index( minor );
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_bit_map_Is_empty(
+ const Priority_bit_map_Control *bit_map
+)
+{
+ return bit_map->major_bit_map == 0;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_bit_map_Initialize_information(
+ Priority_bit_map_Control *bit_map,
+ Priority_bit_map_Information *bit_map_info,
+ unsigned int new_priority
+)
+{
+ unsigned int major;
+ unsigned int minor;
+ Priority_bit_map_Word mask;
+
+ major = _Priority_Major( new_priority );
+ minor = _Priority_Minor( new_priority );
+
+ bit_map_info->minor = &bit_map->bit_map[ _Priority_Bits_index( major ) ];
+
+ mask = _Priority_Mask( major );
+ bit_map_info->ready_major = mask;
+ bit_map_info->block_major = (Priority_bit_map_Word) ~mask;
+
+ mask = _Priority_Mask( minor );
+ bit_map_info->ready_minor = mask;
+ bit_map_info->block_minor = (Priority_bit_map_Word) ~mask;
+}
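+
+/*
+ * A minimal usage sketch with caller-provided storage; the priority value
+ * 67 is illustrative only:
+ *
+ *   Priority_bit_map_Control bit_map;
+ *   Priority_bit_map_Information info;
+ *
+ *   _Priority_bit_map_Initialize( &bit_map );
+ *   _Priority_bit_map_Initialize_information( &bit_map, &info, 67 );
+ *   _Priority_bit_map_Add( &bit_map, &info );
+ *   ... _Priority_bit_map_Get_highest( &bit_map ) yields 67 here ...
+ *   _Priority_bit_map_Remove( &bit_map, &info );
+ */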
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/priorityimpl.h b/cpukit/include/rtems/score/priorityimpl.h
new file mode 100644
index 0000000000..3380983cb7
--- /dev/null
+++ b/cpukit/include/rtems/score/priorityimpl.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PRIORITYIMPL_H
+#define _RTEMS_SCORE_PRIORITYIMPL_H
+
+#include <rtems/score/priority.h>
+#include <rtems/score/scheduler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_empty(
+ Priority_Actions *actions
+)
+{
+ actions->actions = NULL;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_one(
+ Priority_Actions *actions,
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Action_type type
+)
+{
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = node;
+ aggregation->Action.type = type;
+
+ actions->actions = aggregation;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Actions_is_empty(
+ const Priority_Actions *actions
+)
+{
+ return actions->actions == NULL;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Actions_is_valid(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation != NULL;
+#else
+ (void) aggregation;
+ return false;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Actions_move(
+ Priority_Actions *actions
+)
+{
+ Priority_Aggregation *aggregation;
+
+ aggregation = actions->actions;
+ actions->actions = NULL;
+
+ return aggregation;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_add(
+ Priority_Actions *actions,
+ Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ /*
+   * Priority aggregations are only added to action lists, so we do not care
+   * about the current next pointer value.
+ */
+ aggregation->Action.next = actions->actions;
+#endif
+ actions->actions = aggregation;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_initialize(
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ node->priority = priority;
+ _RBTree_Initialize_node( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_set_priority(
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ node->priority = priority;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_set_inactive(
+ Priority_Node *node
+)
+{
+ _RBTree_Set_off_tree( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Node_is_active(
+ const Priority_Node *node
+)
+{
+ return !_RBTree_Is_node_off_tree( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Initialize_empty(
+ Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_DEBUG)
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = NULL;
+ aggregation->Action.type = PRIORITY_ACTION_INVALID;
+#endif
+ _RBTree_Initialize_node( &aggregation->Node.Node.RBTree );
+ _RBTree_Initialize_empty( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Initialize_one(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+#if defined(RTEMS_DEBUG)
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = NULL;
+ aggregation->Action.type = PRIORITY_ACTION_INVALID;
+#endif
+ _Priority_Node_initialize( &aggregation->Node, node->priority );
+ _RBTree_Initialize_one( &aggregation->Contributors, &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Is_empty(
+ const Priority_Aggregation *aggregation
+)
+{
+ return _RBTree_Is_empty( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE Priority_Control _Priority_Get_priority(
+ const Priority_Aggregation *aggregation
+)
+{
+ return aggregation->Node.priority;
+}
+
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Priority_Get_scheduler(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation->scheduler;
+#else
+ return &_Scheduler_Table[ 0 ];
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Priority_Node *_Priority_Get_minimum_node(
+ const Priority_Aggregation *aggregation
+)
+{
+ return (Priority_Node *) _RBTree_Minimum( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action_node(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ aggregation->Action.node = node;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action_type(
+ Priority_Aggregation *aggregation,
+ Priority_Action_type type
+)
+{
+ aggregation->Action.type = type;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Action_type type
+)
+{
+ aggregation->Action.node = node;
+ aggregation->Action.type = type;
+}
+
+RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Get_next_action(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation->Action.next;
+#else
+ (void) aggregation;
+ return NULL;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Less(
+ const void *left,
+ const RBTree_Node *right
+)
+{
+ const Priority_Control *the_left;
+ const Priority_Node *the_right;
+
+ the_left = left;
+ the_right = RTEMS_CONTAINER_OF( right, Priority_Node, Node.RBTree );
+
+ return *the_left < the_right->priority;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Plain_insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ return _RBTree_Insert_inline(
+ &aggregation->Contributors,
+ &node->Node.RBTree,
+ &priority,
+ _Priority_Less
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Plain_extract(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ _RBTree_Extract( &aggregation->Contributors, &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Plain_changed(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ _Priority_Plain_extract( aggregation, node );
+ _Priority_Plain_insert( aggregation, node, node->priority );
+}
+
+typedef void ( *Priority_Add_handler )(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+);
+
+typedef void ( *Priority_Change_handler )(
+ Priority_Aggregation *aggregation,
+ bool prepend_it,
+ Priority_Actions *actions,
+ void *arg
+);
+
+typedef void ( *Priority_Remove_handler )(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+);
+
+RTEMS_INLINE_ROUTINE void _Priority_Change_nothing(
+ Priority_Aggregation *aggregation,
+ bool prepend_it,
+ Priority_Actions *actions,
+ void *arg
+)
+{
+ (void) aggregation;
+ (void) prepend_it;
+ (void) actions;
+ (void) arg;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Remove_nothing(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+)
+{
+ (void) aggregation;
+ (void) actions;
+ (void) arg;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Non_empty_insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ bool is_new_minimum;
+
+ _Assert( !_Priority_Is_empty( aggregation ) );
+ is_new_minimum = _Priority_Plain_insert( aggregation, node, node->priority );
+
+ if ( is_new_minimum ) {
+ aggregation->Node.priority = node->priority;
+ ( *change )( aggregation, false, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Add_handler add,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ if ( _Priority_Is_empty( aggregation ) ) {
+ _Priority_Initialize_one( aggregation, node );
+ ( *add )( aggregation, actions, arg );
+ } else {
+ _Priority_Non_empty_insert( aggregation, node, actions, change, arg );
+ }
+}
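+
+/*
+ * A minimal sketch of an insertion into a caller-initialized aggregation
+ * and node.  The no-op add handler is hypothetical and mirrors
+ * _Priority_Change_nothing():
+ *
+ *   static void _My_add_nothing(
+ *     Priority_Aggregation *aggregation,
+ *     Priority_Actions     *actions,
+ *     void                 *arg
+ *   )
+ *   {
+ *     (void) aggregation; (void) actions; (void) arg;
+ *   }
+ *
+ *   Priority_Actions actions;
+ *
+ *   _Priority_Actions_initialize_empty( &actions );
+ *   _Priority_Insert(
+ *     &aggregation,
+ *     &node,
+ *     &actions,
+ *     _My_add_nothing,
+ *     _Priority_Change_nothing,
+ *     NULL
+ *   );
+ */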
+
+RTEMS_INLINE_ROUTINE void _Priority_Extract(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Remove_handler remove,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ _Priority_Plain_extract( aggregation, node );
+
+ if ( _Priority_Is_empty( aggregation ) ) {
+ ( *remove )( aggregation, actions, arg );
+ } else {
+ Priority_Node *min;
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( node->priority < min->priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, true, actions, arg );
+ }
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Extract_non_empty(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ Priority_Node *min;
+
+ _Priority_Plain_extract( aggregation, node );
+ _Assert( !_Priority_Is_empty( aggregation ) );
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( node->priority < min->priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, true, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Changed(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ bool prepend_it,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ Priority_Node *min;
+
+ _Priority_Plain_changed( aggregation, node );
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( min->priority != aggregation->Node.priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, prepend_it, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Replace(
+ Priority_Aggregation *aggregation,
+ Priority_Node *victim,
+ Priority_Node *replacement
+)
+{
+ replacement->priority = victim->priority;
+ _RBTree_Replace_node(
+ &aggregation->Contributors,
+ &victim->Node.RBTree,
+ &replacement->Node.RBTree
+ );
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_PRIORITYIMPL_H */
diff --git a/cpukit/include/rtems/score/processormask.h b/cpukit/include/rtems/score/processormask.h
new file mode 100644
index 0000000000..a06aa2a56b
--- /dev/null
+++ b/cpukit/include/rtems/score/processormask.h
@@ -0,0 +1,290 @@
+/**
+ * @file
+ *
+ * @brief Processor Mask API
+ *
+ * @ingroup ScoreProcessorMask
+ */
+
+/*
+ * Copyright (c) 2016, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PROCESSORMASK_H
+#define _RTEMS_SCORE_PROCESSORMASK_H
+
+#include <rtems/score/cpu.h>
+
+#include <sys/cpuset.h>
+
+#include <strings.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreProcessorMask Processor Mask
+ *
+ * @ingroup Score
+ *
+ * The processor mask is a bit map large enough to provide one bit for each
+ * processor in the system.  It is a fixed size internal data type provided
+ * for efficiency in addition to the API level cpu_set_t.
+ *
+ * @{
+ */
+
+/**
+ * @brief A bit map which is large enough to provide one bit for each processor
+ * in the system.
+ */
+typedef BITSET_DEFINE( Processor_mask, CPU_MAXIMUM_PROCESSORS ) Processor_mask;
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Zero( Processor_mask *mask )
+{
+ BIT_ZERO( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_zero( const Processor_mask *mask )
+{
+ return BIT_EMPTY( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Fill( Processor_mask *mask )
+{
+ BIT_FILL( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Assign(
+ Processor_mask *dst, const Processor_mask *src
+)
+{
+ BIT_COPY( CPU_MAXIMUM_PROCESSORS, src, dst );
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Set(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ BIT_SET( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Clear(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ BIT_CLR( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_set(
+ const Processor_mask *mask,
+ uint32_t index
+)
+{
+ return BIT_ISSET( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+/**
+ * @brief Returns true if the processor sets a and b are equal, and false
+ * otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_equal(
+ const Processor_mask *a,
+ const Processor_mask *b
+)
+{
+ return !BIT_CMP( CPU_MAXIMUM_PROCESSORS, a, b );
+}
+
+/**
+ * @brief Returns true if the intersection of the processor sets a and b is
+ * non-empty, and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Has_overlap(
+ const Processor_mask *a,
+ const Processor_mask *b
+)
+{
+ return BIT_OVERLAP( CPU_MAXIMUM_PROCESSORS, a, b );
+}
+
+/**
+ * @brief Returns true if the processor set small is a subset of processor set
+ * big, and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_subset(
+ const Processor_mask *big,
+ const Processor_mask *small
+)
+{
+ return BIT_SUBSET( CPU_MAXIMUM_PROCESSORS, big, small );
+}
+
+/**
+ * @brief Performs a bitwise a = b & c.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_And(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ BIT_AND2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Performs a bitwise a = b & ~c.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_Nand(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ BIT_NAND2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Performs a bitwise a = b | c.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_Or(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ BIT_OR2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Performs a bitwise a = b ^ c.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_Xor(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ BIT_XOR2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_Count( const Processor_mask *a )
+{
+ return (uint32_t) BIT_COUNT( CPU_MAXIMUM_PROCESSORS, a );
+}
+
+RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_Find_last_set( const Processor_mask *a )
+{
+ return (uint32_t) BIT_FLS( CPU_MAXIMUM_PROCESSORS, a );
+}
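+
+/*
+ * A minimal usage sketch combining two single-processor masks; the
+ * processor indices are illustrative only:
+ *
+ *   Processor_mask a;
+ *   Processor_mask b;
+ *   Processor_mask c;
+ *
+ *   _Processor_mask_From_index( &b, 0 );
+ *   _Processor_mask_From_index( &c, 1 );
+ *   _Processor_mask_Or( &a, &b, &c );
+ *   ... _Processor_mask_Count( &a ) is now 2 ...
+ */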
+
+/**
+ * @brief Returns the subset of 32 processors containing the specified index as
+ * an unsigned 32-bit integer.
+ */
+RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_To_uint32_t(
+ const Processor_mask *mask,
+ uint32_t index
+)
+{
+ long bits = mask->__bits[ __bitset_words( index ) ];
+
+ return (uint32_t) (bits >> (32 * (index % _BITSET_BITS) / 32));
+}
+
+/**
+ * @brief Creates a processor set from an unsigned 32-bit integer relative to
+ * the specified index.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_From_uint32_t(
+ Processor_mask *mask,
+ uint32_t bits,
+ uint32_t index
+)
+{
+ _Processor_mask_Zero( mask );
+ mask->__bits[ __bitset_words( index ) ] = ((long) bits) << (32 * (index % _BITSET_BITS) / 32);
+}
+
+/**
+ * @brief Creates a processor set from the specified index.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_From_index(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ BIT_SETOF( CPU_MAXIMUM_PROCESSORS, (int) index, mask );
+}
+
+typedef enum {
+ PROCESSOR_MASK_COPY_LOSSLESS,
+ PROCESSOR_MASK_COPY_PARTIAL_LOSS,
+ PROCESSOR_MASK_COPY_COMPLETE_LOSS,
+ PROCESSOR_MASK_COPY_INVALID_SIZE
+} Processor_mask_Copy_status;
+
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_at_most_partial_loss(
+ Processor_mask_Copy_status status
+)
+{
+ return (unsigned int) status <= PROCESSOR_MASK_COPY_PARTIAL_LOSS;
+}
+
+Processor_mask_Copy_status _Processor_mask_Copy(
+ long *dst,
+ size_t dst_size,
+ const long *src,
+ size_t src_size
+);
+
+RTEMS_INLINE_ROUTINE Processor_mask_Copy_status _Processor_mask_To_cpu_set_t(
+ const Processor_mask *src,
+ size_t dst_size,
+ cpu_set_t *dst
+)
+{
+ return _Processor_mask_Copy(
+ &dst->__bits[ 0 ],
+ dst_size,
+ &src->__bits[ 0 ],
+ sizeof( *src )
+ );
+}
+
+RTEMS_INLINE_ROUTINE Processor_mask_Copy_status _Processor_mask_From_cpu_set_t(
+ Processor_mask *dst,
+ size_t src_size,
+ const cpu_set_t *src
+)
+{
+ return _Processor_mask_Copy(
+ &dst->__bits[ 0 ],
+ sizeof( *dst ),
+ &src->__bits[ 0 ],
+ src_size
+ );
+}
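+
+/*
+ * Illustration (editor's sketch, not part of the original header):
+ * converting a processor mask to a POSIX cpu_set_t and accepting the result
+ * as long as at most a partial loss occurred. The do_something_with()
+ * function is a placeholder for application code.
+ *
+ * @code
+ * Processor_mask mask;
+ * cpu_set_t set;
+ * Processor_mask_Copy_status status;
+ *
+ * _Processor_mask_From_index( &mask, 1 );
+ * status = _Processor_mask_To_cpu_set_t( &mask, sizeof( set ), &set );
+ *
+ * if ( _Processor_mask_Is_at_most_partial_loss( status ) ) {
+ *   do_something_with( &set );
+ * }
+ * @endcode
+ */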
+
+extern const Processor_mask _Processor_mask_The_one_and_only;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_PROCESSORMASK_H */
diff --git a/cpukit/include/rtems/score/profiling.h b/cpukit/include/rtems/score/profiling.h
new file mode 100644
index 0000000000..6ba5d2987f
--- /dev/null
+++ b/cpukit/include/rtems/score/profiling.h
@@ -0,0 +1,140 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreProfiling
+ *
+ * @brief Profiling Support API
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PROFILING
+#define _RTEMS_SCORE_PROFILING
+
+#include <rtems/score/percpu.h>
+#include <rtems/score/isrlock.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreProfiling Profiling Support
+ *
+ * @brief Profiling support.
+ *
+ * @{
+ */
+
+static inline void _Profiling_Thread_dispatch_disable(
+ Per_CPU_Control *cpu,
+ uint32_t previous_thread_dispatch_disable_level
+)
+{
+#if defined( RTEMS_PROFILING )
+ if ( previous_thread_dispatch_disable_level == 0 ) {
+ Per_CPU_Stats *stats = &cpu->Stats;
+
+ stats->thread_dispatch_disabled_instant = _CPU_Counter_read();
+ ++stats->thread_dispatch_disabled_count;
+ }
+#else
+ (void) cpu;
+ (void) previous_thread_dispatch_disable_level;
+#endif
+}
+
+static inline void _Profiling_Thread_dispatch_disable_critical(
+ Per_CPU_Control *cpu,
+ uint32_t previous_thread_dispatch_disable_level,
+ const ISR_lock_Context *lock_context
+)
+{
+#if defined( RTEMS_PROFILING )
+ if ( previous_thread_dispatch_disable_level == 0 ) {
+ Per_CPU_Stats *stats = &cpu->Stats;
+
+ stats->thread_dispatch_disabled_instant = lock_context->ISR_disable_instant;
+ ++stats->thread_dispatch_disabled_count;
+ }
+#else
+ (void) cpu;
+ (void) previous_thread_dispatch_disable_level;
+ (void) lock_context;
+#endif
+}
+
+static inline void _Profiling_Thread_dispatch_enable(
+ Per_CPU_Control *cpu,
+ uint32_t new_thread_dispatch_disable_level
+)
+{
+#if defined( RTEMS_PROFILING )
+ if ( new_thread_dispatch_disable_level == 0 ) {
+ Per_CPU_Stats *stats = &cpu->Stats;
+ CPU_Counter_ticks now = _CPU_Counter_read();
+ CPU_Counter_ticks delta = _CPU_Counter_difference(
+ now,
+ stats->thread_dispatch_disabled_instant
+ );
+
+ stats->total_thread_dispatch_disabled_time += delta;
+
+ if ( stats->max_thread_dispatch_disabled_time < delta ) {
+ stats->max_thread_dispatch_disabled_time = delta;
+ }
+ }
+#else
+ (void) cpu;
+ (void) new_thread_dispatch_disable_level;
+#endif
+}
+
+static inline void _Profiling_Update_max_interrupt_delay(
+ Per_CPU_Control *cpu,
+ CPU_Counter_ticks interrupt_delay
+)
+{
+#if defined( RTEMS_PROFILING )
+ Per_CPU_Stats *stats = &cpu->Stats;
+
+ if ( stats->max_interrupt_delay < interrupt_delay ) {
+ stats->max_interrupt_delay = interrupt_delay;
+ }
+#else
+ (void) cpu;
+ (void) interrupt_delay;
+#endif
+}
+
+/**
+ * @brief Updates the interrupt profiling statistics.
+ *
+ * Must be called on the interrupt stack and before the thread dispatch
+ * disable level is decremented.
+ */
+void _Profiling_Outer_most_interrupt_entry_and_exit(
+ Per_CPU_Control *cpu,
+ CPU_Counter_ticks interrupt_entry_instant,
+ CPU_Counter_ticks interrupt_exit_instant
+);
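+
+/*
+ * Illustration (editor's sketch, under assumptions): a possible call site in
+ * a low level interrupt epilogue, while still on the interrupt stack and
+ * before the thread dispatch disable level is decremented. Both cpu and
+ * interrupt_entry_instant are assumed local variables, the entry instant
+ * sampled with _CPU_Counter_read() right after interrupt entry.
+ *
+ * @code
+ * _Profiling_Outer_most_interrupt_entry_and_exit(
+ *   cpu,
+ *   interrupt_entry_instant,
+ *   _CPU_Counter_read()
+ * );
+ * @endcode
+ */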
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_PROFILING */
diff --git a/cpukit/include/rtems/score/protectedheap.h b/cpukit/include/rtems/score/protectedheap.h
new file mode 100644
index 0000000000..a08fa36cf3
--- /dev/null
+++ b/cpukit/include/rtems/score/protectedheap.h
@@ -0,0 +1,172 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PROTECTED_HEAP_H
+#define _RTEMS_SCORE_PROTECTED_HEAP_H
+
+#include <rtems/score/heapimpl.h>
+#include <rtems/score/apimutex.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreProtHeap Protected Heap Handler
+ *
+ * @ingroup ScoreHeap
+ *
+ * @brief Provides protected heap services.
+ *
+ * The @ref ScoreAllocatorMutex is used to protect the heap accesses.
+ *
+ */
+/**@{**/
+
+/**
+ * @brief See _Heap_Initialize().
+ */
+RTEMS_INLINE_ROUTINE uintptr_t _Protected_heap_Initialize(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size,
+ uintptr_t page_size
+)
+{
+ return _Heap_Initialize( heap, area_begin, area_size, page_size );
+}
+
+/**
+ * @brief See _Heap_Extend().
+ *
+ * Returns @a true in case of success, and @a false otherwise.
+ */
+bool _Protected_heap_Extend(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size
+);
+
+/**
+ * @brief See _Heap_Allocate_aligned_with_boundary().
+ */
+void *_Protected_heap_Allocate_aligned_with_boundary(
+ Heap_Control *heap,
+ uintptr_t size,
+ uintptr_t alignment,
+ uintptr_t boundary
+);
+
+/**
+ * @brief See _Heap_Allocate_aligned_with_boundary() with a boundary of zero.
+ */
+RTEMS_INLINE_ROUTINE void *_Protected_heap_Allocate_aligned(
+ Heap_Control *heap,
+ uintptr_t size,
+ uintptr_t alignment
+)
+{
+ return
+ _Protected_heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 );
+}
+
+/**
+ * @brief See _Heap_Allocate_aligned_with_boundary() with alignment and
+ * boundary equal to zero.
+ */
+RTEMS_INLINE_ROUTINE void *_Protected_heap_Allocate(
+ Heap_Control *heap,
+ uintptr_t size
+)
+{
+ return _Protected_heap_Allocate_aligned_with_boundary( heap, size, 0, 0 );
+}
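+
+/*
+ * Illustration (editor's sketch, assuming heap points to an initialized
+ * Heap_Control): a protected allocation and release. The allocator mutex is
+ * acquired and released inside these services, so the call site needs no
+ * explicit locking.
+ *
+ * @code
+ * void *p = _Protected_heap_Allocate( heap, 32 );
+ *
+ * if ( p != NULL ) {
+ *   _Protected_heap_Free( heap, p );
+ * }
+ * @endcode
+ */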
+
+/**
+ * @brief See _Heap_Size_of_alloc_area().
+ */
+bool _Protected_heap_Get_block_size(
+ Heap_Control *heap,
+ void *addr,
+ uintptr_t *size
+);
+
+/**
+ * @brief See _Heap_Resize_block().
+ *
+ * Returns @a true in case of success, and @a false otherwise.
+ */
+bool _Protected_heap_Resize_block(
+ Heap_Control *heap,
+ void *addr,
+ uintptr_t size
+);
+
+/**
+ * @brief See _Heap_Free().
+ *
+ * Returns @a true in case of success, and @a false otherwise.
+ */
+bool _Protected_heap_Free( Heap_Control *heap, void *addr );
+
+/**
+ * @brief See _Heap_Walk().
+ */
+bool _Protected_heap_Walk( Heap_Control *heap, int source, bool dump );
+
+/**
+ * @brief See _Heap_Iterate().
+ */
+void _Protected_heap_Iterate(
+ Heap_Control *heap,
+ Heap_Block_visitor visitor,
+ void *visitor_arg
+);
+
+/**
+ * @brief See _Heap_Get_information().
+ *
+ * Returns @a true in case of success, and @a false otherwise.
+ */
+bool _Protected_heap_Get_information(
+ Heap_Control *heap,
+ Heap_Information_block *info
+);
+
+/**
+ * @brief See _Heap_Get_free_information().
+ *
+ * Returns @a true in case of success, and @a false otherwise.
+ */
+bool _Protected_heap_Get_free_information(
+ Heap_Control *heap,
+ Heap_Information *info
+);
+
+/**
+ * @brief See _Heap_Get_size().
+ */
+uintptr_t _Protected_heap_Get_size( Heap_Control *heap );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/rbtree.h b/cpukit/include/rtems/score/rbtree.h
new file mode 100644
index 0000000000..15a3bc8913
--- /dev/null
+++ b/cpukit/include/rtems/score/rbtree.h
@@ -0,0 +1,568 @@
+/**
+ * @file rtems/score/rbtree.h
+ *
+ * @brief Constants and Structures Associated with the Red-Black Tree Handler
+ *
+ * This include file contains all the constants and structures associated
+ * with the Red-Black Tree Handler.
+ */
+
+/*
+ * Copyright (c) 2010 Gedare Bloom.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_RBTREE_H
+#define _RTEMS_SCORE_RBTREE_H
+
+#include <sys/tree.h>
+#include <rtems/score/basedefs.h>
+#include <rtems/score/assert.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreRBTree Red-Black Tree Handler
+ *
+ * @ingroup Score
+ *
+ * The Red-Black Tree Handler is used to manage sets of entities. This handler
+ * provides two data structures. The rbtree Node data structure is included
+ * as the first part of every data structure that will be placed on
+ * a RBTree. The second data structure is rbtree Control which is used
+ * to manage a set of rbtree Nodes.
+ */
+/**@{*/
+
+struct RBTree_Control;
+
+/**
+ * @brief Red-black tree node.
+ *
+ * This is used to manage each node (element) which is placed on a red-black
+ * tree.
+ */
+typedef struct RBTree_Node {
+ RB_ENTRY(RBTree_Node) Node;
+} RBTree_Node;
+
+/**
+ * @brief Red-black tree control.
+ *
+ * This is used to manage a red-black tree. A red-black tree consists of a
+ * tree of zero or more nodes.
+ */
+typedef RB_HEAD(RBTree_Control, RBTree_Node) RBTree_Control;
+
+/**
+ * @brief Initializer for an empty red-black tree with designator @a name.
+ */
+#define RBTREE_INITIALIZER_EMPTY( name ) \
+ RB_INITIALIZER( name )
+
+/**
+ * @brief Definition for an empty red-black tree with designator @a name.
+ */
+#define RBTREE_DEFINE_EMPTY( name ) \
+ RBTree_Control name = RBTREE_INITIALIZER_EMPTY( name )
+
+/**
+ * @brief Sets a red-black tree node as off-tree.
+ *
+ * Do not use this function on nodes which are a part of a tree.
+ *
+ * @param[in] the_node The node to set off-tree.
+ *
+ * @see _RBTree_Is_node_off_tree().
+ */
+RTEMS_INLINE_ROUTINE void _RBTree_Set_off_tree( RBTree_Node *the_node )
+{
+ RB_COLOR( the_node, Node ) = -1;
+}
+
+/**
+ * @brief Returns true if this red-black tree node is off-tree, and false
+ * otherwise.
+ *
+ * @param[in] the_node The node to test.
+ *
+ * @retval true The node is not a part of a tree (off-tree).
+ * @retval false Otherwise.
+ *
+ * @see _RBTree_Set_off_tree().
+ */
+RTEMS_INLINE_ROUTINE bool _RBTree_Is_node_off_tree(
+ const RBTree_Node *the_node
+)
+{
+ return RB_COLOR( the_node, Node ) == -1;
+}
+
+/**
+ * @brief Rebalances the red-black tree after insertion of the node.
+ *
+ * @param[in] the_rbtree The red-black tree control.
+ * @param[in] the_node The most recently inserted node.
+ */
+void _RBTree_Insert_color(
+ RBTree_Control *the_rbtree,
+ RBTree_Node *the_node
+);
+
+/**
+ * @brief Initializes a red-black tree node.
+ *
+ * In debug configurations, the node is set off tree. In all other
+ * configurations, this function does nothing.
+ *
+ * @param[in] the_node The red-black tree node to initialize.
+ */
+RTEMS_INLINE_ROUTINE void _RBTree_Initialize_node( RBTree_Node *the_node )
+{
+#if defined(RTEMS_DEBUG)
+ _RBTree_Set_off_tree( the_node );
+#else
+ (void) the_node;
+#endif
+}
+
+/**
+ * @brief Adds a child node to a parent node.
+ *
+ * @param[in] child The child node.
+ * @param[in] parent The parent node.
+ * @param[in] link The child node link of the parent node.
+ */
+RTEMS_INLINE_ROUTINE void _RBTree_Add_child(
+ RBTree_Node *child,
+ RBTree_Node *parent,
+ RBTree_Node **link
+)
+{
+ _Assert( _RBTree_Is_node_off_tree( child ) );
+ RB_SET( child, parent, Node );
+ *link = child;
+}
+
+/**
+ * @brief Inserts the node into the red-black tree using the specified parent
+ * node and link.
+ *
+ * @param[in] the_rbtree The red-black tree control.
+ * @param[in] the_node The node to insert.
+ * @param[in] parent The parent node.
+ * @param[in] link The child node link of the parent node.
+ *
+ * @code
+ * #include <rtems/score/rbtree.h>
+ *
+ * typedef struct {
+ * int value;
+ * RBTree_Node Node;
+ * } Some_Node;
+ *
+ * bool _Some_Less(
+ * const RBTree_Node *a,
+ * const RBTree_Node *b
+ * )
+ * {
+ * const Some_Node *aa = RTEMS_CONTAINER_OF( a, Some_Node, Node );
+ * const Some_Node *bb = RTEMS_CONTAINER_OF( b, Some_Node, Node );
+ *
+ * return aa->value < bb->value;
+ * }
+ *
+ * void _Some_Insert(
+ * RBTree_Control *the_rbtree,
+ * Some_Node *the_node
+ * )
+ * {
+ * RBTree_Node **link = _RBTree_Root_reference( the_rbtree );
+ * RBTree_Node *parent = NULL;
+ *
+ * while ( *link != NULL ) {
+ * parent = *link;
+ *
+ * if ( _Some_Less( &the_node->Node, parent ) ) {
+ * link = _RBTree_Left_reference( parent );
+ * } else {
+ * link = _RBTree_Right_reference( parent );
+ * }
+ * }
+ *
+ * _RBTree_Insert_with_parent( the_rbtree, &the_node->Node, parent, link );
+ * }
+ * @endcode
+ */
+RTEMS_INLINE_ROUTINE void _RBTree_Insert_with_parent(
+ RBTree_Control *the_rbtree,
+ RBTree_Node *the_node,
+ RBTree_Node *parent,
+ RBTree_Node **link
+)
+{
+ _RBTree_Add_child( the_node, parent, link );
+ _RBTree_Insert_color( the_rbtree, the_node );
+}
+
+/**
+ * @brief Extracts (removes) the node from the red-black tree.
+ *
+ * This function does not set the node off-tree. In case this is desired, then
+ * call _RBTree_Set_off_tree() after the extraction.
+ *
+ * In case the node to extract is not a node of the tree, then this function
+ * yields unpredictable results.
+ *
+ * @param[in] the_rbtree The red-black tree control.
+ * @param[in] the_node The node to extract.
+ */
+void _RBTree_Extract(
+ RBTree_Control *the_rbtree,
+ RBTree_Node *the_node
+);
+
+/**
+ * @brief Returns a pointer to root node of the red-black tree.
+ *
+ * The root node may change after insert or extract operations.
+ *
+ * @param[in] the_rbtree The red-black tree control.
+ *
+ * @retval NULL The tree is empty.
+ * @retval root The root node.
+ *
+ * @see _RBTree_Is_root().
+ */
+RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Root(
+ const RBTree_Control *the_rbtree
+)
+{
+ return RB_ROOT( the_rbtree );
+}
+
+/**
+ * @brief Returns a reference to the root pointer of the red-black tree.
+ */
+RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Root_reference(
+ RBTree_Control *the_rbtree
+)
+{
+ return &RB_ROOT( the_rbtree );
+}
+
+/**
+ * @brief Returns a constant reference to the root pointer of the red-black tree.
+ */
+RTEMS_INLINE_ROUTINE RBTree_Node * const *_RBTree_Root_const_reference(
+ const RBTree_Control *the_rbtree
+)
+{
+ return &RB_ROOT( the_rbtree );
+}
+
+/**
+ * @brief Returns a pointer to the parent of this node.
+ *
+ * The node must have a parent; thus, it is invalid to use this function for
+ * the root node or a node that is not part of a tree. To test for the root
+ * node, compare with _RBTree_Root() or use _RBTree_Is_root().
+ *
+ * @param[in] the_node The node of interest.
+ *
+ * @retval parent The parent of this node.
+ * @retval undefined The node is the root node or not part of a tree.
+ */
+RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Parent(
+ const RBTree_Node *the_node
+)
+{
+ return RB_PARENT( the_node, Node );
+}
+
+/**
+ * @brief Returns a pointer to the left child of this node.
+ *
+ * This function returns a pointer to the left child node of this node.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the left child node on the rbtree.
+ */
+RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Left(
+ const RBTree_Node *the_node
+)
+{
+ return RB_LEFT( the_node, Node );
+}
+
+/**
+ * @brief Returns a reference to the left child pointer of the red-black tree
+ * node.
+ */
+RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Left_reference(
+ RBTree_Node *the_node
+)
+{
+ return &RB_LEFT( the_node, Node );
+}
+
+/**
+ * @brief Returns a pointer to the right child of this node.
+ *
+ * This function returns a pointer to the right child node of this node.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the right child node on the rbtree.
+ */
+RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Right(
+ const RBTree_Node *the_node
+)
+{
+ return RB_RIGHT( the_node, Node );
+}
+
+/**
+ * @brief Returns a reference to the right child pointer of the red-black tree
+ * node.
+ */
+RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Right_reference(
+ RBTree_Node *the_node
+)
+{
+ return &RB_RIGHT( the_node, Node );
+}
+
+/**
+ * @brief Checks if the RBTree is empty.
+ *
+ * This function returns true if there are no nodes on @a the_rbtree and
+ * false otherwise.
+ *
+ * @param[in] the_rbtree is the rbtree to be operated upon.
+ *
+ * @retval true There are no nodes on @a the_rbtree.
+ * @retval false There are nodes on @a the_rbtree.
+ */
+RTEMS_INLINE_ROUTINE bool _RBTree_Is_empty(
+ const RBTree_Control *the_rbtree
+)
+{
+ return RB_EMPTY( the_rbtree );
+}
+
+/**
+ * @brief Returns true if this node is the root node of a red-black tree, and
+ * false otherwise.
+ *
+ * The root node may change after insert or extract operations. In case the
+ * node is not a node of a tree, then this function yields unpredictable
+ * results.
+ *
+ * @param[in] the_node The node of interest.
+ *
+ * @retval true The node is the root node.
+ * @retval false Otherwise.
+ *
+ * @see _RBTree_Root().
+ */
+RTEMS_INLINE_ROUTINE bool _RBTree_Is_root(
+ const RBTree_Node *the_node
+)
+{
+ return _RBTree_Parent( the_node ) == NULL;
+}
+
+/**
+ * @brief Initialize this RBTree as empty.
+ *
+ * This routine initializes @a the_rbtree to contain zero nodes.
+ */
+RTEMS_INLINE_ROUTINE void _RBTree_Initialize_empty(
+ RBTree_Control *the_rbtree
+)
+{
+ RB_INIT( the_rbtree );
+}
+
+/**
+ * @brief Initializes this red-black tree to contain exactly the specified
+ * node.
+ *
+ * @param[in] the_rbtree The red-black tree control.
+ * @param[in] the_node The one and only node.
+ */
+RTEMS_INLINE_ROUTINE void _RBTree_Initialize_one(
+ RBTree_Control *the_rbtree,
+ RBTree_Node *the_node
+)
+{
+ _Assert( _RBTree_Is_node_off_tree( the_node ) );
+ RB_ROOT( the_rbtree ) = the_node;
+ RB_PARENT( the_node, Node ) = NULL;
+ RB_LEFT( the_node, Node ) = NULL;
+ RB_RIGHT( the_node, Node ) = NULL;
+ RB_COLOR( the_node, Node ) = RB_BLACK;
+}
+
+/**
+ * @brief Returns the minimum node of the red-black tree.
+ *
+ * @param[in] the_rbtree The red-black tree control.
+ *
+ * @retval NULL The red-black tree is empty.
+ * @retval node The minimum node.
+ */
+RBTree_Node *_RBTree_Minimum( const RBTree_Control *the_rbtree );
+
+/**
+ * @brief Returns the maximum node of the red-black tree.
+ *
+ * @param[in] the_rbtree The red-black tree control.
+ *
+ * @retval NULL The red-black tree is empty.
+ * @retval node The maximum node.
+ */
+RBTree_Node *_RBTree_Maximum( const RBTree_Control *the_rbtree );
+
+/**
+ * @brief Returns the predecessor of a node.
+ *
+ * @param[in] node is the node.
+ *
+ * @retval NULL The predecessor does not exist. Otherwise it returns
+ * the predecessor node.
+ */
+RBTree_Node *_RBTree_Predecessor( const RBTree_Node *node );
+
+/**
+ * @brief Returns the successor of a node.
+ *
+ * @param[in] node is the node.
+ *
+ * @retval NULL The successor does not exist. Otherwise the successor node.
+ */
+RBTree_Node *_RBTree_Successor( const RBTree_Node *node );
+
+/**
+ * @brief Replaces a node in the red-black tree without a rebalance.
+ *
+ * @param[in] the_rbtree The red-black tree control.
+ * @param[in] victim The victim node.
+ * @param[in] replacement The replacement node.
+ */
+void _RBTree_Replace_node(
+ RBTree_Control *the_rbtree,
+ RBTree_Node *victim,
+ RBTree_Node *replacement
+);
+
+/**
+ * @brief Inserts the node into the red-black tree.
+ *
+ * @param the_rbtree The red-black tree control.
+ * @param the_node The node to insert.
+ * @param key The key of the node to insert. This key must be equal to the key
+ * stored in the node to insert. The separate key parameter is provided for
+ * two reasons. Firstly, it allows sharing the less operator with
+ * _RBTree_Find_inline(). Secondly, the compiler may generate better code if
+ * the key is stored in a local variable.
+ * @param less Must return true if the specified key is less than the key of
+ * the node, otherwise false.
+ *
+ * @retval true The inserted node is the new minimum node according to the
+ * specified less order function.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _RBTree_Insert_inline(
+ RBTree_Control *the_rbtree,
+ RBTree_Node *the_node,
+ const void *key,
+ bool ( *less )( const void *, const RBTree_Node * )
+)
+{
+ RBTree_Node **link;
+ RBTree_Node *parent;
+ bool is_new_minimum;
+
+ link = _RBTree_Root_reference( the_rbtree );
+ parent = NULL;
+ is_new_minimum = true;
+
+ while ( *link != NULL ) {
+ parent = *link;
+
+ if ( ( *less )( key, parent ) ) {
+ link = _RBTree_Left_reference( parent );
+ } else {
+ link = _RBTree_Right_reference( parent );
+ is_new_minimum = false;
+ }
+ }
+
+ _RBTree_Add_child( the_node, parent, link );
+ _RBTree_Insert_color( the_rbtree, the_node );
+ return is_new_minimum;
+}
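+
+/*
+ * Illustration (editor's sketch): inserting into a tree of the hypothetical
+ * Some_Node type from the example above, with a key based less function as
+ * required by _RBTree_Insert_inline().
+ *
+ * @code
+ * static bool _Some_Key_less( const void *key, const RBTree_Node *node )
+ * {
+ *   const Some_Node *the_node = RTEMS_CONTAINER_OF( node, Some_Node, Node );
+ *
+ *   return *(const int *) key < the_node->value;
+ * }
+ *
+ * void _Some_Insert_inline( RBTree_Control *tree, Some_Node *the_node )
+ * {
+ *   int key = the_node->value;
+ *
+ *   _RBTree_Insert_inline( tree, &the_node->Node, &key, _Some_Key_less );
+ * }
+ * @endcode
+ */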
+
+/**
+ * @brief Finds an object in the red-black tree with the specified key.
+ *
+ * @param the_rbtree The red-black tree control.
+ * @param key The key to look for.
+ * @param equal Must return true if the specified key equals the key of the
+ * node, otherwise false.
+ * @param less Must return true if the specified key is less than the key of
+ * the node, otherwise false.
+ * @param map In case a node with the specified key is found, then this
+ * function is called to map the node to the object returned. Usually it
+ * performs some offset operation via RTEMS_CONTAINER_OF() to map the node to
+ * its containing object. Thus, the return type is a void pointer and not a
+ * red-black tree node.
+ *
+ * @retval object An object with the specified key.
+ * @retval NULL No object with the specified key exists in the red-black tree.
+ */
+RTEMS_INLINE_ROUTINE void *_RBTree_Find_inline(
+ const RBTree_Control *the_rbtree,
+ const void *key,
+ bool ( *equal )( const void *, const RBTree_Node * ),
+ bool ( *less )( const void *, const RBTree_Node * ),
+ void *( *map )( RBTree_Node * )
+)
+{
+ RBTree_Node * const *link;
+ RBTree_Node *parent;
+
+ link = _RBTree_Root_const_reference( the_rbtree );
+ parent = NULL;
+
+ while ( *link != NULL ) {
+ parent = *link;
+
+ if ( ( *equal )( key, parent ) ) {
+ return ( *map )( parent );
+ } else if ( ( *less )( key, parent ) ) {
+ link = _RBTree_Left_reference( parent );
+ } else {
+ link = _RBTree_Right_reference( parent );
+ }
+ }
+
+ return NULL;
+}
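+
+/*
+ * Illustration (editor's sketch): finding a Some_Node by value, reusing the
+ * hypothetical Some_Node type and the key based less function from the
+ * examples above.
+ *
+ * @code
+ * static bool _Some_Key_equal( const void *key, const RBTree_Node *node )
+ * {
+ *   const Some_Node *the_node = RTEMS_CONTAINER_OF( node, Some_Node, Node );
+ *
+ *   return *(const int *) key == the_node->value;
+ * }
+ *
+ * static void *_Some_Map( RBTree_Node *node )
+ * {
+ *   return RTEMS_CONTAINER_OF( node, Some_Node, Node );
+ * }
+ *
+ * Some_Node *_Some_Find( const RBTree_Control *tree, int value )
+ * {
+ *   return _RBTree_Find_inline(
+ *     tree,
+ *     &value,
+ *     _Some_Key_equal,
+ *     _Some_Key_less,
+ *     _Some_Map
+ *   );
+ * }
+ * @endcode
+ */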
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/rbtreeimpl.h b/cpukit/include/rtems/score/rbtreeimpl.h
new file mode 100644
index 0000000000..bf92e29228
--- /dev/null
+++ b/cpukit/include/rtems/score/rbtreeimpl.h
@@ -0,0 +1,72 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines Associated with Red-Black Trees
+ *
+ * This include file contains the bodies of the routines which are
+ * associated with Red-Black Trees and inlined.
+ *
+ * @note The routines in this file are ordered from simple
+ * to complex. No other RBTree Handler routine is referenced
+ * unless it has already been defined.
+ */
+
+/*
+ * Copyright (c) 2010-2012 Gedare Bloom.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_RBTREEIMPL_H
+#define _RTEMS_SCORE_RBTREEIMPL_H
+
+#include <rtems/score/rbtree.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreRBTree
+ */
+/**@{**/
+
+/**
+ * @brief Red-black tree visitor.
+ *
+ * @param[in] node The node.
+ * @param[in] visitor_arg The visitor argument.
+ *
+ * @retval true Stop the iteration.
+ * @retval false Continue the iteration.
+ *
+ * @see _RBTree_Iterate().
+ */
+typedef bool (*RBTree_Visitor)(
+ const RBTree_Node *node,
+ void *visitor_arg
+);
+
+/**
+ * @brief Red-black tree iteration.
+ *
+ * @param[in] rbtree The red-black tree.
+ * @param[in] visitor The visitor.
+ * @param[in] visitor_arg The visitor argument.
+ */
+void _RBTree_Iterate(
+ const RBTree_Control *rbtree,
+ RBTree_Visitor visitor,
+ void *visitor_arg
+);
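+
+/*
+ * Illustration (editor's sketch): counting the nodes of a tree with
+ * _RBTree_Iterate(). The visitor returns false so that the iteration
+ * continues until the whole tree was visited.
+ *
+ * @code
+ * static bool _Count_visitor( const RBTree_Node *node, void *visitor_arg )
+ * {
+ *   size_t *count = visitor_arg;
+ *
+ *   (void) node;
+ *   ++( *count );
+ *   return false;
+ * }
+ *
+ * size_t _Some_Count( const RBTree_Control *rbtree )
+ * {
+ *   size_t count = 0;
+ *
+ *   _RBTree_Iterate( rbtree, _Count_visitor, &count );
+ *   return count;
+ * }
+ * @endcode
+ */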
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/scheduler.h b/cpukit/include/rtems/score/scheduler.h
new file mode 100644
index 0000000000..a6066c8e4a
--- /dev/null
+++ b/cpukit/include/rtems/score/scheduler.h
@@ -0,0 +1,556 @@
+/**
+ * @file rtems/score/scheduler.h
+ *
+ * @brief Constants and Structures Associated with the Scheduler
+ *
+ * This include file contains all the constants and structures associated
+ * with the scheduler.
+ */
+
+/*
+ * Copyright (C) 2010 Gedare Bloom.
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULER_H
+#define _RTEMS_SCORE_SCHEDULER_H
+
+#include <rtems/score/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct Per_CPU_Control;
+
+/**
+ * @defgroup ScoreScheduler Scheduler Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality related to managing sets of threads
+ * that are ready for execution.
+ */
+/**@{*/
+
+typedef struct _Scheduler_Control Scheduler_Control;
+
+/**
+ * @brief The scheduler operations.
+ */
+typedef struct {
+ /** @see _Scheduler_Handler_initialization() */
+ void ( *initialize )( const Scheduler_Control * );
+
+ /** @see _Scheduler_Schedule() */
+ void ( *schedule )( const Scheduler_Control *, Thread_Control *);
+
+ /** @see _Scheduler_Yield() */
+ void ( *yield )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ Scheduler_Node *
+ );
+
+ /** @see _Scheduler_Block() */
+ void ( *block )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ Scheduler_Node *
+ );
+
+ /** @see _Scheduler_Unblock() */
+ void ( *unblock )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ Scheduler_Node *
+ );
+
+ /** @see _Scheduler_Update_priority() */
+ void ( *update_priority )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ Scheduler_Node *
+ );
+
+ /** @see _Scheduler_Map_priority() */
+ Priority_Control ( *map_priority )(
+ const Scheduler_Control *,
+ Priority_Control
+ );
+
+ /** @see _Scheduler_Unmap_priority() */
+ Priority_Control ( *unmap_priority )(
+ const Scheduler_Control *,
+ Priority_Control
+ );
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Ask for help operation.
+ *
+ * @param[in] scheduler The scheduler instance to ask for help.
+ * @param[in] the_thread The thread needing help.
+ * @param[in] node The scheduler node.
+ *
+ * @retval true Ask for help was successful.
+ * @retval false Otherwise.
+ */
+ bool ( *ask_for_help )(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+ );
+
+ /**
+ * @brief Reconsider help operation.
+ *
+ * @param[in] scheduler The scheduler instance to reconsider the help
+ * request.
+ * @param[in] the_thread The thread reconsidering a help request.
+ * @param[in] node The scheduler node.
+ */
+ void ( *reconsider_help_request )(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+ );
+
+ /**
+ * @brief Withdraw node operation.
+ *
+ * @param[in] scheduler The scheduler instance to withdraw the node.
+ * @param[in] the_thread The thread using the node.
+ * @param[in] node The scheduler node to withdraw.
+ * @param[in] next_state The next thread scheduler state in case the node is
+ * scheduled.
+ */
+ void ( *withdraw_node )(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state
+ );
+
+ /**
+ * @brief Add processor operation.
+ *
+ * @param[in] scheduler The scheduler instance to add the processor.
+ * @param[in] idle The idle thread of the processor to add.
+ */
+ void ( *add_processor )(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle
+ );
+
+ /**
+ * @brief Remove processor operation.
+ *
+ * @param[in] scheduler The scheduler instance to remove the processor.
+ * @param[in] cpu The processor to remove.
+ *
+ * @return The idle thread of the removed processor.
+ */
+ Thread_Control *( *remove_processor )(
+ const Scheduler_Control *scheduler,
+ struct Per_CPU_Control *cpu
+ );
+#endif
+
+ /** @see _Scheduler_Node_initialize() */
+ void ( *node_initialize )(
+ const Scheduler_Control *,
+ Scheduler_Node *,
+ Thread_Control *,
+ Priority_Control
+ );
+
+ /** @see _Scheduler_Node_destroy() */
+ void ( *node_destroy )( const Scheduler_Control *, Scheduler_Node * );
+
+ /** @see _Scheduler_Release_job() */
+ void ( *release_job ) (
+ const Scheduler_Control *,
+ Thread_Control *,
+ Priority_Node *,
+ uint64_t,
+ Thread_queue_Context *
+ );
+
+ /** @see _Scheduler_Cancel_job() */
+ void ( *cancel_job ) (
+ const Scheduler_Control *,
+ Thread_Control *,
+ Priority_Node *,
+ Thread_queue_Context *
+ );
+
+ /** @see _Scheduler_Tick() */
+ void ( *tick )( const Scheduler_Control *, Thread_Control * );
+
+ /** @see _Scheduler_Start_idle() */
+ void ( *start_idle )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ struct Per_CPU_Control *
+ );
+
+#if defined(RTEMS_SMP)
+ /** @see _Scheduler_Set_affinity() */
+ bool ( *set_affinity )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ Scheduler_Node *,
+ const Processor_mask *
+ );
+#endif
+} Scheduler_Operations;
+
+/**
+ * @brief Scheduler context.
+ *
+ * The scheduler context of a particular scheduler implementation must place
+ * this structure at the beginning of its context structure.
+ */
+typedef struct Scheduler_Context {
+ /**
+ * @brief Lock to protect this scheduler instance.
+ */
+ ISR_LOCK_MEMBER( Lock )
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The set of processors owned by this scheduler instance.
+ */
+ Processor_mask Processors;
+#endif
+} Scheduler_Context;
+
+/**
+ * @brief Scheduler control.
+ */
+struct _Scheduler_Control {
+ /**
+ * @brief Reference to a statically allocated scheduler context.
+ */
+ Scheduler_Context *context;
+
+ /**
+ * @brief The scheduler operations.
+ */
+ Scheduler_Operations Operations;
+
+ /**
+ * @brief The maximum priority value of this scheduler.
+ *
+ * It defines the lowest (least important) thread priority for this
+ * scheduler. For example the idle threads have this priority.
+ */
+ Priority_Control maximum_priority;
+
+ /**
+ * @brief The scheduler name.
+ */
+ uint32_t name;
+};
+
+/**
+ * @brief Registered schedulers.
+ *
+ * Application provided via <rtems/confdefs.h>.
+ *
+ * @see _Scheduler_Count.
+ */
+extern const Scheduler_Control _Scheduler_Table[];
+
+/**
+ * @brief Count of registered schedulers.
+ *
+ * Application provided via <rtems/confdefs.h> on SMP configurations.
+ *
+ * It is very important that this is a compile-time constant on uni-processor
+ * configurations (in this case RTEMS_SMP is not defined) so that the compiler
+ * can optimize some loops away.
+ *
+ * @see _Scheduler_Table.
+ */
+#if defined(RTEMS_SMP)
+ extern const size_t _Scheduler_Count;
+#else
+ #define _Scheduler_Count ( (size_t) 1 )
+#endif
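+
+/*
+ * Illustration (editor's sketch): iterating over all registered schedulers.
+ * On uni-processor configurations _Scheduler_Count is the constant 1, so
+ * the compiler can eliminate the loop overhead entirely. The
+ * do_something_with() function is a placeholder for application code.
+ *
+ * @code
+ * size_t i;
+ *
+ * for ( i = 0 ; i < _Scheduler_Count ; ++i ) {
+ *   const Scheduler_Control *scheduler = &_Scheduler_Table[ i ];
+ *
+ *   do_something_with( scheduler );
+ * }
+ * @endcode
+ */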
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The scheduler assignment default attributes.
+ */
+ #define SCHEDULER_ASSIGN_DEFAULT UINT32_C(0x0)
+
+ /**
+ * @brief The presence of this processor is optional.
+ */
+ #define SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL SCHEDULER_ASSIGN_DEFAULT
+
+ /**
+ * @brief The presence of this processor is mandatory.
+ */
+ #define SCHEDULER_ASSIGN_PROCESSOR_MANDATORY UINT32_C(0x1)
+
+ /**
+ * @brief Scheduler assignment.
+ */
+ typedef struct {
+ /**
+ * @brief The scheduler for this processor.
+ */
+ const Scheduler_Control *scheduler;
+
+ /**
+ * @brief The scheduler assignment attributes.
+ *
+ * Use @ref SCHEDULER_ASSIGN_DEFAULT to select default attributes.
+ *
+ * The presence of a processor can be
+ * - @ref SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL, or
+ * - @ref SCHEDULER_ASSIGN_PROCESSOR_MANDATORY.
+ */
+ uint32_t attributes;
+ } Scheduler_Assignment;
+
+ /**
+ * @brief The scheduler assignments.
+ *
+ * The length of this array must be equal to the maximum processors.
+ *
+ * Application provided via <rtems/confdefs.h>.
+ *
+ * @see _Scheduler_Table and rtems_configuration_get_maximum_processors().
+ */
+ extern const Scheduler_Assignment _Scheduler_Initial_assignments[];
+#endif
+
+/**
+ * @brief Returns the scheduler internal thread priority mapped by
+ * SCHEDULER_PRIORITY_MAP().
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] priority The user visible thread priority.
+ *
+ * @return The scheduler internal thread priority.
+ */
+Priority_Control _Scheduler_default_Map_priority(
+ const Scheduler_Control *scheduler,
+ Priority_Control priority
+);
+
+/**
+ * @brief Returns the user visible thread priority unmapped by
+ * SCHEDULER_PRIORITY_UNMAP().
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] priority The scheduler internal thread priority.
+ *
+ * @return The user visible thread priority.
+ */
+Priority_Control _Scheduler_default_Unmap_priority(
+ const Scheduler_Control *scheduler,
+ Priority_Control priority
+);
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Does nothing.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] the_thread Unused.
+ * @param[in] node Unused.
+ *
+ * @retval false Always.
+ */
+ bool _Scheduler_default_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+ );
+
+ /**
+ * @brief Does nothing.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] the_thread Unused.
+ * @param[in] node Unused.
+ */
+ void _Scheduler_default_Reconsider_help_request(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+ );
+
+ /**
+ * @brief Does nothing.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] the_thread Unused.
+ * @param[in] node Unused.
+ * @param[in] next_state Unused.
+ */
+ void _Scheduler_default_Withdraw_node(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state
+ );
+
+ #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
+ _Scheduler_default_Ask_for_help, \
+ _Scheduler_default_Reconsider_help_request, \
+ _Scheduler_default_Withdraw_node, \
+ NULL, \
+ NULL,
+#else
+ #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP
+#endif
+
+/**
+ * @brief Does nothing.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] the_thread Unused.
+ */
+void _Scheduler_default_Schedule(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread
+);
+
+/**
+ * @brief Performs the scheduler base node initialization.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] node The node to initialize.
+ * @param[in] the_thread Unused.
+ * @param[in] priority The thread priority.
+ */
+void _Scheduler_default_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+/**
+ * @brief Does nothing.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] node Unused.
+ */
+void _Scheduler_default_Node_destroy(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Does nothing.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] the_thread Unused.
+ * @param[in] priority_node Unused.
+ * @param[in] deadline Unused.
+ * @param[in] queue_context Unused.
+ */
+void _Scheduler_default_Release_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Does nothing.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] the_thread Unused.
+ * @param[in] priority_node Unused.
+ * @param[in] queue_context Unused.
+ */
+void _Scheduler_default_Cancel_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Performs tick operations depending on the CPU budget algorithm for
+ * each executing thread.
+ *
+ * This routine is invoked as part of processing each clock tick.
+ *
+ * @param[in] scheduler The scheduler.
+ * @param[in] executing An executing thread.
+ */
+void _Scheduler_default_Tick(
+ const Scheduler_Control *scheduler,
+ Thread_Control *executing
+);
+
+/**
+ * @brief Starts an idle thread.
+ *
+ * @param[in] scheduler The scheduler.
+ * @param[in] the_thread An idle thread.
+ * @param[in] cpu This parameter is unused.
+ */
+void _Scheduler_default_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ struct Per_CPU_Control *cpu
+);
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Default implementation of the set affinity scheduler operation.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] thread The associated thread.
+ * @param[in] node The home scheduler node of the associated thread.
+ * @param[in] affinity The new processor affinity set for the thread.
+ *
+ * @retval true The processor set of the scheduler is a subset of the affinity set.
+ * @retval false Otherwise.
+ */
+ bool _Scheduler_default_Set_affinity(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ const Processor_mask *affinity
+ );
+
+ #define SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ , _Scheduler_default_Set_affinity
+#else
+ #define SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY
+#endif
+
+/**
+ * @brief This defines the lowest (least important) thread priority of the
+ * first scheduler instance.
+ */
+#define PRIORITY_MAXIMUM ( _Scheduler_Table[ 0 ].maximum_priority )
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/schedulercbs.h b/cpukit/include/rtems/score/schedulercbs.h
new file mode 100644
index 0000000000..635abce125
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulercbs.h
@@ -0,0 +1,346 @@
+/**
+ * @file rtems/score/schedulercbs.h
+ *
+ * @brief Thread manipulation for the CBS scheduler
+ *
+ * This include file contains all the constants and structures associated
+ * with the manipulation of threads for the CBS scheduler.
+ */
+
+/*
+ * Copyright (c) 2011 Petr Benes.
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERCBS_H
+#define _RTEMS_SCORE_SCHEDULERCBS_H
+
+#include <rtems/score/chain.h>
+#include <rtems/score/priority.h>
+#include <rtems/score/scheduler.h>
+#include <rtems/score/rbtree.h>
+#include <rtems/score/scheduleredf.h>
+#include <rtems/rtems/signal.h>
+#include <rtems/rtems/timer.h>
+#include <rtems/score/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSchedulerCBS CBS Scheduler
+ *
+ * @ingroup ScoreScheduler
+ */
+/**@{*/
+
+#define SCHEDULER_CBS_MAXIMUM_PRIORITY SCHEDULER_EDF_MAXIMUM_PRIORITY
+
+/**
+ * Entry points for the Constant Bandwidth Server Scheduler.
+ *
+ * @note: The CBS scheduler is an enhancement of the EDF scheduler,
+ * therefore some routines are similar.
+ */
+#define SCHEDULER_CBS_ENTRY_POINTS \
+ { \
+ _Scheduler_EDF_Initialize, /* initialize entry point */ \
+ _Scheduler_EDF_Schedule, /* schedule entry point */ \
+ _Scheduler_EDF_Yield, /* yield entry point */ \
+ _Scheduler_EDF_Block, /* block entry point */ \
+ _Scheduler_CBS_Unblock, /* unblock entry point */ \
+ _Scheduler_EDF_Update_priority, /* update priority entry point */ \
+ _Scheduler_EDF_Map_priority, /* map priority entry point */ \
+ _Scheduler_EDF_Unmap_priority, /* unmap priority entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
+ _Scheduler_CBS_Node_initialize, /* node initialize entry point */ \
+ _Scheduler_default_Node_destroy, /* node destroy entry point */ \
+ _Scheduler_CBS_Release_job, /* new period of task */ \
+ _Scheduler_CBS_Cancel_job, /* cancel period of task */ \
+ _Scheduler_default_Tick, /* tick entry point */ \
+ _Scheduler_default_Start_idle /* start idle entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ }
+
+/* Return values for CBS server. */
+#define SCHEDULER_CBS_OK 0
+#define SCHEDULER_CBS_ERROR_GENERIC -16
+#define SCHEDULER_CBS_ERROR_NO_MEMORY -17
+#define SCHEDULER_CBS_ERROR_INVALID_PARAMETER -18
+#define SCHEDULER_CBS_ERROR_UNAUTHORIZED -19
+#define SCHEDULER_CBS_ERROR_UNIMPLEMENTED -20
+#define SCHEDULER_CBS_ERROR_MISSING_COMPONENT -21
+#define SCHEDULER_CBS_ERROR_INCONSISTENT_STATE -22
+#define SCHEDULER_CBS_ERROR_SYSTEM_OVERLOAD -23
+#define SCHEDULER_CBS_ERROR_INTERNAL_ERROR -24
+#define SCHEDULER_CBS_ERROR_NOT_FOUND -25
+#define SCHEDULER_CBS_ERROR_FULL -26
+#define SCHEDULER_CBS_ERROR_EMPTY -27
+#define SCHEDULER_CBS_ERROR_NOSERVER SCHEDULER_CBS_ERROR_NOT_FOUND
+
+/** Maximum number of simultaneous servers. */
+extern const uint32_t _Scheduler_CBS_Maximum_servers;
+
+/** Server id. */
+typedef uint32_t Scheduler_CBS_Server_id;
+
+/** Callback function invoked when a budget overrun of a task occurs. */
+typedef void (*Scheduler_CBS_Budget_overrun)(
+ Scheduler_CBS_Server_id server_id
+);
+
+/**
+ * This structure handles server parameters.
+ */
+typedef struct {
+ /** Relative deadline of the server. */
+ time_t deadline;
+ /** Budget (computation time) of the server. */
+ time_t budget;
+} Scheduler_CBS_Parameters;
+
+/**
+ * This structure represents a time server.
+ */
+typedef struct {
+ /**
+ * Task id.
+ *
+ * @note: The current implementation of CBS handles only one task per server.
+ */
+ rtems_id task_id;
+  /** Server parameters. */
+ Scheduler_CBS_Parameters parameters;
+ /** Callback function invoked when a budget overrun occurs. */
+ Scheduler_CBS_Budget_overrun cbs_budget_overrun;
+
+ /**
+ * @brief Indicates if this CBS server is initialized.
+ *
+ * @see _Scheduler_CBS_Create_server() and _Scheduler_CBS_Destroy_server().
+ */
+ bool initialized;
+} Scheduler_CBS_Server;
+
+/**
+ * This structure handles CBS specific data of a thread.
+ */
+typedef struct {
+ /** EDF scheduler specific data of a task. */
+ Scheduler_EDF_Node Base;
+ /** CBS server specific data of a task. */
+ Scheduler_CBS_Server *cbs_server;
+
+ Priority_Node *deadline_node;
+} Scheduler_CBS_Node;
+
+
+/**
+ * List of servers. A @a Scheduler_CBS_Server_id is an index into the
+ * @a _Scheduler_CBS_Server_list array of servers.
+ */
+extern Scheduler_CBS_Server _Scheduler_CBS_Server_list[];
+
+void _Scheduler_CBS_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_CBS_Release_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
+);
+
+void _Scheduler_CBS_Cancel_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Initializes the CBS library.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Initialize(void);
+
+/**
+ * @brief Attach a task to an already existing server.
+ *
+ * Attach a task to an already existing server.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Attach_thread (
+ Scheduler_CBS_Server_id server_id,
+ rtems_id task_id
+);
+
+/**
+ * @brief Detach from the CBS Server.
+ *
+ * Detach from the CBS Server.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Detach_thread (
+ Scheduler_CBS_Server_id server_id,
+ rtems_id task_id
+);
+
+/**
+ * @brief Cleanup resources associated to the CBS Library.
+ *
+ * Cleanup resources associated to the CBS Library.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Cleanup (void);
+
+/**
+ * @brief Create a new server with specified parameters.
+ *
+ * Create a new server with specified parameters.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Create_server (
+ Scheduler_CBS_Parameters *params,
+ Scheduler_CBS_Budget_overrun budget_overrun_callback,
+ rtems_id *server_id
+);
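+
+/*
+ * Illustration (editor's sketch, with hypothetical parameter values and a
+ * hypothetical task_id): creating a CBS server and attaching a task to it.
+ * A NULL budget overrun callback is assumed to be acceptable here.
+ *
+ * @code
+ * Scheduler_CBS_Parameters params = { 100, 20 };
+ * rtems_id server_id;
+ * int status;
+ *
+ * status = _Scheduler_CBS_Create_server( &params, NULL, &server_id );
+ *
+ * if ( status == SCHEDULER_CBS_OK ) {
+ *   status = _Scheduler_CBS_Attach_thread( server_id, task_id );
+ * }
+ * @endcode
+ */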
+
+/**
+ * @brief Detach all tasks from a server and destroy it.
+ *
+ * Detach all tasks from a server and destroy it.
+ *
+ * @param[in] server_id is the ID of the server
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Destroy_server (
+ Scheduler_CBS_Server_id server_id
+);
+
+/**
+ * @brief Retrieve the approved budget.
+ *
+ * Retrieve the budget that has been approved for the subsequent
+ * server instances.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Get_approved_budget (
+ Scheduler_CBS_Server_id server_id,
+ time_t *approved_budget
+);
+
+/**
+ * @brief Retrieve remaining budget for the current server instance.
+ *
+ * Retrieve remaining budget for the current server instance.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Get_remaining_budget (
+ Scheduler_CBS_Server_id server_id,
+ time_t *remaining_budget
+);
+
+/**
+ * @brief Get relative time info.
+ *
+ * Retrieve time info relative to @a server_id. The server status code is returned.
+ *
+ * @param[in] server_id is the server to get the status code from.
+ * @param[in] exec_time is the execution time.
+ * @param[in] abs_time is apparently unused.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Get_execution_time (
+ Scheduler_CBS_Server_id server_id,
+ time_t *exec_time,
+ time_t *abs_time
+);
+
+/**
+ * @brief Retrieve CBS scheduling parameters.
+ *
+ * Retrieve CBS scheduling parameters.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Get_parameters (
+ Scheduler_CBS_Server_id server_id,
+ Scheduler_CBS_Parameters *params
+);
+
+/**
+ * @brief Get a thread server id.
+ *
+ * Get a thread server id, or SCHEDULER_CBS_ERROR_NOT_FOUND if it is not
+ * attached to any server.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Get_server_id (
+ rtems_id task_id,
+ Scheduler_CBS_Server_id *server_id
+);
+
+/**
+ * @brief Set parameters for CBS scheduling.
+ *
+ * Change CBS scheduling parameters.
+ *
+ * @param[in] server_id is the ID of the server.
+ * @param[in] parameters are the parameters to set.
+ *
+ * @retval status code.
+ */
+int _Scheduler_CBS_Set_parameters (
+ Scheduler_CBS_Server_id server_id,
+ Scheduler_CBS_Parameters *parameters
+);
+
+/**
+ * @brief Invoked when a limited time quantum is exceeded.
+ *
+ * This routine is invoked when a limited time quantum is exceeded.
+ */
+void _Scheduler_CBS_Budget_callout(
+ Thread_Control *the_thread
+);
+
+/**
+ * @brief Initializes a CBS specific scheduler node of @a the_thread.
+ */
+void _Scheduler_CBS_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/schedulercbsimpl.h b/cpukit/include/rtems/score/schedulercbsimpl.h
new file mode 100644
index 0000000000..ed75979f75
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulercbsimpl.h
@@ -0,0 +1,59 @@
+/**
+ * @file
+ *
+ * @brief CBS Scheduler Implementation
+ *
+ * @ingroup ScoreSchedulerCBS
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERCBSIMPL_H
+#define _RTEMS_SCORE_SCHEDULERCBSIMPL_H
+
+#include <rtems/score/schedulercbs.h>
+#include <rtems/score/schedulerimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreSchedulerCBS
+ *
+ * @{
+ */
+
+RTEMS_INLINE_ROUTINE Scheduler_CBS_Node *_Scheduler_CBS_Thread_get_node(
+ Thread_Control *the_thread
+)
+{
+ return (Scheduler_CBS_Node *) _Thread_Scheduler_get_home_node( the_thread );
+}
+
+RTEMS_INLINE_ROUTINE Scheduler_CBS_Node *_Scheduler_CBS_Node_downcast(
+ Scheduler_Node *node
+)
+{
+ return (Scheduler_CBS_Node *) node;
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERCBSIMPL_H */
diff --git a/cpukit/include/rtems/score/scheduleredf.h b/cpukit/include/rtems/score/scheduleredf.h
new file mode 100644
index 0000000000..91c303ca56
--- /dev/null
+++ b/cpukit/include/rtems/score/scheduleredf.h
@@ -0,0 +1,197 @@
+/**
+ * @file rtems/score/scheduleredf.h
+ *
+ * @brief Data Related to the Manipulation of Threads for the EDF Scheduler
+ *
+ * This include file contains all the constants and structures associated
+ * with the manipulation of threads for the EDF scheduler.
+ */
+
+/*
+ * Copyright (c) 2011 Petr Benes.
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULEREDF_H
+#define _RTEMS_SCORE_SCHEDULEREDF_H
+
+#include <rtems/score/priority.h>
+#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerpriority.h>
+#include <rtems/score/rbtree.h>
+
+#include <limits.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSchedulerEDF EDF Scheduler
+ *
+ * @ingroup ScoreScheduler
+ */
+/**@{*/
+
+/*
+ * Actually the EDF scheduler supports a maximum priority of
+ * 0x7fffffffffffffff, but the user API is limited to uint32_t or int for
+ * thread priorities. Ignore ILP64 targets for now.
+ */
+#define SCHEDULER_EDF_MAXIMUM_PRIORITY INT_MAX
+
+/**
+ * Entry points for the Earliest Deadline First Scheduler.
+ */
+#define SCHEDULER_EDF_ENTRY_POINTS \
+ { \
+ _Scheduler_EDF_Initialize, /* initialize entry point */ \
+ _Scheduler_EDF_Schedule, /* schedule entry point */ \
+ _Scheduler_EDF_Yield, /* yield entry point */ \
+ _Scheduler_EDF_Block, /* block entry point */ \
+ _Scheduler_EDF_Unblock, /* unblock entry point */ \
+ _Scheduler_EDF_Update_priority, /* update priority entry point */ \
+ _Scheduler_EDF_Map_priority, /* map priority entry point */ \
+ _Scheduler_EDF_Unmap_priority, /* unmap priority entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
+ _Scheduler_EDF_Node_initialize, /* node initialize entry point */ \
+ _Scheduler_default_Node_destroy, /* node destroy entry point */ \
+ _Scheduler_EDF_Release_job, /* new period of task */ \
+ _Scheduler_EDF_Cancel_job, /* cancel period of task */ \
+ _Scheduler_default_Tick, /* tick entry point */ \
+ _Scheduler_default_Start_idle /* start idle entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ }
+
+typedef struct {
+ /**
+ * @brief Basic scheduler context.
+ */
+ Scheduler_Context Base;
+
+ /**
+ * Top of the ready queue.
+ */
+ RBTree_Control Ready;
+} Scheduler_EDF_Context;
+
+/**
+ * @brief Scheduler node specialization for EDF schedulers.
+ */
+typedef struct {
+ /**
+ * @brief Basic scheduler node.
+ */
+ Scheduler_Node Base;
+
+ /**
+ * Rbtree node related to this thread.
+ */
+ RBTree_Node Node;
+
+ /**
+ * @brief The thread priority currently used for this scheduler instance.
+ */
+ Priority_Control priority;
+} Scheduler_EDF_Node;
+
+/**
+ * @brief Initialize EDF scheduler.
+ *
+ * This routine initializes the EDF scheduler.
+ *
+ * @param[in] scheduler The scheduler instance.
+ */
+void _Scheduler_EDF_Initialize( const Scheduler_Control *scheduler );
+
+void _Scheduler_EDF_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Sets the heir thread to be the next ready thread
+ * in the rbtree ready queue.
+ *
+ * This kernel routine sets the heir thread to be the next ready thread
+ * in the rbtree ready queue.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] the_thread being scheduled.
+ */
+void _Scheduler_EDF_Schedule(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread
+);
+
+/**
+ * @brief Initializes an EDF specific scheduler node of @a the_thread.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] node being initialized.
+ * @param[in] the_thread the thread of the node.
+ * @param[in] priority The thread priority.
+ */
+void _Scheduler_EDF_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+void _Scheduler_EDF_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+Priority_Control _Scheduler_EDF_Map_priority(
+ const Scheduler_Control *scheduler,
+ Priority_Control priority
+);
+
+Priority_Control _Scheduler_EDF_Unmap_priority(
+ const Scheduler_Control *scheduler,
+ Priority_Control priority
+);
+
+void _Scheduler_EDF_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_Release_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
+);
+
+void _Scheduler_EDF_Cancel_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/scheduleredfimpl.h b/cpukit/include/rtems/score/scheduleredfimpl.h
new file mode 100644
index 0000000000..f6bd7d8384
--- /dev/null
+++ b/cpukit/include/rtems/score/scheduleredfimpl.h
@@ -0,0 +1,164 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSchedulerEDF
+ *
+ * @brief EDF Scheduler Implementation
+ */
+
+/*
+ * Copyright (c) 2011 Petr Benes.
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULEREDFIMPL_H
+#define _RTEMS_SCORE_SCHEDULEREDFIMPL_H
+
+#include <rtems/score/scheduleredf.h>
+#include <rtems/score/schedulerimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreSchedulerEDF
+ *
+ * @{
+ */
+
+/**
+ * This is the most significant bit of the Priority_Control type. It
+ * distinguishes threads which are deadline driven (priority
+ * represented by a number lower than @a SCHEDULER_EDF_PRIO_MSB) from
+ * those that do not have any deadlines and thus are considered
+ * background tasks.
+ */
+#define SCHEDULER_EDF_PRIO_MSB 0x8000000000000000
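+
+/*
+ * Illustration (editor's sketch): the background task test reduces to a
+ * check of this bit. The helper macro below is hypothetical and not part of
+ * this header.
+ *
+ * @code
+ * #define EXAMPLE_EDF_IS_BACKGROUND( priority ) \
+ *   ( ( ( priority ) & SCHEDULER_EDF_PRIO_MSB ) != 0 )
+ * @endcode
+ */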
+
+RTEMS_INLINE_ROUTINE Scheduler_EDF_Context *
+ _Scheduler_EDF_Get_context( const Scheduler_Control *scheduler )
+{
+ return (Scheduler_EDF_Context *) _Scheduler_Get_context( scheduler );
+}
+
+RTEMS_INLINE_ROUTINE Scheduler_EDF_Node *_Scheduler_EDF_Thread_get_node(
+ Thread_Control *the_thread
+)
+{
+ return (Scheduler_EDF_Node *) _Thread_Scheduler_get_home_node( the_thread );
+}
+
+RTEMS_INLINE_ROUTINE Scheduler_EDF_Node *_Scheduler_EDF_Node_downcast(
+ Scheduler_Node *node
+)
+{
+ return (Scheduler_EDF_Node *) node;
+}
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less(
+ const void *left,
+ const RBTree_Node *right
+)
+{
+ const Priority_Control *the_left;
+ const Scheduler_EDF_Node *the_right;
+ Priority_Control prio_left;
+ Priority_Control prio_right;
+
+ the_left = left;
+ the_right = RTEMS_CONTAINER_OF( right, Scheduler_EDF_Node, Node );
+
+ prio_left = *the_left;
+ prio_right = the_right->priority;
+
+ return prio_left < prio_right;
+}
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Priority_less_equal(
+ const void *left,
+ const RBTree_Node *right
+)
+{
+ const Priority_Control *the_left;
+ const Scheduler_EDF_Node *the_right;
+ Priority_Control prio_left;
+ Priority_Control prio_right;
+
+ the_left = left;
+ the_right = RTEMS_CONTAINER_OF( right, Scheduler_EDF_Node, Node );
+
+ prio_left = *the_left;
+ prio_right = the_right->priority;
+
+ return prio_left <= prio_right;
+}
+
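+/*
+ * Note (editor's addition): the insertion below uses the less-or-equal
+ * order rather than strict less. The choice of the order decides where a
+ * new node lands within a group of nodes with equal insert priority and,
+ * together with the encoding of the insert priority, controls the ordering
+ * of threads with equal deadlines.
+ */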
+RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue(
+ Scheduler_EDF_Context *context,
+ Scheduler_EDF_Node *node,
+ Priority_Control insert_priority
+)
+{
+ _RBTree_Insert_inline(
+ &context->Ready,
+ &node->Node,
+ &insert_priority,
+ _Scheduler_EDF_Priority_less_equal
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Extract(
+ Scheduler_EDF_Context *context,
+ Scheduler_EDF_Node *node
+)
+{
+ _RBTree_Extract( &context->Ready, &node->Node );
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Extract_body(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ Scheduler_EDF_Context *context;
+ Scheduler_EDF_Node *the_node;
+
+ context = _Scheduler_EDF_Get_context( scheduler );
+ the_node = _Scheduler_EDF_Node_downcast( node );
+
+ _Scheduler_EDF_Extract( context, the_node );
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Schedule_body(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ bool force_dispatch
+)
+{
+ Scheduler_EDF_Context *context;
+ RBTree_Node *first;
+ Scheduler_EDF_Node *node;
+
+ (void) the_thread;
+
+ context = _Scheduler_EDF_Get_context( scheduler );
+ first = _RBTree_Minimum( &context->Ready );
+ node = RTEMS_CONTAINER_OF( first, Scheduler_EDF_Node, Node );
+
+ _Scheduler_Update_heir( node->Base.owner, force_dispatch );
+}
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/scheduleredfsmp.h b/cpukit/include/rtems/score/scheduleredfsmp.h
new file mode 100644
index 0000000000..018568190e
--- /dev/null
+++ b/cpukit/include/rtems/score/scheduleredfsmp.h
@@ -0,0 +1,200 @@
+/**
+ * @file
+ *
+ * @brief EDF SMP Scheduler API
+ *
+ * @ingroup ScoreSchedulerSMPEDF
+ */
+
+/*
+ * Copyright (c) 2017 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULEREDFSMP_H
+#define _RTEMS_SCORE_SCHEDULEREDFSMP_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/scheduleredf.h>
+#include <rtems/score/schedulersmp.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSchedulerSMPEDF EDF Priority SMP Scheduler
+ *
+ * @ingroup ScoreSchedulerSMP
+ *
+ * @{
+ */
+
+typedef struct {
+ Scheduler_SMP_Node Base;
+
+ /**
+ * @brief Generation number to ensure FIFO/LIFO order for threads of the same
+ * priority across different ready queues.
+ */
+ int64_t generation;
+
+ /**
+ * @brief The ready queue index depending on the processor affinity of the thread.
+ *
+ * The ready queue index zero is used for threads with a one-to-all thread
+ * processor affinity. Threads with a one-to-one processor affinity use the
+ * processor index plus one as the ready queue index.
+ */
+ uint32_t ready_queue_index;
+} Scheduler_EDF_SMP_Node;
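The index rule documented above can be restated as a one-line helper; this is an illustrative sketch, not a function from the RTEMS sources:

#include <stdint.h>

/* Hypothetical helper: queue 0 serves threads that may run on every
 * processor; a thread pinned to processor i uses queue i + 1.
 */
static uint32_t edf_smp_ready_queue_index( int pinned_cpu ) /* -1: all */
{
  return pinned_cpu < 0 ? 0u : (uint32_t) pinned_cpu + 1u;
}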
+
+typedef struct {
+ /**
+   * @brief Chain node for Scheduler_EDF_SMP_Context::Affine_queues.
+ */
+ Chain_Node Node;
+
+ /**
+ * @brief The ready threads of the corresponding affinity.
+ */
+ RBTree_Control Queue;
+
+ /**
+ * @brief The scheduled thread of the corresponding processor.
+ */
+ Scheduler_EDF_SMP_Node *scheduled;
+} Scheduler_EDF_SMP_Ready_queue;
+
+typedef struct {
+ Scheduler_SMP_Context Base;
+
+ /**
+ * @brief Current generation for LIFO (index 0) and FIFO (index 1) ordering.
+ */
+ int64_t generations[ 2 ];
+
+ /**
+ * @brief Chain of ready queues with affine threads to determine the highest
+ * priority ready thread.
+ */
+ Chain_Control Affine_queues;
+
+ /**
+ * @brief A table with ready queues.
+ *
+ * The index zero queue is used for threads with a one-to-all processor
+ * affinity. Index one corresponds to processor index zero, and so on.
+ */
+ Scheduler_EDF_SMP_Ready_queue Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
+} Scheduler_EDF_SMP_Context;
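One plausible realization of the two generation counters above (a sketch under that assumption, not the exact RTEMS scheme): FIFO insertions draw increasing numbers and LIFO insertions decreasing ones, so the pair (priority, generation) totally orders threads of equal priority.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical tie-breaker for equal priorities. */
static int64_t next_generation( int64_t generations[ 2 ], bool append )
{
  return append ? generations[ 1 ]++ : --generations[ 0 ];
}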
+
+#define SCHEDULER_EDF_SMP_ENTRY_POINTS \
+ { \
+ _Scheduler_EDF_SMP_Initialize, \
+ _Scheduler_default_Schedule, \
+ _Scheduler_EDF_SMP_Yield, \
+ _Scheduler_EDF_SMP_Block, \
+ _Scheduler_EDF_SMP_Unblock, \
+ _Scheduler_EDF_SMP_Update_priority, \
+ _Scheduler_EDF_Map_priority, \
+ _Scheduler_EDF_Unmap_priority, \
+ _Scheduler_EDF_SMP_Ask_for_help, \
+ _Scheduler_EDF_SMP_Reconsider_help_request, \
+ _Scheduler_EDF_SMP_Withdraw_node, \
+ _Scheduler_EDF_SMP_Add_processor, \
+ _Scheduler_EDF_SMP_Remove_processor, \
+ _Scheduler_EDF_SMP_Node_initialize, \
+ _Scheduler_default_Node_destroy, \
+ _Scheduler_EDF_Release_job, \
+ _Scheduler_EDF_Cancel_job, \
+ _Scheduler_default_Tick, \
+ _Scheduler_EDF_SMP_Start_idle, \
+ _Scheduler_EDF_SMP_Set_affinity \
+ }
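The table supplies the operation set of a scheduler instance. A configuration sketch follows; the Scheduler_Control member names and order are assumptions here, since the authoritative initializer is generated via <rtems/confdefs.h>:

/* Assumed layout: context pointer, then the operation table. */
static Scheduler_EDF_SMP_Context edf_smp_context;

static const Scheduler_Control edf_smp_scheduler = {
  .context = &edf_smp_context.Base.Base, /* assumes Base nests a Scheduler_Context */
  .Operations = SCHEDULER_EDF_SMP_ENTRY_POINTS
  /* remaining members (maximum priority, name, ...) omitted */
};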
+
+void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler );
+
+void _Scheduler_EDF_SMP_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+void _Scheduler_EDF_SMP_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+bool _Scheduler_EDF_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Reconsider_help_request(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Withdraw_node(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state
+);
+
+void _Scheduler_EDF_SMP_Add_processor(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle
+);
+
+Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
+ const Scheduler_Control *scheduler,
+ struct Per_CPU_Control *cpu
+);
+
+void _Scheduler_EDF_SMP_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle,
+ struct Per_CPU_Control *cpu
+);
+
+bool _Scheduler_EDF_SMP_Set_affinity(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ const Processor_mask *affinity
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_SCHEDULEREDFSMP_H */
diff --git a/cpukit/include/rtems/score/schedulerimpl.h b/cpukit/include/rtems/score/schedulerimpl.h
new file mode 100644
index 0000000000..10c12242a9
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulerimpl.h
@@ -0,0 +1,1203 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines Associated with the Manipulation of the Scheduler
+ *
+ * This inline file contains all of the inlined routines associated with
+ * the manipulation of the scheduler.
+ */
+
+/*
+ * Copyright (C) 2010 Gedare Bloom.
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ * Copyright (c) 2014, 2017 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
+#define _RTEMS_SCORE_SCHEDULERIMPL_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/priorityimpl.h>
+#include <rtems/score/smpimpl.h>
+#include <rtems/score/status.h>
+#include <rtems/score/threadimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreScheduler
+ */
+/**@{**/
+
+/**
+ * @brief Maps a priority value to support the append indicator.
+ */
+#define SCHEDULER_PRIORITY_MAP( priority ) ( ( priority ) << 1 )
+
+/**
+ * @brief Returns the plain priority value.
+ */
+#define SCHEDULER_PRIORITY_UNMAP( priority ) ( ( priority ) >> 1 )
+
+/**
+ * @brief Clears the priority append indicator bit.
+ */
+#define SCHEDULER_PRIORITY_PURIFY( priority ) \
+ ( ( priority ) & ~( (Priority_Control) SCHEDULER_PRIORITY_APPEND_FLAG ) )
+
+/**
+ * @brief Returns the priority control with the append indicator bit set.
+ */
+#define SCHEDULER_PRIORITY_APPEND( priority ) \
+ ( ( priority ) | SCHEDULER_PRIORITY_APPEND_FLAG )
+
+/**
+ * @brief Returns true if the item should be appended to its priority group,
+ * and false if it should be prepended to its priority group.
+ */
+#define SCHEDULER_PRIORITY_IS_APPEND( priority ) \
+ ( ( ( priority ) & SCHEDULER_PRIORITY_APPEND_FLAG ) != 0 )
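A standalone round-trip check of these macros, assuming the layout above: the plain priority is shifted left by one and the append indicator occupies the least-significant bit. SCHEDULER_PRIORITY_APPEND_FLAG is redefined locally to keep the sketch self-contained (in the tree it lives in schedulernodeimpl.h):

#include <assert.h>
#include <stdint.h>

typedef uint64_t Priority_Control;

#define SCHEDULER_PRIORITY_APPEND_FLAG 1
#define MAP( p )    ( ( p ) << 1 )
#define UNMAP( p )  ( ( p ) >> 1 )
#define PURIFY( p ) ( ( p ) & ~(Priority_Control) SCHEDULER_PRIORITY_APPEND_FLAG )
#define APPEND( p ) ( ( p ) | SCHEDULER_PRIORITY_APPEND_FLAG )

int main( void )
{
  Priority_Control prepend = MAP( (Priority_Control) 5 ); /* user priority 5 */
  Priority_Control append  = APPEND( prepend );

  assert( UNMAP( append ) == 5 );        /* unmapping ignores the indicator */
  assert( PURIFY( append ) == prepend ); /* purifying clears it */
  return 0;
}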
+
+/**
+ * @brief Initializes the scheduler to the policy chosen by the user.
+ *
+ * This routine initializes the scheduler to the policy chosen by the user
+ * through confdefs, or to the priority scheduler with ready chains by
+ * default.
+ */
+void _Scheduler_Handler_initialization( void );
+
+RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
+ const Scheduler_Control *scheduler
+)
+{
+ return scheduler->context;
+}
+
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
+ const Per_CPU_Control *cpu
+)
+{
+#if defined(RTEMS_SMP)
+ return cpu->Scheduler.control;
+#else
+ (void) cpu;
+ return &_Scheduler_Table[ 0 ];
+#endif
+}
+
+/**
+ * @brief Acquires the scheduler instance inside a critical section (interrupts
+ * disabled).
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] lock_context The lock context to use for
+ * _Scheduler_Release_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
+ const Scheduler_Control *scheduler,
+ ISR_lock_Context *lock_context
+)
+{
+#if defined(RTEMS_SMP)
+ Scheduler_Context *context;
+
+ context = _Scheduler_Get_context( scheduler );
+ _ISR_lock_Acquire( &context->Lock, lock_context );
+#else
+ (void) scheduler;
+ (void) lock_context;
+#endif
+}
+
+/**
+ * @brief Releases the scheduler instance inside a critical section (interrupts
+ * disabled).
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] lock_context The lock context used for
+ * _Scheduler_Acquire_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
+ const Scheduler_Control *scheduler,
+ ISR_lock_Context *lock_context
+)
+{
+#if defined(RTEMS_SMP)
+ Scheduler_Context *context;
+
+ context = _Scheduler_Get_context( scheduler );
+ _ISR_lock_Release( &context->Lock, lock_context );
+#else
+ (void) scheduler;
+ (void) lock_context;
+#endif
+}
+
+#if defined(RTEMS_SMP)
+void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );
+
+/**
+ * @brief Registers an ask for help request if necessary.
+ *
+ * The actual ask for help operation is carried out during
+ * _Thread_Do_dispatch() on a processor related to the thread. This yields a
+ * better separation of scheduler instances. A thread of one scheduler
+ * instance should not be forced to carry out too much work for threads on
+ * other scheduler instances.
+ *
+ * @param[in] the_thread The thread in need for help.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
+{
+ _Assert( _Thread_State_is_owner( the_thread ) );
+
+ if ( the_thread->Scheduler.helping_nodes > 0 ) {
+ _Scheduler_Request_ask_for_help( the_thread );
+ }
+}
+#endif
+
+/**
+ * The preferred method to add a new scheduler is to define the jump table
+ * entries and add a case to the _Scheduler_Initialize routine.
+ *
+ * Generic scheduling implementations that rely on the ready queue only can
+ * be found in the _Scheduler_queue_XXX functions.
+ */
+
+/*
+ * Passing the Scheduler_Control* to these functions allows multiple
+ * schedulers to exist simultaneously, which could be useful on an SMP
+ * system. Remote schedulers may then be accessible. How to protect such
+ * accesses remains an open problem.
+ */
+
+/**
+ * @brief General scheduling decision.
+ *
+ * This kernel routine implements the scheduling decision logic for
+ * the scheduler. It does NOT dispatch.
+ *
+ * @param[in] the_thread The thread which state changed previously.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
+{
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler = _Thread_Scheduler_get_home( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+
+ ( *scheduler->Operations.schedule )( scheduler, the_thread );
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
+}
+
+/**
+ * @brief Scheduler yield with a particular thread.
+ *
+ * This routine is invoked when a thread wishes to voluntarily transfer control
+ * of the processor to another thread.
+ *
+ * @param[in] the_thread The yielding thread.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
+{
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler = _Thread_Scheduler_get_home( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.yield )(
+ scheduler,
+ the_thread,
+ _Thread_Scheduler_get_home_node( the_thread )
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+}
+
+/**
+ * @brief Blocks a thread with respect to the scheduler.
+ *
+ * This routine removes @a the_thread from the scheduling decision for
+ * the scheduler. The primary task is to remove the thread from the
+ * ready queue. It performs any necessary scheduling operations
+ * including the selection of a new heir thread.
+ *
+ * @param[in] the_thread The thread.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
+{
+#if defined(RTEMS_SMP)
+ Chain_Node *node;
+ const Chain_Node *tail;
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.block )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ node = _Chain_Next( node );
+
+ while ( node != tail ) {
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.withdraw_node )(
+ scheduler,
+ the_thread,
+ scheduler_node,
+ THREAD_SCHEDULER_BLOCKED
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ node = _Chain_Next( node );
+ }
+#else
+ const Scheduler_Control *scheduler;
+
+ scheduler = _Thread_Scheduler_get_home( the_thread );
+ ( *scheduler->Operations.block )(
+ scheduler,
+ the_thread,
+ _Thread_Scheduler_get_home_node( the_thread )
+ );
+#endif
+}
+
+/**
+ * @brief Unblocks a thread with respect to the scheduler.
+ *
+ * This operation must fetch the latest thread priority value for this
+ * scheduler instance and update its internal state if necessary.
+ *
+ * @param[in] the_thread The thread.
+ *
+ * @see _Scheduler_Node_get_priority().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
+{
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler = _Thread_Scheduler_get_home( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.unblock )(
+ scheduler,
+ the_thread,
+ _Thread_Scheduler_get_home_node( the_thread )
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+}
+
+/**
+ * @brief Propagates a priority change of a thread to the scheduler.
+ *
+ * On uni-processor configurations, this operation must evaluate the thread
+ * state. If the thread is not ready, the priority update should be
+ * deferred to the next scheduler unblock operation.
+ *
+ * The operation must update the heir and thread dispatch necessary variables
+ * in case the set of scheduled threads changes.
+ *
+ * @param[in] the_thread The thread changing its priority.
+ *
+ * @see _Scheduler_Node_get_priority().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
+{
+#if defined(RTEMS_SMP)
+ Chain_Node *node;
+ const Chain_Node *tail;
+
+ _Thread_Scheduler_process_requests( the_thread );
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+
+ do {
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ node = _Chain_Next( node );
+ } while ( node != tail );
+#else
+ const Scheduler_Control *scheduler;
+
+ scheduler = _Thread_Scheduler_get_home( the_thread );
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ _Thread_Scheduler_get_home_node( the_thread )
+ );
+#endif
+}
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief Changes the sticky level of the home scheduler node and propagates a
+ * priority change of a thread to the scheduler.
+ *
+ * @param[in] the_thread The thread changing its priority or sticky level.
+ *
+ * @see _Scheduler_Update_priority().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
+ Thread_Control *the_thread,
+ int sticky_level_change
+)
+{
+ Chain_Node *node;
+ const Chain_Node *tail;
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ _Thread_Scheduler_process_requests( the_thread );
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+
+ scheduler_node->sticky_level += sticky_level_change;
+ _Assert( scheduler_node->sticky_level >= 0 );
+
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+ node = _Chain_Next( node );
+
+ while ( node != tail ) {
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ node = _Chain_Next( node );
+ }
+}
+#endif
+
+/**
+ * @brief Maps a thread priority from the user domain to the scheduler domain.
+ *
+ * Let M be the maximum scheduler priority. The mapping must be bijective in
+ * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
+ * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M]. For
+ * other values the mapping is undefined.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] priority The user domain thread priority.
+ *
+ * @return The corresponding thread priority of the scheduler domain is returned.
+ */
+RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
+ const Scheduler_Control *scheduler,
+ Priority_Control priority
+)
+{
+ return ( *scheduler->Operations.map_priority )( scheduler, priority );
+}
+
+/**
+ * @brief Unmaps a thread priority from the scheduler domain to the user domain.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] priority The scheduler domain thread priority.
+ *
+ * @return The corresponding thread priority of the user domain is returned.
+ */
+RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
+ const Scheduler_Control *scheduler,
+ Priority_Control priority
+)
+{
+ return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
+}
+
+/**
+ * @brief Initializes a scheduler node.
+ *
+ * The scheduler node contains arbitrary data on function entry. The caller
+ * must ensure that _Scheduler_Node_destroy() will be called after a
+ * _Scheduler_Node_initialize() before the memory of the scheduler node is
+ * destroyed.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] node The scheduler node to initialize.
+ * @param[in] the_thread The thread of the scheduler node to initialize.
+ * @param[in] priority The thread priority.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+)
+{
+ ( *scheduler->Operations.node_initialize )(
+ scheduler,
+ node,
+ the_thread,
+ priority
+ );
+}
+
+/**
+ * @brief Destroys a scheduler node.
+ *
+ * The caller must ensure that _Scheduler_Node_destroy() will be called only
+ * after a corresponding _Scheduler_Node_initialize().
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] node The scheduler node to destroy.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node
+)
+{
+ ( *scheduler->Operations.node_destroy )( scheduler, node );
+}
+
+/**
+ * @brief Releases a job of a thread with respect to the scheduler.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] priority_node The priority node of the job.
+ * @param[in] deadline The deadline in watchdog ticks since boot.
+ * @param[in] queue_context The thread queue context to provide the set of
+ * threads for _Thread_Priority_update().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
+)
+{
+ const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ ( *scheduler->Operations.release_job )(
+ scheduler,
+ the_thread,
+ priority_node,
+ deadline,
+ queue_context
+ );
+}
+
+/**
+ * @brief Cancels a job of a thread with respect to the scheduler.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] priority_node The priority node of the job.
+ * @param[in] queue_context The thread queue context to provide the set of
+ * threads for _Thread_Priority_update().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ ( *scheduler->Operations.cancel_job )(
+ scheduler,
+ the_thread,
+ priority_node,
+ queue_context
+ );
+}
+
+/**
+ * @brief Scheduler method invoked at each clock tick.
+ *
+ * This method is invoked at each clock tick to allow the scheduler
+ * implementation to perform any activities required. For the
+ * scheduler which support standard RTEMS features, this includes
+ * time-slicing management.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
+{
+ const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
+ Thread_Control *executing = cpu->executing;
+
+ if ( scheduler != NULL && executing != NULL ) {
+ ( *scheduler->Operations.tick )( scheduler, executing );
+ }
+}
+
+/**
+ * @brief Starts the idle thread for a particular processor.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in,out] the_thread The idle thread for the processor.
+ * @param[in,out] cpu The processor for the idle thread.
+ *
+ * @see _Thread_Create_idle().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu
+)
+{
+ ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
+}
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
+ const Scheduler_Control *scheduler,
+ uint32_t cpu_index
+)
+{
+#if defined(RTEMS_SMP)
+ const Per_CPU_Control *cpu;
+ const Scheduler_Control *scheduler_of_cpu;
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );
+
+ return scheduler_of_cpu == scheduler;
+#else
+ (void) scheduler;
+ (void) cpu_index;
+
+ return true;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
+ const Scheduler_Control *scheduler
+)
+{
+#if defined(RTEMS_SMP)
+ return &_Scheduler_Get_context( scheduler )->Processors;
+#else
+ return &_Processor_mask_The_one_and_only;
+#endif
+}
+
+bool _Scheduler_Get_affinity(
+ Thread_Control *the_thread,
+ size_t cpusetsize,
+ cpu_set_t *cpuset
+);
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ const Processor_mask *affinity
+)
+{
+ (void) scheduler;
+ (void) the_thread;
+ (void) node;
+ return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
+}
+
+bool _Scheduler_Set_affinity(
+ Thread_Control *the_thread,
+ size_t cpusetsize,
+ const cpu_set_t *cpuset
+);
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ void ( *extract )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ Scheduler_Node *
+ ),
+ void ( *schedule )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ bool
+ )
+)
+{
+ ( *extract )( scheduler, the_thread, node );
+
+ /* TODO: flash critical section? */
+
+ if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
+ ( *schedule )( scheduler, the_thread, true );
+ }
+}
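As a usage sketch, a scheduler's block operation passes its extract and schedule bodies to this template; wired with the EDF bodies from scheduleredfimpl.h above, the block operation presumably looks like this (mirroring, not quoting, the implementation file):

void _Scheduler_EDF_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_Generic_block(
    scheduler,
    the_thread,
    node,
    _Scheduler_EDF_Extract_body,
    _Scheduler_EDF_Schedule_body
  );
}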
+
+RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
+ const Scheduler_Control *scheduler
+)
+{
+#if defined(RTEMS_SMP)
+ const Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ return _Processor_mask_Count( &context->Processors );
+#else
+ (void) scheduler;
+
+ return 1;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
+{
+ return _Objects_Build_id(
+ OBJECTS_FAKE_OBJECTS_API,
+ OBJECTS_FAKE_OBJECTS_SCHEDULERS,
+ _Objects_Local_node,
+ (uint16_t) ( scheduler_index + 1 )
+ );
+}
+
+RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
+{
+ uint32_t minimum_id = _Scheduler_Build_id( 0 );
+
+ return id - minimum_id;
+}
+
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
+ Objects_Id id
+)
+{
+ uint32_t index;
+
+ index = _Scheduler_Get_index_by_id( id );
+
+ if ( index >= _Scheduler_Count ) {
+ return NULL;
+ }
+
+ return &_Scheduler_Table[ index ];
+}
+
+RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
+ const Scheduler_Control *scheduler
+)
+{
+ return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
+}
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief Gets an idle thread from the scheduler instance.
+ *
+ * @param[in] context The scheduler instance context.
+ *
+ * @retval idle An idle thread for use. This function must always return an
+ * idle thread. If none is available, then this is a fatal error.
+ */
+typedef Thread_Control *( *Scheduler_Get_idle_thread )(
+ Scheduler_Context *context
+);
+
+/**
+ * @brief Releases an idle thread to the scheduler instance for reuse.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] idle The idle thread to release
+ */
+typedef void ( *Scheduler_Release_idle_thread )(
+ Scheduler_Context *context,
+ Thread_Control *idle
+);
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
+ Thread_Control *the_thread,
+ Thread_Scheduler_state new_state
+)
+{
+ _Assert(
+ _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
+ || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
+ || !_System_state_Is_up( _System_state_Get() )
+ );
+
+ the_thread->Scheduler.state = new_state;
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
+ Scheduler_Node *node,
+ Thread_Control *idle
+)
+{
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+ _Assert(
+ _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
+ );
+
+ _Scheduler_Node_set_user( node, idle );
+ node->idle = idle;
+}
+
+/**
+ * @brief Uses an idle thread for this scheduler node.
+ *
+ * A thread whose home scheduler node has a sticky level greater than zero
+ * may use an idle thread in the home scheduler instance in case it currently
+ * executes in another scheduler instance or is in a blocking state.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which wants to use the idle thread.
+ * @param[in] cpu The processor for the idle thread.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Per_CPU_Control *cpu,
+ Scheduler_Get_idle_thread get_idle_thread
+)
+{
+ Thread_Control *idle = ( *get_idle_thread )( context );
+
+ _Scheduler_Set_idle_thread( node, idle );
+ _Thread_Set_CPU( idle, cpu );
+ return idle;
+}
+
+typedef enum {
+ SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
+ SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
+ SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
+} Scheduler_Try_to_schedule_action;
+
+/**
+ * @brief Tries to schedule this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which wants to get scheduled.
+ * @param[in] idle A potential idle thread used by a potential victim node.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ *
+ * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE The node can be scheduled.
+ * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE The node can be
+ *   scheduled after an exchange of the idle thread with the victim node,
+ *   see _Scheduler_Exchange_idle_thread().
+ * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK The node cannot be scheduled.
+ */
+RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
+_Scheduler_Try_to_schedule_node(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Thread_Control *idle,
+ Scheduler_Get_idle_thread get_idle_thread
+)
+{
+ ISR_lock_Context lock_context;
+ Scheduler_Try_to_schedule_action action;
+ Thread_Control *owner;
+
+ action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
+ owner = _Scheduler_Node_get_owner( node );
+ _Assert( _Scheduler_Node_get_user( node ) == owner );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
+ _Thread_Scheduler_acquire_critical( owner, &lock_context );
+
+ if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
+ _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+ } else if (
+ owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
+ && node->sticky_level <= 1
+ ) {
+ action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
+ } else if ( node->sticky_level == 0 ) {
+ action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
+ } else if ( idle != NULL ) {
+ action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
+ } else {
+ _Scheduler_Use_idle_thread(
+ context,
+ node,
+ _Thread_Get_CPU( owner ),
+ get_idle_thread
+ );
+ }
+
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ return action;
+}
+
+/**
+ * @brief Releases an idle thread using this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which may have an idle thread as user.
+ * @param[in] release_idle_thread Function to release an idle thread.
+ *
+ * @retval idle The idle thread which used this node.
+ * @retval NULL This node had no idle thread as a user.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Scheduler_Release_idle_thread release_idle_thread
+)
+{
+ Thread_Control *idle = _Scheduler_Node_get_idle( node );
+
+ if ( idle != NULL ) {
+ Thread_Control *owner = _Scheduler_Node_get_owner( node );
+
+ node->idle = NULL;
+ _Scheduler_Node_set_user( node, owner );
+ ( *release_idle_thread )( context, idle );
+ }
+
+ return idle;
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
+ Scheduler_Node *needs_idle,
+ Scheduler_Node *uses_idle,
+ Thread_Control *idle
+)
+{
+ uses_idle->idle = NULL;
+ _Scheduler_Node_set_user(
+ uses_idle,
+ _Scheduler_Node_get_owner( uses_idle )
+ );
+ _Scheduler_Set_idle_thread( needs_idle, idle );
+}
+
+/**
+ * @brief Blocks this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] thread The thread which wants to get blocked referencing this
+ * node. This is not necessarily the user of this node in case the node
+ * participates in the scheduler helping protocol.
+ * @param[in] node The node which wants to get blocked.
+ * @param[in] is_scheduled This node is scheduled.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ *
+ * @retval thread_cpu The processor of the thread. Indicates to continue with
+ * the blocking operation.
+ * @retval NULL Otherwise.
+ */
+RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ bool is_scheduled,
+ Scheduler_Get_idle_thread get_idle_thread
+)
+{
+ int sticky_level;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *thread_cpu;
+
+ sticky_level = node->sticky_level;
+ --sticky_level;
+ node->sticky_level = sticky_level;
+ _Assert( sticky_level >= 0 );
+
+ _Thread_Scheduler_acquire_critical( thread, &lock_context );
+ thread_cpu = _Thread_Get_CPU( thread );
+ _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+
+ if ( sticky_level > 0 ) {
+ if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
+ Thread_Control *idle;
+
+ idle = _Scheduler_Use_idle_thread(
+ context,
+ node,
+ thread_cpu,
+ get_idle_thread
+ );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
+ }
+
+ return NULL;
+ }
+
+ _Assert( thread == _Scheduler_Node_get_user( node ) );
+ return thread_cpu;
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
+ Scheduler_Context *context,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Scheduler_Release_idle_thread release_idle_thread
+)
+{
+ Thread_Control *idle;
+ Thread_Control *owner;
+ Per_CPU_Control *cpu;
+
+ idle = _Scheduler_Node_get_idle( node );
+ owner = _Scheduler_Node_get_owner( node );
+
+ node->idle = NULL;
+ _Assert( _Scheduler_Node_get_user( node ) == idle );
+ _Scheduler_Node_set_user( node, owner );
+ ( *release_idle_thread )( context, idle );
+
+ cpu = _Thread_Get_CPU( idle );
+ _Thread_Set_CPU( the_thread, cpu );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
+}
+
+/**
+ * @brief Unblocks this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] the_thread The thread which wants to get unblocked.
+ * @param[in] node The node which wants to get unblocked.
+ * @param[in] is_scheduled This node is scheduled.
+ * @param[in] release_idle_thread Function to release an idle thread.
+ *
+ * @retval true Continue with the unblocking operation.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
+ Scheduler_Context *context,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ bool is_scheduled,
+ Scheduler_Release_idle_thread release_idle_thread
+)
+{
+ bool unblock;
+
+ ++node->sticky_level;
+ _Assert( node->sticky_level > 0 );
+
+ if ( is_scheduled ) {
+ _Scheduler_Discard_idle_thread(
+ context,
+ the_thread,
+ node,
+ release_idle_thread
+ );
+ _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
+ unblock = false;
+ } else {
+ _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
+ unblock = true;
+ }
+
+ return unblock;
+}
+#endif
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
+ Thread_Control *new_heir,
+ bool force_dispatch
+)
+{
+ Thread_Control *heir = _Thread_Heir;
+
+ if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
+#if defined(RTEMS_SMP)
+ /*
+ * We need this state only for _Thread_Get_CPU_time_used(). Cannot use
+ * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
+ * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
+ * schedulers.
+ */
+ heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
+ new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
+#endif
+ _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
+ _Thread_Heir = new_heir;
+ _Thread_Dispatch_necessary = true;
+ }
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
+ const Scheduler_Control *new_scheduler,
+ Thread_Control *the_thread,
+ Priority_Control priority
+)
+{
+ Scheduler_Node *new_scheduler_node;
+ Scheduler_Node *old_scheduler_node;
+#if defined(RTEMS_SMP)
+ ISR_lock_Context lock_context;
+ const Scheduler_Control *old_scheduler;
+#endif
+
+ if ( the_thread->Wait.queue != NULL ) {
+ return STATUS_RESOURCE_IN_USE;
+ }
+
+ old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
+ _Priority_Plain_extract(
+ &old_scheduler_node->Wait.Priority,
+ &the_thread->Real_priority
+ );
+
+ if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
+ _Priority_Plain_insert(
+ &old_scheduler_node->Wait.Priority,
+ &the_thread->Real_priority,
+ the_thread->Real_priority.priority
+ );
+ return STATUS_RESOURCE_IN_USE;
+ }
+
+#if defined(RTEMS_SMP)
+ if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
+ _Priority_Plain_insert(
+ &old_scheduler_node->Wait.Priority,
+ &the_thread->Real_priority,
+ the_thread->Real_priority.priority
+ );
+ return STATUS_RESOURCE_IN_USE;
+ }
+
+ old_scheduler = _Thread_Scheduler_get_home( the_thread );
+ new_scheduler_node = _Thread_Scheduler_get_node_by_index(
+ the_thread,
+ _Scheduler_Get_index( new_scheduler )
+ );
+
+ _Scheduler_Acquire_critical( new_scheduler, &lock_context );
+
+ if (
+ _Scheduler_Get_processor_count( new_scheduler ) == 0
+ || !( *new_scheduler->Operations.set_affinity )(
+ new_scheduler,
+ the_thread,
+ new_scheduler_node,
+ &the_thread->Scheduler.Affinity
+ )
+ ) {
+ _Scheduler_Release_critical( new_scheduler, &lock_context );
+ _Priority_Plain_insert(
+ &old_scheduler_node->Wait.Priority,
+ &the_thread->Real_priority,
+ the_thread->Real_priority.priority
+ );
+ return STATUS_UNSATISFIED;
+ }
+
+ the_thread->Scheduler.home = new_scheduler;
+
+ _Scheduler_Release_critical( new_scheduler, &lock_context );
+
+ _Thread_Scheduler_process_requests( the_thread );
+#else
+ new_scheduler_node = old_scheduler_node;
+#endif
+
+ the_thread->Start.initial_priority = priority;
+ _Priority_Node_set_priority( &the_thread->Real_priority, priority );
+ _Priority_Initialize_one(
+ &new_scheduler_node->Wait.Priority,
+ &the_thread->Real_priority
+ );
+
+#if defined(RTEMS_SMP)
+ if ( old_scheduler != new_scheduler ) {
+ States_Control current_state;
+
+ current_state = the_thread->current_state;
+
+ if ( _States_Is_ready( current_state ) ) {
+ _Scheduler_Block( the_thread );
+ }
+
+ _Assert( old_scheduler_node->sticky_level == 0 );
+ _Assert( new_scheduler_node->sticky_level == 0 );
+
+ _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
+ _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
+ _Chain_Initialize_one(
+ &the_thread->Scheduler.Wait_nodes,
+ &new_scheduler_node->Thread.Wait_node
+ );
+ _Chain_Extract_unprotected(
+ &old_scheduler_node->Thread.Scheduler_node.Chain
+ );
+ _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
+ _Chain_Initialize_one(
+ &the_thread->Scheduler.Scheduler_nodes,
+ &new_scheduler_node->Thread.Scheduler_node.Chain
+ );
+
+ _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
+
+ if ( _States_Is_ready( current_state ) ) {
+ _Scheduler_Unblock( the_thread );
+ }
+
+ return STATUS_SUCCESSFUL;
+ }
+#endif
+
+ _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
+ _Scheduler_Update_priority( the_thread );
+ return STATUS_SUCCESSFUL;
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/schedulernode.h b/cpukit/include/rtems/score/schedulernode.h
new file mode 100644
index 0000000000..d62e983853
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulernode.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2014, 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERNODE_H
+#define _RTEMS_SCORE_SCHEDULERNODE_H
+
+#include <rtems/score/basedefs.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/priority.h>
+#include <rtems/score/smplockseq.h>
+
+struct _Thread_Control;
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief The scheduler node requests.
+ */
+typedef enum {
+ /**
+ * @brief The scheduler node is not on the list of pending requests.
+ */
+ SCHEDULER_NODE_REQUEST_NOT_PENDING,
+
+ /**
+ * @brief There is a pending scheduler node request to add this scheduler
+ * node to the Thread_Control::Scheduler::Scheduler_nodes chain.
+ */
+ SCHEDULER_NODE_REQUEST_ADD,
+
+ /**
+ * @brief There is a pending scheduler node request to remove this scheduler
+ * node from the Thread_Control::Scheduler::Scheduler_nodes chain.
+ */
+ SCHEDULER_NODE_REQUEST_REMOVE,
+
+ /**
+ * @brief The scheduler node is on the list of pending requests, but nothing
+ * should change.
+ */
+ SCHEDULER_NODE_REQUEST_NOTHING,
+
+} Scheduler_Node_request;
+#endif
+
+typedef struct Scheduler_Node Scheduler_Node;
+
+/**
+ * @brief Scheduler node for per-thread data.
+ */
+struct Scheduler_Node {
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Chain node for usage in various scheduler data structures.
+ *
+ * Strictly, this is the wrong place for this field since the data structures
+ * to manage scheduler nodes belong to the particular scheduler
+ * implementation. Currently, all SMP scheduler implementations use chains
+ * or red-black trees. The node is here to simplify things, just like the
+ * object node in the thread control block.
+ */
+ union {
+ Chain_Node Chain;
+ RBTree_Node RBTree;
+ } Node;
+
+ /**
+ * @brief The sticky level determines if this scheduler node should use an
+ * idle thread in case this node is scheduled and the owner thread is
+ * blocked.
+ */
+ int sticky_level;
+
+ /**
+ * @brief The thread using this node.
+ *
+ * This is either the owner or an idle thread.
+ */
+ struct _Thread_Control *user;
+
+ /**
+ * @brief The idle thread claimed by this node in case the sticky level is
+   * greater than zero and the thread is blocked or is scheduled on another
+ * scheduler instance.
+ *
+ * This is necessary to ensure the priority ceiling protocols work across
+ * scheduler boundaries.
+ */
+ struct _Thread_Control *idle;
+#endif
+
+ /**
+ * @brief The thread owning this node.
+ */
+ struct _Thread_Control *owner;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Block to register and manage this scheduler node in the thread
+ * control block of the owner of this scheduler node.
+ */
+ struct {
+ /**
+ * @brief Node to add this scheduler node to
+ * Thread_Control::Scheduler::Wait_nodes.
+ */
+ Chain_Node Wait_node;
+
+ /**
+ * @brief Node to add this scheduler node to
+ * Thread_Control::Scheduler::Scheduler_nodes or a temporary remove list.
+ */
+ union {
+ /**
+ * @brief The node for Thread_Control::Scheduler::Scheduler_nodes.
+ */
+ Chain_Node Chain;
+
+ /**
+ * @brief The next pointer for a temporary remove list.
+ *
+ * @see _Thread_Scheduler_process_requests().
+ */
+ Scheduler_Node *next;
+ } Scheduler_node;
+
+ /**
+ * @brief Link to the next scheduler node in the
+ * Thread_Control::Scheduler::requests list.
+ */
+ Scheduler_Node *next_request;
+
+ /**
+ * @brief The current scheduler node request.
+ */
+ Scheduler_Node_request request;
+ } Thread;
+#endif
+
+ /**
+ * @brief Thread wait support block.
+ */
+ struct {
+ Priority_Aggregation Priority;
+ } Wait;
+
+ /**
+ * @brief The thread priority information used by the scheduler.
+ *
+ * The thread priority is manifest in two independent areas. One area is the
+ * user visible thread priority along with a potential thread queue. The
+ * other is the scheduler. During a thread priority change, the user visible
+ * thread priority and the thread queue are first updated and the thread
+ * priority value here is changed. Once this is done the scheduler is
+ * notified via the update priority operation, so that it can update its
+ * internal state and honour a new thread priority value.
+ */
+ struct {
+ /**
+ * @brief The thread priority value of this scheduler node.
+ *
+ * The producer of this value is _Thread_Change_priority(). The consumer
+ * is the scheduler via the unblock and update priority operations.
+ *
+ * This priority control consists of two parts. One part is the plain
+ * priority value (most-significant 63 bits). The other part is the
+ * least-significant bit which indicates if the thread should be appended
+ * (bit set) or prepended (bit cleared) to its priority group, see
+ * SCHEDULER_PRIORITY_APPEND().
+ */
+ Priority_Control value;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Sequence lock to synchronize priority value updates.
+ */
+ SMP_sequence_lock_Control Lock;
+#endif
+ } Priority;
+};
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief The size of a scheduler node.
+ *
+ * This value is provided via <rtems/confdefs.h>.
+ */
+extern const size_t _Scheduler_Node_size;
+#endif
+
+#if defined(RTEMS_SMP)
+#define SCHEDULER_NODE_OF_THREAD_WAIT_NODE( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_Node, Thread.Wait_node )
+
+#define SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_Node, Thread.Scheduler_node.Chain )
+#endif
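Both macros are instances of the usual container-of pattern: recover the enclosing structure from a pointer to an embedded member. A self-contained illustration with the conventional definition (RTEMS ships its own RTEMS_CONTAINER_OF in basedefs.h):

#include <stddef.h>

#define CONTAINER_OF( ptr, type, member ) \
  ( (type *) ( (char *) ( ptr ) - offsetof( type, member ) ) )

struct chain_node { struct chain_node *next, *previous; };

struct node_like {
  int               payload;
  struct chain_node Chain;  /* embedded, like Thread.Scheduler_node.Chain */
};

static struct node_like *node_of_chain( struct chain_node *n )
{
  return CONTAINER_OF( n, struct node_like, Chain );
}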
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERNODE_H */
diff --git a/cpukit/include/rtems/score/schedulernodeimpl.h b/cpukit/include/rtems/score/schedulernodeimpl.h
new file mode 100644
index 0000000000..8997b3f218
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulernodeimpl.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2014, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERNODEIMPL_H
+#define _RTEMS_SCORE_SCHEDULERNODEIMPL_H
+
+#include <rtems/score/schedulernode.h>
+#include <rtems/score/priorityimpl.h>
+
+struct _Scheduler_Control;
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define SCHEDULER_NODE_OF_WAIT_PRIORITY_NODE( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Priority.Node.Node.Chain )
+
+#define SCHEDULER_NODE_OF_WAIT_PRIORITY( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Priority )
+
+/**
+ * @brief Priority append indicator for the priority control used for the
+ * scheduler node priority.
+ */
+#define SCHEDULER_PRIORITY_APPEND_FLAG 1
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
+ const struct _Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+)
+{
+ node->owner = the_thread;
+
+ node->Priority.value = priority;
+
+#if defined(RTEMS_SMP)
+ _Chain_Initialize_node( &node->Thread.Wait_node );
+ node->Wait.Priority.scheduler = scheduler;
+ node->user = the_thread;
+ node->idle = NULL;
+ _SMP_sequence_lock_Initialize( &node->Priority.Lock );
+#else
+ (void) scheduler;
+ (void) the_thread;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Node_get_scheduler(
+ const Scheduler_Node *node
+)
+{
+ return _Priority_Get_scheduler( &node->Wait.Priority );
+}
+
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
+ const Scheduler_Node *node
+)
+{
+ return node->owner;
+}
+
+RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(
+ Scheduler_Node *node
+)
+{
+ Priority_Control priority;
+
+#if defined(RTEMS_SMP)
+ unsigned int seq;
+
+ do {
+ seq = _SMP_sequence_lock_Read_begin( &node->Priority.Lock );
+#endif
+
+ priority = node->Priority.value;
+
+#if defined(RTEMS_SMP)
+ } while ( _SMP_sequence_lock_Read_retry( &node->Priority.Lock, seq ) );
+#endif
+
+ return priority;
+}
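The loop above is the read side of a sequence lock: read the value optimistically and retry if a writer intervened. A self-contained C11 sketch of the same idea (illustrative only; the real primitive is SMP_sequence_lock_Control from smplockseq.h):

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
  atomic_uint sequence; /* odd while a writer is in progress */
  uint64_t    value;
} seq_protected;

static uint64_t seq_read( const seq_protected *s )
{
  unsigned int seq;
  uint64_t     value;

  do {
    /* Wait until no writer is active (even sequence number). */
    do {
      seq = atomic_load_explicit( &s->sequence, memory_order_acquire );
    } while ( ( seq & 1u ) != 0u );

    value = s->value;

    atomic_thread_fence( memory_order_acquire );
    /* Retry if a writer started or finished in the meantime. */
  } while ( seq != atomic_load_explicit( &s->sequence, memory_order_relaxed ) );

  return value;
}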
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_priority(
+ Scheduler_Node *node,
+ Priority_Control new_priority,
+ bool prepend_it
+)
+{
+#if defined(RTEMS_SMP)
+ unsigned int seq;
+
+ seq = _SMP_sequence_lock_Write_begin( &node->Priority.Lock );
+#endif
+
+ new_priority |= ( prepend_it ? 0 : SCHEDULER_PRIORITY_APPEND_FLAG );
+ node->Priority.value = new_priority;
+
+#if defined(RTEMS_SMP)
+ _SMP_sequence_lock_Write_end( &node->Priority.Lock, seq );
+#endif
+}
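A short usage sketch, assuming a scheduler node pointer at hand: passing prepend_it == false sets the append indicator, so the thread joins the tail of its new priority group (SCHEDULER_PRIORITY_MAP() is from schedulerimpl.h):

/* Append the thread to the group of threads with user priority 5. */
_Scheduler_Node_set_priority(
  node,
  SCHEDULER_PRIORITY_MAP( (Priority_Control) 5 ),
  false
);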
+
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
+ const Scheduler_Node *node
+)
+{
+ return node->user;
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
+ Scheduler_Node *node,
+ Thread_Control *user
+)
+{
+ node->user = user;
+}
+
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
+ const Scheduler_Node *node
+)
+{
+ return node->idle;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERNODEIMPL_H */
diff --git a/cpukit/include/rtems/score/schedulerpriority.h b/cpukit/include/rtems/score/schedulerpriority.h
new file mode 100644
index 0000000000..f5ae66102d
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulerpriority.h
@@ -0,0 +1,163 @@
+/**
+ * @file rtems/score/schedulerpriority.h
+ *
+ * @brief Thread Manipulation with the Priority-Based Scheduler
+ *
+ * This include file contains all the constants and structures associated
+ * with the manipulation of threads for the priority-based scheduler.
+ */
+
+/*
+ * Copyright (c) 2010 Gedare Bloom.
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERPRIORITY_H
+#define _RTEMS_SCORE_SCHEDULERPRIORITY_H
+
+#include <rtems/score/chain.h>
+#include <rtems/score/prioritybitmap.h>
+#include <rtems/score/scheduler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSchedulerDPS Deterministic Priority Scheduler
+ *
+ * @ingroup ScoreScheduler
+ */
+/**@{*/
+
+/**
+ * Entry points for the Deterministic Priority Based Scheduler.
+ */
+#define SCHEDULER_PRIORITY_ENTRY_POINTS \
+ { \
+ _Scheduler_priority_Initialize, /* initialize entry point */ \
+ _Scheduler_priority_Schedule, /* schedule entry point */ \
+ _Scheduler_priority_Yield, /* yield entry point */ \
+ _Scheduler_priority_Block, /* block entry point */ \
+ _Scheduler_priority_Unblock, /* unblock entry point */ \
+ _Scheduler_priority_Update_priority, /* update priority entry point */ \
+ _Scheduler_default_Map_priority, /* map priority entry point */ \
+ _Scheduler_default_Unmap_priority, /* unmap priority entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
+ _Scheduler_priority_Node_initialize, /* node initialize entry point */ \
+ _Scheduler_default_Node_destroy, /* node destroy entry point */ \
+ _Scheduler_default_Release_job, /* new period of task */ \
+ _Scheduler_default_Cancel_job, /* cancel period of task */ \
+ _Scheduler_default_Tick, /* tick entry point */ \
+ _Scheduler_default_Start_idle /* start idle entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ }
+
+typedef struct {
+ /**
+ * @brief Basic scheduler context.
+ */
+ Scheduler_Context Base;
+
+ /**
+ * @brief Bit map to indicate non-empty ready queues.
+ */
+ Priority_bit_map_Control Bit_map;
+
+ /**
+ * @brief One ready queue per priority level.
+ */
+ Chain_Control Ready[ 0 ];
+} Scheduler_priority_Context;
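Ready[ 0 ] is the trailing-array idiom: the context is allocated with one chain appended per priority level, and the bit map locates the highest-priority non-empty chain. A hedged sketch of the sizing arithmetic only (RTEMS sizes this through the configuration, not through malloc):

#include <stdlib.h>

typedef struct { void *head, *tail, *last; } dummy_chain;

typedef struct {
  unsigned    bit_map;  /* stand-in for Priority_bit_map_Control */
  dummy_chain Ready[];  /* one ready queue per priority level */
} prio_context_like;

static prio_context_like *prio_context_new( size_t priority_levels )
{
  return calloc(
    1,
    sizeof( prio_context_like ) + priority_levels * sizeof( dummy_chain )
  );
}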
+
+/**
+ * @brief Data for ready queue operations.
+ */
+typedef struct {
+ /**
+ * @brief The thread priority currently used by the scheduler.
+ */
+ unsigned int current_priority;
+
+ /** This field points to the Ready FIFO for this thread's priority. */
+ Chain_Control *ready_chain;
+
+ /** This field contains precalculated priority map indices. */
+ Priority_bit_map_Information Priority_map;
+} Scheduler_priority_Ready_queue;
+
+/**
+ * @brief Scheduler node specialization for Deterministic Priority schedulers.
+ */
+typedef struct {
+ /**
+ * @brief Basic scheduler node.
+ */
+ Scheduler_Node Base;
+
+ /**
+ * @brief The associated ready queue of this node.
+ */
+ Scheduler_priority_Ready_queue Ready_queue;
+} Scheduler_priority_Node;
+
+/**
+ * @brief Initializes the priority scheduler.
+ */
+void _Scheduler_priority_Initialize( const Scheduler_Control *scheduler );
+
+void _Scheduler_priority_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Sets the heir thread to be the next ready thread.
+ *
+ * This kernel routine sets the heir thread to be the first thread of the
+ * highest priority non-empty ready queue.
+ */
+void _Scheduler_priority_Schedule(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread
+);
+
+void _Scheduler_priority_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *base_node
+);
+
+void _Scheduler_priority_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+void _Scheduler_priority_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h
new file mode 100644
index 0000000000..d988d5752a
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h
@@ -0,0 +1,181 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSchedulerPriorityAffinitySMP
+ *
+ * @brief Deterministic Priority Affinity SMP Scheduler API
+ */
+
+/*
+ * COPYRIGHT (c) 2014.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERPRIORITYAFFINITYSMP_H
+#define _RTEMS_SCORE_SCHEDULERPRIORITYAFFINITYSMP_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerpriority.h>
+#include <rtems/score/schedulersmp.h>
+#include <rtems/score/schedulerprioritysmp.h>
+
+#include <sys/cpuset.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreSchedulerPriorityAffinitySMP Deterministic Priority Affinity SMP Scheduler
+ *
+ * @ingroup ScoreSchedulerPrioritySMP
+ *
+ * This is an extension of the Deterministic Priority SMP Scheduler, which
+ * is an implementation of the global fixed priority scheduler (G-FP).
+ * It adds thread-to-core affinity support.
+ *
+ * @note This is the first iteration of this scheduler. It currently tracks
+ * the requested affinity to exercise the Scheduler Framework but it
+ * does not honor that affinity in assigning threads to cores. This
+ * will be added in a subsequent revision.
+ * @{
+ */
+
+/**
+ * @brief Entry points for the Deterministic Priority Affinity SMP Scheduler.
+ */
+#define SCHEDULER_PRIORITY_AFFINITY_SMP_ENTRY_POINTS \
+ { \
+ _Scheduler_priority_SMP_Initialize, \
+ _Scheduler_default_Schedule, \
+ _Scheduler_priority_SMP_Yield, \
+ _Scheduler_priority_affinity_SMP_Block, \
+ _Scheduler_priority_affinity_SMP_Unblock, \
+ _Scheduler_priority_affinity_SMP_Update_priority, \
+ _Scheduler_default_Map_priority, \
+ _Scheduler_default_Unmap_priority, \
+ _Scheduler_priority_affinity_SMP_Ask_for_help, \
+ _Scheduler_priority_affinity_SMP_Reconsider_help_request, \
+ _Scheduler_priority_affinity_SMP_Withdraw_node, \
+ _Scheduler_priority_affinity_SMP_Add_processor, \
+ _Scheduler_priority_affinity_SMP_Remove_processor, \
+ _Scheduler_priority_affinity_SMP_Node_initialize, \
+ _Scheduler_default_Node_destroy, \
+ _Scheduler_default_Release_job, \
+ _Scheduler_default_Cancel_job, \
+ _Scheduler_default_Tick, \
+ _Scheduler_SMP_Start_idle, \
+ _Scheduler_priority_affinity_SMP_Set_affinity \
+ }
+
+/**
+ * @brief Initializes the per-thread scheduler node.
+ *
+ * This routine initializes the scheduler node of the thread.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] node The scheduler node to initialize.
+ * @param[in] the_thread The thread owning the node.
+ * @param[in] priority The initial thread priority.
+ */
+void _Scheduler_priority_affinity_SMP_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+void _Scheduler_priority_affinity_SMP_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_affinity_SMP_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_affinity_SMP_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+bool _Scheduler_priority_affinity_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_affinity_SMP_Reconsider_help_request(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_affinity_SMP_Withdraw_node(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state
+);
+
+void _Scheduler_priority_affinity_SMP_Add_processor(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle
+);
+
+Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
+ const Scheduler_Control *scheduler,
+ struct Per_CPU_Control *cpu
+);
+
+/**
+ * @brief Sets the affinity for the priority affinity SMP scheduler.
+ *
+ * @param[in] scheduler The scheduler of the thread.
+ * @param[in] thread The associated thread.
+ * @param[in] node The scheduler node of the thread.
+ * @param[in] affinity The new affinity set.
+ *
+ * @retval true The operation was successful.
+ * @retval false The operation failed.
+ */
+bool _Scheduler_priority_affinity_SMP_Set_affinity(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ const Processor_mask *affinity
+);
+
+/**
+ * @brief Scheduler node specialization for Deterministic Priority Affinity SMP
+ * schedulers.
+ *
+ * This is a per thread structure.
+ */
+typedef struct {
+ /**
+ * @brief SMP priority scheduler node.
+ */
+ Scheduler_priority_SMP_Node Base;
+
+ /**
+ * @brief The thread processor affinity set.
+ */
+ Processor_mask Affinity;
+} Scheduler_priority_affinity_SMP_Node;
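+
+/*
+ * Layout note: the Base member must be first so that a Scheduler_Node
+ * pointer may be downcast to this type. A sketch of initializing the
+ * affinity to all online processors (the helper names are assumptions for
+ * this example):
+ *
+ * _Processor_mask_Assign(
+ *   &the_node->Affinity,
+ *   _SMP_Get_online_processors()
+ * );
+ */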
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERPRIORITYAFFINITYSMP_H */
diff --git a/cpukit/include/rtems/score/schedulerpriorityimpl.h b/cpukit/include/rtems/score/schedulerpriorityimpl.h
new file mode 100644
index 0000000000..354065fac4
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulerpriorityimpl.h
@@ -0,0 +1,241 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines Associated with the Manipulation of the
+ * Priority-Based Scheduling Structures
+ *
+ * This inline file contains all of the inlined routines associated with
+ * the manipulation of the priority-based scheduling structures.
+ */
+
+/*
+ * Copyright (C) 2010 Gedare Bloom.
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERPRIORITYIMPL_H
+#define _RTEMS_SCORE_SCHEDULERPRIORITYIMPL_H
+
+#include <rtems/score/schedulerpriority.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/prioritybitmapimpl.h>
+#include <rtems/score/schedulerimpl.h>
+#include <rtems/score/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreSchedulerDPS
+ */
+/**@{**/
+
+RTEMS_INLINE_ROUTINE Scheduler_priority_Context *
+ _Scheduler_priority_Get_context( const Scheduler_Control *scheduler )
+{
+ return (Scheduler_priority_Context *) _Scheduler_Get_context( scheduler );
+}
+
+RTEMS_INLINE_ROUTINE Scheduler_priority_Node *_Scheduler_priority_Thread_get_node(
+ Thread_Control *the_thread
+)
+{
+ return (Scheduler_priority_Node *) _Thread_Scheduler_get_home_node( the_thread );
+}
+
+RTEMS_INLINE_ROUTINE Scheduler_priority_Node *_Scheduler_priority_Node_downcast(
+ Scheduler_Node *node
+)
+{
+ return (Scheduler_priority_Node *) node;
+}
+
+/**
+ * @brief Ready queue initialization.
+ *
+ * This routine initializes @a ready_queues for priority-based scheduling.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_initialize(
+ Chain_Control *ready_queues,
+ Priority_Control maximum_priority
+)
+{
+ size_t index;
+
+ for ( index = 0 ; index <= (size_t) maximum_priority ; ++index ) {
+ _Chain_Initialize_empty( &ready_queues[ index ] );
+ }
+}
+
+/**
+ * @brief Enqueues a node on the specified ready queue.
+ *
+ * The node is placed as the last element of its priority group.
+ *
+ * @param[in] node The node to enqueue.
+ * @param[in] ready_queue The ready queue.
+ * @param[in] bit_map The priority bit map of the scheduler instance.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_enqueue(
+ Chain_Node *node,
+ Scheduler_priority_Ready_queue *ready_queue,
+ Priority_bit_map_Control *bit_map
+)
+{
+ Chain_Control *ready_chain = ready_queue->ready_chain;
+
+ _Chain_Append_unprotected( ready_chain, node );
+ _Priority_bit_map_Add( bit_map, &ready_queue->Priority_map );
+}
+
+/**
+ * @brief Enqueues a node on the specified ready queue as first.
+ *
+ * The node is placed as the first element of its priority group.
+ *
+ * @param[in] node The node to enqueue as first.
+ * @param[in] ready_queue The ready queue.
+ * @param[in] bit_map The priority bit map of the scheduler instance.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_enqueue_first(
+ Chain_Node *node,
+ Scheduler_priority_Ready_queue *ready_queue,
+ Priority_bit_map_Control *bit_map
+)
+{
+ Chain_Control *ready_chain = ready_queue->ready_chain;
+
+ _Chain_Prepend_unprotected( ready_chain, node );
+ _Priority_bit_map_Add( bit_map, &ready_queue->Priority_map );
+}
+
+/**
+ * @brief Extracts a node from the specified ready queue.
+ *
+ * @param[in] node The node to extract.
+ * @param[in] ready_queue The ready queue.
+ * @param[in] bit_map The priority bit map of the scheduler instance.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_extract(
+ Chain_Node *node,
+ Scheduler_priority_Ready_queue *ready_queue,
+ Priority_bit_map_Control *bit_map
+)
+{
+ Chain_Control *ready_chain = ready_queue->ready_chain;
+
+ if ( _Chain_Has_only_one_node( ready_chain ) ) {
+ _Chain_Initialize_empty( ready_chain );
+ _Chain_Initialize_node( node );
+ _Priority_bit_map_Remove( bit_map, &ready_queue->Priority_map );
+ } else {
+ _Chain_Extract_unprotected( node );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_priority_Extract_body(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ Scheduler_priority_Context *context;
+ Scheduler_priority_Node *the_node;
+
+ context = _Scheduler_priority_Get_context( scheduler );
+ the_node = _Scheduler_priority_Node_downcast( node );
+
+ _Scheduler_priority_Ready_queue_extract(
+ &the_thread->Object.Node,
+ &the_node->Ready_queue,
+ &context->Bit_map
+ );
+}
+
+/**
+ * @brief Returns a pointer to the first node.
+ *
+ * This routine returns a pointer to the first node of the highest priority
+ * non-empty ready queue in @a ready_queues. The priority bit map guarantees
+ * that the indexed ready queue is non-empty.
+ *
+ * @param[in] bit_map The priority bit map of the scheduler instance.
+ * @param[in] ready_queues The ready queues of the scheduler instance.
+ *
+ * @return This method returns the first node.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Scheduler_priority_Ready_queue_first(
+ Priority_bit_map_Control *bit_map,
+ Chain_Control *ready_queues
+)
+{
+ Priority_Control index = _Priority_bit_map_Get_highest( bit_map );
+ Chain_Node *first = _Chain_First( &ready_queues[ index ] );
+
+ _Assert( first != _Chain_Tail( &ready_queues[ index ] ) );
+
+ return first;
+}
+
+/**
+ * @brief Scheduling decision logic.
+ *
+ * This kernel routine implements scheduling decision logic
+ * for priority-based scheduling.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_priority_Schedule_body(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ bool force_dispatch
+)
+{
+ Scheduler_priority_Context *context =
+ _Scheduler_priority_Get_context( scheduler );
+ Thread_Control *heir = (Thread_Control *)
+ _Scheduler_priority_Ready_queue_first(
+ &context->Bit_map,
+ &context->Ready[ 0 ]
+ );
+
+ ( void ) the_thread;
+
+ _Scheduler_Update_heir( heir, force_dispatch );
+}
+
+/**
+ * @brief Updates the specified ready queue data according to the new priority
+ * value.
+ *
+ * @param[in] ready_queue The ready queue.
+ * @param[in] new_priority The new priority.
+ * @param[in] bit_map The priority bit map of the scheduler instance.
+ * @param[in] ready_queues The ready queues of the scheduler instance.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_update(
+ Scheduler_priority_Ready_queue *ready_queue,
+ unsigned int new_priority,
+ Priority_bit_map_Control *bit_map,
+ Chain_Control *ready_queues
+)
+{
+ ready_queue->current_priority = new_priority;
+ ready_queue->ready_chain = &ready_queues[ new_priority ];
+
+ _Priority_bit_map_Initialize_information(
+ bit_map,
+ &ready_queue->Priority_map,
+ new_priority
+ );
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/schedulerprioritysmp.h b/cpukit/include/rtems/score/schedulerprioritysmp.h
new file mode 100644
index 0000000000..6671da5b7a
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulerprioritysmp.h
@@ -0,0 +1,171 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSchedulerPrioritySMP
+ *
+ * @brief Deterministic Priority SMP Scheduler API
+ */
+
+/*
+ * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERPRIORITYSMP_H
+#define _RTEMS_SCORE_SCHEDULERPRIORITYSMP_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerpriority.h>
+#include <rtems/score/schedulersmp.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreSchedulerPrioritySMP Deterministic Priority SMP Scheduler
+ *
+ * @ingroup ScoreSchedulerSMP
+ *
+ * This is an implementation of the global fixed priority scheduler (G-FP). It
+ * uses one ready chain per priority to ensure constant time insert operations.
+ * The scheduled chain uses linear insert operations and has at most processor
+ * count entries. Since the processor and priority count are constants, all
+ * scheduler operations complete in a bounded execution time.
+ *
+ * The thread preempt mode will be ignored.
+ *
+ * @{
+ */
+
+/**
+ * @brief Scheduler context specialization for Deterministic Priority SMP
+ * schedulers.
+ */
+typedef struct {
+ Scheduler_SMP_Context Base;
+ Priority_bit_map_Control Bit_map;
+ Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
+} Scheduler_priority_SMP_Context;
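+
+/*
+ * Since Ready[] is a zero-length array, the context itself provides no
+ * storage for the ready chains. A sketch of how storage is provided (the
+ * wrapper type name and the priority count of 256 are assumptions for this
+ * example):
+ *
+ * typedef struct {
+ *   Scheduler_priority_SMP_Context Base;
+ *   Chain_Control Ready[ 256 ];
+ * } My_priority_SMP_Context;
+ */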
+
+/**
+ * @brief Scheduler node specialization for Deterministic Priority SMP
+ * schedulers.
+ */
+typedef struct {
+ /**
+ * @brief SMP scheduler node.
+ */
+ Scheduler_SMP_Node Base;
+
+ /**
+ * @brief The associated ready queue of this node.
+ */
+ Scheduler_priority_Ready_queue Ready_queue;
+} Scheduler_priority_SMP_Node;
+
+/**
+ * @brief Entry points for the Deterministic Priority SMP Scheduler.
+ */
+#define SCHEDULER_PRIORITY_SMP_ENTRY_POINTS \
+ { \
+ _Scheduler_priority_SMP_Initialize, \
+ _Scheduler_default_Schedule, \
+ _Scheduler_priority_SMP_Yield, \
+ _Scheduler_priority_SMP_Block, \
+ _Scheduler_priority_SMP_Unblock, \
+ _Scheduler_priority_SMP_Update_priority, \
+ _Scheduler_default_Map_priority, \
+ _Scheduler_default_Unmap_priority, \
+ _Scheduler_priority_SMP_Ask_for_help, \
+ _Scheduler_priority_SMP_Reconsider_help_request, \
+ _Scheduler_priority_SMP_Withdraw_node, \
+ _Scheduler_priority_SMP_Add_processor, \
+ _Scheduler_priority_SMP_Remove_processor, \
+ _Scheduler_priority_SMP_Node_initialize, \
+ _Scheduler_default_Node_destroy, \
+ _Scheduler_default_Release_job, \
+ _Scheduler_default_Cancel_job, \
+ _Scheduler_default_Tick, \
+ _Scheduler_SMP_Start_idle \
+ SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ }
+
+void _Scheduler_priority_SMP_Initialize( const Scheduler_Control *scheduler );
+
+void _Scheduler_priority_SMP_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+void _Scheduler_priority_SMP_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_SMP_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_SMP_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+bool _Scheduler_priority_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_SMP_Reconsider_help_request(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_priority_SMP_Withdraw_node(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state
+);
+
+void _Scheduler_priority_SMP_Add_processor(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle
+);
+
+Thread_Control *_Scheduler_priority_SMP_Remove_processor(
+ const Scheduler_Control *scheduler,
+ struct Per_CPU_Control *cpu
+);
+
+void _Scheduler_priority_SMP_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERPRIORITYSMP_H */
diff --git a/cpukit/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
new file mode 100644
index 0000000000..17d6e552f3
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
@@ -0,0 +1,184 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSchedulerPrioritySMP
+ *
+ * @brief Deterministic Priority SMP Scheduler Implementation
+ */
+
+/*
+ * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERPRIORITYSMPIMPL_H
+#define _RTEMS_SCORE_SCHEDULERPRIORITYSMPIMPL_H
+
+#include <rtems/score/schedulerprioritysmp.h>
+#include <rtems/score/schedulerpriorityimpl.h>
+#include <rtems/score/schedulersimpleimpl.h>
+#include <rtems/score/schedulersmpimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @ingroup ScoreSchedulerPrioritySMP
+ * @{
+ */
+
+static inline Scheduler_priority_SMP_Context *_Scheduler_priority_SMP_Get_self(
+ Scheduler_Context *context
+)
+{
+ return (Scheduler_priority_SMP_Context *) context;
+}
+
+static inline Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Thread_get_node(
+ Thread_Control *thread
+)
+{
+ return (Scheduler_priority_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
+}
+
+static inline Scheduler_priority_SMP_Node *
+_Scheduler_priority_SMP_Node_downcast( Scheduler_Node *node )
+{
+ return (Scheduler_priority_SMP_Node *) node;
+}
+
+static inline bool _Scheduler_priority_SMP_Has_ready( Scheduler_Context *context )
+{
+ Scheduler_priority_SMP_Context *self =
+ _Scheduler_priority_SMP_Get_self( context );
+
+ return !_Priority_bit_map_Is_empty( &self->Bit_map );
+}
+
+static inline void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled_to_ready
+)
+{
+ Scheduler_priority_SMP_Context *self =
+ _Scheduler_priority_SMP_Get_self( context );
+ Scheduler_priority_SMP_Node *node =
+ _Scheduler_priority_SMP_Node_downcast( scheduled_to_ready );
+
+ _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
+ _Scheduler_priority_Ready_queue_enqueue_first(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+}
+
+static inline void _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *ready_to_scheduled
+)
+{
+ Scheduler_priority_SMP_Context *self;
+ Scheduler_priority_SMP_Node *node;
+ Priority_Control insert_priority;
+
+ self = _Scheduler_priority_SMP_Get_self( context );
+ node = _Scheduler_priority_SMP_Node_downcast( ready_to_scheduled );
+
+ _Scheduler_priority_Ready_queue_extract(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+ insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+ _Chain_Insert_ordered_unprotected(
+ &self->Base.Scheduled,
+ &node->Base.Base.Node.Chain,
+ &insert_priority,
+ _Scheduler_SMP_Priority_less_equal
+ );
+}
+
+static inline void _Scheduler_priority_SMP_Insert_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *node_base,
+ Priority_Control insert_priority
+)
+{
+ Scheduler_priority_SMP_Context *self;
+ Scheduler_priority_SMP_Node *node;
+
+ self = _Scheduler_priority_SMP_Get_self( context );
+ node = _Scheduler_priority_SMP_Node_downcast( node_base );
+
+ if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
+ _Scheduler_priority_Ready_queue_enqueue(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+ } else {
+ _Scheduler_priority_Ready_queue_enqueue_first(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+ }
+}
+
+static inline void _Scheduler_priority_SMP_Extract_from_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_extract
+)
+{
+ Scheduler_priority_SMP_Context *self =
+ _Scheduler_priority_SMP_Get_self( context );
+ Scheduler_priority_SMP_Node *node =
+ _Scheduler_priority_SMP_Node_downcast( node_to_extract );
+
+ _Scheduler_priority_Ready_queue_extract(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+}
+
+static inline void _Scheduler_priority_SMP_Do_update(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_update,
+ Priority_Control new_priority
+)
+{
+ Scheduler_priority_SMP_Context *self;
+ Scheduler_priority_SMP_Node *node;
+
+ self = _Scheduler_priority_SMP_Get_self( context );
+ node = _Scheduler_priority_SMP_Node_downcast( node_to_update );
+
+ _Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
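+
+ /*
+  * SCHEDULER_PRIORITY_UNMAP() strips the extra order information from the
+  * mapped priority value to recover the plain priority used to index the
+  * ready queues below.
+  */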
+ _Scheduler_priority_Ready_queue_update(
+ &node->Ready_queue,
+ SCHEDULER_PRIORITY_UNMAP( new_priority ),
+ &self->Bit_map,
+ &self->Ready[ 0 ]
+ );
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERPRIORITYSMPIMPL_H */
diff --git a/cpukit/include/rtems/score/schedulersimple.h b/cpukit/include/rtems/score/schedulersimple.h
new file mode 100644
index 0000000000..0d410d5676
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulersimple.h
@@ -0,0 +1,126 @@
+/**
+ * @file rtems/score/schedulersimple.h
+ *
+ * @brief Manipulation of Threads on a Simple-Priority-Based Ready Queue
+ *
+ * This include file contains all the constants and structures associated
+ * with the manipulation of threads on a simple-priority-based ready queue.
+ */
+
+/*
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERSIMPLE_H
+#define _RTEMS_SCORE_SCHEDULERSIMPLE_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerpriority.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSchedulerSimple Simple Priority Scheduler
+ *
+ * @ingroup ScoreScheduler
+ */
+/**@{*/
+
+#define SCHEDULER_SIMPLE_MAXIMUM_PRIORITY 255
+
+/**
+ * Entry points for Scheduler Simple
+ */
+#define SCHEDULER_SIMPLE_ENTRY_POINTS \
+ { \
+ _Scheduler_simple_Initialize, /* initialize entry point */ \
+ _Scheduler_simple_Schedule, /* schedule entry point */ \
+ _Scheduler_simple_Yield, /* yield entry point */ \
+ _Scheduler_simple_Block, /* block entry point */ \
+ _Scheduler_simple_Unblock, /* unblock entry point */ \
+ _Scheduler_simple_Update_priority, /* update priority entry point */ \
+ _Scheduler_default_Map_priority, /* map priority entry point */ \
+ _Scheduler_default_Unmap_priority, /* unmap priority entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
+ _Scheduler_default_Node_initialize, /* node initialize entry point */ \
+ _Scheduler_default_Node_destroy, /* node destroy entry point */ \
+ _Scheduler_default_Release_job, /* new period of task */ \
+ _Scheduler_default_Cancel_job, /* cancel period of task */ \
+ _Scheduler_default_Tick, /* tick entry point */ \
+ _Scheduler_default_Start_idle /* start idle entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ }
+
+/**
+ * @brief Simple scheduler context.
+ */
+typedef struct {
+ /**
+ * @brief Basic scheduler context.
+ */
+ Scheduler_Context Base;
+
+ /**
+ * @brief One ready queue for all ready threads.
+ */
+ Chain_Control Ready;
+} Scheduler_simple_Context;
+
+/**
+ * @brief Initialize simple scheduler.
+ *
+ * This routine initializes the simple scheduler.
+ */
+void _Scheduler_simple_Initialize( const Scheduler_Control *scheduler );
+
+/**
+ * @brief Sets the heir thread to be the next ready thread.
+ *
+ * This routine sets the heir thread to be the next ready thread
+ * on the ready queue by getting the first node in the scheduler
+ * information.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] the_thread The thread causing the scheduling operation.
+ */
+void _Scheduler_simple_Schedule(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread
+);
+
+void _Scheduler_simple_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_simple_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_simple_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_simple_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/schedulersimpleimpl.h b/cpukit/include/rtems/score/schedulersimpleimpl.h
new file mode 100644
index 0000000000..3891839281
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulersimpleimpl.h
@@ -0,0 +1,103 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines Associated with the Manipulation of the
+ * Simple-Priority-Based Scheduling Structures
+ *
+ * This inline file contains all of the inlined routines associated with
+ * the manipulation of the simple-priority-based scheduling structures.
+ */
+
+/*
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERSIMPLEIMPL_H
+#define _RTEMS_SCORE_SCHEDULERSIMPLEIMPL_H
+
+#include <rtems/score/schedulersimple.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/schedulerimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreSchedulerSimple
+ */
+/**@{**/
+
+RTEMS_INLINE_ROUTINE Scheduler_simple_Context *
+ _Scheduler_simple_Get_context( const Scheduler_Control *scheduler )
+{
+ return (Scheduler_simple_Context *) _Scheduler_Get_context( scheduler );
+}
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Priority_less_equal(
+ const void *to_insert,
+ const Chain_Node *next
+)
+{
+ const unsigned int *priority_to_insert;
+ const Thread_Control *thread_next;
+
+ priority_to_insert = (const unsigned int *) to_insert;
+ thread_next = (const Thread_Control *) next;
+
+ return *priority_to_insert <= _Thread_Get_priority( thread_next );
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_simple_Insert(
+ Chain_Control *chain,
+ Thread_Control *to_insert,
+ unsigned int insert_priority
+)
+{
+ _Chain_Insert_ordered_unprotected(
+ chain,
+ &to_insert->Object.Node,
+ &insert_priority,
+ _Scheduler_simple_Priority_less_equal
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_simple_Extract(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ (void) scheduler;
+ (void) node;
+
+ _Chain_Extract_unprotected( &the_thread->Object.Node );
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_simple_Schedule_body(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ bool force_dispatch
+)
+{
+ Scheduler_simple_Context *context =
+ _Scheduler_simple_Get_context( scheduler );
+ Thread_Control *heir = (Thread_Control *) _Chain_First( &context->Ready );
+
+ ( void ) the_thread;
+
+ _Scheduler_Update_heir( heir, force_dispatch );
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/schedulersimplesmp.h b/cpukit/include/rtems/score/schedulersimplesmp.h
new file mode 100644
index 0000000000..bc75b205d5
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulersimplesmp.h
@@ -0,0 +1,155 @@
+/**
+ * @file
+ *
+ * @brief Simple SMP Scheduler API
+ *
+ * @ingroup ScoreSchedulerSMPSimple
+ */
+
+/*
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2013, 2016 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERSIMPLE_SMP_H
+#define _RTEMS_SCORE_SCHEDULERSIMPLE_SMP_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerpriority.h>
+#include <rtems/score/schedulersmp.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSchedulerSMPSimple Simple Priority SMP Scheduler
+ *
+ * @ingroup ScoreSchedulerSMP
+ *
+ * The Simple Priority SMP Scheduler allocates a processor to each of the
+ * processor count highest priority ready threads. The thread priority and
+ * position in the ready chain are the only information used to make the
+ * scheduling decision. Threads with an allocated processor are in the
+ * scheduled chain. After initialization the scheduled chain has exactly
+ * processor count nodes. Each processor has exactly one allocated thread
+ * after initialization. All enqueue and extract operations may exchange
+ * threads with the scheduled chain. One thread will be added and another
+ * will be removed. The scheduled and ready chains are ordered according to
+ * the thread priority order. The chain insert operations are O(count of
+ * ready threads), thus this scheduler is unsuitable for most real-time
+ * applications.
+ *
+ * The thread preempt mode will be ignored.
+ *
+ * @{
+ */
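+
+/*
+ * Configuration sketch (an illustrative assumption, not part of this
+ * header): an application selects this scheduler via the classic
+ * configuration, e.g.
+ *
+ * #define CONFIGURE_SCHEDULER_SIMPLE_SMP
+ * #include <rtems/confdefs.h>
+ *
+ * Given the O(n) insert operations noted above, this scheduler is mainly
+ * useful for small thread counts and as a reference implementation.
+ */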
+
+typedef struct {
+ Scheduler_SMP_Context Base;
+ Chain_Control Ready;
+} Scheduler_simple_SMP_Context;
+
+#define SCHEDULER_SIMPLE_SMP_MAXIMUM_PRIORITY 255
+
+/**
+ * @brief Entry points for the Simple SMP Scheduler.
+ */
+#define SCHEDULER_SIMPLE_SMP_ENTRY_POINTS \
+ { \
+ _Scheduler_simple_SMP_Initialize, \
+ _Scheduler_default_Schedule, \
+ _Scheduler_simple_SMP_Yield, \
+ _Scheduler_simple_SMP_Block, \
+ _Scheduler_simple_SMP_Unblock, \
+ _Scheduler_simple_SMP_Update_priority, \
+ _Scheduler_default_Map_priority, \
+ _Scheduler_default_Unmap_priority, \
+ _Scheduler_simple_SMP_Ask_for_help, \
+ _Scheduler_simple_SMP_Reconsider_help_request, \
+ _Scheduler_simple_SMP_Withdraw_node, \
+ _Scheduler_simple_SMP_Add_processor, \
+ _Scheduler_simple_SMP_Remove_processor, \
+ _Scheduler_simple_SMP_Node_initialize, \
+ _Scheduler_default_Node_destroy, \
+ _Scheduler_default_Release_job, \
+ _Scheduler_default_Cancel_job, \
+ _Scheduler_default_Tick, \
+ _Scheduler_SMP_Start_idle \
+ SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ }
+
+void _Scheduler_simple_SMP_Initialize( const Scheduler_Control *scheduler );
+
+void _Scheduler_simple_SMP_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+void _Scheduler_simple_SMP_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_simple_SMP_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_simple_SMP_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+bool _Scheduler_simple_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_simple_SMP_Reconsider_help_request(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_simple_SMP_Withdraw_node(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state
+);
+
+void _Scheduler_simple_SMP_Add_processor(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle
+);
+
+Thread_Control *_Scheduler_simple_SMP_Remove_processor(
+ const Scheduler_Control *scheduler,
+ struct Per_CPU_Control *cpu
+);
+
+void _Scheduler_simple_SMP_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/schedulersmp.h b/cpukit/include/rtems/score/schedulersmp.h
new file mode 100644
index 0000000000..0bd899a6a6
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulersmp.h
@@ -0,0 +1,127 @@
+/**
+ * @file
+ *
+ * @brief SMP Scheduler API
+ *
+ * @ingroup ScoreSchedulerSMP
+ */
+
+/*
+ * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERSMP_H
+#define _RTEMS_SCORE_SCHEDULERSMP_H
+
+#include <rtems/score/chain.h>
+#include <rtems/score/scheduler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreSchedulerSMP SMP Scheduler
+ *
+ * @ingroup ScoreScheduler
+ *
+ * @{
+ */
+
+/**
+ * @brief Scheduler context specialization for SMP schedulers.
+ */
+typedef struct {
+ /**
+ * @brief Basic scheduler context.
+ */
+ Scheduler_Context Base;
+
+ /**
+ * @brief The chain of scheduled nodes.
+ */
+ Chain_Control Scheduled;
+
+ /**
+ * @brief Chain of the available idle threads.
+ *
+ * Idle threads are used for the scheduler helping protocol. It is crucial
+ * that the idle threads preserve their relative order. This is the case for
+ * this priority based scheduler.
+ */
+ Chain_Control Idle_threads;
+} Scheduler_SMP_Context;
+
+/**
+ * @brief SMP scheduler node states.
+ */
+typedef enum {
+ /**
+ * @brief This scheduler node is blocked.
+ *
+ * A scheduler node is blocked if the corresponding thread is not ready.
+ */
+ SCHEDULER_SMP_NODE_BLOCKED,
+
+ /**
+ * @brief The scheduler node is scheduled.
+ *
+ * A scheduler node is scheduled if the corresponding thread is ready and the
+ * scheduler allocated a processor for it. A scheduled node is assigned to
+ * exactly one processor. The count of scheduled nodes in this scheduler
+ * instance equals the processor count owned by the scheduler instance.
+ */
+ SCHEDULER_SMP_NODE_SCHEDULED,
+
+ /**
+ * @brief This scheduler node is ready.
+ *
+ * A scheduler node is ready if the corresponding thread is ready and the
+ * scheduler did not allocate a processor for it.
+ */
+ SCHEDULER_SMP_NODE_READY
+} Scheduler_SMP_Node_state;
+
+/**
+ * @brief Scheduler node specialization for SMP schedulers.
+ */
+typedef struct {
+ /**
+ * @brief Basic scheduler node.
+ */
+ Scheduler_Node Base;
+
+ /**
+ * @brief The state of this node.
+ */
+ Scheduler_SMP_Node_state state;
+
+ /**
+ * @brief The current priority of the thread owning this node.
+ */
+ Priority_Control priority;
+} Scheduler_SMP_Node;
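+
+/*
+ * Inspection sketch (illustrative): the state of the home scheduler node of
+ * a thread may be queried via the downcast helpers of
+ * <rtems/score/schedulersmpimpl.h>, e.g.
+ *
+ * Scheduler_SMP_Node *smp_node = _Scheduler_SMP_Thread_get_node( thread );
+ * bool is_scheduled = ( smp_node->state == SCHEDULER_SMP_NODE_SCHEDULED );
+ */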
+
+void _Scheduler_SMP_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle,
+ struct Per_CPU_Control *cpu
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERSMP_H */
diff --git a/cpukit/include/rtems/score/schedulersmpimpl.h b/cpukit/include/rtems/score/schedulersmpimpl.h
new file mode 100644
index 0000000000..e152eb0878
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulersmpimpl.h
@@ -0,0 +1,1482 @@
+/**
+ * @file
+ *
+ * @brief SMP Scheduler Implementation
+ *
+ * @ingroup ScoreSchedulerSMP
+ */
+
+/*
+ * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
+#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
+
+#include <rtems/score/schedulersmp.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/schedulersimpleimpl.h>
+#include <rtems/bspIo.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreSchedulerSMP
+ *
+ * The scheduler nodes can be in three states
+ * - @ref SCHEDULER_SMP_NODE_BLOCKED,
+ * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
+ * - @ref SCHEDULER_SMP_NODE_READY.
+ *
+ * State transitions are triggered via basic operations
+ * - _Scheduler_SMP_Enqueue(),
+ * - _Scheduler_SMP_Enqueue_scheduled(), and
+ * - _Scheduler_SMP_Block().
+ *
+ * @dot
+ * digraph {
+ * node [style="filled"];
+ *
+ * bs [label="BLOCKED"];
+ * ss [label="SCHEDULED", fillcolor="green"];
+ * rs [label="READY", fillcolor="red"];
+ *
+ * edge [label="enqueue"];
+ * edge [fontcolor="darkgreen", color="darkgreen"];
+ *
+ * bs -> ss;
+ *
+ * edge [fontcolor="red", color="red"];
+ *
+ * bs -> rs;
+ *
+ * edge [label="enqueue other"];
+ *
+ * ss -> rs;
+ *
+ * edge [label="block"];
+ * edge [fontcolor="black", color="black"];
+ *
+ * ss -> bs;
+ * rs -> bs;
+ *
+ * edge [label="block other"];
+ * edge [fontcolor="darkgreen", color="darkgreen"];
+ *
+ * rs -> ss;
+ * }
+ * @enddot
+ *
+ * During system initialization each processor of the scheduler instance starts
+ * with an idle thread assigned to it. Let's have a look at an example with two
+ * idle threads I and J with priority 5. We also have blocked threads A, B and
+ * C with priorities 1, 2 and 3 respectively. The scheduler nodes are ordered
+ * with respect to the thread priority from left to right in the below
+ * diagrams. The highest priority node (lowest priority number) is the
+ * leftmost node. Since the processor assignment is independent of the thread
+ * priority, the processor assignments may change as nodes move from one state
+ * to the other.
+ *
+ * @dot
+ * digraph {
+ * node [style="filled"];
+ * edge [dir="none"];
+ * subgraph {
+ * rank = same;
+ *
+ * i [label="I (5)", fillcolor="green"];
+ * j [label="J (5)", fillcolor="green"];
+ * a [label="A (1)"];
+ * b [label="B (2)"];
+ * c [label="C (3)"];
+ * i -> j;
+ * }
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * p0 [label="PROCESSOR 0", shape="box"];
+ * p1 [label="PROCESSOR 1", shape="box"];
+ * }
+ *
+ * i -> p0;
+ * j -> p1;
+ * }
+ * @enddot
+ *
+ * Let's start A. For this an enqueue operation is performed.
+ *
+ * @dot
+ * digraph {
+ * node [style="filled"];
+ * edge [dir="none"];
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * i [label="I (5)", fillcolor="green"];
+ * j [label="J (5)", fillcolor="red"];
+ * a [label="A (1)", fillcolor="green"];
+ * b [label="B (2)"];
+ * c [label="C (3)"];
+ * a -> i;
+ * }
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * p0 [label="PROCESSOR 0", shape="box"];
+ * p1 [label="PROCESSOR 1", shape="box"];
+ * }
+ *
+ * i -> p0;
+ * a -> p1;
+ * }
+ * @enddot
+ *
+ * Let's start C.
+ *
+ * @dot
+ * digraph {
+ * node [style="filled"];
+ * edge [dir="none"];
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * a [label="A (1)", fillcolor="green"];
+ * c [label="C (3)", fillcolor="green"];
+ * i [label="I (5)", fillcolor="red"];
+ * j [label="J (5)", fillcolor="red"];
+ * b [label="B (2)"];
+ * a -> c;
+ * i -> j;
+ * }
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * p0 [label="PROCESSOR 0", shape="box"];
+ * p1 [label="PROCESSOR 1", shape="box"];
+ * }
+ *
+ * c -> p0;
+ * a -> p1;
+ * }
+ * @enddot
+ *
+ * Let's start B.
+ *
+ * @dot
+ * digraph {
+ * node [style="filled"];
+ * edge [dir="none"];
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * a [label="A (1)", fillcolor="green"];
+ * b [label="B (2)", fillcolor="green"];
+ * c [label="C (3)", fillcolor="red"];
+ * i [label="I (5)", fillcolor="red"];
+ * j [label="J (5)", fillcolor="red"];
+ * a -> b;
+ * c -> i -> j;
+ * }
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * p0 [label="PROCESSOR 0", shape="box"];
+ * p1 [label="PROCESSOR 1", shape="box"];
+ * }
+ *
+ * b -> p0;
+ * a -> p1;
+ * }
+ * @enddot
+ *
+ * Let's change the priority of thread A to 4.
+ *
+ * @dot
+ * digraph {
+ * node [style="filled"];
+ * edge [dir="none"];
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * b [label="B (2)", fillcolor="green"];
+ * c [label="C (3)", fillcolor="green"];
+ * a [label="A (4)", fillcolor="red"];
+ * i [label="I (5)", fillcolor="red"];
+ * j [label="J (5)", fillcolor="red"];
+ * b -> c;
+ * a -> i -> j;
+ * }
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * p0 [label="PROCESSOR 0", shape="box"];
+ * p1 [label="PROCESSOR 1", shape="box"];
+ * }
+ *
+ * b -> p0;
+ * c -> p1;
+ * }
+ * @enddot
+ *
+ * Now perform a blocking operation with thread B. Please note that thread A
+ * now migrated from processor 1 to processor 0 and thread C still executes on
+ * processor 1.
+ *
+ * @dot
+ * digraph {
+ * node [style="filled"];
+ * edge [dir="none"];
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * c [label="C (3)", fillcolor="green"];
+ * a [label="A (4)", fillcolor="green"];
+ * i [label="I (5)", fillcolor="red"];
+ * j [label="J (5)", fillcolor="red"];
+ * b [label="B (2)"];
+ * c -> a;
+ * i -> j;
+ * }
+ *
+ * subgraph {
+ * rank = same;
+ *
+ * p0 [label="PROCESSOR 0", shape="box"];
+ * p1 [label="PROCESSOR 1", shape="box"];
+ * }
+ *
+ * a -> p0;
+ * c -> p1;
+ * }
+ * @enddot
+ *
+ * @{
+ */
+
+typedef bool ( *Scheduler_SMP_Has_ready )(
+ Scheduler_Context *context
+);
+
+typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
+ Scheduler_Context *context,
+ Scheduler_Node *node
+);
+
+typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
+ Scheduler_Context *context,
+ Scheduler_Node *filter
+);
+
+typedef void ( *Scheduler_SMP_Extract )(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_extract
+);
+
+typedef void ( *Scheduler_SMP_Insert )(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_insert,
+ Priority_Control insert_priority
+);
+
+typedef void ( *Scheduler_SMP_Move )(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_move
+);
+
+typedef bool ( *Scheduler_SMP_Ask_for_help )(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+typedef void ( *Scheduler_SMP_Update )(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_update,
+ Priority_Control new_priority
+);
+
+typedef void ( *Scheduler_SMP_Set_affinity )(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ void *arg
+);
+
+typedef bool ( *Scheduler_SMP_Enqueue )(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_enqueue,
+ Priority_Control priority
+);
+
+typedef void ( *Scheduler_SMP_Allocate_processor )(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled,
+ Scheduler_Node *victim,
+ Per_CPU_Control *victim_cpu
+);
+
+typedef void ( *Scheduler_SMP_Register_idle )(
+ Scheduler_Context *context,
+ Scheduler_Node *idle,
+ Per_CPU_Control *cpu
+);
+
+static inline void _Scheduler_SMP_Do_nothing_register_idle(
+ Scheduler_Context *context,
+ Scheduler_Node *idle,
+ Per_CPU_Control *cpu
+)
+{
+ (void) context;
+ (void) idle;
+ (void) cpu;
+}
+
+static inline bool _Scheduler_SMP_Priority_less_equal(
+ const void *to_insert,
+ const Chain_Node *next
+)
+{
+ const Priority_Control *priority_to_insert;
+ const Scheduler_SMP_Node *node_next;
+
+ priority_to_insert = (const Priority_Control *) to_insert;
+ node_next = (const Scheduler_SMP_Node *) next;
+
+ return *priority_to_insert <= node_next->priority;
+}
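+
+/*
+ * Note that the insert priorities passed to this order function may carry an
+ * append/prepend indicator in their least significant bit, see
+ * SCHEDULER_PRIORITY_APPEND() and SCHEDULER_PRIORITY_IS_APPEND() in
+ * <rtems/score/schedulerprioritysmpimpl.h>. An appended node thus compares
+ * greater than the stored priorities of its own priority group and is
+ * inserted behind it.
+ */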
+
+static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
+ Scheduler_Context *context
+)
+{
+ return (Scheduler_SMP_Context *) context;
+}
+
+static inline void _Scheduler_SMP_Initialize(
+ Scheduler_SMP_Context *self
+)
+{
+ _Chain_Initialize_empty( &self->Scheduled );
+ _Chain_Initialize_empty( &self->Idle_threads );
+}
+
+static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
+ Thread_Control *thread
+)
+{
+ return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
+}
+
+static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
+ Thread_Control *thread
+)
+{
+ return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
+}
+
+static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
+ Scheduler_Node *node
+)
+{
+ return (Scheduler_SMP_Node *) node;
+}
+
+static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
+ const Scheduler_Node *node
+)
+{
+ return ( (const Scheduler_SMP_Node *) node )->state;
+}
+
+static inline Priority_Control _Scheduler_SMP_Node_priority(
+ const Scheduler_Node *node
+)
+{
+ return ( (const Scheduler_SMP_Node *) node )->priority;
+}
+
+static inline void _Scheduler_SMP_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_SMP_Node *node,
+ Thread_Control *thread,
+ Priority_Control priority
+)
+{
+ _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
+ node->state = SCHEDULER_SMP_NODE_BLOCKED;
+ node->priority = priority;
+}
+
+static inline void _Scheduler_SMP_Node_update_priority(
+ Scheduler_SMP_Node *node,
+ Priority_Control new_priority
+)
+{
+ node->priority = new_priority;
+}
+
+static inline void _Scheduler_SMP_Node_change_state(
+ Scheduler_Node *node,
+ Scheduler_SMP_Node_state new_state
+)
+{
+ Scheduler_SMP_Node *the_node;
+
+ the_node = _Scheduler_SMP_Node_downcast( node );
+ the_node->state = new_state;
+}
+
+static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
+ const Scheduler_Context *context,
+ const Per_CPU_Control *cpu
+)
+{
+ return cpu->Scheduler.context == context;
+}
+
+static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
+ Scheduler_Context *context
+)
+{
+ Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ Thread_Control *idle = (Thread_Control *)
+ _Chain_Get_first_unprotected( &self->Idle_threads );
+
+ _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
+
+ return idle;
+}
+
+static inline void _Scheduler_SMP_Release_idle_thread(
+ Scheduler_Context *context,
+ Thread_Control *idle
+)
+{
+ Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+
+ _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
+}
+
+static inline void _Scheduler_SMP_Exctract_idle_thread(
+ Thread_Control *idle
+)
+{
+ _Chain_Extract_unprotected( &idle->Object.Node );
+}
+
+static inline void _Scheduler_SMP_Allocate_processor_lazy(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled,
+ Scheduler_Node *victim,
+ Per_CPU_Control *victim_cpu
+)
+{
+ Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
+ Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
+ Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
+ Per_CPU_Control *cpu_self = _Per_CPU_Get();
+ Thread_Control *heir;
+
+ _Assert( _ISR_Get_level() != 0 );
+
+ if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
+ if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
+ heir = scheduled_cpu->heir;
+ _Thread_Dispatch_update_heir(
+ cpu_self,
+ scheduled_cpu,
+ scheduled_thread
+ );
+ } else {
+ /* We have to force a migration to our processor set */
+ heir = scheduled_thread;
+ }
+ } else {
+ heir = scheduled_thread;
+ }
+
+ if ( heir != victim_thread ) {
+ _Thread_Set_CPU( heir, victim_cpu );
+ _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
+ }
+}
+
+/*
+ * This method is slightly different from
+ * _Scheduler_SMP_Allocate_processor_lazy() in that it does exactly what it
+ * is asked to do: it always assigns the scheduled thread to the victim
+ * processor. _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent
+ * migrations but does not take affinity into account.
+ */
+static inline void _Scheduler_SMP_Allocate_processor_exact(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled,
+ Scheduler_Node *victim,
+ Per_CPU_Control *victim_cpu
+)
+{
+ Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
+ Per_CPU_Control *cpu_self = _Per_CPU_Get();
+
+ (void) context;
+ (void) victim;
+
+ _Thread_Set_CPU( scheduled_thread, victim_cpu );
+ _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
+}
+
+static inline void _Scheduler_SMP_Allocate_processor(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled,
+ Scheduler_Node *victim,
+ Per_CPU_Control *victim_cpu,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
+ ( *allocate_processor )( context, scheduled, victim, victim_cpu );
+}
+
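+/**
+ * @brief Preempts the victim in favour of the scheduled node.
+ *
+ * The victim node becomes ready. If the victim thread was scheduled, it is
+ * changed to the ready state and, if necessary, registered in the set of
+ * threads in need for help of its processor. Finally, the victim processor
+ * is allocated to the scheduled node.
+ */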
+static inline Thread_Control *_Scheduler_SMP_Preempt(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled,
+ Scheduler_Node *victim,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Thread_Control *victim_thread;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *victim_cpu;
+
+ victim_thread = _Scheduler_Node_get_user( victim );
+ _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
+
+ _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
+
+ victim_cpu = _Thread_Get_CPU( victim_thread );
+
+ if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
+ _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
+
+ if ( victim_thread->Scheduler.helping_nodes > 0 ) {
+ _Per_CPU_Acquire( victim_cpu );
+ _Chain_Append_unprotected(
+ &victim_cpu->Threads_in_need_for_help,
+ &victim_thread->Scheduler.Help_node
+ );
+ _Per_CPU_Release( victim_cpu );
+ }
+ }
+
+ _Thread_Scheduler_release_critical( victim_thread, &lock_context );
+
+ _Scheduler_SMP_Allocate_processor(
+ context,
+ scheduled,
+ victim,
+ victim_cpu,
+ allocate_processor
+ );
+
+ return victim_thread;
+}
+
+static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *filter
+)
+{
+ Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ Chain_Control *scheduled = &self->Scheduled;
+ Scheduler_Node *lowest_scheduled =
+ (Scheduler_Node *) _Chain_Last( scheduled );
+
+ (void) filter;
+
+ _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
+ _Assert(
+ _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
+ );
+
+ return lowest_scheduled;
+}
+
+static inline void _Scheduler_SMP_Enqueue_to_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control priority,
+ Scheduler_Node *lowest_scheduled,
+ Scheduler_SMP_Insert insert_scheduled,
+ Scheduler_SMP_Move move_from_scheduled_to_ready,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Scheduler_Try_to_schedule_action action;
+
+ action = _Scheduler_Try_to_schedule_node(
+ context,
+ node,
+ _Scheduler_Node_get_idle( lowest_scheduled ),
+ _Scheduler_SMP_Get_idle_thread
+ );
+
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ _Scheduler_SMP_Preempt(
+ context,
+ node,
+ lowest_scheduled,
+ allocate_processor
+ );
+
+ ( *insert_scheduled )( context, node, priority );
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ _Scheduler_Release_idle_thread(
+ context,
+ lowest_scheduled,
+ _Scheduler_SMP_Release_idle_thread
+ );
+ } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
+ _Scheduler_SMP_Node_change_state(
+ lowest_scheduled,
+ SCHEDULER_SMP_NODE_READY
+ );
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
+
+ ( *insert_scheduled )( context, node, priority );
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ _Scheduler_Exchange_idle_thread(
+ node,
+ lowest_scheduled,
+ _Scheduler_Node_get_idle( lowest_scheduled )
+ );
+ } else {
+ _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ }
+}
+
+/**
+ * @brief Enqueues a node according to the specified order function.
+ *
+ * The node must not be in the scheduled state.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node to enqueue.
+ * @param[in] insert_priority The node insert priority.
+ * @param[in] order The order function.
+ * @param[in] insert_ready Function to insert a node into the set of ready
+ * nodes.
+ * @param[in] insert_scheduled Function to insert a node into the set of
+ * scheduled nodes.
+ * @param[in] move_from_scheduled_to_ready Function to move a node from the set
+ * of scheduled nodes to the set of ready nodes.
+ * @param[in] get_lowest_scheduled Function to select the node from the
+ * scheduled nodes to replace. It may not be possible to find one, in this
+ * case a pointer must be returned so that the order function returns false
+ * if this pointer is passed as the second argument to the order function.
+ * @param[in] allocate_processor Function to allocate a processor to a node
+ * based on the rules of the scheduler.
+ *
+ * @return Returns true if the thread of the node needs help, otherwise false.
+ */
+static inline bool _Scheduler_SMP_Enqueue(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority,
+ Chain_Node_order order,
+ Scheduler_SMP_Insert insert_ready,
+ Scheduler_SMP_Insert insert_scheduled,
+ Scheduler_SMP_Move move_from_scheduled_to_ready,
+ Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ bool needs_help;
+ Scheduler_Node *lowest_scheduled;
+
+ lowest_scheduled = ( *get_lowest_scheduled )( context, node );
+
+ if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
+ _Scheduler_SMP_Enqueue_to_scheduled(
+ context,
+ node,
+ insert_priority,
+ lowest_scheduled,
+ insert_scheduled,
+ move_from_scheduled_to_ready,
+ allocate_processor
+ );
+ needs_help = false;
+ } else {
+ ( *insert_ready )( context, node, insert_priority );
+ needs_help = true;
+ }
+
+ return needs_help;
+}
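+
+/*
+ * Wiring sketch (illustrative): a concrete scheduler binds the operation
+ * callbacks to its own helpers. For the Deterministic Priority SMP scheduler
+ * this could look as follows, where _Scheduler_SMP_Insert_scheduled is an
+ * assumed helper name for this sketch:
+ *
+ * needs_help = _Scheduler_SMP_Enqueue(
+ *   context,
+ *   node,
+ *   insert_priority,
+ *   _Scheduler_SMP_Priority_less_equal,
+ *   _Scheduler_priority_SMP_Insert_ready,
+ *   _Scheduler_SMP_Insert_scheduled,
+ *   _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+ *   _Scheduler_SMP_Get_lowest_scheduled,
+ *   _Scheduler_SMP_Allocate_processor_lazy
+ * );
+ */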
+
+/**
+ * @brief Enqueues a scheduled node according to the specified order
+ * function.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node to enqueue.
+ * @param[in] insert_priority The node insert priority.
+ * @param[in] order The order function.
+ * @param[in] extract_from_ready Function to extract a node from the set of
+ * ready nodes.
+ * @param[in] get_highest_ready Function to get the highest ready node.
+ * @param[in] insert_ready Function to insert a node into the set of ready
+ * nodes.
+ * @param[in] insert_scheduled Function to insert a node into the set of
+ * scheduled nodes.
+ * @param[in] move_from_ready_to_scheduled Function to move a node from the set
+ * of ready nodes to the set of scheduled nodes.
+ * @param[in] allocate_processor Function to allocate a processor to a node
+ * based on the rules of the scheduler.
+ *
+ * @return Returns true if the thread of the node needs help, otherwise false.
+ */
+static inline bool _Scheduler_SMP_Enqueue_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *const node,
+ Priority_Control insert_priority,
+ Chain_Node_order order,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Insert insert_ready,
+ Scheduler_SMP_Insert insert_scheduled,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ while ( true ) {
+ Scheduler_Node *highest_ready;
+ Scheduler_Try_to_schedule_action action;
+
+ highest_ready = ( *get_highest_ready )( context, node );
+
+ /*
+ * The node has been extracted from the scheduled chain. We have to place
+ * it now on the scheduled or ready set.
+ */
+ if (
+ node->sticky_level > 0
+ && ( *order )( &insert_priority, &highest_ready->Node.Chain )
+ ) {
+ ( *insert_scheduled )( context, node, insert_priority );
+
+ if ( _Scheduler_Node_get_idle( node ) != NULL ) {
+ Thread_Control *owner;
+ ISR_lock_Context lock_context;
+
+ owner = _Scheduler_Node_get_owner( node );
+ _Thread_Scheduler_acquire_critical( owner, &lock_context );
+
+ if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ _Thread_Scheduler_cancel_need_for_help(
+ owner,
+ _Thread_Get_CPU( owner )
+ );
+ _Scheduler_Discard_idle_thread(
+ context,
+ owner,
+ node,
+ _Scheduler_SMP_Release_idle_thread
+ );
+ _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+ }
+
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ }
+
+ return false;
+ }
+
+ action = _Scheduler_Try_to_schedule_node(
+ context,
+ highest_ready,
+ _Scheduler_Node_get_idle( node ),
+ _Scheduler_SMP_Get_idle_thread
+ );
+
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ Thread_Control *idle;
+
+ _Scheduler_SMP_Preempt(
+ context,
+ highest_ready,
+ node,
+ allocate_processor
+ );
+
+ ( *insert_ready )( context, node, insert_priority );
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+ idle = _Scheduler_Release_idle_thread(
+ context,
+ node,
+ _Scheduler_SMP_Release_idle_thread
+ );
+ return ( idle == NULL );
+ } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+ _Scheduler_SMP_Node_change_state(
+ highest_ready,
+ SCHEDULER_SMP_NODE_SCHEDULED
+ );
+
+ ( *insert_ready )( context, node, insert_priority );
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+ _Scheduler_Exchange_idle_thread(
+ highest_ready,
+ node,
+ _Scheduler_Node_get_idle( node )
+ );
+ return false;
+ } else {
+ _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Scheduler_SMP_Node_change_state(
+ highest_ready,
+ SCHEDULER_SMP_NODE_BLOCKED
+ );
+
+ ( *extract_from_ready )( context, highest_ready );
+ }
+ }
+}
+
+static inline void _Scheduler_SMP_Extract_from_scheduled(
+ Scheduler_Node *node
+)
+{
+ _Chain_Extract_unprotected( &node->Node.Chain );
+}
+
+static inline void _Scheduler_SMP_Schedule_highest_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *victim,
+ Per_CPU_Control *victim_cpu,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Scheduler_Try_to_schedule_action action;
+
+ do {
+ Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
+
+ action = _Scheduler_Try_to_schedule_node(
+ context,
+ highest_ready,
+ NULL,
+ _Scheduler_SMP_Get_idle_thread
+ );
+
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ _Scheduler_SMP_Allocate_processor(
+ context,
+ highest_ready,
+ victim,
+ victim_cpu,
+ allocate_processor
+ );
+
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
+ } else {
+ _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Scheduler_SMP_Node_change_state(
+ highest_ready,
+ SCHEDULER_SMP_NODE_BLOCKED
+ );
+
+ ( *extract_from_ready )( context, highest_ready );
+ }
+ } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+}
+
+static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *victim,
+ Per_CPU_Control *victim_cpu,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Scheduler_Try_to_schedule_action action;
+
+ do {
+ Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
+
+ action = _Scheduler_Try_to_schedule_node(
+ context,
+ highest_ready,
+ NULL,
+ _Scheduler_SMP_Get_idle_thread
+ );
+
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ _Scheduler_SMP_Preempt(
+ context,
+ highest_ready,
+ victim,
+ allocate_processor
+ );
+
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
+ } else {
+ _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Scheduler_SMP_Node_change_state(
+ highest_ready,
+ SCHEDULER_SMP_NODE_BLOCKED
+ );
+
+ ( *extract_from_ready )( context, highest_ready );
+ }
+ } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+}
+
+/**
+ * @brief Blocks a thread.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] thread The thread of the scheduling operation.
+ * @param[in] node The scheduler node of the thread to block.
+ * @param[in] extract_from_ready Function to extract a node from the set of
+ * ready nodes.
+ * @param[in] get_highest_ready Function to get the highest ready node.
+ * @param[in] move_from_ready_to_scheduled Function to move a node from the set
+ *   of ready nodes to the set of scheduled nodes.
+ * @param[in] allocate_processor Function to allocate a processor to a node
+ *   based on the rules of the scheduler.
+ */
+static inline void _Scheduler_SMP_Block(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Scheduler_SMP_Node_state node_state;
+ Per_CPU_Control *thread_cpu;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+
+ thread_cpu = _Scheduler_Block_node(
+ context,
+ thread,
+ node,
+ node_state == SCHEDULER_SMP_NODE_SCHEDULED,
+ _Scheduler_SMP_Get_idle_thread
+ );
+
+ if ( thread_cpu != NULL ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Scheduler_SMP_Extract_from_scheduled( node );
+ _Scheduler_SMP_Schedule_highest_ready(
+ context,
+ node,
+ thread_cpu,
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor
+ );
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ ( *extract_from_ready )( context, node );
+ }
+ }
+}
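+
+/*
+ * Illustrative sketch, not part of this header: a concrete SMP scheduler
+ * wraps this template function with its own callbacks, roughly as the
+ * simple SMP scheduler does.  The static helper names below are
+ * placeholders for the scheduler-specific implementations.
+ *
+ *   void _Scheduler_simple_SMP_Block(
+ *     const Scheduler_Control *scheduler,
+ *     Thread_Control          *thread,
+ *     Scheduler_Node          *node
+ *   )
+ *   {
+ *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+ *
+ *     _Scheduler_SMP_Block(
+ *       context,
+ *       thread,
+ *       node,
+ *       _Scheduler_simple_SMP_Extract_from_ready,
+ *       _Scheduler_simple_SMP_Get_highest_ready,
+ *       _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
+ *       _Scheduler_SMP_Allocate_processor_lazy
+ *     );
+ *   }
+ */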
+
+static inline void _Scheduler_SMP_Unblock(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue
+)
+{
+ Scheduler_SMP_Node_state node_state;
+ bool unblock;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+ unblock = _Scheduler_Unblock_node(
+ context,
+ thread,
+ node,
+ node_state == SCHEDULER_SMP_NODE_SCHEDULED,
+ _Scheduler_SMP_Release_idle_thread
+ );
+
+ if ( unblock ) {
+ Priority_Control priority;
+ bool needs_help;
+
+ priority = _Scheduler_Node_get_priority( node );
+ priority = SCHEDULER_PRIORITY_PURIFY( priority );
+
+ if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
+ ( *update )( context, node, priority );
+ }
+
+ if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
+ Priority_Control insert_priority;
+
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
+ needs_help = ( *enqueue )( context, node, insert_priority );
+ } else {
+ _Assert( node_state == SCHEDULER_SMP_NODE_READY );
+ _Assert( node->sticky_level > 0 );
+ _Assert( node->idle == NULL );
+ needs_help = true;
+ }
+
+ if ( needs_help ) {
+ _Scheduler_Ask_for_help( thread );
+ }
+ }
+}
+
+static inline void _Scheduler_SMP_Update_priority(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_SMP_Enqueue enqueue_scheduled,
+ Scheduler_SMP_Ask_for_help ask_for_help
+)
+{
+ Priority_Control priority;
+ Priority_Control insert_priority;
+ Scheduler_SMP_Node_state node_state;
+
+ insert_priority = _Scheduler_Node_get_priority( node );
+ priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
+
+ if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
+ if ( _Thread_Is_ready( thread ) ) {
+ ( *ask_for_help )( context, thread, node );
+ }
+
+ return;
+ }
+
+ node_state = _Scheduler_SMP_Node_state( node );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Scheduler_SMP_Extract_from_scheduled( node );
+ ( *update )( context, node, priority );
+ ( *enqueue_scheduled )( context, node, insert_priority );
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ ( *extract_from_ready )( context, node );
+ ( *update )( context, node, priority );
+ ( *enqueue )( context, node, insert_priority );
+ } else {
+ ( *update )( context, node, priority );
+
+ if ( _Thread_Is_ready( thread ) ) {
+ ( *ask_for_help )( context, thread, node );
+ }
+ }
+}
+
+static inline void _Scheduler_SMP_Yield(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_SMP_Enqueue enqueue_scheduled
+)
+{
+ bool needs_help;
+ Scheduler_SMP_Node_state node_state;
+ Priority_Control insert_priority;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+ insert_priority = _Scheduler_SMP_Node_priority( node );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Scheduler_SMP_Extract_from_scheduled( node );
+ ( *enqueue_scheduled )( context, node, insert_priority );
+ needs_help = false;
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ ( *extract_from_ready )( context, node );
+
+ needs_help = ( *enqueue )( context, node, insert_priority );
+ } else {
+ needs_help = true;
+ }
+
+ if ( needs_help ) {
+ _Scheduler_Ask_for_help( thread );
+ }
+}
+
+static inline void _Scheduler_SMP_Insert_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_insert,
+ Priority_Control priority_to_insert
+)
+{
+ Scheduler_SMP_Context *self;
+
+ self = _Scheduler_SMP_Get_self( context );
+
+ _Chain_Insert_ordered_unprotected(
+ &self->Scheduled,
+ &node_to_insert->Node.Chain,
+ &priority_to_insert,
+ _Scheduler_SMP_Priority_less_equal
+ );
+}
+
+static inline bool _Scheduler_SMP_Ask_for_help(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Chain_Node_order order,
+ Scheduler_SMP_Insert insert_ready,
+ Scheduler_SMP_Insert insert_scheduled,
+ Scheduler_SMP_Move move_from_scheduled_to_ready,
+ Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Scheduler_Node *lowest_scheduled;
+ ISR_lock_Context lock_context;
+ bool success;
+
+ lowest_scheduled = ( *get_lowest_scheduled )( context, node );
+
+ _Thread_Scheduler_acquire_critical( thread, &lock_context );
+
+ if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ Scheduler_SMP_Node_state node_state;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+
+ if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
+ Priority_Control insert_priority;
+
+ insert_priority = _Scheduler_SMP_Node_priority( node );
+
+ if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
+ _Thread_Scheduler_cancel_need_for_help(
+ thread,
+ _Thread_Get_CPU( thread )
+ );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+
+ _Scheduler_SMP_Preempt(
+ context,
+ node,
+ lowest_scheduled,
+ allocate_processor
+ );
+
+ ( *insert_scheduled )( context, node, insert_priority );
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ _Scheduler_Release_idle_thread(
+ context,
+ lowest_scheduled,
+ _Scheduler_SMP_Release_idle_thread
+ );
+ success = true;
+ } else {
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+ ( *insert_ready )( context, node, insert_priority );
+ success = false;
+ }
+ } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Thread_Scheduler_cancel_need_for_help(
+ thread,
+ _Thread_Get_CPU( thread )
+ );
+ _Scheduler_Discard_idle_thread(
+ context,
+ thread,
+ node,
+ _Scheduler_SMP_Release_idle_thread
+ );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+ success = true;
+ } else {
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+ success = false;
+ }
+ } else {
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+ success = false;
+ }
+
+ return success;
+}
+
+static inline void _Scheduler_SMP_Reconsider_help_request(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_ready
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_Scheduler_acquire_critical( thread, &lock_context );
+
+ if (
+ thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
+ && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
+ && node->sticky_level == 1
+ ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ ( *extract_from_ready )( context, node );
+ }
+
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+}
+
+static inline void _Scheduler_SMP_Withdraw_node(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ ISR_lock_Context lock_context;
+ Scheduler_SMP_Node_state node_state;
+
+ _Thread_Scheduler_acquire_critical( thread, &lock_context );
+
+ node_state = _Scheduler_SMP_Node_state( node );
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ Per_CPU_Control *thread_cpu;
+
+ thread_cpu = _Thread_Get_CPU( thread );
+ _Scheduler_Thread_change_state( thread, next_state );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+
+ _Scheduler_SMP_Extract_from_scheduled( node );
+ _Scheduler_SMP_Schedule_highest_ready(
+ context,
+ node,
+ thread_cpu,
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor
+ );
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+ ( *extract_from_ready )( context, node );
+ } else {
+ _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
+ }
+}
+
+static inline void _Scheduler_SMP_Do_start_idle(
+ Scheduler_Context *context,
+ Thread_Control *idle,
+ Per_CPU_Control *cpu,
+ Scheduler_SMP_Register_idle register_idle
+)
+{
+ Scheduler_SMP_Context *self;
+ Scheduler_SMP_Node *node;
+
+ self = _Scheduler_SMP_Get_self( context );
+ node = _Scheduler_SMP_Thread_get_node( idle );
+
+ _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
+ node->state = SCHEDULER_SMP_NODE_SCHEDULED;
+
+ _Thread_Set_CPU( idle, cpu );
+ ( *register_idle )( context, &node->Base, cpu );
+ _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
+ _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
+}
+
+static inline void _Scheduler_SMP_Add_processor(
+ Scheduler_Context *context,
+ Thread_Control *idle,
+ Scheduler_SMP_Has_ready has_ready,
+ Scheduler_SMP_Enqueue enqueue_scheduled,
+ Scheduler_SMP_Register_idle register_idle
+)
+{
+ Scheduler_SMP_Context *self;
+ Scheduler_Node *node;
+
+ self = _Scheduler_SMP_Get_self( context );
+ idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
+ _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
+ node = _Thread_Scheduler_get_home_node( idle );
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
+ ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
+
+ if ( ( *has_ready )( &self->Base ) ) {
+ Priority_Control insert_priority;
+
+ insert_priority = _Scheduler_SMP_Node_priority( node );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+ ( *enqueue_scheduled )( &self->Base, node, insert_priority );
+ } else {
+ _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
+ }
+}
+
+static inline Thread_Control *_Scheduler_SMP_Remove_processor(
+ Scheduler_Context *context,
+ Per_CPU_Control *cpu,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Enqueue enqueue
+)
+{
+ Scheduler_SMP_Context *self;
+ Chain_Node *chain_node;
+ Scheduler_Node *victim_node;
+ Thread_Control *victim_user;
+ Thread_Control *victim_owner;
+ Thread_Control *idle;
+
+ self = _Scheduler_SMP_Get_self( context );
+ chain_node = _Chain_First( &self->Scheduled );
+
+ do {
+ _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
+ victim_node = (Scheduler_Node *) chain_node;
+ victim_user = _Scheduler_Node_get_user( victim_node );
+ chain_node = _Chain_Next( chain_node );
+ } while ( _Thread_Get_CPU( victim_user ) != cpu );
+
+ _Scheduler_SMP_Extract_from_scheduled( victim_node );
+ victim_owner = _Scheduler_Node_get_owner( victim_node );
+
+ if ( !victim_owner->is_idle ) {
+ Scheduler_Node *idle_node;
+
+ _Scheduler_Release_idle_thread(
+ &self->Base,
+ victim_node,
+ _Scheduler_SMP_Release_idle_thread
+ );
+ idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
+ idle_node = _Thread_Scheduler_get_home_node( idle );
+ ( *extract_from_ready )( &self->Base, idle_node );
+ _Scheduler_SMP_Preempt(
+ &self->Base,
+ idle_node,
+ victim_node,
+ _Scheduler_SMP_Allocate_processor_exact
+ );
+
+ if ( !_Chain_Is_empty( &self->Scheduled ) ) {
+ Priority_Control insert_priority;
+
+ insert_priority = _Scheduler_SMP_Node_priority( victim_node );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+ ( *enqueue )( context, victim_node, insert_priority );
+ }
+ } else {
+ _Assert( victim_owner == victim_user );
+ _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
+ idle = victim_owner;
+ _Scheduler_SMP_Exctract_idle_thread( idle );
+ }
+
+ return idle;
+}
+
+static inline void _Scheduler_SMP_Set_affinity(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ void *arg,
+ Scheduler_SMP_Set_affinity set_affinity,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Scheduler_SMP_Node_state node_state;
+ Priority_Control insert_priority;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+ insert_priority = _Scheduler_SMP_Node_priority( node );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Scheduler_SMP_Extract_from_scheduled( node );
+ _Scheduler_SMP_Preempt_and_schedule_highest_ready(
+ context,
+ node,
+ _Thread_Get_CPU( thread ),
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor
+ );
+ ( *set_affinity )( context, node, arg );
+ ( *enqueue )( context, node, insert_priority );
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ ( *extract_from_ready )( context, node );
+ ( *set_affinity )( context, node, arg );
+ ( *enqueue )( context, node, insert_priority );
+ } else {
+ ( *set_affinity )( context, node, arg );
+ }
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */
diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h
new file mode 100644
index 0000000000..d961f20c68
--- /dev/null
+++ b/cpukit/include/rtems/score/schedulerstrongapa.h
@@ -0,0 +1,171 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSchedulerStrongAPA
+ *
+ * @brief Strong APA Scheduler API
+ */
+
+/*
+ * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H
+#define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerpriority.h>
+#include <rtems/score/schedulersmp.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreSchedulerStrongAPA Strong APA Scheduler
+ *
+ * @ingroup ScoreSchedulerSMP
+ *
+ * This is an implementation of the global fixed priority scheduler (G-FP). It
+ * uses one ready chain per priority to ensure constant time insert operations.
+ * The scheduled chain uses linear insert operations and has at most as many
+ * entries as there are processors. Since the processor and priority counts
+ * are constants, all scheduler operations complete in a bounded execution
+ * time.
+ *
+ * The preempt mode of the thread will be ignored.
+ *
+ * @{
+ */
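+
+/*
+ * Illustrative sketch, not part of this header: an application selects this
+ * scheduler via the application configuration, assuming the usual
+ * <rtems/confdefs.h> option names.
+ *
+ *   #define CONFIGURE_SCHEDULER_STRONG_APA
+ *   #define CONFIGURE_MAXIMUM_PROCESSORS 4
+ *   #include <rtems/confdefs.h>
+ */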
+
+/**
+ * @brief Scheduler context specialization for Strong APA
+ * schedulers.
+ */
+typedef struct {
+ Scheduler_SMP_Context Base;
+ Priority_bit_map_Control Bit_map;
+ Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
+} Scheduler_strong_APA_Context;
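+
+/*
+ * Note: RTEMS_ZERO_LENGTH_ARRAY makes Ready[] a flexible array member.  The
+ * configuration provides the actual storage with one chain per priority
+ * level, roughly like the hedged sketch below (prio_count is a placeholder).
+ *
+ *   static struct {
+ *     Scheduler_strong_APA_Context Base;
+ *     Chain_Control Ready[ prio_count ];
+ *   } context_storage;
+ */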
+
+/**
+ * @brief Scheduler node specialization for Strong APA
+ * schedulers.
+ */
+typedef struct {
+ /**
+ * @brief SMP scheduler node.
+ */
+ Scheduler_SMP_Node Base;
+
+ /**
+ * @brief The associated ready queue of this node.
+ */
+ Scheduler_priority_Ready_queue Ready_queue;
+} Scheduler_strong_APA_Node;
+
+/**
+ * @brief Entry points for the Strong APA Scheduler.
+ */
+#define SCHEDULER_STRONG_APA_ENTRY_POINTS \
+ { \
+ _Scheduler_strong_APA_Initialize, \
+ _Scheduler_default_Schedule, \
+ _Scheduler_strong_APA_Yield, \
+ _Scheduler_strong_APA_Block, \
+ _Scheduler_strong_APA_Unblock, \
+ _Scheduler_strong_APA_Update_priority, \
+ _Scheduler_default_Map_priority, \
+ _Scheduler_default_Unmap_priority, \
+ _Scheduler_strong_APA_Ask_for_help, \
+ _Scheduler_strong_APA_Reconsider_help_request, \
+ _Scheduler_strong_APA_Withdraw_node, \
+ _Scheduler_strong_APA_Add_processor, \
+ _Scheduler_strong_APA_Remove_processor, \
+ _Scheduler_strong_APA_Node_initialize, \
+ _Scheduler_default_Node_destroy, \
+ _Scheduler_default_Release_job, \
+ _Scheduler_default_Cancel_job, \
+ _Scheduler_default_Tick, \
+ _Scheduler_SMP_Start_idle \
+ SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ }
+
+void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler );
+
+void _Scheduler_strong_APA_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+void _Scheduler_strong_APA_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_strong_APA_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_strong_APA_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+bool _Scheduler_strong_APA_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_strong_APA_Reconsider_help_request(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_strong_APA_Withdraw_node(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state
+);
+
+void _Scheduler_strong_APA_Add_processor(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle
+);
+
+Thread_Control *_Scheduler_strong_APA_Remove_processor(
+ const Scheduler_Control *scheduler,
+ struct Per_CPU_Control *cpu
+);
+
+void _Scheduler_strong_APA_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERSTRONGAPA_H */
diff --git a/cpukit/include/rtems/score/semaphoreimpl.h b/cpukit/include/rtems/score/semaphoreimpl.h
new file mode 100644
index 0000000000..a7857db93e
--- /dev/null
+++ b/cpukit/include/rtems/score/semaphoreimpl.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SEMAPHOREIMPL_H
+#define _RTEMS_SCORE_SEMAPHOREIMPL_H
+
+#include <sys/lock.h>
+
+#include <rtems/score/percpu.h>
+#include <rtems/score/threadqimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef struct {
+ Thread_queue_Syslock_queue Queue;
+ unsigned int count;
+} Sem_Control;
+
+#define SEMAPHORE_TQ_OPERATIONS &_Thread_queue_Operations_priority
+
+static inline Sem_Control *_Sem_Get( struct _Semaphore_Control *_sem )
+{
+ return (Sem_Control *) _sem;
+}
+
+static inline Thread_Control *_Sem_Queue_acquire_critical(
+ Sem_Control *sem,
+ Thread_queue_Context *queue_context
+)
+{
+ Thread_Control *executing;
+
+ executing = _Thread_Executing;
+ _Thread_queue_Queue_acquire_critical(
+ &sem->Queue.Queue,
+ &executing->Potpourri_stats,
+ &queue_context->Lock_context.Lock_context
+ );
+
+ return executing;
+}
+
+static inline void _Sem_Queue_release(
+ Sem_Control *sem,
+ ISR_Level level,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Queue_release_critical(
+ &sem->Queue.Queue,
+ &queue_context->Lock_context.Lock_context
+ );
+ _ISR_Local_enable( level );
+}
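+
+/*
+ * Hedged usage sketch: the acquire and release operations bracket a critical
+ * section on the semaphore queue (sem is a placeholder for a Sem_Control
+ * pointer).  The caller disables interrupts first; _Sem_Queue_release()
+ * re-enables them via the saved level.
+ *
+ *   Thread_queue_Context queue_context;
+ *   ISR_Level            level;
+ *
+ *   _Thread_queue_Context_initialize( &queue_context );
+ *   _ISR_Local_disable( level );
+ *   _Sem_Queue_acquire_critical( sem, &queue_context );
+ *   ++sem->count;
+ *   _Sem_Queue_release( sem, level, &queue_context );
+ */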
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SEMAPHOREIMPL_H */
diff --git a/cpukit/include/rtems/score/smp.h b/cpukit/include/rtems/score/smp.h
new file mode 100644
index 0000000000..469025e5dc
--- /dev/null
+++ b/cpukit/include/rtems/score/smp.h
@@ -0,0 +1,64 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMP
+ *
+ * @brief SuperCore SMP Support API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMP_H
+#define _RTEMS_SCORE_SMP_H
+
+#include <rtems/score/cpu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSMP SMP Support
+ *
+ * @ingroup Score
+ *
+ * This defines the interface of the SuperCore SMP support.
+ *
+ * @{
+ */
+
+#if defined( RTEMS_SMP )
+ extern uint32_t _SMP_Processor_count;
+
+ static inline uint32_t _SMP_Get_processor_count( void )
+ {
+ return _SMP_Processor_count;
+ }
+#else
+ #define _SMP_Get_processor_count() UINT32_C(1)
+#endif
+
+#if defined( RTEMS_SMP )
+ static inline uint32_t _SMP_Get_current_processor( void )
+ {
+ return _CPU_SMP_Get_current_processor();
+ }
+#else
+ #define _SMP_Get_current_processor() UINT32_C(0)
+#endif
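+
+/*
+ * Hedged sketch: the usual loop over all processors, valid in both SMP and
+ * uniprocessor configurations.
+ *
+ *   uint32_t cpu_index;
+ *
+ *   for ( cpu_index = 0; cpu_index < _SMP_Get_processor_count(); ++cpu_index ) {
+ *     ...per-processor work...
+ *   }
+ */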
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/smpbarrier.h b/cpukit/include/rtems/score/smpbarrier.h
new file mode 100644
index 0000000000..fddf7bb1cd
--- /dev/null
+++ b/cpukit/include/rtems/score/smpbarrier.h
@@ -0,0 +1,125 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMPBarrier
+ *
+ * @brief SMP Barrier API
+ */
+
+/*
+ * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMPBARRIER_H
+#define _RTEMS_SCORE_SMPBARRIER_H
+
+#include <rtems/score/cpuopts.h>
+#include <rtems/score/atomic.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreSMPBarrier SMP Barriers
+ *
+ * @ingroup Score
+ *
+ * @brief The SMP barrier provides barrier synchronization for SMP systems at
+ * the lowest level.
+ *
+ * The SMP barrier is implemented as a sense barrier, see also Herlihy and
+ * Shavit, "The Art of Multiprocessor Programming", 17.3 Sense-Reversing
+ * Barrier.
+ *
+ * @{
+ */
+
+/**
+ * @brief SMP barrier control.
+ */
+typedef struct {
+ Atomic_Uint value;
+ Atomic_Uint sense;
+} SMP_barrier_Control;
+
+/**
+ * @brief SMP barrier per-thread state.
+ *
+ * Each user of the barrier must provide this per-thread state.
+ */
+typedef struct {
+ unsigned int sense;
+} SMP_barrier_State;
+
+/**
+ * @brief SMP barrier control initializer for static initialization.
+ */
+#define SMP_BARRIER_CONTROL_INITIALIZER \
+ { ATOMIC_INITIALIZER_UINT( 0U ), ATOMIC_INITIALIZER_UINT( 0U ) }
+
+/**
+ * @brief SMP barrier per-thread state initializer for static initialization.
+ */
+#define SMP_BARRIER_STATE_INITIALIZER { 0U }
+
+/**
+ * @brief Initializes an SMP barrier control.
+ *
+ * Concurrent initialization leads to unpredictable results.
+ *
+ * @param[out] control The SMP barrier control.
+ */
+static inline void _SMP_barrier_Control_initialize(
+ SMP_barrier_Control *control
+)
+{
+ _Atomic_Init_uint( &control->value, 0U );
+ _Atomic_Init_uint( &control->sense, 0U );
+}
+
+/**
+ * @brief Initializes an SMP barrier per-thread state.
+ *
+ * @param[out] state The SMP barrier per-thread state.
+ */
+static inline void _SMP_barrier_State_initialize(
+ SMP_barrier_State *state
+)
+{
+ state->sense = 0U;
+}
+
+/**
+ * @brief Waits on the SMP barrier until count threads have rendezvoused.
+ *
+ * @param[in, out] control The SMP barrier control.
+ * @param[in, out] state The SMP barrier per-thread state.
+ * @param[in] count The count of threads that must rendezvous.
+ *
+ * @retval true This processor performed the barrier release.
+ * @retval false Otherwise.
+ */
+bool _SMP_barrier_Wait(
+ SMP_barrier_Control *control,
+ SMP_barrier_State *state,
+ unsigned int count
+);
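+
+/*
+ * Hedged usage sketch: all participating processors pass the same control,
+ * while each keeps its own per-thread state.  CPU_COUNT is a placeholder
+ * for the number of participants.
+ *
+ *   static SMP_barrier_Control control = SMP_BARRIER_CONTROL_INITIALIZER;
+ *
+ *   void rendezvous( void )
+ *   {
+ *     SMP_barrier_State state = SMP_BARRIER_STATE_INITIALIZER;
+ *
+ *     _SMP_barrier_Wait( &control, &state, CPU_COUNT );
+ *   }
+ */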
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SMPBARRIER_H */
diff --git a/cpukit/include/rtems/score/smpimpl.h b/cpukit/include/rtems/score/smpimpl.h
new file mode 100644
index 0000000000..48e6a12498
--- /dev/null
+++ b/cpukit/include/rtems/score/smpimpl.h
@@ -0,0 +1,354 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMP
+ *
+ * @brief SuperCore SMP Implementation
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMPIMPL_H
+#define _RTEMS_SCORE_SMPIMPL_H
+
+#include <rtems/score/smp.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/processormask.h>
+#include <rtems/fatal.h>
+#include <rtems/rtems/cache.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreSMP
+ *
+ * This defines the interface of the SuperCore SMP support.
+ *
+ * @{
+ */
+
+/**
+ * @brief SMP message to request a processor shutdown.
+ *
+ * @see _SMP_Send_message().
+ */
+#define SMP_MESSAGE_SHUTDOWN 0x1UL
+
+/**
+ * @brief SMP message to request a test handler invocation.
+ *
+ * @see _SMP_Send_message().
+ */
+#define SMP_MESSAGE_TEST 0x2UL
+
+/**
+ * @brief SMP message to request a multicast action.
+ *
+ * @see _SMP_Send_message().
+ */
+#define SMP_MESSAGE_MULTICAST_ACTION 0x4UL
+
+/**
+ * @brief SMP message to request a clock tick.
+ *
+ * This message is provided for systems without proper interrupt affinity
+ * support and may be used by the clock driver.
+ *
+ * @see _SMP_Send_message().
+ */
+#define SMP_MESSAGE_CLOCK_TICK 0x8UL
+
+/**
+ * @brief SMP fatal codes.
+ */
+typedef enum {
+ SMP_FATAL_BOOT_PROCESSOR_NOT_ASSIGNED_TO_SCHEDULER,
+ SMP_FATAL_MANDATORY_PROCESSOR_NOT_PRESENT,
+ SMP_FATAL_MULTITASKING_START_ON_INVALID_PROCESSOR,
+ SMP_FATAL_MULTITASKING_START_ON_UNASSIGNED_PROCESSOR,
+ SMP_FATAL_SHUTDOWN,
+ SMP_FATAL_SHUTDOWN_RESPONSE,
+ SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED
+} SMP_Fatal_code;
+
+static inline void _SMP_Fatal( SMP_Fatal_code code )
+{
+ _Terminate( RTEMS_FATAL_SOURCE_SMP, code );
+}
+
+/**
+ * @brief Initializes the SMP Handler.
+ *
+ * This method initializes the SMP Handler.
+ */
+#if defined( RTEMS_SMP )
+ void _SMP_Handler_initialize( void );
+#else
+ #define _SMP_Handler_initialize() \
+ do { } while ( 0 )
+#endif
+
+#if defined( RTEMS_SMP )
+
+/**
+ * @brief Set of online processors.
+ *
+ * A processor is online if it was started during system initialization. In
+ * this case its corresponding bit in the mask is set.
+ *
+ * @see _SMP_Handler_initialize().
+ */
+extern Processor_mask _SMP_Online_processors;
+
+/**
+ * @brief Performs high-level initialization of a secondary processor and runs
+ * the application threads.
+ *
+ * The low-level initialization code must call this function to hand over the
+ * control of this processor to RTEMS. Interrupts must be disabled. It must
+ * be possible to send inter-processor interrupts to this processor. Since
+ * interrupts are disabled, the delivery of inter-processor interrupts is
+ * postponed until interrupts are enabled for the first time. Interrupts are
+ * enabled when threads begin execution, provided they have an interrupt
+ * level of zero (the default).
+ *
+ * The prerequisites for the call to this function are
+ * - disabled interrupts,
+ * - delivery of inter-processor interrupts is possible,
+ * - a valid stack pointer and enough stack space,
+ * - a valid code memory, and
+ * - a valid BSS section.
+ *
+ * This function must not be called by the main processor. The main processor
+ * uses _Thread_Start_multitasking() instead.
+ *
+ * This function does not return to the caller.
+ */
+void _SMP_Start_multitasking_on_secondary_processor( void )
+ RTEMS_NO_RETURN;
+
+typedef void ( *SMP_Test_message_handler )( Per_CPU_Control *cpu_self );
+
+extern SMP_Test_message_handler _SMP_Test_message_handler;
+
+/**
+ * @brief Sets the handler for test messages.
+ *
+ * This handler can be used to test the inter-processor interrupt
+ * implementation.
+ */
+static inline void _SMP_Set_test_message_handler(
+ SMP_Test_message_handler handler
+)
+{
+ _SMP_Test_message_handler = handler;
+}
+
+/**
+ * @brief Processes all pending multicast actions.
+ */
+void _SMP_Multicast_actions_process( void );
+
+/**
+ * @brief Interrupt handler for inter-processor interrupts.
+ *
+ * @return The received message.
+ */
+static inline unsigned long _SMP_Inter_processor_interrupt_handler( void )
+{
+ Per_CPU_Control *cpu_self;
+ unsigned long message;
+
+ cpu_self = _Per_CPU_Get();
+
+ /*
+ * In the common case the inter-processor interrupt is issued to carry out a
+ * thread dispatch.
+ */
+ cpu_self->dispatch_necessary = true;
+
+ message = _Atomic_Exchange_ulong(
+ &cpu_self->message,
+ 0,
+ ATOMIC_ORDER_ACQUIRE
+ );
+
+ if ( message != 0 ) {
+ if ( ( message & SMP_MESSAGE_SHUTDOWN ) != 0 ) {
+ _SMP_Fatal( SMP_FATAL_SHUTDOWN_RESPONSE );
+ /* does not continue past here */
+ }
+
+ if ( ( message & SMP_MESSAGE_TEST ) != 0 ) {
+ ( *_SMP_Test_message_handler )( cpu_self );
+ }
+
+ if ( ( message & SMP_MESSAGE_MULTICAST_ACTION ) != 0 ) {
+ _SMP_Multicast_actions_process();
+ }
+ }
+
+ return message;
+}
+
+/**
+ * @brief Returns true, if the processor with the specified index should be
+ * started.
+ *
+ * @param[in] cpu_index The processor index.
+ *
+ * @retval true The processor should be started.
+ * @retval false Otherwise.
+ */
+bool _SMP_Should_start_processor( uint32_t cpu_index );
+
+/**
+ * @brief Sends an SMP message to a processor.
+ *
+ * The target processor may be the sending processor.
+ *
+ * @param[in] cpu_index The target processor of the message.
+ * @param[in] message The message.
+ */
+void _SMP_Send_message( uint32_t cpu_index, unsigned long message );
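+
+/*
+ * Hedged usage sketch: request the test handler invocation on processor 1,
+ * assuming a handler was installed via _SMP_Set_test_message_handler().
+ *
+ *   _SMP_Send_message( 1, SMP_MESSAGE_TEST );
+ */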
+
+/**
+ * @brief Sends an SMP message to all other online processors.
+ *
+ * @param[in] message The message.
+ */
+void _SMP_Send_message_broadcast(
+ unsigned long message
+);
+
+/**
+ * @brief Sends an SMP message to a set of processors.
+ *
+ * The sending processor may be part of the set.
+ *
+ * @param[in] targets The set of processors to send the message.
+ * @param[in] message The message.
+ */
+void _SMP_Send_message_multicast(
+ const Processor_mask *targets,
+ unsigned long message
+);
+
+typedef void ( *SMP_Action_handler )( void *arg );
+
+/**
+ * @brief Initiates an SMP multicast action to a set of processors.
+ *
+ * The current processor may be part of the set.
+ *
+ * @param[in] setsize The size of the set of target processors of the message.
+ * @param[in] cpus The set of target processors of the message.
+ * @param[in] handler The multicast action handler.
+ * @param[in] arg The multicast action argument.
+ */
+void _SMP_Multicast_action(
+ const size_t setsize,
+ const cpu_set_t *cpus,
+ SMP_Action_handler handler,
+ void *arg
+);
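+
+/*
+ * Hedged usage sketch: run a handler on processor 1 on behalf of the
+ * current processor.  The handler name is a placeholder.
+ *
+ *   static void flush_action( void *arg )
+ *   {
+ *     (void) arg;
+ *     rtems_cache_invalidate_entire_instruction();
+ *   }
+ *
+ *   cpu_set_t cpus;
+ *
+ *   CPU_ZERO( &cpus );
+ *   CPU_SET( 1, &cpus );
+ *   _SMP_Multicast_action( sizeof( cpus ), &cpus, flush_action, NULL );
+ */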
+
+/**
+ * @brief Executes a handler with argument on the specified processor on behalf
+ * of the boot processor.
+ *
+ * The calling processor must be the boot processor. In case the specified
+ * processor is not online or not in the
+ * PER_CPU_STATE_READY_TO_START_MULTITASKING state, no action is performed.
+ *
+ * @param cpu The processor to execute the action.
+ * @param handler The handler of the action.
+ * @param arg The argument of the action.
+ *
+ * @retval true The handler executed on the specified processor.
+ * @retval false Otherwise.
+ *
+ * @see _SMP_Before_multitasking_action_broadcast().
+ */
+bool _SMP_Before_multitasking_action(
+ Per_CPU_Control *cpu,
+ SMP_Action_handler handler,
+ void *arg
+);
+
+/**
+ * @brief Executes a handler with argument on all online processors except the
+ * boot processor on behalf of the boot processor.
+ *
+ * The calling processor must be the boot processor.
+ *
+ * @param handler The handler of the action.
+ * @param arg The argument of the action.
+ *
+ * @retval true The handler executed on all online processors except the boot
+ * processor.
+ * @retval false Otherwise.
+ *
+ * @see _SMP_Before_multitasking_action().
+ */
+bool _SMP_Before_multitasking_action_broadcast(
+ SMP_Action_handler handler,
+ void *arg
+);
+
+#endif /* defined( RTEMS_SMP ) */
+
+/**
+ * @brief Requests a multitasking start on all configured and available
+ * processors.
+ */
+#if defined( RTEMS_SMP )
+ void _SMP_Request_start_multitasking( void );
+#else
+ #define _SMP_Request_start_multitasking() \
+ do { } while ( 0 )
+#endif
+
+/**
+ * @brief Requests a shutdown of all processors.
+ *
+ * This function is a part of the system termination procedure.
+ *
+ * @see _Terminate().
+ */
+#if defined( RTEMS_SMP )
+ void _SMP_Request_shutdown( void );
+#else
+ #define _SMP_Request_shutdown() \
+ do { } while ( 0 )
+#endif
+
+RTEMS_INLINE_ROUTINE const Processor_mask *_SMP_Get_online_processors( void )
+{
+#if defined(RTEMS_SMP)
+ return &_SMP_Online_processors;
+#else
+ return &_Processor_mask_The_one_and_only;
+#endif
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/smplock.h b/cpukit/include/rtems/score/smplock.h
new file mode 100644
index 0000000000..a156edfd92
--- /dev/null
+++ b/cpukit/include/rtems/score/smplock.h
@@ -0,0 +1,327 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMPLock
+ *
+ * @brief SMP Lock API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2013, 2016 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMPLOCK_H
+#define _RTEMS_SCORE_SMPLOCK_H
+
+#include <rtems/score/cpuopts.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/smplockstats.h>
+#include <rtems/score/smplockticket.h>
+#include <rtems/score/isrlevel.h>
+
+#if defined(RTEMS_DEBUG)
+#include <rtems/score/assert.h>
+#include <rtems/score/smp.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreSMPLock SMP Locks
+ *
+ * @ingroup Score
+ *
+ * @brief The SMP lock provides mutual exclusion for SMP systems at the lowest
+ * level.
+ *
+ * The SMP lock is implemented as a ticket lock. This provides fairness in
+ * case of concurrent lock attempts.
+ *
+ * This SMP lock API uses a local context for acquire and release pairs. Such
+ * a context may be used, for example, to implement the Mellor-Crummey and
+ * Scott (MCS) locks in the future.
+ *
+ * @{
+ */
+
+#if defined(RTEMS_DEBUG) || defined(RTEMS_PROFILING)
+#define RTEMS_SMP_LOCK_DO_NOT_INLINE
+#endif
+
+/**
+ * @brief SMP lock control.
+ */
+typedef struct {
+ SMP_ticket_lock_Control Ticket_lock;
+#if defined(RTEMS_DEBUG)
+ /**
+ * @brief The index of the owning processor of this lock.
+ *
+ * The processor index is used instead of the executing thread, so that this
+ * works in interrupt and system initialization context. It is assumed that
+ * thread dispatching is disabled in SMP lock critical sections.
+ *
+ * In case the lock is free, then the value of this field is
+ * SMP_LOCK_NO_OWNER.
+ *
+ * @see _SMP_lock_Is_owner().
+ */
+ uint32_t owner;
+#endif
+#if defined(RTEMS_PROFILING)
+ SMP_lock_Stats Stats;
+#endif
+} SMP_lock_Control;
+
+/**
+ * @brief Local SMP lock context for acquire and release pairs.
+ */
+typedef struct {
+ ISR_Level isr_level;
+#if defined(RTEMS_DEBUG)
+ SMP_lock_Control *lock_used_for_acquire;
+#endif
+#if defined(RTEMS_PROFILING)
+ SMP_lock_Stats_context Stats_context;
+#endif
+} SMP_lock_Context;
+
+#if defined(RTEMS_DEBUG)
+#define SMP_LOCK_NO_OWNER 0
+#endif
+
+/**
+ * @brief SMP lock control initializer for static initialization.
+ */
+#if defined(RTEMS_DEBUG) && defined(RTEMS_PROFILING)
+ #define SMP_LOCK_INITIALIZER( name ) \
+ { \
+ SMP_TICKET_LOCK_INITIALIZER, \
+ SMP_LOCK_NO_OWNER, \
+ SMP_LOCK_STATS_INITIALIZER( name ) \
+ }
+#elif defined(RTEMS_DEBUG)
+ #define SMP_LOCK_INITIALIZER( name ) \
+ { SMP_TICKET_LOCK_INITIALIZER, SMP_LOCK_NO_OWNER }
+#elif defined(RTEMS_PROFILING)
+ #define SMP_LOCK_INITIALIZER( name ) \
+ { SMP_TICKET_LOCK_INITIALIZER, SMP_LOCK_STATS_INITIALIZER( name ) }
+#else
+ #define SMP_LOCK_INITIALIZER( name ) { SMP_TICKET_LOCK_INITIALIZER }
+#endif
+
+static inline void _SMP_lock_Initialize_inline(
+ SMP_lock_Control *lock,
+ const char *name
+)
+{
+ _SMP_ticket_lock_Initialize( &lock->Ticket_lock );
+#if defined(RTEMS_DEBUG)
+ lock->owner = SMP_LOCK_NO_OWNER;
+#endif
+#if defined(RTEMS_PROFILING)
+ _SMP_lock_Stats_initialize( &lock->Stats, name );
+#else
+ (void) name;
+#endif
+}
+
+/**
+ * @brief Initializes an SMP lock.
+ *
+ * Concurrent initialization leads to unpredictable results.
+ *
+ * @param[in] lock The SMP lock control.
+ * @param[in] name The name for the SMP lock statistics. This name must be
+ * persistent throughout the life time of this statistics block.
+ */
+#if defined(RTEMS_SMP_LOCK_DO_NOT_INLINE)
+void _SMP_lock_Initialize(
+ SMP_lock_Control *lock,
+ const char * name
+);
+#else
+#define _SMP_lock_Initialize( lock, name ) \
+ _SMP_lock_Initialize_inline( lock, name )
+#endif
+
+static inline void _SMP_lock_Destroy_inline( SMP_lock_Control *lock )
+{
+ _SMP_ticket_lock_Destroy( &lock->Ticket_lock );
+ _SMP_lock_Stats_destroy( &lock->Stats );
+}
+
+/**
+ * @brief Destroys an SMP lock.
+ *
+ * Concurrent destruction leads to unpredictable results.
+ *
+ * @param[in] lock The SMP lock control.
+ */
+#if defined(RTEMS_SMP_LOCK_DO_NOT_INLINE)
+void _SMP_lock_Destroy( SMP_lock_Control *lock );
+#else
+#define _SMP_lock_Destroy( lock ) \
+ _SMP_lock_Destroy_inline( lock )
+#endif
+
+#if defined(RTEMS_DEBUG)
+static inline uint32_t _SMP_lock_Who_am_I( void )
+{
+ /*
+ * The CPU index starts with zero. Increment it by one, to allow global SMP
+ * locks to reside in the BSS section.
+ */
+ return _SMP_Get_current_processor() + 1;
+}
+#endif
+
+static inline void _SMP_lock_Acquire_inline(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+)
+{
+#if defined(RTEMS_DEBUG)
+ context->lock_used_for_acquire = lock;
+#else
+ (void) context;
+#endif
+ _SMP_ticket_lock_Acquire(
+ &lock->Ticket_lock,
+ &lock->Stats,
+ &context->Stats_context
+ );
+#if defined(RTEMS_DEBUG)
+ lock->owner = _SMP_lock_Who_am_I();
+#endif
+}
+
+/**
+ * @brief Acquires an SMP lock.
+ *
+ * This function will not disable interrupts. The caller must ensure that the
+ * current thread of execution is not interrupted indefinitely once it has
+ * obtained the SMP lock.
+ *
+ * @param[in] lock The SMP lock control.
+ * @param[in] context The local SMP lock context for an acquire and release
+ * pair.
+ */
+void _SMP_lock_Acquire(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+);
+
+static inline void _SMP_lock_Release_inline(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+)
+{
+#if defined(RTEMS_DEBUG)
+ _Assert( context->lock_used_for_acquire == lock );
+ context->lock_used_for_acquire = NULL;
+ _Assert( lock->owner == _SMP_lock_Who_am_I() );
+ lock->owner = SMP_LOCK_NO_OWNER;
+#else
+ (void) context;
+#endif
+ _SMP_ticket_lock_Release(
+ &lock->Ticket_lock,
+ &context->Stats_context
+ );
+}
+
+/**
+ * @brief Releases an SMP lock.
+ *
+ * @param[in] lock The SMP lock control.
+ * @param[in] context The local SMP lock context for an acquire and release
+ * pair.
+ */
+#if defined(RTEMS_SMP_LOCK_DO_NOT_INLINE)
+void _SMP_lock_Release(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+);
+#else
+#define _SMP_lock_Release( lock, context ) \
+ _SMP_lock_Release_inline( lock, context )
+#endif
+
+static inline void _SMP_lock_ISR_disable_and_acquire_inline(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+)
+{
+ _ISR_Local_disable( context->isr_level );
+ _SMP_lock_Acquire_inline( lock, context );
+}
+
+/**
+ * @brief Disables interrupts and acquires the SMP lock.
+ *
+ * @param[in] lock The SMP lock control.
+ * @param[in] context The local SMP lock context for an acquire and release
+ * pair.
+ */
+void _SMP_lock_ISR_disable_and_acquire(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+);
+
+static inline void _SMP_lock_Release_and_ISR_enable_inline(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+)
+{
+ _SMP_lock_Release_inline( lock, context );
+ _ISR_Local_enable( context->isr_level );
+}
+
+/**
+ * @brief Releases the SMP lock and enables interrupts.
+ *
+ * @param[in] lock The SMP lock control.
+ * @param[in] context The local SMP lock context for an acquire and release
+ * pair.
+ */
+#if defined(RTEMS_SMP_LOCK_DO_NOT_INLINE)
+void _SMP_lock_Release_and_ISR_enable(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+);
+#else
+#define _SMP_lock_Release_and_ISR_enable( lock, context ) \
+ _SMP_lock_Release_and_ISR_enable_inline( lock, context )
+#endif
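+
+/*
+ * Hedged usage sketch: a statically initialized lock protecting a counter.
+ *
+ *   static SMP_lock_Control lock = SMP_LOCK_INITIALIZER( "counter" );
+ *   static uint32_t counter;
+ *
+ *   void increment( void )
+ *   {
+ *     SMP_lock_Context context;
+ *
+ *     _SMP_lock_ISR_disable_and_acquire( &lock, &context );
+ *     ++counter;
+ *     _SMP_lock_Release_and_ISR_enable( &lock, &context );
+ *   }
+ */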
+
+#if defined(RTEMS_DEBUG)
+/**
+ * @brief Returns true, if the SMP lock is owned by the current processor,
+ * otherwise false.
+ *
+ * @param[in] lock The SMP lock control.
+ */
+bool _SMP_lock_Is_owner( const SMP_lock_Control *lock );
+#endif
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_SMPLOCK_H */
diff --git a/cpukit/include/rtems/score/smplockmcs.h b/cpukit/include/rtems/score/smplockmcs.h
new file mode 100644
index 0000000000..5a1ad23dc9
--- /dev/null
+++ b/cpukit/include/rtems/score/smplockmcs.h
@@ -0,0 +1,262 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMPLock
+ *
+ * @brief SMP Lock API
+ */
+
+/*
+ * Copyright (c) 2016 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMPLOCKMCS_H
+#define _RTEMS_SCORE_SMPLOCKMCS_H
+
+#include <rtems/score/cpuopts.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/atomic.h>
+#include <rtems/score/smplockstats.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreSMPLock
+ *
+ * @{
+ */
+
+/**
+ * @brief SMP Mellor-Crummey and Scott (MCS) lock context.
+ */
+typedef struct SMP_MCS_lock_Context {
+ /**
+ * @brief The next context on the queue if it exists.
+ */
+ union {
+ /**
+ * @brief The next context as an atomic unsigned integer pointer value.
+ */
+ Atomic_Uintptr atomic;
+
+ /**
+ * @brief The next context as a normal pointer.
+ *
+ * Only provided for debugging purposes.
+ */
+ struct SMP_MCS_lock_Context *normal;
+ } next;
+
+ /**
+ * @brief Indicates if the lock is owned or free in case a previous context
+   * exists on the queue.
+ *
+ * This field is initialized to a non-zero value. The previous lock owner
+ * (which is the owner of the previous context) will set it to zero during
+ * its lock release.
+ */
+ Atomic_Uint locked;
+
+#if defined(RTEMS_PROFILING)
+ SMP_lock_Stats_context Stats_context;
+
+ unsigned int queue_length;
+#endif
+} SMP_MCS_lock_Context;
+
+/**
+ * @brief SMP Mellor-Crummey and Scott (MCS) lock control.
+ */
+typedef struct {
+ /**
+ * @brief The queue tail context.
+ *
+ * The lock is free, in case this field is zero, otherwise it is locked by
+ * the owner of the queue head.
+ */
+ union {
+ /**
+ * @brief The queue tail context as an atomic unsigned integer pointer
+ * value.
+ */
+ Atomic_Uintptr atomic;
+
+ /**
+ * @brief The queue tail context as a normal pointer.
+ *
+ * Only provided for debugging purposes.
+ */
+ struct SMP_MCS_lock_Context *normal;
+ } queue;
+} SMP_MCS_lock_Control;
+
+/**
+ * @brief SMP MCS lock control initializer for static initialization.
+ */
+#define SMP_MCS_LOCK_INITIALIZER { { ATOMIC_INITIALIZER_UINTPTR( 0 ) } }
+
+/**
+ * @brief Initializes an SMP MCS lock.
+ *
+ * Concurrent initialization leads to unpredictable results.
+ *
+ * @param lock The SMP MCS lock control.
+ */
+static inline void _SMP_MCS_lock_Initialize( SMP_MCS_lock_Control *lock )
+{
+ _Atomic_Init_uintptr( &lock->queue.atomic, 0 );
+}
+
+/**
+ * @brief Destroys an SMP MCS lock.
+ *
+ * Concurrent destruction leads to unpredictable results.
+ *
+ * @param lock The SMP MCS lock control.
+ */
+static inline void _SMP_MCS_lock_Destroy( SMP_MCS_lock_Control *lock )
+{
+ (void) lock;
+}
+
+static inline void _SMP_MCS_lock_Do_acquire(
+ SMP_MCS_lock_Control *lock,
+ SMP_MCS_lock_Context *context
+#if defined(RTEMS_PROFILING)
+ ,
+ SMP_lock_Stats *stats
+#endif
+)
+{
+ SMP_MCS_lock_Context *previous;
+#if defined(RTEMS_PROFILING)
+ SMP_lock_Stats_acquire_context acquire_context;
+
+ _SMP_lock_Stats_acquire_begin( &acquire_context );
+ context->queue_length = 0;
+#endif
+
+ _Atomic_Store_uintptr( &context->next.atomic, 0, ATOMIC_ORDER_RELAXED );
+ _Atomic_Store_uint( &context->locked, 1, ATOMIC_ORDER_RELAXED );
+
+ previous = (SMP_MCS_lock_Context *) _Atomic_Exchange_uintptr(
+ &lock->queue.atomic,
+ (uintptr_t) context,
+ ATOMIC_ORDER_ACQ_REL
+ );
+
+ if ( previous != NULL ) {
+ unsigned int locked;
+
+ _Atomic_Store_uintptr(
+ &previous->next.atomic,
+ (uintptr_t) context,
+ ATOMIC_ORDER_RELAXED
+ );
+
+ do {
+ locked = _Atomic_Load_uint( &context->locked, ATOMIC_ORDER_ACQUIRE );
+ } while ( locked != 0 );
+ }
+
+#if defined(RTEMS_PROFILING)
+ _SMP_lock_Stats_acquire_end(
+ &acquire_context,
+ stats,
+ &context->Stats_context,
+ context->queue_length
+ );
+#endif
+}
+
+/**
+ * @brief Acquires an SMP MCS lock.
+ *
+ * This function will not disable interrupts. The caller must ensure that the
+ * current thread of execution is not interrupted indefinitely once it has
+ * obtained the SMP MCS lock.
+ *
+ * @param lock The SMP MCS lock control.
+ * @param context The SMP MCS lock context.
+ * @param stats The SMP lock statistics.
+ */
+#if defined(RTEMS_PROFILING)
+ #define _SMP_MCS_lock_Acquire( lock, context, stats ) \
+ _SMP_MCS_lock_Do_acquire( lock, context, stats )
+#else
+ #define _SMP_MCS_lock_Acquire( lock, context, stats ) \
+ _SMP_MCS_lock_Do_acquire( lock, context )
+#endif
+
+/**
+ * @brief Releases an SMP MCS lock.
+ *
+ * @param lock The SMP MCS lock control.
+ * @param context The SMP MCS lock context.
+ */
+static inline void _SMP_MCS_lock_Release(
+ SMP_MCS_lock_Control *lock,
+ SMP_MCS_lock_Context *context
+)
+{
+ SMP_MCS_lock_Context *next;
+
+ next = (SMP_MCS_lock_Context *) _Atomic_Load_uintptr(
+ &context->next.atomic,
+ ATOMIC_ORDER_RELAXED
+ );
+
+ if ( next == NULL ) {
+ uintptr_t expected;
+ bool success;
+
+ expected = (uintptr_t) context;
+ success = _Atomic_Compare_exchange_uintptr(
+ &lock->queue.atomic,
+ &expected,
+ 0,
+ ATOMIC_ORDER_RELEASE,
+ ATOMIC_ORDER_RELAXED
+ );
+
+ if ( success ) {
+#if defined(RTEMS_PROFILING)
+ _SMP_lock_Stats_release_update( &context->Stats_context );
+#endif
+ /* Nobody waits. So, we are done */
+ return;
+ }
+
+ do {
+ next = (SMP_MCS_lock_Context *) _Atomic_Load_uintptr(
+ &context->next.atomic,
+ ATOMIC_ORDER_RELAXED
+ );
+ } while ( next == NULL );
+ }
+
+#if defined(RTEMS_PROFILING)
+ next->queue_length = context->queue_length + 1;
+ _SMP_lock_Stats_release_update( &context->Stats_context );
+#endif
+
+ _Atomic_Store_uint( &next->locked, 0, ATOMIC_ORDER_RELEASE );
+}
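+
+/*
+ * Hedged usage sketch: the context lives on the caller's stack and serves
+ * as the queue node.  The stats block is only evaluated with RTEMS_PROFILING
+ * enabled.
+ *
+ *   static SMP_MCS_lock_Control lock = SMP_MCS_LOCK_INITIALIZER;
+ *   static SMP_lock_Stats stats;
+ *
+ *   void critical_section( void )
+ *   {
+ *     SMP_MCS_lock_Context context;
+ *
+ *     _SMP_MCS_lock_Acquire( &lock, &context, &stats );
+ *     ...critical section...
+ *     _SMP_MCS_lock_Release( &lock, &context );
+ *   }
+ */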
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_SMPLOCKMCS_H */
diff --git a/cpukit/include/rtems/score/smplockseq.h b/cpukit/include/rtems/score/smplockseq.h
new file mode 100644
index 0000000000..5daaee9c6e
--- /dev/null
+++ b/cpukit/include/rtems/score/smplockseq.h
@@ -0,0 +1,176 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMPLock
+ *
+ * @brief SMP Lock API
+ */
+
+/*
+ * Copyright (c) 2016 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMPLOCKSEQ_H
+#define _RTEMS_SCORE_SMPLOCKSEQ_H
+
+#include <rtems/score/cpuopts.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/assert.h>
+#include <rtems/score/atomic.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreSMPLock
+ *
+ * @{
+ */
+
+/**
+ * @brief SMP sequence lock control.
+ *
+ * The sequence lock offers a consistent data set for readers in the presence
+ * of at most one concurrent writer. Due to the read-modify-write operation in
+ * _SMP_sequence_lock_Read_retry() the data corresponding to the last written
+ * sequence number is observed. To allow multiple writers an additional SMP
+ * lock is necessary to serialize writes.
+ *
+ * See also Hans-J. Boehm, HP Laboratories,
+ * "Can Seqlocks Get Along With Programming Language Memory Models?",
+ * http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+ */
+typedef struct {
+ /**
+ * @brief The sequence number.
+ *
+ * An odd value indicates that a write is in progress.
+ */
+ Atomic_Uint sequence;
+} SMP_sequence_lock_Control;
+
+/**
+ * @brief SMP sequence lock control initializer for static initialization.
+ */
+#define SMP_SEQUENCE_LOCK_INITIALIZER { ATOMIC_INITIALIZER_UINT( 0 ) }
+
+/**
+ * @brief Initializes an SMP sequence lock.
+ *
+ * Concurrent initialization leads to unpredictable results.
+ *
+ * @param lock The SMP sequence lock control.
+ */
+static inline void _SMP_sequence_lock_Initialize( SMP_sequence_lock_Control *lock )
+{
+ _Atomic_Init_uint( &lock->sequence, 0 );
+}
+
+/**
+ * @brief Destroys an SMP sequence lock.
+ *
+ * Concurrent destruction leads to unpredictable results.
+ *
+ * @param lock The SMP sequence lock control.
+ */
+static inline void _SMP_sequence_lock_Destroy( SMP_sequence_lock_Control *lock )
+{
+ (void) lock;
+}
+
+/**
+ * @brief Begins an SMP sequence lock write operation.
+ *
+ * This function will not disable interrupts. The caller must ensure that the
+ * current thread of execution is not interrupted indefinitely, since this
+ * would starve readers.
+ *
+ * @param lock The SMP sequence lock control.
+ *
+ * @return The current sequence number.
+ */
+static inline unsigned int _SMP_sequence_lock_Write_begin(
+ SMP_sequence_lock_Control *lock
+)
+{
+ unsigned int seq;
+
+ seq = _Atomic_Load_uint( &lock->sequence, ATOMIC_ORDER_RELAXED );
+ _Assert( seq % 2 == 0 );
+
+ _Atomic_Store_uint( &lock->sequence, seq + 1, ATOMIC_ORDER_RELAXED );
+
+ /* There is no atomic store with acquire/release semantics */
+ _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
+
+ return seq;
+}
+
+/**
+ * @brief Ends an SMP sequence lock write operation.
+ *
+ * @param lock The SMP sequence lock control.
+ * @param seq The sequence number returned by _SMP_sequence_lock_Write_begin().
+ */
+static inline void _SMP_sequence_lock_Write_end(
+ SMP_sequence_lock_Control *lock,
+ unsigned int seq
+)
+{
+ _Atomic_Store_uint( &lock->sequence, seq + 2, ATOMIC_ORDER_RELEASE );
+}
+
+/**
+ * @brief Begins an SMP sequence lock read operation.
+ *
+ * This function will not disable interrupts.
+ *
+ * @param lock The SMP sequence lock control.
+ *
+ * @return The current sequence number.
+ */
+static inline unsigned int _SMP_sequence_lock_Read_begin(
+ const SMP_sequence_lock_Control *lock
+)
+{
+ return _Atomic_Load_uint( &lock->sequence, ATOMIC_ORDER_ACQUIRE );
+}
+
+/**
+ * @brief Ends an SMP sequence lock read operation and indicates if a retry is
+ * necessary.
+ *
+ * @param lock The SMP sequence lock control.
+ * @param seq The sequence number returned by _SMP_sequence_lock_Read_begin().
+ *
+ * @retval true The read operation must be retried with a call to
+ * _SMP_sequence_lock_Read_begin().
+ * @retval false Otherwise.
+ */
+static inline bool _SMP_sequence_lock_Read_retry(
+ SMP_sequence_lock_Control *lock,
+ unsigned int seq
+)
+{
+ unsigned int seq2;
+
+ seq2 = _Atomic_Fetch_add_uint( &lock->sequence, 0, ATOMIC_ORDER_RELEASE );
+ return seq != seq2 || seq % 2 != 0;
+}
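+
+/*
+ * Hedged usage sketch: the single writer brackets its update with the
+ * Write_begin()/Write_end() pair, while readers retry until they observe a
+ * stable, even sequence number.
+ *
+ *   unsigned int seq;
+ *
+ *   seq = _SMP_sequence_lock_Write_begin( &lock );
+ *   ...update the protected data...
+ *   _SMP_sequence_lock_Write_end( &lock, seq );
+ *
+ *   do {
+ *     seq = _SMP_sequence_lock_Read_begin( &lock );
+ *     ...read the protected data...
+ *   } while ( _SMP_sequence_lock_Read_retry( &lock, seq ) );
+ */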
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_SMPLOCKSEQ_H */
diff --git a/cpukit/include/rtems/score/smplockstats.h b/cpukit/include/rtems/score/smplockstats.h
new file mode 100644
index 0000000000..dd8e06c81d
--- /dev/null
+++ b/cpukit/include/rtems/score/smplockstats.h
@@ -0,0 +1,277 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMPLock
+ *
+ * @brief SMP Lock API
+ */
+
+/*
+ * Copyright (c) 2013, 2016 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMPLOCKSTATS_H
+#define _RTEMS_SCORE_SMPLOCKSTATS_H
+
+#include <rtems/score/cpuopts.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/chainimpl.h>
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreSMPLock
+ *
+ * @{
+ */
+
+#if defined(RTEMS_PROFILING)
+
+/**
+ * @brief Count of lock contention counters for lock statistics.
+ */
+#define SMP_LOCK_STATS_CONTENTION_COUNTS 4
+
+/**
+ * @brief SMP lock statistics.
+ *
+ * The lock acquire attempt instant is the point in time right after the
+ * interrupt disable action in the lock acquire sequence.
+ *
+ * The lock acquire instant is the point in time right after the lock
+ * acquisition. This is the begin of the critical section code execution.
+ *
+ * The lock release instant is the point in time right before the interrupt
+ * enable action in the lock release sequence.
+ *
+ * The lock section time is the time elapsed between the lock acquire instant
+ * and the lock release instant.
+ *
+ * The lock acquire time is the time elapsed between the lock acquire attempt
+ * instant and the lock acquire instant.
+ */
+typedef struct {
+ /**
+ * @brief Node for SMP lock statistics chain.
+ */
+ Chain_Node Node;
+
+ /**
+ * @brief The maximum lock acquire time in CPU counter ticks.
+ */
+ CPU_Counter_ticks max_acquire_time;
+
+ /**
+ * @brief The maximum lock section time in CPU counter ticks.
+ */
+ CPU_Counter_ticks max_section_time;
+
+ /**
+ * @brief The count of lock uses.
+ *
+ * This value may overflow.
+ */
+ uint64_t usage_count;
+
+ /**
+ * @brief Total lock acquire time in nanoseconds.
+ *
+ * The average lock acquire time is the total acquire time divided by the
+   * lock usage count. The ratio of the total section time to the total
+   * acquire time gives a measure of the lock contention.
+ *
+ * This value may overflow.
+ */
+ uint64_t total_acquire_time;
+
+ /**
+ * @brief The counts of lock acquire operations by contention.
+ *
+ * The contention count for index N corresponds to a lock acquire attempt
+ * with an initial queue length of N. The last index corresponds to all
+ * lock acquire attempts with an initial queue length greater than or equal
+ * to SMP_LOCK_STATS_CONTENTION_COUNTS minus one.
+ *
+ * The values may overflow.
+ */
+ uint64_t contention_counts[SMP_LOCK_STATS_CONTENTION_COUNTS];
+
+ /**
+ * @brief Total lock section time in CPU counter ticks.
+ *
+ * The average lock section time is the total section time divided by the
+ * lock usage count.
+ *
+ * This value may overflow.
+ */
+ uint64_t total_section_time;
+
+ /**
+ * @brief The lock name.
+ */
+ const char *name;
+} SMP_lock_Stats;
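+
+/*
+ * A sketch showing how the averages described above may be derived from a
+ * statistics block (assuming that stats points to a consistent snapshot,
+ * e.g. one obtained via the iteration functions below):
+ *
+ * @code
+ * uint64_t avg_acquire_time = 0;
+ * uint64_t avg_section_time = 0;
+ *
+ * if ( stats->usage_count > 0 ) {
+ *   avg_acquire_time = stats->total_acquire_time / stats->usage_count;
+ *   avg_section_time = stats->total_section_time / stats->usage_count;
+ * }
+ * @endcode
+ */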
+
+/**
+ * @brief Local context for SMP lock statistics.
+ */
+typedef struct {
+ /**
+ * @brief The last lock acquire instant in CPU counter ticks.
+ *
+ * This value is used to measure the lock section time.
+ */
+ CPU_Counter_ticks acquire_instant;
+
+ /**
+ * @brief The lock stats used for the last lock acquire.
+ */
+ SMP_lock_Stats *stats;
+} SMP_lock_Stats_context;
+
+/**
+ * @brief SMP lock statistics initializer for static initialization.
+ */
+#define SMP_LOCK_STATS_INITIALIZER( name ) \
+ { { NULL, NULL }, 0, 0, 0, 0, { 0, 0, 0, 0 }, 0, name }
+
+/**
+ * @brief Initializes an SMP lock statistics block.
+ *
+ * @param[in, out] stats The SMP lock statistics block.
+ * @param[in] name The name for the SMP lock statistics. This name must be
+ * persistent throughout the lifetime of this statistics block.
+ */
+static inline void _SMP_lock_Stats_initialize(
+ SMP_lock_Stats *stats,
+ const char *name
+)
+{
+ SMP_lock_Stats init = SMP_LOCK_STATS_INITIALIZER( name );
+
+ *stats = init;
+}
+
+/**
+ * @brief Destroys an SMP lock statistics block.
+ *
+ * @param[in] stats The SMP lock statistics block.
+ */
+void _SMP_lock_Stats_destroy( SMP_lock_Stats *stats );
+
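+/**
+ * @brief Registers an SMP lock statistics block so that it is covered by the
+ * statistics iteration functions below.
+ *
+ * @param[in, out] stats The SMP lock statistics block.
+ */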
+void _SMP_lock_Stats_register( SMP_lock_Stats *stats );
+
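+/**
+ * @brief Context to capture the CPU counter value at the start of a lock
+ * acquire attempt.
+ */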
+typedef struct {
+ CPU_Counter_ticks first;
+} SMP_lock_Stats_acquire_context;
+
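+/**
+ * @brief Begins a lock acquire for statistics purposes by recording the
+ * current CPU counter value.
+ *
+ * @param[out] acquire_context The SMP lock statistics acquire context.
+ */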
+static inline void _SMP_lock_Stats_acquire_begin(
+ SMP_lock_Stats_acquire_context *acquire_context
+)
+{
+ acquire_context->first = _CPU_Counter_read();
+}
+
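+/**
+ * @brief Ends a lock acquire for statistics purposes and updates the usage
+ * count, the acquire times, and the contention counts of the statistics
+ * block.
+ *
+ * @param[in] acquire_context The SMP lock statistics acquire context.
+ * @param[in, out] stats The SMP lock statistics block.
+ * @param[out] stats_context The SMP lock statistics context for the
+ *   corresponding release.
+ * @param[in] queue_length The queue length at the start of the lock acquire
+ *   attempt.
+ */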
+static inline void _SMP_lock_Stats_acquire_end(
+ const SMP_lock_Stats_acquire_context *acquire_context,
+ SMP_lock_Stats *stats,
+ SMP_lock_Stats_context *stats_context,
+ unsigned int queue_length
+)
+{
+ CPU_Counter_ticks second;
+ CPU_Counter_ticks delta;
+
+ second = _CPU_Counter_read();
+ stats_context->acquire_instant = second;
+ delta = _CPU_Counter_difference( second, acquire_context->first );
+
+ ++stats->usage_count;
+
+ stats->total_acquire_time += delta;
+
+ if ( stats->max_acquire_time < delta ) {
+ stats->max_acquire_time = delta;
+ }
+
+ if ( queue_length >= SMP_LOCK_STATS_CONTENTION_COUNTS ) {
+ queue_length = SMP_LOCK_STATS_CONTENTION_COUNTS - 1;
+ }
+ ++stats->contention_counts[ queue_length ];
+
+ stats_context->stats = stats;
+}
+
+/**
+ * @brief Updates an SMP lock statistics block during a lock release.
+ *
+ * @param[in] stats_context The SMP lock statistics context.
+ */
+static inline void _SMP_lock_Stats_release_update(
+ const SMP_lock_Stats_context *stats_context
+)
+{
+ SMP_lock_Stats *stats = stats_context->stats;
+ CPU_Counter_ticks first = stats_context->acquire_instant;
+ CPU_Counter_ticks second = _CPU_Counter_read();
+ CPU_Counter_ticks delta = _CPU_Counter_difference( second, first );
+
+ stats->total_section_time += delta;
+
+ if ( stats->max_section_time < delta ) {
+ stats->max_section_time = delta;
+
+ if ( _Chain_Is_node_off_chain( &stats->Node ) ) {
+ _SMP_lock_Stats_register( stats );
+ }
+ }
+}
+
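+/**
+ * @brief Context to iterate over the registered SMP lock statistics blocks.
+ */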
+typedef struct {
+ Chain_Node Node;
+ SMP_lock_Stats *current;
+} SMP_lock_Stats_iteration_context;
+
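+/**
+ * @brief Starts an iteration over the registered SMP lock statistics blocks.
+ *
+ * @param[out] iteration_context The iteration context to initialize.
+ */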
+void _SMP_lock_Stats_iteration_start(
+ SMP_lock_Stats_iteration_context *iteration_context
+);
+
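+/**
+ * @brief Obtains the next SMP lock statistics block during an iteration.
+ *
+ * @param[in, out] iteration_context The iteration context.
+ * @param[out] snapshot Storage for a snapshot of the statistics block.
+ * @param[out] name Storage for the lock name.
+ * @param[in] name_size The size of the name storage in characters.
+ *
+ * @retval true A statistics block snapshot was produced.
+ * @retval false Otherwise, e.g. the end of the iteration was reached.
+ */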
+bool _SMP_lock_Stats_iteration_next(
+ SMP_lock_Stats_iteration_context *iteration_context,
+ SMP_lock_Stats *snapshot,
+ char *name,
+ size_t name_size
+);
+
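+/**
+ * @brief Stops an iteration over the registered SMP lock statistics blocks.
+ *
+ * @param[in, out] iteration_context The iteration context.
+ */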
+void _SMP_lock_Stats_iteration_stop(
+ SMP_lock_Stats_iteration_context *iteration_context
+);
+
+#else /* RTEMS_PROFILING */
+
+#define _SMP_lock_Stats_initialize( stats, name ) do { } while ( 0 )
+
+#define _SMP_lock_Stats_destroy( stats ) do { } while ( 0 )
+
+#endif /* !RTEMS_PROFILING */
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_SMPLOCKSTATS_H */
diff --git a/cpukit/include/rtems/score/smplockticket.h b/cpukit/include/rtems/score/smplockticket.h
new file mode 100644
index 0000000000..e04c4056a5
--- /dev/null
+++ b/cpukit/include/rtems/score/smplockticket.h
@@ -0,0 +1,187 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMPLock
+ *
+ * @brief SMP Lock API
+ */
+
+/*
+ * Copyright (c) 2013, 2016 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMPLOCKTICKET_H
+#define _RTEMS_SCORE_SMPLOCKTICKET_H
+
+#include <rtems/score/cpuopts.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/atomic.h>
+#include <rtems/score/smplockstats.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreSMPLock
+ *
+ * @{
+ */
+
+/**
+ * @brief SMP ticket lock control.
+ */
+typedef struct {
+ Atomic_Uint next_ticket;
+ Atomic_Uint now_serving;
+} SMP_ticket_lock_Control;
+
+/**
+ * @brief SMP ticket lock control initializer for static initialization.
+ */
+#define SMP_TICKET_LOCK_INITIALIZER \
+ { \
+ ATOMIC_INITIALIZER_UINT( 0U ), \
+ ATOMIC_INITIALIZER_UINT( 0U ) \
+ }
+
+/**
+ * @brief Initializes an SMP ticket lock.
+ *
+ * Concurrent initialization leads to unpredictable results.
+ *
+ * @param[in] lock The SMP ticket lock control.
+ */
+static inline void _SMP_ticket_lock_Initialize(
+ SMP_ticket_lock_Control *lock
+)
+{
+ _Atomic_Init_uint( &lock->next_ticket, 0U );
+ _Atomic_Init_uint( &lock->now_serving, 0U );
+}
+
+/**
+ * @brief Destroys an SMP ticket lock.
+ *
+ * Concurrent destruction leads to unpredictable results.
+ *
+ * @param[in] lock The SMP ticket lock control.
+ */
+static inline void _SMP_ticket_lock_Destroy( SMP_ticket_lock_Control *lock )
+{
+ (void) lock;
+}
+
+static inline void _SMP_ticket_lock_Do_acquire(
+ SMP_ticket_lock_Control *lock
+#if defined(RTEMS_PROFILING)
+ ,
+ SMP_lock_Stats *stats,
+ SMP_lock_Stats_context *stats_context
+#endif
+)
+{
+ unsigned int my_ticket;
+ unsigned int now_serving;
+#if defined(RTEMS_PROFILING)
+ unsigned int initial_queue_length;
+ SMP_lock_Stats_acquire_context acquire_context;
+
+ _SMP_lock_Stats_acquire_begin( &acquire_context );
+#endif
+
+ my_ticket =
+ _Atomic_Fetch_add_uint( &lock->next_ticket, 1U, ATOMIC_ORDER_RELAXED );
+
+#if defined(RTEMS_PROFILING)
+ now_serving =
+ _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_ACQUIRE );
+ initial_queue_length = my_ticket - now_serving;
+
+ if ( initial_queue_length > 0 ) {
+#endif
+
+ do {
+ now_serving =
+ _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_ACQUIRE );
+ } while ( now_serving != my_ticket );
+
+#if defined(RTEMS_PROFILING)
+ }
+
+ _SMP_lock_Stats_acquire_end(
+ &acquire_context,
+ stats,
+ stats_context,
+ initial_queue_length
+ );
+#endif
+}
+
+/**
+ * @brief Acquires an SMP ticket lock.
+ *
+ * This function will not disable interrupts. The caller must ensure that the
+ * current thread of execution is not interrupted indefinitely once it has
+ * obtained the SMP ticket lock.
+ *
+ * @param[in] lock The SMP ticket lock control.
+ * @param[in] stats The SMP lock statistics.
+ * @param[out] stats_context The SMP lock statistics context.
+ */
+#if defined(RTEMS_PROFILING)
+ #define _SMP_ticket_lock_Acquire( lock, stats, stats_context ) \
+ _SMP_ticket_lock_Do_acquire( lock, stats, stats_context )
+#else
+ #define _SMP_ticket_lock_Acquire( lock, stats, stats_context ) \
+ _SMP_ticket_lock_Do_acquire( lock )
+#endif
+
+static inline void _SMP_ticket_lock_Do_release(
+ SMP_ticket_lock_Control *lock
+#if defined(RTEMS_PROFILING)
+ ,
+ const SMP_lock_Stats_context *stats_context
+#endif
+)
+{
+ unsigned int current_ticket =
+ _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_RELAXED );
+ unsigned int next_ticket = current_ticket + 1U;
+
+#if defined(RTEMS_PROFILING)
+ _SMP_lock_Stats_release_update( stats_context );
+#endif
+
+ _Atomic_Store_uint( &lock->now_serving, next_ticket, ATOMIC_ORDER_RELEASE );
+}
+
+/**
+ * @brief Releases an SMP ticket lock.
+ *
+ * @param[in] lock The SMP ticket lock control.
+ * @param[in] stats_context The SMP lock statistics context.
+ */
+#if defined(RTEMS_PROFILING)
+ #define _SMP_ticket_lock_Release( lock, stats_context ) \
+ _SMP_ticket_lock_Do_release( lock, stats_context )
+#else
+ #define _SMP_ticket_lock_Release( lock, stats_context ) \
+ _SMP_ticket_lock_Do_release( lock )
+#endif
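+
+/*
+ * A minimal usage sketch for the acquire/release pair above, assuming a
+ * profiling build (RTEMS_PROFILING), since the statistics types exist only
+ * then; without profiling the statistics arguments are dropped by the
+ * macros. The lock and statistics names are hypothetical:
+ *
+ * @code
+ * SMP_ticket_lock_Control lock = SMP_TICKET_LOCK_INITIALIZER;
+ * SMP_lock_Stats stats = SMP_LOCK_STATS_INITIALIZER( "my lock" );
+ * SMP_lock_Stats_context stats_context;
+ *
+ * _SMP_ticket_lock_Acquire( &lock, &stats, &stats_context );
+ * // critical section; interrupts are not disabled by the lock itself
+ * _SMP_ticket_lock_Release( &lock, &stats_context );
+ * @endcode
+ */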
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_SMPLOCKTICKET_H */
diff --git a/cpukit/include/rtems/score/stack.h b/cpukit/include/rtems/score/stack.h
new file mode 100644
index 0000000000..9622495092
--- /dev/null
+++ b/cpukit/include/rtems/score/stack.h
@@ -0,0 +1,69 @@
+/**
+ * @file rtems/score/stack.h
+ *
+ * @brief Information About the Thread Stack Handler
+ *
+ * This include file contains all information about the thread
+ * Stack Handler. This Handler provides mechanisms which can be used to
+ * initialize and utilize stacks.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_STACK_H
+#define _RTEMS_SCORE_STACK_H
+
+#include <rtems/score/basedefs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreStack Stack Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which is used in the management
+ * of thread stacks.
+ */
+/**@{*/
+
+/**
+ * The following constant defines the minimum stack size which every
+ * thread must exceed.
+ */
+#define STACK_MINIMUM_SIZE CPU_STACK_MINIMUM_SIZE
+
+/**
+ * The following defines the control block used to manage each stack.
+ */
+typedef struct {
+ /** This is the stack size. */
+ size_t size;
+ /** This is the low memory address of stack. */
+ void *area;
+} Stack_Control;
+
+/**
+ * This variable contains the minimum stack size.
+ *
+ * @note It is instantiated and set by User Configuration via confdefs.h.
+ */
+extern uint32_t rtems_minimum_stack_size;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/stackimpl.h b/cpukit/include/rtems/score/stackimpl.h
new file mode 100644
index 0000000000..4c622345ff
--- /dev/null
+++ b/cpukit/include/rtems/score/stackimpl.h
@@ -0,0 +1,99 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines from the Stack Handler
+ *
+ * This file contains the static inline implementation of the inlined
+ * routines from the Stack Handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_STACKIMPL_H
+#define _RTEMS_SCORE_STACKIMPL_H
+
+#include <rtems/score/stack.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreStack
+ */
+/**@{**/
+
+/**
+ * This routine initializes the_stack record to indicate that
+ * size bytes of memory starting at starting_address have been
+ * reserved for a stack.
+ */
+RTEMS_INLINE_ROUTINE void _Stack_Initialize (
+ Stack_Control *the_stack,
+ void *starting_address,
+ size_t size
+)
+{
+ the_stack->area = starting_address;
+ the_stack->size = size;
+}
+
+/**
+ * This function returns the minimum stack size configured
+ * for this application.
+ *
+ * @return This method returns the minimum stack size.
+ */
+RTEMS_INLINE_ROUTINE uint32_t _Stack_Minimum (void)
+{
+ return rtems_minimum_stack_size;
+}
+
+/**
+ * This function returns true if size bytes is enough memory for
+ * a valid stack area on this processor, and false otherwise.
+ *
+ * @param[in] size is the stack size to check
+ *
+ * @return This method returns true if the stack is large enough.
+ */
+RTEMS_INLINE_ROUTINE bool _Stack_Is_enough (
+ size_t size
+)
+{
+ return ( size >= _Stack_Minimum() );
+}
+
+/**
+ * This function returns the appropriate stack size given the requested
+ * size. If the requested size is below the minimum, then the minimum
+ * configured stack size is returned.
+ *
+ * @param[in] size is the stack size to check
+ *
+ * @return This method returns the appropriate stack size.
+ */
+RTEMS_INLINE_ROUTINE size_t _Stack_Ensure_minimum (
+ size_t size
+)
+{
+ if ( size >= _Stack_Minimum() )
+ return size;
+ return _Stack_Minimum();
+}
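+
+/*
+ * A sketch of a typical allocation flow built from the routines above. The
+ * requested size is a hypothetical input and _Workspace_Allocate() is
+ * assumed to be available via <rtems/score/wkspace.h>:
+ *
+ * @code
+ * size_t size = _Stack_Ensure_minimum( requested_size );
+ * void *area = _Workspace_Allocate( size );
+ *
+ * if ( area != NULL ) {
+ *   _Stack_Initialize( &the_stack, area, size );
+ * }
+ * @endcode
+ */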
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/states.h b/cpukit/include/rtems/score/states.h
new file mode 100644
index 0000000000..ba59af6907
--- /dev/null
+++ b/cpukit/include/rtems/score/states.h
@@ -0,0 +1,50 @@
+/**
+ * @file rtems/score/states.h
+ *
+ * @brief Thread Execution State Information
+ *
+ * This include file defines thread execution state information.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_STATES_H
+#define _RTEMS_SCORE_STATES_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreStates SuperCore Thread States
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which relates to the management of
+ * the state bitmap associated with each thread.
+ */
+/**@{*/
+
+/**
+ * The following type defines the control block used to manage a
+ * thread's state.
+ */
+typedef uint32_t States_Control;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/statesimpl.h b/cpukit/include/rtems/score/statesimpl.h
new file mode 100644
index 0000000000..db462fbb9a
--- /dev/null
+++ b/cpukit/include/rtems/score/statesimpl.h
@@ -0,0 +1,283 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines Associated with Thread State Information
+ *
+ * This file contains the static inline implementation of the inlined
+ * routines associated with thread state information.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2012.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_STATESIMPL_H
+#define _RTEMS_SCORE_STATESIMPL_H
+
+#include <rtems/score/states.h>
+#include <rtems/score/basedefs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreStates
+ */
+/**@{**/
+
+/*
+ * The following constants define the individual states which may be used
+ * to compose and manipulate a thread's state. More frequently used
+ * states should use lower value bits to ease the use of immediate values on
+ * RISC architectures.
+ */
+
+/** This macro corresponds to a task being ready. */
+#define STATES_READY 0x00000000
+
+/** This macro corresponds to a task waiting for a mutex. */
+#define STATES_WAITING_FOR_MUTEX 0x00000001
+
+/** This macro corresponds to a task waiting for a semaphore. */
+#define STATES_WAITING_FOR_SEMAPHORE 0x00000002
+
+/** This macro corresponds to a task waiting for an event. */
+#define STATES_WAITING_FOR_EVENT 0x00000004
+
+/** This macro corresponds to a task waiting for a system event. */
+#define STATES_WAITING_FOR_SYSTEM_EVENT 0x00000008
+
+/** This macro corresponds to a task waiting for a message. */
+#define STATES_WAITING_FOR_MESSAGE 0x00000010
+
+/** This macro corresponds to a task waiting for a condition variable. */
+#define STATES_WAITING_FOR_CONDITION_VARIABLE 0x00000020
+
+/** This macro corresponds to a task waiting for a futex. */
+#define STATES_WAITING_FOR_FUTEX 0x00000040
+
+/** This macro corresponds to a task waiting for BSD wakeup. */
+#define STATES_WAITING_FOR_BSD_WAKEUP 0x00000080
+
+/**
+ * @brief This macro corresponds to a task which is waiting for a relative or
+ * absolute timeout.
+ */
+#define STATES_WAITING_FOR_TIME 0x00000100
+
+/** This macro corresponds to a task waiting for a period. */
+#define STATES_WAITING_FOR_PERIOD 0x00000200
+
+/** This macro corresponds to a task waiting for a signal. */
+#define STATES_WAITING_FOR_SIGNAL 0x00000400
+
+/** This macro corresponds to a task waiting for a barrier. */
+#define STATES_WAITING_FOR_BARRIER 0x00000800
+
+/** This macro corresponds to a task waiting for a RWLock. */
+#define STATES_WAITING_FOR_RWLOCK 0x00001000
+
+/** This macro corresponds to a task waiting for a join while exiting. */
+#define STATES_WAITING_FOR_JOIN_AT_EXIT 0x00002000
+
+/** This macro corresponds to a task waiting for a join. */
+#define STATES_WAITING_FOR_JOIN 0x00004000
+
+/** This macro corresponds to a task being suspended. */
+#define STATES_SUSPENDED 0x00008000
+
+/** This macro corresponds to a task waiting for a fixed size segment. */
+#define STATES_WAITING_FOR_SEGMENT 0x00010000
+
+/** This macro corresponds to a task whose life is changing. */
+#define STATES_LIFE_IS_CHANGING 0x00020000
+
+/** This macro corresponds to a task being held by the debugger. */
+#define STATES_DEBUGGER 0x08000000
+
+/** This macro corresponds to a task which is in an interruptible
+ * blocking state.
+ */
+#define STATES_INTERRUPTIBLE_BY_SIGNAL 0x10000000
+
+/** This macro corresponds to a task waiting for a reply to an MPCI request. */
+#define STATES_WAITING_FOR_RPC_REPLY 0x20000000
+
+/** This macro corresponds to a task being a zombie. */
+#define STATES_ZOMBIE 0x40000000
+
+/** This macro corresponds to a task being created but not yet started. */
+#define STATES_DORMANT 0x80000000
+
+/** This macro corresponds to a task waiting for a local object operation. */
+#define STATES_LOCALLY_BLOCKED ( STATES_WAITING_FOR_SEGMENT | \
+ STATES_WAITING_FOR_MESSAGE | \
+ STATES_WAITING_FOR_SEMAPHORE | \
+ STATES_WAITING_FOR_MUTEX | \
+ STATES_WAITING_FOR_CONDITION_VARIABLE | \
+ STATES_WAITING_FOR_JOIN | \
+ STATES_WAITING_FOR_SIGNAL | \
+ STATES_WAITING_FOR_BARRIER | \
+ STATES_WAITING_FOR_BSD_WAKEUP | \
+ STATES_WAITING_FOR_FUTEX | \
+ STATES_WAITING_FOR_RWLOCK )
+
+/** This macro corresponds to a task which is blocked. */
+#define STATES_BLOCKED ( STATES_LOCALLY_BLOCKED | \
+ STATES_WAITING_FOR_TIME | \
+ STATES_WAITING_FOR_PERIOD | \
+ STATES_WAITING_FOR_EVENT | \
+ STATES_WAITING_FOR_RPC_REPLY | \
+ STATES_WAITING_FOR_SYSTEM_EVENT | \
+ STATES_INTERRUPTIBLE_BY_SIGNAL )
+
+/** All state bits set to one (provided for _Thread_Start()) */
+#define STATES_ALL_SET 0xffffffff
+
+/**
+ * This function sets the given states_to_set in the current_state
+ * passed in. The updated state set is returned to the caller.
+ *
+ * @param[in] states_to_set is the state bits to set
+ * @param[in] current_state is the state set to add them to
+ *
+ * @return This method returns the updated states value.
+ */
+RTEMS_INLINE_ROUTINE States_Control _States_Set (
+ States_Control states_to_set,
+ States_Control current_state
+)
+{
+ return (current_state | states_to_set);
+}
+
+/**
+ * This function clears the given states_to_clear from the current_state
+ * passed in. The updated state set is returned to the caller.
+ *
+ * @param[in] states_to_clear is the state bits to clear
+ * @param[in] current_state is the state set to remove them from
+ *
+ * @return This method returns the updated states value.
+ */
+RTEMS_INLINE_ROUTINE States_Control _States_Clear (
+ States_Control states_to_clear,
+ States_Control current_state
+)
+{
+ return (current_state & ~states_to_clear);
+}
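+
+/*
+ * For example, blocking a ready task on a semaphore and readying it again
+ * could manipulate a state set as follows:
+ *
+ * @code
+ * States_Control state = STATES_READY;
+ *
+ * state = _States_Set( STATES_WAITING_FOR_SEMAPHORE, state );
+ * // ... the task blocks ...
+ * state = _States_Clear( STATES_WAITING_FOR_SEMAPHORE, state );
+ * // state equals STATES_READY again
+ * @endcode
+ */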
+
+/**
+ * This function returns true if the_states indicates that the
+ * state is READY, and false otherwise.
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the desired state condition is set.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Is_ready (
+ States_Control the_states
+)
+{
+ return (the_states == STATES_READY);
+}
+
+/**
+ * This function returns true if the DORMANT state is set in
+ * the_states, and false otherwise.
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the desired state condition is set.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Is_dormant (
+ States_Control the_states
+)
+{
+ return (the_states & STATES_DORMANT);
+}
+
+/**
+ * This function returns true if the SUSPENDED state is set in
+ * the_states, and false otherwise.
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the desired state condition is set.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Is_suspended (
+ States_Control the_states
+)
+{
+ return (the_states & STATES_SUSPENDED);
+}
+
+/**
+ * This function returns true if the WAITING_FOR_RPC_REPLY state is set in
+ * the_states, and false otherwise.
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the desired state condition is set.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_rpc_reply (
+ States_Control the_states
+)
+{
+ return (the_states & STATES_WAITING_FOR_RPC_REPLY);
+}
+
+RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_join_at_exit(
+ States_Control the_states
+)
+{
+ return ( the_states & STATES_WAITING_FOR_JOIN_AT_EXIT ) != 0;
+}
+
+/**
+ * This function returns true if the task's state is set in a
+ * way that allows it to be interrupted by a signal.
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the desired state condition is set.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Is_interruptible_by_signal (
+ States_Control the_states
+)
+{
+ return (the_states & STATES_INTERRUPTIBLE_BY_SIGNAL);
+}
+
+/**
+ * This function returns true if one of the states which indicates
+ * that a task is blocked waiting for a local resource is set in
+ * the_states, and false otherwise.
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the desired state condition is set.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Is_locally_blocked (
+ States_Control the_states
+)
+{
+ return (the_states & STATES_LOCALLY_BLOCKED);
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/status.h b/cpukit/include/rtems/score/status.h
new file mode 100644
index 0000000000..5b154bb207
--- /dev/null
+++ b/cpukit/include/rtems/score/status.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_STATUS_H
+#define _RTEMS_SCORE_STATUS_H
+
+#include <rtems/score/basedefs.h>
+
+#include <errno.h>
+#include <pthread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @brief Status code parts for the Classic API.
+ *
+ * Must be in synchronization with rtems_status_code.
+ */
+typedef enum {
+ STATUS_CLASSIC_INCORRECT_STATE = 14,
+ STATUS_CLASSIC_INTERNAL_ERROR = 25,
+ STATUS_CLASSIC_INVALID_NUMBER = 10,
+ STATUS_CLASSIC_INVALID_PRIORITY = 19,
+ STATUS_CLASSIC_INVALID_SIZE = 8,
+ STATUS_CLASSIC_NO_MEMORY = 26,
+ STATUS_CLASSIC_NOT_DEFINED = 11,
+ STATUS_CLASSIC_NOT_OWNER_OF_RESOURCE = 23,
+ STATUS_CLASSIC_OBJECT_WAS_DELETED = 7,
+ STATUS_CLASSIC_RESOURCE_IN_USE = 12,
+ STATUS_CLASSIC_SUCCESSFUL = 0,
+ STATUS_CLASSIC_TIMEOUT = 6,
+ STATUS_CLASSIC_TOO_MANY = 5,
+ STATUS_CLASSIC_UNSATISFIED = 13
+} Status_Classic;
+
+/**
+ * @brief Macro to build a status code from Classic and POSIX API parts.
+ */
+#define STATUS_BUILD( classic_status, posix_status ) \
+ ( ( ( (unsigned int) ( posix_status ) ) << 8 ) | ( classic_status ) )
+
+/**
+ * @brief Macro to get the Classic API status code.
+ */
+#define STATUS_GET_CLASSIC( status ) \
+ ( ( status ) & 0xff )
+
+/**
+ * @brief Macro to get the POSIX API status code.
+ *
+ * Performs an arithmetic shift to reconstruct a negative POSIX status.
+ */
+#define STATUS_GET_POSIX( status ) \
+ ( ( ( (int) ( status ) ) | 0xff ) >> 8 )
+
+/**
+ * @brief Status codes.
+ */
+typedef enum {
+ STATUS_BARRIER_AUTOMATICALLY_RELEASED =
+ STATUS_BUILD( STATUS_CLASSIC_SUCCESSFUL, PTHREAD_BARRIER_SERIAL_THREAD ),
+ STATUS_DEADLOCK =
+ STATUS_BUILD( STATUS_CLASSIC_INCORRECT_STATE, EDEADLK ),
+ STATUS_FLUSHED =
+ STATUS_BUILD( STATUS_CLASSIC_UNSATISFIED, EAGAIN ),
+ STATUS_INCORRECT_STATE =
+ STATUS_BUILD( STATUS_CLASSIC_INCORRECT_STATE, EINVAL ),
+ STATUS_INTERRUPTED =
+ STATUS_BUILD( STATUS_CLASSIC_INTERNAL_ERROR, EINTR ),
+ STATUS_INVALID_NUMBER =
+ STATUS_BUILD( STATUS_CLASSIC_INVALID_NUMBER, EINVAL ),
+ STATUS_INVALID_PRIORITY =
+ STATUS_BUILD( STATUS_CLASSIC_INVALID_PRIORITY, EINVAL ),
+ STATUS_MAXIMUM_COUNT_EXCEEDED =
+ STATUS_BUILD( STATUS_CLASSIC_INTERNAL_ERROR, EOVERFLOW ),
+ STATUS_MESSAGE_INVALID_SIZE =
+ STATUS_BUILD( STATUS_CLASSIC_INVALID_SIZE, EMSGSIZE ),
+ STATUS_MESSAGE_QUEUE_WAIT_IN_ISR =
+ STATUS_BUILD( STATUS_CLASSIC_INTERNAL_ERROR, ENOMEM ),
+ STATUS_MESSAGE_QUEUE_WAS_DELETED =
+ STATUS_BUILD( STATUS_CLASSIC_OBJECT_WAS_DELETED, EBADF ),
+ STATUS_MINUS_ONE =
+ -1,
+ STATUS_MUTEX_CEILING_VIOLATED =
+ STATUS_BUILD( STATUS_CLASSIC_INVALID_PRIORITY, EINVAL ),
+ STATUS_NESTING_NOT_ALLOWED =
+ STATUS_BUILD( STATUS_CLASSIC_UNSATISFIED, EDEADLK ),
+ STATUS_NO_MEMORY =
+ STATUS_BUILD( STATUS_CLASSIC_NO_MEMORY, EINVAL ),
+ STATUS_NOT_DEFINED =
+ STATUS_BUILD( STATUS_CLASSIC_NOT_DEFINED, EINVAL ),
+ STATUS_NOT_OWNER =
+ STATUS_BUILD( STATUS_CLASSIC_NOT_OWNER_OF_RESOURCE, EPERM ),
+ STATUS_OBJECT_WAS_DELETED =
+ STATUS_BUILD( STATUS_CLASSIC_OBJECT_WAS_DELETED, EINVAL ),
+ STATUS_RESOURCE_IN_USE =
+ STATUS_BUILD( STATUS_CLASSIC_RESOURCE_IN_USE, EBUSY ),
+ STATUS_RESULT_TOO_LARGE =
+ STATUS_BUILD( STATUS_CLASSIC_UNSATISFIED, ERANGE ),
+ STATUS_SUCCESSFUL =
+ STATUS_BUILD( STATUS_CLASSIC_SUCCESSFUL, 0 ),
+ STATUS_TIMEOUT =
+ STATUS_BUILD( STATUS_CLASSIC_TIMEOUT, ETIMEDOUT ),
+ STATUS_TOO_MANY =
+ STATUS_BUILD( STATUS_CLASSIC_TOO_MANY, EAGAIN ),
+ STATUS_UNAVAILABLE =
+ STATUS_BUILD( STATUS_CLASSIC_UNSATISFIED, EBUSY ),
+ STATUS_UNSATISFIED =
+ STATUS_BUILD( STATUS_CLASSIC_UNSATISFIED, EAGAIN )
+} Status_Control;
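+
+/*
+ * A worked example of the encoding: STATUS_TIMEOUT carries both API parts
+ * and decomposes as follows:
+ *
+ * @code
+ * Status_Control status = STATUS_TIMEOUT;
+ *
+ * // STATUS_GET_CLASSIC( status ) == STATUS_CLASSIC_TIMEOUT, i.e. 6
+ * // STATUS_GET_POSIX( status ) == ETIMEDOUT
+ * @endcode
+ *
+ * The arithmetic shift in STATUS_GET_POSIX() also reproduces negative
+ * values, e.g. PTHREAD_BARRIER_SERIAL_THREAD for
+ * STATUS_BARRIER_AUTOMATICALLY_RELEASED on systems where that constant is
+ * negative.
+ */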
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_STATUS_H */
diff --git a/cpukit/include/rtems/score/sysstate.h b/cpukit/include/rtems/score/sysstate.h
new file mode 100644
index 0000000000..0e01927d9a
--- /dev/null
+++ b/cpukit/include/rtems/score/sysstate.h
@@ -0,0 +1,119 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSysState
+ *
+ * @brief System State Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SYSSTATE_H
+#define _RTEMS_SCORE_SYSSTATE_H
+
+#include <rtems/score/basedefs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSysState System State Handler
+ *
+ * @ingroup Score
+ *
+ * @brief Management of the internal system state of RTEMS.
+ */
+/**@{**/
+
+/**
+ * @brief System states.
+ */
+typedef enum {
+ /**
+ * @brief The system is before the end of the first phase of initialization.
+ */
+ SYSTEM_STATE_BEFORE_INITIALIZATION,
+
+ /**
+ * @brief The system is past the end of the first phase of initialization
+ * but multitasking is not started yet.
+ */
+ SYSTEM_STATE_BEFORE_MULTITASKING,
+
+ /**
+ * @brief The system is up and operating normally.
+ */
+ SYSTEM_STATE_UP,
+
+ /**
+ * @brief The system reached its terminal state.
+ */
+ SYSTEM_STATE_TERMINATED
+} System_state_Codes;
+
+#define SYSTEM_STATE_CODES_FIRST SYSTEM_STATE_BEFORE_INITIALIZATION
+
+#define SYSTEM_STATE_CODES_LAST SYSTEM_STATE_TERMINATED
+
+#if defined(RTEMS_MULTIPROCESSING)
+extern bool _System_state_Is_multiprocessing;
+#endif
+
+extern System_state_Codes _System_state_Current;
+
+RTEMS_INLINE_ROUTINE void _System_state_Set (
+ System_state_Codes state
+)
+{
+ _System_state_Current = state;
+}
+
+RTEMS_INLINE_ROUTINE System_state_Codes _System_state_Get ( void )
+{
+ return _System_state_Current;
+}
+
+RTEMS_INLINE_ROUTINE bool _System_state_Is_before_initialization (
+ System_state_Codes state
+)
+{
+ return (state == SYSTEM_STATE_BEFORE_INITIALIZATION);
+}
+
+RTEMS_INLINE_ROUTINE bool _System_state_Is_before_multitasking (
+ System_state_Codes state
+)
+{
+ return (state == SYSTEM_STATE_BEFORE_MULTITASKING);
+}
+
+RTEMS_INLINE_ROUTINE bool _System_state_Is_up (
+ System_state_Codes state
+)
+{
+ return (state == SYSTEM_STATE_UP);
+}
+
+RTEMS_INLINE_ROUTINE bool _System_state_Is_terminated (
+ System_state_Codes state
+)
+{
+ return (state == SYSTEM_STATE_TERMINATED);
+}
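+
+/*
+ * A typical guard built from the routines above, e.g. for code which must
+ * not block before multitasking is started (a sketch):
+ *
+ * @code
+ * if ( _System_state_Is_up( _System_state_Get() ) ) {
+ *   // normal operation, blocking is allowed
+ * } else {
+ *   // initialization context, use polling instead
+ * }
+ * @endcode
+ */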
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/thread.h b/cpukit/include/rtems/score/thread.h
new file mode 100644
index 0000000000..7e0e2722dd
--- /dev/null
+++ b/cpukit/include/rtems/score/thread.h
@@ -0,0 +1,935 @@
+/**
+ * @file rtems/score/thread.h
+ *
+ * @brief Constants and Structures Related with the Thread Control Block
+ *
+ * This include file contains all constants and structures associated
+ * with the thread control block.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2014.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2014, 2016 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_THREAD_H
+#define _RTEMS_SCORE_THREAD_H
+
+#include <rtems/score/atomic.h>
+#include <rtems/score/context.h>
+#if defined(RTEMS_MULTIPROCESSING)
+#include <rtems/score/mppkt.h>
+#endif
+#include <rtems/score/isrlock.h>
+#include <rtems/score/object.h>
+#include <rtems/score/priority.h>
+#include <rtems/score/schedulernode.h>
+#include <rtems/score/stack.h>
+#include <rtems/score/states.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/timestamp.h>
+#include <rtems/score/watchdog.h>
+
+#if defined(RTEMS_SMP)
+#include <rtems/score/processormask.h>
+#endif
+
+struct _pthread_cleanup_context;
+
+struct Per_CPU_Control;
+
+struct _Scheduler_Control;
+
+struct User_extensions_Iterator;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreThread Thread Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality related to the management of
+ * threads. This includes the creation, deletion, and scheduling of threads.
+ *
+ * The following variables are maintained as part of the per cpu data
+ * structure.
+ *
+ * + Idle thread pointer
+ * + Executing thread pointer
+ * + Heir thread pointer
+ */
+/**@{*/
+
+#if defined(RTEMS_POSIX_API)
+ #define RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE
+#endif
+
+/*
+ * With the addition of the Constant Bandwidth Server (CBS) scheduler,
+ * this feature is needed even when POSIX is disabled.
+ */
+#define RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT
+
+#if defined(RTEMS_POSIX_API)
+ #define RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API
+#endif
+
+#if defined(RTEMS_DEBUG)
+#define RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT
+#endif
+
+/*
+ * Only provided for backward compatibility to not break application
+ * configurations.
+ */
+typedef void *Thread RTEMS_DEPRECATED;
+
+/**
+ * @brief Type of the numeric argument of a thread entry function with at
+ * least one numeric argument.
+ *
+ * This numeric argument type designates an unsigned integer type with the
+ * property that any valid pointer to void can be converted to this type and
+ * then converted back to a pointer to void. The result will compare equal to
+ * the original pointer.
+ */
+typedef CPU_Uint32ptr Thread_Entry_numeric_type;
+
+/**
+ * @brief Data for idle thread entry.
+ */
+typedef struct {
+ void *( *entry )( uintptr_t argument );
+} Thread_Entry_idle;
+
+/**
+ * @brief Data for thread entry with one numeric argument and no return value.
+ */
+typedef struct {
+ void ( *entry )( Thread_Entry_numeric_type argument );
+ Thread_Entry_numeric_type argument;
+} Thread_Entry_numeric;
+
+/**
+ * @brief Data for thread entry with one pointer argument and a pointer return
+ * value.
+ */
+typedef struct {
+ void *( *entry )( void *argument );
+ void *argument;
+} Thread_Entry_pointer;
+
+/**
+ * @brief Thread entry information.
+ */
+typedef struct {
+ /**
+ * @brief Thread entry adaptor.
+ *
+ * Calls the corresponding thread entry with the right parameters.
+ *
+ * @param executing The executing thread.
+ */
+ void ( *adaptor )( Thread_Control *executing );
+
+ /**
+ * @brief Thread entry data used by the adaptor to call the thread entry
+ * function with the right parameters.
+ */
+ union {
+ Thread_Entry_idle Idle;
+ Thread_Entry_numeric Numeric;
+ Thread_Entry_pointer Pointer;
+ } Kinds;
+} Thread_Entry_information;
+
+/**
+ * The following lists the algorithms used to manage the thread cpu budget.
+ *
+ * Reset Timeslice: At each context switch, reset the time quantum.
+ * Exhaust Timeslice: Only reset the quantum once it is consumed.
+ * Callout: Execute routine when budget is consumed.
+ */
+typedef enum {
+ THREAD_CPU_BUDGET_ALGORITHM_NONE,
+ THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE,
+ #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
+ THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE,
+ #endif
+ #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
+ THREAD_CPU_BUDGET_ALGORITHM_CALLOUT
+ #endif
+} Thread_CPU_budget_algorithms;
+
+/** This defines the entry point for the thread specific timeslice
+ * budget management algorithm.
+ */
+typedef void (*Thread_CPU_budget_algorithm_callout )( Thread_Control * );
+
+/**
+ * The following structure contains the information which defines
+ * the starting state of a thread.
+ */
+typedef struct {
+ /** This field contains the thread entry information. */
+ Thread_Entry_information Entry;
+ /*-------------- initial execution modes ----------------- */
+ /** This field indicates whether the thread was preemptible when
+ * it started.
+ */
+ bool is_preemptible;
+ /** This field indicates the CPU budget algorithm. */
+ Thread_CPU_budget_algorithms budget_algorithm;
+ /** This field is the routine to invoke when the CPU allotment is
+ * consumed.
+ */
+ Thread_CPU_budget_algorithm_callout budget_callout;
+ /** This field is the initial ISR disable level of this thread. */
+ uint32_t isr_level;
+ /** This field is the initial priority. */
+ Priority_Control initial_priority;
+ #if defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
+ /** This field indicates whether the SuperCore allocated the stack. */
+ bool core_allocated_stack;
+ #endif
+ /** This field is the stack information. */
+ Stack_Control Initial_stack;
+ #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+ /** This field is the initial FP context area address. */
+ Context_Control_fp *fp_context;
+ #endif
+ /** This field is the initial stack area address. */
+ void *stack;
+ /** The thread-local storage (TLS) area */
+ void *tls_area;
+} Thread_Start_information;
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief The thread state with respect to the scheduler.
+ */
+typedef enum {
+ /**
+ * @brief This thread is blocked with respect to the scheduler.
+ *
+ * This thread uses no scheduler nodes.
+ */
+ THREAD_SCHEDULER_BLOCKED,
+
+ /**
+ * @brief This thread is scheduled with respect to the scheduler.
+ *
+ * This thread executes using one of its scheduler nodes. This could be its
+ * own scheduler node or, in case it owns resources taking part in the
+ * scheduler helping protocol, a scheduler node of another thread.
+ */
+ THREAD_SCHEDULER_SCHEDULED,
+
+ /**
+ * @brief This thread is ready with respect to the scheduler.
+ *
+ * None of the scheduler nodes of this thread is scheduled.
+ */
+ THREAD_SCHEDULER_READY
+} Thread_Scheduler_state;
+#endif
+
+/**
+ * @brief Thread scheduler control.
+ */
+typedef struct {
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Lock to protect the scheduler node change requests.
+ */
+ ISR_lock_Control Lock;
+
+ /**
+ * @brief The current scheduler state of this thread.
+ */
+ Thread_Scheduler_state state;
+
+ /**
+ * @brief The home scheduler control of this thread.
+ */
+ const struct _Scheduler_Control *home;
+
+ /**
+ * @brief The processor assigned by the current scheduler.
+ */
+ struct Per_CPU_Control *cpu;
+
+ /**
+ * @brief Scheduler nodes immediately available to the thread by its home
+ * scheduler instance and due to thread queue ownerships.
+ *
+ * This chain is protected by the thread wait lock.
+ *
+ * This chain is never empty. The first scheduler node on the chain is the
+ * scheduler node of the home scheduler instance.
+ */
+ Chain_Control Wait_nodes;
+
+ /**
+ * @brief Scheduler nodes immediately available to the schedulers for this
+ * thread.
+ *
+ * This chain is protected by the thread state lock.
+ *
+ * This chain is never empty. The first scheduler node on the chain is the
+ * scheduler node of the home scheduler instance.
+ */
+ Chain_Control Scheduler_nodes;
+
+ /**
+ * @brief Node for the Per_CPU_Control::Threads_in_need_for_help chain.
+ *
+ * This chain is protected by the Per_CPU_Control::Lock lock of the assigned
+ * processor.
+ */
+ Chain_Node Help_node;
+
+ /**
+ * @brief Count of scheduler nodes minus one.
+ *
+ * This count is protected by the thread state lock.
+ */
+ size_t helping_nodes;
+
+ /**
+ * @brief List of pending scheduler node requests.
+ *
+ * This list is protected by the thread scheduler lock.
+ */
+ Scheduler_Node *requests;
+
+ /**
+ * @brief The thread processor affinity set.
+ */
+ Processor_mask Affinity;
+#endif
+
+ /**
+ * @brief The scheduler nodes of this thread.
+ *
+ * Each thread has a scheduler node for each scheduler instance.
+ */
+ Scheduler_Node *nodes;
+} Thread_Scheduler_control;
+
+/**
+ * @brief Union type to hold a pointer to an immutable or a mutable object.
+ *
+ * The main purpose is to enable passing of pointers to read-only send buffers
+ * in the message passing subsystem. This approach is somewhat fragile since
+ * it prevents the compiler from checking if the operations on objects are valid
+ * with respect to the constant qualifier. An alternative would be to add a
+ * third pointer argument for immutable objects, but this would increase the
+ * structure size.
+ */
+typedef union {
+ void *mutable_object;
+ const void *immutable_object;
+} Thread_Wait_information_Object_argument_type;
+
+/**
+ * @brief This type is able to contain several flags used to control the wait
+ * class and state of a thread.
+ *
+ * The mutually exclusive wait class flags are
+ * - @ref THREAD_WAIT_CLASS_EVENT,
+ * - @ref THREAD_WAIT_CLASS_SYSTEM_EVENT, and
+ * - @ref THREAD_WAIT_CLASS_OBJECT.
+ *
+ * The mutually exclusive wait state flags are
+ * - @ref THREAD_WAIT_STATE_INTEND_TO_BLOCK,
+ * - @ref THREAD_WAIT_STATE_BLOCKED, and
+ * - @ref THREAD_WAIT_STATE_READY_AGAIN.
+ */
+typedef unsigned int Thread_Wait_flags;
+
+/**
+ * @brief Information required to manage a thread while it is blocked.
+ *
+ * This contains the information required to manage a thread while it is
+ * blocked and to return information to it.
+ */
+typedef struct {
+#if defined(RTEMS_MULTIPROCESSING)
+ /**
+ * @brief This field is the identifier of the remote object this thread is
+ * waiting upon.
+ */
+ Objects_Id remote_id;
+#endif
+ /** This field is used to return an integer while blocked. */
+ uint32_t count;
+ /** This field is for a pointer to a user return argument. */
+ void *return_argument;
+ /** This field is for a pointer to a second user return argument. */
+ Thread_Wait_information_Object_argument_type
+ return_argument_second;
+ /** This field contains any options in effect on this blocking operation. */
+ uint32_t option;
+ /** This field will contain the return status from a blocking operation.
+ *
+ * @note The following assumes that all API return codes can be
+ * treated as an uint32_t.
+ */
+ uint32_t return_code;
+
+ /**
+ * @brief This field contains several flags used to control the wait class
+ * and state of a thread in case fine-grained locking is used.
+ */
+#if defined(RTEMS_SMP)
+ Atomic_Uint flags;
+#else
+ Thread_Wait_flags flags;
+#endif
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Thread wait lock control block.
+ *
+ * Parts of the thread wait information are protected by the thread wait
+ * default lock and additionally a thread queue lock in case the thread
+ * is enqueued on a thread queue.
+ *
+ * The thread wait lock mechanism protects the following thread variables
+ * - POSIX_API_Control::Attributes,
+ * - Scheduler_Node::Wait,
+ * - Thread_Control::Wait::Lock::Pending_requests,
+ * - Thread_Control::Wait::queue, and
+ * - Thread_Control::Wait::operations.
+ *
+ * @see _Thread_Wait_acquire(), _Thread_Wait_release(), _Thread_Wait_claim(),
+ * _Thread_Wait_restore_default() and _Thread_Wait_tranquilize().
+ */
+ struct {
+ /**
+ * @brief Thread wait default lock.
+ */
+ ISR_lock_Control Default;
+
+ /**
+ * @brief The pending thread wait lock acquire or tranquilize requests in
+ * case the thread is enqueued on a thread queue.
+ */
+ Chain_Control Pending_requests;
+
+ /**
+ * @brief Tranquilizer gate used by _Thread_Wait_tranquilize().
+ *
+ * This gate is closed by _Thread_Wait_claim(). In case there are no
+ * pending requests during a _Thread_Wait_restore_default(), then this gate
+ * is opened immediately, otherwise it is placed on the pending request
+ * chain and opened by _Thread_Wait_remove_request_locked() as the last
+ * gate on the chain to signal overall request completion.
+ */
+ Thread_queue_Gate Tranquilizer;
+ } Lock;
+
+ /**
+ * @brief Thread queue link provided for use by the thread wait lock owner to
+ * build a thread queue path.
+ */
+ Thread_queue_Link Link;
+#endif
+
+ /**
+ * @brief The current thread queue.
+ *
+ * If this field is NULL the thread is not enqueued on a thread queue. This
+ * field is protected by the thread wait default lock.
+ *
+ * @see _Thread_Wait_claim().
+ */
+ Thread_queue_Queue *queue;
+
+ /**
+ * @brief The current thread queue operations.
+ *
+ * This field is protected by the thread wait default lock.
+ *
+ * @see _Thread_Wait_claim().
+ */
+ const Thread_queue_Operations *operations;
+
+ Thread_queue_Heads *spare_heads;
+} Thread_Wait_information;
+
+/**
+ * @brief Information required to manage a thread timer.
+ */
+typedef struct {
+ ISR_LOCK_MEMBER( Lock )
+ Watchdog_Header *header;
+ Watchdog_Control Watchdog;
+} Thread_Timer_information;
+
+/**
+ * The following defines the control block used to manage
+ * each thread proxy.
+ *
+ * @note It is critical that proxies and threads have identical
+ * memory images for the shared part.
+ */
+typedef struct {
+ /** This field is the object management structure for each proxy. */
+ Objects_Control Object;
+
+ /**
+ * @see Thread_Control::Join_queue
+ */
+ Thread_queue_Control Join_queue;
+
+ /** This field is the current execution state of this proxy. */
+ States_Control current_state;
+
+ /**
+ * @brief The base priority of this thread in its home scheduler instance.
+ */
+ Priority_Node Real_priority;
+
+#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
+ /** This field is the number of mutexes currently held by this proxy. */
+ uint32_t resource_count;
+#endif
+
+ /**
+ * @brief Scheduler related control.
+ */
+ Thread_Scheduler_control Scheduler;
+
+ /** This field is the blocking information for this proxy. */
+ Thread_Wait_information Wait;
+ /** This field is the Watchdog used to manage proxy delays and timeouts. */
+ Thread_Timer_information Timer;
+#if defined(RTEMS_MULTIPROCESSING)
+ /** This field is the received response packet in an MP system. */
+ MP_packet_Prefix *receive_packet;
+ /****************** end of common block ********************/
+
+ /**
+ * @brief Thread queue callout for _Thread_queue_Enqueue().
+ */
+ Thread_queue_MP_callout thread_queue_callout;
+
+ /**
+ * @brief This field is used to manage the set of active proxies in the system.
+ */
+ RBTree_Node Active;
+
+ /**
+ * @brief The scheduler node providing the thread wait nodes used to enqueue
+ * this thread proxy on a thread queue.
+ */
+ Scheduler_Node Scheduler_node;
+
+ /**
+ * @brief Provide thread queue heads for this thread proxy.
+ *
+ * The actual size of the thread queue heads depends on the application
+ * configuration. Since thread proxies are never destroyed we can use the
+ * same storage place for the thread queue heads.
+ */
+ Thread_queue_Heads Thread_queue_heads[ RTEMS_ZERO_LENGTH_ARRAY ];
+#endif
+} Thread_Proxy_control;
+
+/**
+ * The following enumeration defines the APIs which may provide threads.
+ */
+typedef enum {
+ /** This value is for the Classic RTEMS API. */
+ THREAD_API_RTEMS,
+ /** This value is for the POSIX API. */
+ THREAD_API_POSIX
+} Thread_APIs;
+
+/** This macro defines the first API which has threads. */
+#define THREAD_API_FIRST THREAD_API_RTEMS
+
+/** This macro defines the last API which has threads. */
+#define THREAD_API_LAST THREAD_API_POSIX
+
+typedef struct Thread_Action Thread_Action;
+
+/**
+ * @brief Thread action handler.
+ *
+ * The thread action handler will be called with interrupts disabled and a
+ * corresponding lock acquired, e.g. _Thread_State_acquire(). The handler must
+ * release the corresponding lock, e.g. _Thread_State_release(). So, the
+ * corresponding lock may be used to protect private data used by the
+ * particular action.
+ *
+ * Since the action is passed to the handler additional data may be accessed
+ * via RTEMS_CONTAINER_OF().
+ *
+ * @param[in] the_thread The thread performing the action.
+ * @param[in] action The thread action.
+ * @param[in] lock_context The lock context to use for the lock release.
+ */
+typedef void ( *Thread_Action_handler )(
+ Thread_Control *the_thread,
+ Thread_Action *action,
+ ISR_lock_Context *lock_context
+);
+
+/**
+ * @brief Thread action.
+ *
+ * Thread actions can be chained together to trigger a set of actions on
+ * particular events like for example a thread post-switch. Use
+ * _Thread_Action_initialize() to initialize this structure.
+ *
+ * Thread actions are the building block for efficient implementation of
+ * - Classic signals delivery,
+ * - POSIX signals delivery, and
+ * - thread life-cycle changes.
+ *
+ * @see _Thread_Add_post_switch_action() and _Thread_Run_post_switch_actions().
+ */
+struct Thread_Action {
+ Chain_Node Node;
+ Thread_Action_handler handler;
+};
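+
+/*
+ * A sketch of an action handler which honours the locking protocol
+ * described above. The container type and handler name are hypothetical;
+ * RTEMS_CONTAINER_OF() comes from <rtems/score/basedefs.h> and
+ * _Thread_State_release() is assumed to be available via
+ * <rtems/score/threadimpl.h>:
+ *
+ * @code
+ * static void _My_action_handler(
+ *   Thread_Control   *the_thread,
+ *   Thread_Action    *action,
+ *   ISR_lock_Context *lock_context
+ * )
+ * {
+ *   My_control *my = RTEMS_CONTAINER_OF( action, My_control, Action );
+ *
+ *   // the handler must release the lock acquired by the caller
+ *   _Thread_State_release( the_thread, lock_context );
+ *
+ *   // ... perform the action using my ...
+ * }
+ * @endcode
+ */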
+
+/**
+ * @brief Per-thread information for POSIX Keys.
+ */
+typedef struct {
+ /**
+ * @brief Key value pairs registered for this thread.
+ */
+ RBTree_Control Key_value_pairs;
+
+ /**
+ * @brief Lock to protect the tree operations.
+ */
+ ISR_LOCK_MEMBER( Lock )
+} Thread_Keys_information;
+
+/**
+ * @brief Control block to manage thread actions.
+ *
+ * Use _Thread_Action_control_initialize() to initialize this structure.
+ */
+typedef struct {
+ Chain_Control Chain;
+} Thread_Action_control;
+
+/**
+ * @brief Thread life states.
+ *
+ * The thread life states are orthogonal to the thread states used for
+ * synchronization primitives and blocking operations. They reflect the state
+ * changes triggered with thread restart and delete requests.
+ *
+ * The individual state values must be a power of two to allow use of bit
+ * operations to manipulate and evaluate the thread life state.
+ */
+typedef enum {
+ THREAD_LIFE_PROTECTED = 0x1,
+ THREAD_LIFE_RESTARTING = 0x2,
+ THREAD_LIFE_TERMINATING = 0x4,
+ THREAD_LIFE_CHANGE_DEFERRED = 0x8,
+ THREAD_LIFE_DETACHED = 0x10
+} Thread_Life_state;
+
+/**
+ * @brief Thread life control.
+ */
+typedef struct {
+ /**
+ * @brief Thread life action used to react upon thread restart and delete
+ * requests.
+ */
+ Thread_Action Action;
+
+ /**
+ * @brief The current thread life state.
+ */
+ Thread_Life_state state;
+
+ /**
+ * @brief The count of pending life change requests.
+ */
+ uint32_t pending_life_change_requests;
+
+#if defined(RTEMS_POSIX_API)
+ /**
+ * @brief The thread exit value.
+ *
+ * It is,
+ * - the value passed to pthread_exit(), or
+ * - PTHREAD_CANCELED in case it is cancelled via pthread_cancel(), or
+ * - NULL.
+ */
+ void *exit_value;
+#endif
+} Thread_Life_control;
+
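+/**
+ * @brief Per-thread control for the thread capture engine.
+ */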
+typedef struct {
+ uint32_t flags;
+ void *control;
+} Thread_Capture_control;
+
+/**
+ * This structure defines the Thread Control Block (TCB).
+ *
+ * Uses a leading underscore in the structure name to allow forward
+ * declarations in standard header files provided by Newlib and GCC.
+ *
+ * In case the second member changes (currently Join_queue), the memset()
+ * in _Thread_Initialize() must be adjusted.
+ */
+struct _Thread_Control {
+ /** This field is the object management structure for each thread. */
+ Objects_Control Object;
+
+ /**
+ * @brief Thread queue for thread join operations and multi-purpose lock.
+ *
+ * The lock of this thread queue is used for various purposes. It protects
+ * the following fields
+ *
+ * - RTEMS_API_Control::Signal,
+ * - Thread_Control::budget_algorithm,
+ * - Thread_Control::budget_callout,
+ * - Thread_Control::cpu_time_budget,
+ * - Thread_Control::current_state,
+ * - Thread_Control::Post_switch_actions,
+ * - Thread_Control::Scheduler::control, and
+ * - Thread_Control::Scheduler::own_control.
+ *
+ * @see _Thread_State_acquire().
+ */
+ Thread_queue_Control Join_queue;
+
+ /** This field is the current execution state of this thread. */
+ States_Control current_state;
+
+ /**
+ * @brief The base priority of this thread in its home scheduler instance.
+ */
+ Priority_Node Real_priority;
+
+#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
+ /** This field is the number of mutexes currently held by this thread. */
+ uint32_t resource_count;
+#endif
+
+ /**
+ * @brief Scheduler related control.
+ */
+ Thread_Scheduler_control Scheduler;
+
+ /** This field is the blocking information for this thread. */
+ Thread_Wait_information Wait;
+ /** This field is the Watchdog used to manage thread delays and timeouts. */
+ Thread_Timer_information Timer;
+#if defined(RTEMS_MULTIPROCESSING)
+ /** This field is the received response packet in an MP system. */
+ MP_packet_Prefix *receive_packet;
+#endif
+ /*================= end of common block =================*/
+
+#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
+ /**
+ * @brief Potpourri lock statistics.
+ *
+ * These SMP lock statistics are used for all lock objects that lack a
+ * storage space for the statistics. Examples are lock objects used in
+ * external libraries which are independent of the actual RTEMS build
+ * configuration.
+ */
+ SMP_lock_Stats Potpourri_stats;
+#endif
+
+ /** This field is true if the thread is an idle thread. */
+ bool is_idle;
+#if defined(RTEMS_MULTIPROCESSING)
+ /** This field is true if the thread is offered globally. */
+ bool is_global;
+#endif
+ /** This field is true if the thread is preemptible. */
+ bool is_preemptible;
+ /** This field is true if the thread uses the floating point unit. */
+ bool is_fp;
+
+ /**
+ * @brief True, if the thread was created with an inherited scheduler
+ * (PTHREAD_INHERIT_SCHED), and false otherwise.
+ */
+ bool was_created_with_inherited_scheduler;
+
+ /** This field is the length of the time quantum that this thread is
+ * allowed to consume. The algorithm used to manage limits on CPU usage
+ * is specified by budget_algorithm.
+ */
+ uint32_t cpu_time_budget;
+ /** This field is the algorithm used to manage this thread's time
+ * quantum. The algorithm may be specified as none, in which case
+ * no limit is in place.
+ */
+ Thread_CPU_budget_algorithms budget_algorithm;
+ /** This field is the method invoked when the budgeted time is consumed. */
+ Thread_CPU_budget_algorithm_callout budget_callout;
+ /** This field is the amount of CPU time consumed by this thread
+ * since it was created.
+ */
+ Timestamp_Control cpu_time_used;
+
+ /** This field contains information about the starting state of
+ * this thread.
+ */
+ Thread_Start_information Start;
+
+ Thread_Action_control Post_switch_actions;
+
+ /** This field contains the context of this thread. */
+ Context_Control Registers;
+#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+ /** This field points to the floating point context for this thread.
+ * If NULL, the thread is integer only.
+ */
+ Context_Control_fp *fp_context;
+#endif
+ /** This field points to the newlib reentrancy structure for this thread. */
+ struct _reent *libc_reent;
+ /** This array contains the API extension area pointers. */
+ void *API_Extensions[ THREAD_API_LAST + 1 ];
+
+ /**
+ * @brief The POSIX Keys information.
+ */
+ Thread_Keys_information Keys;
+
+ /**
+ * @brief Thread life-cycle control.
+ *
+ * Control state changes triggered by thread restart and delete requests.
+ */
+ Thread_Life_control Life;
+
+ Thread_Capture_control Capture;
+
+ /**
+ * @brief LIFO list of POSIX cleanup contexts.
+ */
+ struct _pthread_cleanup_context *last_cleanup_context;
+
+ /**
+ * @brief LIFO list of user extensions iterators.
+ */
+ struct User_extensions_Iterator *last_user_extensions_iterator;
+
+ /**
+ * @brief Variable length array of user extension pointers.
+ *
+ * The length is defined by the application via <rtems/confdefs.h>.
+ */
+ void *extensions[ RTEMS_ZERO_LENGTH_ARRAY ];
+};
+
+#if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
+/**
+ * This routine is the body of the system idle thread.
+ *
+ * NOTE: This routine is actually instantiated by confdefs.h when needed.
+ */
+void *_Thread_Idle_body(
+ uintptr_t ignored
+);
+#endif
+
+typedef void (*rtems_per_thread_routine)( Thread_Control * );
+
+/* Use rtems_task_iterate() instead */
+void rtems_iterate_over_all_threads(
+ rtems_per_thread_routine routine
+) RTEMS_DEPRECATED;
+
+/**
+ * @brief Thread control add-on.
+ */
+typedef struct {
+ /**
+ * @brief Offset of the pointer field in Thread_Control referencing an
+ * application configuration dependent memory area in the thread control
+ * block.
+ */
+ size_t destination_offset;
+
+ /**
+ * @brief Offset relative to the thread control block begin to an application
+ * configuration dependent memory area.
+ */
+ size_t source_offset;
+} Thread_Control_add_on;
+
+/**
+ * @brief Thread control add-ons.
+ *
+ * The thread control block contains fields that point to application
+ * configuration dependent memory areas, like the scheduler information, the
+ * API control blocks, the user extension context table, and the Newlib
+ * re-entrancy support. Account for these areas in the configuration and
+ * avoid extra workspace allocations for these areas.
+ *
+ * This array is provided via <rtems/confdefs.h>.
+ *
+ * @see _Thread_Control_add_on_count and _Thread_Control_size.
+ */
+extern const Thread_Control_add_on _Thread_Control_add_ons[];
+
+/**
+ * @brief Thread control add-on count.
+ *
+ * Count of entries in _Thread_Control_add_ons.
+ *
+ * This value is provided via <rtems/confdefs.h>.
+ */
+extern const size_t _Thread_Control_add_on_count;
+
+/**
+ * @brief Size of the thread control block of a particular application.
+ *
+ * This value is provided via <rtems/confdefs.h>.
+ *
+ * @see _Thread_Control_add_ons.
+ */
+extern const size_t _Thread_Control_size;
+
+/**
+ * @brief Maximum size of a thread name in characters (including the
+ * terminating '\0' character).
+ *
+ * This value is provided via <rtems/confdefs.h>.
+ */
+extern const size_t _Thread_Maximum_name_size;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/threaddispatch.h b/cpukit/include/rtems/score/threaddispatch.h
new file mode 100644
index 0000000000..63eb4c6fb4
--- /dev/null
+++ b/cpukit/include/rtems/score/threaddispatch.h
@@ -0,0 +1,281 @@
+/**
+ * @file
+ *
+ * @brief Constants and Structures Related to Thread Dispatch
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_THREADDISPATCH_H
+#define _RTEMS_SCORE_THREADDISPATCH_H
+
+#include <rtems/score/percpu.h>
+#include <rtems/score/isrlock.h>
+#include <rtems/score/profiling.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreThread
+ *
+ * @{
+ */
+
+#if defined(RTEMS_SMP) || ( CPU_ENABLE_ROBUST_THREAD_DISPATCH == TRUE )
+/**
+ * @brief Enables a robust thread dispatch.
+ *
+ * On each change of the thread dispatch disable level from one to zero the
+ * interrupt status is checked.  In case interrupts are disabled, the system
+ * terminates with the fatal internal error
+ * INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT.  This check is performed
+ * on SMP configurations and on CPU ports which need it.
+ */
+#define RTEMS_SCORE_ROBUST_THREAD_DISPATCH
+#endif
+
+/**
+ * @brief Indicates whether the executing thread is inside a thread dispatch
+ * critical section.
+ *
+ * @retval true Thread dispatching is enabled, i.e. the executing thread is
+ * outside of a thread dispatch critical section.
+ * @retval false The executing thread is inside a thread dispatch critical
+ * section and dispatching is not allowed.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Dispatch_is_enabled(void)
+{
+ bool enabled;
+
+#if defined(RTEMS_SMP)
+ ISR_Level level;
+
+ _ISR_Local_disable( level );
+#endif
+
+ enabled = _Thread_Dispatch_disable_level == 0;
+
+#if defined(RTEMS_SMP)
+ _ISR_Local_enable( level );
+#endif
+
+ return enabled;
+}
+
+/**
+ * @brief Gets thread dispatch disable level.
+ *
+ * @return The value of the thread dispatch level.
+ */
+RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_get_disable_level(void)
+{
+ return _Thread_Dispatch_disable_level;
+}
+
+/**
+ * @brief Thread dispatch initialization.
+ *
+ * This routine initializes the thread dispatching subsystem.  The thread
+ * dispatch disable level starts at one, so no thread dispatch can happen
+ * before multitasking is started.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Dispatch_initialization( void )
+{
+ _Thread_Dispatch_disable_level = 1;
+}
+
+/**
+ * @brief Performs a thread dispatch if necessary.
+ *
+ * This routine is responsible for transferring control of the processor from
+ * the executing thread to the heir thread. Once the heir is running an
+ * attempt is made to run the pending post-switch thread actions.
+ *
+ * As part of this process, it is responsible for the following actions:
+ * - update the timing information of the executing thread,
+ * - save the context of the executing thread,
+ * - invoke the thread switch user extensions,
+ * - restore the context of the heir thread, and
+ * - run the pending post-switch thread actions of the resulting executing
+ *   thread.
+ *
+ * On entry the thread dispatch level must be equal to zero.
+ */
+void _Thread_Dispatch( void );
+
+/**
+ * @brief Performs a thread dispatch directly.
+ *
+ * Must be called with a thread dispatch disable level of one, otherwise the
+ * fatal internal error INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL will
+ * occur.  This function is useful for operations which block synchronously,
+ * e.g. self restart, self deletion, yield, or sleep.
+ *
+ * @param[in] cpu_self The current processor.
+ *
+ * @see _Thread_Dispatch().
+ */
+void _Thread_Dispatch_direct( Per_CPU_Control *cpu_self );
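+
+/*
+ * A minimal usage sketch (illustrative only, not a definitive pattern): a
+ * synchronously blocking operation typically disables thread dispatching,
+ * changes the thread state, and then dispatches directly with a disable
+ * level of one.  The particular state change shown here is an assumed
+ * example; _Thread_Set_state() and STATES_SUSPENDED come from other score
+ * headers.
+ *
+ *   Per_CPU_Control *cpu_self;
+ *
+ *   cpu_self = _Thread_Dispatch_disable();
+ *   _Thread_Set_state( _Thread_Executing, STATES_SUSPENDED );
+ *   _Thread_Dispatch_direct( cpu_self );
+ */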
+
+/**
+ * @brief Performs a thread dispatch on the current processor.
+ *
+ * On entry the thread dispatch disable level must be equal to one and
+ * interrupts must be disabled.
+ *
+ * This function assumes that a thread dispatch is necessary.
+ *
+ * @param[in] cpu_self The current processor.
+ * @param[in] level The previous interrupt level.
+ *
+ * @see _Thread_Dispatch().
+ */
+void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level );
+
+/**
+ * @brief Disables thread dispatching inside a critical section (interrupts
+ * disabled) with the current processor.
+ *
+ * @param[in] cpu_self The current processor.
+ * @param[in] lock_context The lock context of the corresponding
+ * _ISR_lock_ISR_disable() that started the critical section.
+ *
+ * @return The current processor.
+ */
+RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_with_CPU(
+ Per_CPU_Control *cpu_self,
+ const ISR_lock_Context *lock_context
+)
+{
+ uint32_t disable_level;
+
+ disable_level = cpu_self->thread_dispatch_disable_level;
+ _Profiling_Thread_dispatch_disable_critical(
+ cpu_self,
+ disable_level,
+ lock_context
+ );
+ cpu_self->thread_dispatch_disable_level = disable_level + 1;
+
+ return cpu_self;
+}
+
+/**
+ * @brief Disables thread dispatching inside a critical section (interrupts
+ * disabled).
+ *
+ * @param[in] lock_context The lock context of the corresponding
+ * _ISR_lock_ISR_disable() that started the critical section.
+ *
+ * @return The current processor.
+ */
+RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_critical(
+ const ISR_lock_Context *lock_context
+)
+{
+ return _Thread_Dispatch_disable_with_CPU( _Per_CPU_Get(), lock_context );
+}
+
+/**
+ * @brief Disables thread dispatching.
+ *
+ * @return The current processor.
+ */
+RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable( void )
+{
+ Per_CPU_Control *cpu_self;
+ ISR_lock_Context lock_context;
+
+#if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING )
+ _ISR_lock_ISR_disable( &lock_context );
+#endif
+
+ cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
+
+#if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING )
+ _ISR_lock_ISR_enable( &lock_context );
+#endif
+
+ return cpu_self;
+}
+
+/**
+ * @brief Enables thread dispatching.
+ *
+ * May perform a thread dispatch if necessary as a side-effect.
+ *
+ * @param[in] cpu_self The current processor.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self )
+{
+ uint32_t disable_level = cpu_self->thread_dispatch_disable_level;
+
+ if ( disable_level == 1 ) {
+ ISR_Level level;
+
+ _ISR_Local_disable( level );
+
+ if (
+ cpu_self->dispatch_necessary
+#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
+ || !_ISR_Is_enabled( level )
+#endif
+ ) {
+ _Thread_Do_dispatch( cpu_self, level );
+ } else {
+ cpu_self->thread_dispatch_disable_level = 0;
+ _Profiling_Thread_dispatch_enable( cpu_self, 0 );
+ }
+
+ _ISR_Local_enable( level );
+ } else {
+ _Assert( disable_level > 0 );
+ cpu_self->thread_dispatch_disable_level = disable_level - 1;
+ }
+}
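+
+/*
+ * A minimal usage sketch (illustrative only): protect a short critical
+ * region against thread dispatching.  Interrupts remain enabled; only the
+ * thread dispatch is deferred until _Thread_Dispatch_enable() drops the
+ * disable level back to zero.
+ *
+ *   Per_CPU_Control *cpu_self;
+ *
+ *   cpu_self = _Thread_Dispatch_disable();
+ *   ... operate on data shared between threads ...
+ *   _Thread_Dispatch_enable( cpu_self );
+ */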
+
+/**
+ * @brief Unnests thread dispatching.
+ *
+ * @param[in] cpu_self The current processor.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Dispatch_unnest( Per_CPU_Control *cpu_self )
+{
+ _Assert( cpu_self->thread_dispatch_disable_level > 0 );
+ --cpu_self->thread_dispatch_disable_level;
+}
+
+/**
+ * @brief Requests a thread dispatch on the target processor.
+ *
+ * @param[in] cpu_self The current processor.
+ * @param[in] cpu_target The target processor to request a thread dispatch.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Dispatch_request(
+ Per_CPU_Control *cpu_self,
+ Per_CPU_Control *cpu_target
+)
+{
+#if defined( RTEMS_SMP )
+ if ( cpu_self == cpu_target ) {
+ cpu_self->dispatch_necessary = true;
+ } else {
+ _Atomic_Fetch_or_ulong( &cpu_target->message, 0, ATOMIC_ORDER_RELEASE );
+ _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu_target ) );
+ }
+#else
+ cpu_self->dispatch_necessary = true;
+ (void) cpu_target;
+#endif
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_THREADDISPATCH_H */
diff --git a/cpukit/include/rtems/score/threadimpl.h b/cpukit/include/rtems/score/threadimpl.h
new file mode 100644
index 0000000000..b6722fae19
--- /dev/null
+++ b/cpukit/include/rtems/score/threadimpl.h
@@ -0,0 +1,1969 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines from the Thread Handler
+ *
+ * This file contains the implementation of the inlined
+ * routines from the Thread handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2008.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2014, 2017 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_THREADIMPL_H
+#define _RTEMS_SCORE_THREADIMPL_H
+
+#include <rtems/score/thread.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/objectimpl.h>
+#include <rtems/score/schedulernodeimpl.h>
+#include <rtems/score/statesimpl.h>
+#include <rtems/score/status.h>
+#include <rtems/score/sysstate.h>
+#include <rtems/score/threadqimpl.h>
+#include <rtems/score/todimpl.h>
+#include <rtems/score/freechain.h>
+#include <rtems/score/watchdogimpl.h>
+#include <rtems/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreThread
+ */
+/**@{**/
+
+/**
+ * The following constant defines the status code which indicates that a
+ * proxy is blocking while the associated thread waits for a resource.
+ */
+#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
+
+/**
+ * Self for the GNU Ada Run-Time
+ */
+extern void *rtems_ada_self;
+
+typedef struct {
+ Objects_Information Objects;
+
+ Freechain_Control Free_thread_queue_heads;
+} Thread_Information;
+
+/**
+ * The following defines the information control block used to
+ * manage this class of objects.
+ */
+extern Thread_Information _Thread_Internal_information;
+
+/**
+ * @brief Object identifier of the global constructor thread.
+ *
+ * This variable is set by _RTEMS_tasks_Initialize_user_tasks_body() or
+ * _POSIX_Threads_Initialize_user_threads_body().
+ *
+ * It is consumed by _Thread_Handler().
+ */
+extern Objects_Id _Thread_Global_constructor;
+
+/**
+ * The following points to the thread whose floating point
+ * context is currently loaded.
+ */
+#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+extern Thread_Control *_Thread_Allocated_fp;
+#endif
+
+#if defined(RTEMS_SMP)
+#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
+ RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
+#endif
+
+typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
+
+void _Thread_Iterate(
+ Thread_Visitor visitor,
+ void *arg
+);
+
+void _Thread_Initialize_information(
+ Thread_Information *information,
+ Objects_APIs the_api,
+ uint16_t the_class,
+ uint32_t maximum,
+ bool is_string,
+ uint32_t maximum_name_length
+);
+
+/**
+ * @brief Initialize thread handler.
+ *
+ * This routine performs the initialization necessary for this handler.
+ */
+void _Thread_Handler_initialization(void);
+
+/**
+ * @brief Create idle thread.
+ *
+ * This routine creates the idle thread.
+ *
+ * @warning No thread should be created before this one.
+ */
+void _Thread_Create_idle(void);
+
+/**
+ * @brief Start thread multitasking.
+ *
+ * This routine initiates multitasking. It is invoked only as
+ * part of initialization and its invocation is the last act of
+ * the non-multitasking part of the system initialization.
+ */
+void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
+
+/**
+ * @brief Allocates the requested stack space for the thread.
+ *
+ * Allocate the requested stack space for the thread and set the Start.stack
+ * field to the address of the stack.
+ *
+ * @param[in] the_thread is the thread for which the stack space is requested
+ * @param[in] stack_size is the size of the requested stack space
+ *
+ * @retval actual size allocated after any adjustment
+ * @retval zero if the allocation failed
+ */
+size_t _Thread_Stack_Allocate(
+ Thread_Control *the_thread,
+ size_t stack_size
+);
+
+/**
+ * @brief Deallocate thread stack.
+ *
+ * Deallocate the Thread's stack.
+ */
+void _Thread_Stack_Free(
+ Thread_Control *the_thread
+);
+
+/**
+ * @brief Initialize thread.
+ *
+ * This routine initializes the specified thread.  It allocates
+ * all memory associated with this thread. It completes by adding
+ * the thread to the local object table so operations on this
+ * thread id are allowed.
+ *
+ * @note If stack_area is NULL, it is allocated from the workspace.
+ *
+ * @note If the stack is allocated from the workspace, then it is
+ * guaranteed to be of at least minimum size.
+ */
+bool _Thread_Initialize(
+ Thread_Information *information,
+ Thread_Control *the_thread,
+ const struct _Scheduler_Control *scheduler,
+ void *stack_area,
+ size_t stack_size,
+ bool is_fp,
+ Priority_Control priority,
+ bool is_preemptible,
+ Thread_CPU_budget_algorithms budget_algorithm,
+ Thread_CPU_budget_algorithm_callout budget_callout,
+ uint32_t isr_level,
+ Objects_Name name
+);
+
+/**
+ * @brief Initializes thread and executes it.
+ *
+ * This routine initializes the executable information for a thread
+ * and makes it ready to execute. After this routine executes, the
+ * thread competes with all other threads for CPU time.
+ *
+ * @param the_thread The thread to be started.
+ * @param entry The thread entry information.
+ */
+bool _Thread_Start(
+ Thread_Control *the_thread,
+ const Thread_Entry_information *entry,
+ ISR_lock_Context *lock_context
+);
+
+void _Thread_Restart_self(
+ Thread_Control *executing,
+ const Thread_Entry_information *entry,
+ ISR_lock_Context *lock_context
+) RTEMS_NO_RETURN;
+
+bool _Thread_Restart_other(
+ Thread_Control *the_thread,
+ const Thread_Entry_information *entry,
+ ISR_lock_Context *lock_context
+);
+
+void _Thread_Yield( Thread_Control *executing );
+
+Thread_Life_state _Thread_Change_life(
+ Thread_Life_state clear,
+ Thread_Life_state set,
+ Thread_Life_state ignore
+);
+
+Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
+
+/**
+ * @brief Kills all zombie threads in the system.
+ *
+ * Threads change into the zombie state as the last step in the thread
+ * termination sequence right before a context switch to the heir thread is
+ * initiated. Since the thread stack is still in use during this phase we have
+ * to postpone the thread stack reclamation until this point. On SMP
+ * configurations we may have to busy wait for context switch completion here.
+ */
+void _Thread_Kill_zombies( void );
+
+void _Thread_Exit(
+ Thread_Control *executing,
+ Thread_Life_state set,
+ void *exit_value
+);
+
+void _Thread_Join(
+ Thread_Control *the_thread,
+ States_Control waiting_for_join,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
+);
+
+void _Thread_Cancel(
+ Thread_Control *the_thread,
+ Thread_Control *executing,
+ void *exit_value
+);
+
+typedef struct {
+ Thread_queue_Context Base;
+ Thread_Control *cancel;
+} Thread_Close_context;
+
+/**
+ * @brief Closes the thread.
+ *
+ * Closes the thread object and starts the thread termination sequence.  In
+ * case the executing thread is not terminated, then this function waits until
+ * the terminating thread has reached the zombie state.
+ */
+void _Thread_Close(
+ Thread_Control *the_thread,
+ Thread_Control *executing,
+ Thread_Close_context *context
+);
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
+{
+ return _States_Is_ready( the_thread->current_state );
+}
+
+States_Control _Thread_Clear_state_locked(
+ Thread_Control *the_thread,
+ States_Control state
+);
+
+/**
+ * @brief Clears the specified thread state.
+ *
+ * In case the previous state is a non-ready state and the next state is the
+ * ready state, then the thread is unblocked by the scheduler.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] state The state to clear. It must not be zero.
+ *
+ * @return The previous state.
+ */
+States_Control _Thread_Clear_state(
+ Thread_Control *the_thread,
+ States_Control state
+);
+
+States_Control _Thread_Set_state_locked(
+ Thread_Control *the_thread,
+ States_Control state
+);
+
+/**
+ * @brief Sets the specified thread state.
+ *
+ * In case the previous state is the ready state, then the thread is blocked by
+ * the scheduler.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] state The state to set. It must not be zero.
+ *
+ * @return The previous state.
+ */
+States_Control _Thread_Set_state(
+ Thread_Control *the_thread,
+ States_Control state
+);
+
+/**
+ * @brief Initializes the environment for a thread.
+ *
+ * This routine initializes the context of @a the_thread to its
+ * appropriate starting state.
+ *
+ * @param[in] the_thread is the pointer to the thread control block.
+ */
+void _Thread_Load_environment(
+ Thread_Control *the_thread
+);
+
+void _Thread_Entry_adaptor_idle( Thread_Control *executing );
+
+void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
+
+void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
+
+/**
+ * @brief Wrapper function for all threads.
+ *
+ * This routine is the wrapper function for all threads. It is
+ * the starting point for all threads. The user provided thread
+ * entry point is invoked by this routine. Operations
+ * which must be performed immediately before and after the user's
+ * thread executes are found here.
+ *
+ * @note On entry, it is assumed all interrupts are blocked and that this
+ * routine needs to set the initial isr level. This may or may not
+ * actually be needed by the context switch routine and as a result
+ * interrupts may already be at their proper level.  Either way,
+ * setting the initial isr level properly here is safe.
+ */
+void _Thread_Handler( void );
+
+RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_ISR_disable( lock_context );
+ _Thread_State_acquire_critical( the_thread, lock_context );
+}
+
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
+ ISR_lock_Context *lock_context
+)
+{
+ Thread_Control *executing;
+
+ _ISR_lock_ISR_disable( lock_context );
+ executing = _Thread_Executing;
+ _Thread_State_acquire_critical( executing, lock_context );
+
+ return executing;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_State_release(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _Thread_State_release_critical( the_thread, lock_context );
+ _ISR_lock_ISR_enable( lock_context );
+}
+
+#if defined(RTEMS_DEBUG)
+RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
+ const Thread_Control *the_thread
+)
+{
+ return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
+}
+#endif
+
+/**
+ * @brief Performs the priority actions specified by the thread queue context
+ * along the thread queue path.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param start_of_path The start thread of the thread queue path.
+ * @param queue_context The thread queue context specifying the thread queue
+ * path and initial thread priority actions.
+ *
+ * @see _Thread_queue_Path_acquire_critical().
+ */
+void _Thread_Priority_perform_actions(
+ Thread_Control *start_of_path,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Adds the specified thread priority node to the corresponding thread
+ * priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to add.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
+ *
+ * @see _Thread_Wait_acquire().
+ */
+void _Thread_Priority_add(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Removes the specified thread priority node from the corresponding
+ * thread priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to remove.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
+ *
+ * @see _Thread_Wait_acquire().
+ */
+void _Thread_Priority_remove(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Propagates a thread priority value change in the specified thread
+ * priority node to the corresponding thread priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to change.
+ * @param prepend_it In case this is true, then the thread is prepended to
+ * its priority group in its home scheduler instance, otherwise it is
+ * appended.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
+ *
+ * @see _Thread_Wait_acquire().
+ */
+void _Thread_Priority_changed(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ bool prepend_it,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Changes the thread priority value of the specified thread priority
+ * node in the corresponding thread priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to change.
+ * @param new_priority The new thread priority value of the thread priority
+ * node to change.
+ * @param prepend_it In case this is true, then the thread is prepended to
+ * its priority group in its home scheduler instance, otherwise it is
+ * appended.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
+ *
+ * @see _Thread_Wait_acquire().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Priority_Control new_priority,
+ bool prepend_it,
+ Thread_queue_Context *queue_context
+)
+{
+ _Priority_Node_set_priority( priority_node, new_priority );
+ _Thread_Priority_changed(
+ the_thread,
+ priority_node,
+ prepend_it,
+ queue_context
+ );
+}
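+
+/*
+ * A usage sketch (illustrative only, based on the documented contract
+ * above): change a priority node under the thread wait lock and let the
+ * scheduler observe the update afterwards.  The Real_priority node and the
+ * new_priority value are assumed example inputs.
+ *
+ *   Thread_queue_Context queue_context;
+ *
+ *   _Thread_queue_Context_clear_priority_updates( &queue_context );
+ *   _Thread_Wait_acquire( the_thread, &queue_context );
+ *   _Thread_Priority_change(
+ *     the_thread,
+ *     &the_thread->Real_priority,
+ *     new_priority,
+ *     false,
+ *     &queue_context
+ *   );
+ *   _Thread_Wait_release( the_thread, &queue_context );
+ *   _Thread_Priority_update( &queue_context );
+ */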
+
+/**
+ * @brief Replaces the victim priority node with the replacement priority node
+ * in the corresponding thread priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param the_thread The thread.
+ * @param victim_node The victim thread priority node.
+ * @param replacement_node The replacement thread priority node.
+ *
+ * @see _Thread_Wait_acquire().
+ */
+void _Thread_Priority_replace(
+ Thread_Control *the_thread,
+ Priority_Node *victim_node,
+ Priority_Node *replacement_node
+);
+
+/**
+ * @brief Updates the priority of all threads in the set returned via the
+ * thread queue context.
+ *
+ * @param queue_context The thread queue context with the set of threads
+ *   collected by previous thread priority operations.
+ *
+ * @see _Thread_Priority_add(), _Thread_Priority_change(),
+ *   _Thread_Priority_changed() and _Thread_Priority_remove().
+ */
+void _Thread_Priority_update( Thread_queue_Context *queue_context );
+
+#if defined(RTEMS_SMP)
+void _Thread_Priority_and_sticky_update(
+ Thread_Control *the_thread,
+ int sticky_level_change
+);
+#endif
+
+/**
+ * @brief Returns true if the left thread priority is less than the right
+ * thread priority in the intuitive sense of priority and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
+ Priority_Control left,
+ Priority_Control right
+)
+{
+ return left > right;
+}
+
+/**
+ * @brief Returns the highest priority of the left and right thread priorities
+ * in the intuitive sense of priority.
+ */
+RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
+ Priority_Control left,
+ Priority_Control right
+)
+{
+ return _Thread_Priority_less_than( left, right ) ? right : left;
+}
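+
+/*
+ * Note (illustrative): priority values use the intuitive mapping in which
+ * lower numbers mean higher importance, hence the inverted comparison
+ * above.  For example, _Thread_Priority_less_than( 1, 2 ) is false and
+ * _Thread_Priority_highest( 1, 2 ) yields 1.
+ */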
+
+RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
+ Objects_Id id
+)
+{
+ uint32_t the_api;
+
+ the_api = _Objects_Get_API( id );
+
+ if ( !_Objects_Is_api_valid( the_api ) ) {
+ return NULL;
+ }
+
+ /*
+ * Threads are always first class :)
+ *
+ * There is no need to validate the object class of the object identifier,
+ * since this will be done by the object get methods.
+ */
+ return _Objects_Information_table[ the_api ][ 1 ];
+}
+
+/**
+ * @brief Gets a thread by its identifier.
+ *
+ * @see _Objects_Get().
+ */
+Thread_Control *_Thread_Get(
+ Objects_Id id,
+ ISR_lock_Context *lock_context
+);
+
+RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
+ const Thread_Control *thread
+)
+{
+#if defined(RTEMS_SMP)
+ return thread->Scheduler.cpu;
+#else
+ (void) thread;
+
+ return _Per_CPU_Get();
+#endif
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
+ Thread_Control *thread,
+ Per_CPU_Control *cpu
+)
+{
+#if defined(RTEMS_SMP)
+ thread->Scheduler.cpu = cpu;
+#else
+ (void) thread;
+ (void) cpu;
+#endif
+}
+
+/**
+ * This function returns true if the_thread is the currently executing
+ * thread, and false otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
+ const Thread_Control *the_thread
+)
+{
+ return ( the_thread == _Thread_Executing );
+}
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief Returns @a true in case the thread currently executes on some
+ * processor in the system, otherwise @a false.
+ *
+ * Do not confuse this with _Thread_Is_executing() which checks only the
+ * current processor.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
+ const Thread_Control *the_thread
+)
+{
+ return _CPU_Context_Get_is_executing( &the_thread->Registers );
+}
+#endif
+
+/**
+ * This function returns true if the_thread is the heir
+ * thread, and false otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
+ const Thread_Control *the_thread
+)
+{
+ return ( the_thread == _Thread_Heir );
+}
+
+/**
+ * This routine clears any blocking state for the_thread. It performs
+ * any necessary scheduling operations including the selection of
+ * a new heir thread.
+ */
+
+RTEMS_INLINE_ROUTINE void _Thread_Unblock (
+ Thread_Control *the_thread
+)
+{
+ _Thread_Clear_state( the_thread, STATES_BLOCKED );
+}
+
+/**
+ * This function returns true if the floating point context of
+ * the_thread is currently loaded in the floating point unit, and
+ * false otherwise.
+ */
+
+#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
+ const Thread_Control *the_thread
+)
+{
+ return ( the_thread == _Thread_Allocated_fp );
+}
+#endif
+
+/*
+ * If the CPU has hardware floating point, then we must address saving
+ * and restoring it as part of the context switch.
+ *
+ * The second conditional compilation section selects the algorithm used
+ * to context switch between floating point tasks. The deferred algorithm
+ * can be significantly better in a system with few floating point tasks
+ * because it reduces the total number of save and restore FP context
+ * operations. However, this algorithm can not be used on all CPUs due
+ * to unpredictable use of FP registers by some compilers for integer
+ * operations.
+ */
+
+RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
+{
+#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
+ if ( executing->fp_context != NULL )
+ _Context_Save_fp( &executing->fp_context );
+#endif
+#endif
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
+{
+#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
+ if ( (executing->fp_context != NULL) &&
+ !_Thread_Is_allocated_fp( executing ) ) {
+ if ( _Thread_Allocated_fp != NULL )
+ _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
+ _Context_Restore_fp( &executing->fp_context );
+ _Thread_Allocated_fp = executing;
+ }
+#else
+ if ( executing->fp_context != NULL )
+ _Context_Restore_fp( &executing->fp_context );
+#endif
+#endif
+}
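+
+/*
+ * Example (illustrative) for the deferred algorithm described above: let A
+ * be a floating point thread and B an integer only thread.  The switch
+ * sequence A -> B -> A performs no FP save or restore at all, since
+ * _Thread_Allocated_fp still refers to A when it runs again.  Only a switch
+ * to another floating point thread C saves the context of A and restores
+ * the context of C.
+ */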
+
+/**
+ * This routine is invoked when the currently loaded floating
+ * point context is no longer associated with an active thread.
+ */
+
+#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
+{
+ _Thread_Allocated_fp = NULL;
+}
+#endif
+
+/**
+ * This function returns true if a context switch is necessary, and false
+ * otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
+{
+ return ( _Thread_Dispatch_necessary );
+}
+
+/**
+ * This function returns true if the_thread is NULL and false otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
+ const Thread_Control *the_thread
+)
+{
+ return ( the_thread == NULL );
+}
+
+/**
+ * @brief Is proxy blocking.
+ *
+ * This function returns true if the status code indicates that a proxy is
+ * blocking, and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
+ uint32_t code
+)
+{
+ return (code == THREAD_STATUS_PROXY_BLOCKING);
+}
+
+RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
+{
+ /* Idle threads */
+ uint32_t maximum_internal_threads =
+ rtems_configuration_get_maximum_processors();
+
+ /* MPCI thread */
+#if defined(RTEMS_MULTIPROCESSING)
+ if ( _System_state_Is_multiprocessing ) {
+ ++maximum_internal_threads;
+ }
+#endif
+
+ return maximum_internal_threads;
+}
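+
+/*
+ * Example (illustrative): on a system configured for four processors and
+ * without an active multiprocessing configuration this yields four, one
+ * idle thread per processor.  With RTEMS_MULTIPROCESSING and an active
+ * multiprocessing configuration it yields five, the extra thread being the
+ * MPCI receive thread.
+ */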
+
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
+{
+ return (Thread_Control *)
+ _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
+}
+
+/**
+ * @brief Gets the heir of the processor and makes it executing.
+ *
+ * Must be called with interrupts disabled. The thread dispatch necessary
+ * indicator is cleared as a side-effect.
+ *
+ * @return The heir thread.
+ *
+ * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
+ * _Thread_Dispatch_update_heir().
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
+ Per_CPU_Control *cpu_self
+)
+{
+ Thread_Control *heir;
+
+ heir = cpu_self->heir;
+ cpu_self->dispatch_necessary = false;
+ cpu_self->executing = heir;
+
+ return heir;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu
+)
+{
+ Timestamp_Control last;
+ Timestamp_Control ran;
+
+ last = cpu->cpu_usage_timestamp;
+ _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
+ _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
+ _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
+}
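+
+/*
+ * Note (illustrative): the routine above charges the time since the last
+ * per-processor usage timestamp to the thread.  For example, if the
+ * previous timestamp was taken at uptime 10.000 s and the current uptime is
+ * 10.004 s, then 4 ms are added to the cpu_time_used of the thread and the
+ * per-processor timestamp is advanced.
+ */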
+
+#if defined( RTEMS_SMP )
+RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
+ Per_CPU_Control *cpu_self,
+ Per_CPU_Control *cpu_for_heir,
+ Thread_Control *heir
+)
+{
+ _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
+
+ cpu_for_heir->heir = heir;
+
+ _Thread_Dispatch_request( cpu_self, cpu_for_heir );
+}
+#endif
+
+void _Thread_Get_CPU_time_used(
+ Thread_Control *the_thread,
+ Timestamp_Control *cpu_time_used
+);
+
+RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
+ Thread_Action_control *action_control
+)
+{
+ _Chain_Initialize_empty( &action_control->Chain );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
+ Thread_Action *action
+)
+{
+ _Chain_Set_off_chain( &action->Node );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
+ Thread_Control *the_thread,
+ Thread_Action *action,
+ Thread_Action_handler handler
+)
+{
+ Per_CPU_Control *cpu_of_thread;
+
+ _Assert( _Thread_State_is_owner( the_thread ) );
+
+ cpu_of_thread = _Thread_Get_CPU( the_thread );
+
+ action->handler = handler;
+
+ _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );
+
+ _Chain_Append_if_is_off_chain_unprotected(
+ &the_thread->Post_switch_actions.Chain,
+ &action->Node
+ );
+}
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
+ Thread_Life_state life_state
+)
+{
+ return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
+}
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
+ Thread_Life_state life_state
+)
+{
+ return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
+}
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
+ Thread_Life_state life_state
+)
+{
+ return ( life_state
+ & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
+}
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
+ Thread_Life_state life_state
+)
+{
+ return ( life_state
+ & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
+}
+
+RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
+ const Thread_Control *the_thread
+)
+{
+ _Assert( _Thread_State_is_owner( the_thread ) );
+ return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
+ Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
+ ++the_thread->resource_count;
+#else
+ (void) the_thread;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
+ Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
+ --the_thread->resource_count;
+#else
+ (void) the_thread;
+#endif
+}
+
+#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
+/**
+ * @brief Returns true if the thread owns resources, and false otherwise.
+ *
+ * Resources are accounted with the Thread_Control::resource_count resource
+ * counter. This counter is used by mutex objects for example.
+ *
+ * @param[in] the_thread The thread.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
+ const Thread_Control *the_thread
+)
+{
+ return the_thread->resource_count != 0;
+}
+#endif
+
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu
+)
+{
+ _Per_CPU_Acquire( cpu );
+
+ if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
+ _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
+ _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
+ }
+
+ _Per_CPU_Release( cpu );
+}
+#endif
+
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return the_thread->Scheduler.home;
+#else
+ (void) the_thread;
+ return &_Scheduler_Table[ 0 ];
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
+ return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
+ _Chain_First( &the_thread->Scheduler.Wait_nodes )
+ );
+#else
+ return the_thread->Scheduler.nodes;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
+ const Thread_Control *the_thread,
+ size_t scheduler_index
+)
+{
+#if defined(RTEMS_SMP)
+ return (Scheduler_Node *)
+ ( (uintptr_t) the_thread->Scheduler.nodes
+ + scheduler_index * _Scheduler_Node_size );
+#else
+ _Assert( scheduler_index == 0 );
+ (void) scheduler_index;
+ return the_thread->Scheduler.nodes;
+#endif
+}
+
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
+}
+
+void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
+
+RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
+ Thread_Control *the_thread,
+ Scheduler_Node *scheduler_node,
+ Scheduler_Node_request request
+)
+{
+ ISR_lock_Context lock_context;
+ Scheduler_Node_request current_request;
+
+ _Thread_Scheduler_acquire_critical( the_thread, &lock_context );
+
+ current_request = scheduler_node->Thread.request;
+
+ if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
+ _Assert(
+ request == SCHEDULER_NODE_REQUEST_ADD
+ || request == SCHEDULER_NODE_REQUEST_REMOVE
+ );
+ _Assert( scheduler_node->Thread.next_request == NULL );
+ scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
+ the_thread->Scheduler.requests = scheduler_node;
+ } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
+ _Assert(
+ ( current_request == SCHEDULER_NODE_REQUEST_ADD
+ && request == SCHEDULER_NODE_REQUEST_REMOVE )
+ || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
+ && request == SCHEDULER_NODE_REQUEST_ADD )
+ );
+ request = SCHEDULER_NODE_REQUEST_NOTHING;
+ }
+
+ scheduler_node->Thread.request = request;
+
+ _Thread_Scheduler_release_critical( the_thread, &lock_context );
+}
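+
+/*
+ * Note (illustrative): the requests of one thread form a LIFO list which is
+ * processed by _Thread_Scheduler_process_requests().  An add request which
+ * is still pending when the matching remove request arrives collapses to
+ * SCHEDULER_NODE_REQUEST_NOTHING, so the node stays on the request list but
+ * no scheduler operation is performed for it.
+ */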
+
+RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
+ Thread_Control *the_thread,
+ Scheduler_Node *scheduler_node
+)
+{
+ _Chain_Append_unprotected(
+ &the_thread->Scheduler.Wait_nodes,
+ &scheduler_node->Thread.Wait_node
+ );
+ _Thread_Scheduler_add_request(
+ the_thread,
+ scheduler_node,
+ SCHEDULER_NODE_REQUEST_ADD
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
+ Thread_Control *the_thread,
+ Scheduler_Node *scheduler_node
+)
+{
+ _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
+ _Thread_Scheduler_add_request(
+ the_thread,
+ scheduler_node,
+ SCHEDULER_NODE_REQUEST_REMOVE
+ );
+}
+#endif
+
+/**
+ * @brief Returns the priority of the thread.
+ *
+ * Returns the thread priority relevant for the user API and the thread wait
+ * information.  This includes temporary thread priority adjustments due to
+ * locking protocols, a job release, or the POSIX sporadic server, for
+ * example.
+ *
+ * @return The priority of the thread.
+ */
+RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
+ const Thread_Control *the_thread
+)
+{
+ Scheduler_Node *scheduler_node;
+
+ scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
+ return _Priority_Get_priority( &scheduler_node->Wait.Priority );
+}
+
+/**
+ * @brief Acquires the thread wait default lock inside a critical section
+ * (interrupts disabled).
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] lock_context The lock context used for the corresponding lock
+ * release.
+ *
+ * @see _Thread_Wait_release_default_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
+}
+
+/**
+ * @brief Acquires the thread wait default lock and returns the executing
+ * thread.
+ *
+ * @param[in] lock_context The lock context used for the corresponding lock
+ * release.
+ *
+ * @return The executing thread.
+ *
+ * @see _Thread_Wait_release_default().
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
+ ISR_lock_Context *lock_context
+)
+{
+ Thread_Control *executing;
+
+ _ISR_lock_ISR_disable( lock_context );
+ executing = _Thread_Executing;
+ _Thread_Wait_acquire_default_critical( executing, lock_context );
+
+ return executing;
+}
+
+/**
+ * @brief Acquires the thread wait default lock and disables interrupts.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] lock_context The lock context used for the corresponding lock
+ * release.
+ *
+ * @see _Thread_Wait_release_default().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_ISR_disable( lock_context );
+ _Thread_Wait_acquire_default_critical( the_thread, lock_context );
+}
+
+/**
+ * @brief Releases the thread wait default lock inside a critical section
+ * (interrupts disabled).
+ *
+ * The previous interrupt status is not restored.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] lock_context The lock context used for the corresponding lock
+ * acquire.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
+}
+
+/**
+ * @brief Releases the thread wait default lock and restores the previous
+ * interrupt status.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] lock_context The lock context used for the corresponding lock
+ * acquire.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+ _Thread_Wait_release_default_critical( the_thread, lock_context );
+ _ISR_lock_ISR_enable( lock_context );
+}
+
+#if defined(RTEMS_SMP)
+#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
+ RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
+
+RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
+ Thread_Control *the_thread,
+ Thread_queue_Lock_context *queue_lock_context
+)
+{
+ Chain_Node *first;
+
+ _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
+ first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
+
+ if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
+ _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
+ Thread_queue_Queue *queue,
+ Thread_queue_Lock_context *queue_lock_context
+)
+{
+ _Thread_queue_Queue_acquire_critical(
+ queue,
+ &_Thread_Executing->Potpourri_stats,
+ &queue_lock_context->Lock_context
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
+ Thread_queue_Queue *queue,
+ Thread_queue_Lock_context *queue_lock_context
+)
+{
+ _Thread_queue_Queue_release_critical(
+ queue,
+ &queue_lock_context->Lock_context
+ );
+}
+#endif
+
+/**
+ * @brief Acquires the thread wait lock inside a critical section (interrupts
+ * disabled).
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] queue_context The thread queue context for the corresponding
+ * _Thread_Wait_release_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+#if defined(RTEMS_SMP)
+ Thread_queue_Queue *queue;
+
+ _Thread_Wait_acquire_default_critical(
+ the_thread,
+ &queue_context->Lock_context.Lock_context
+ );
+
+ queue = the_thread->Wait.queue;
+ queue_context->Lock_context.Wait.queue = queue;
+
+ if ( queue != NULL ) {
+ _Thread_queue_Gate_add(
+ &the_thread->Wait.Lock.Pending_requests,
+ &queue_context->Lock_context.Wait.Gate
+ );
+ _Thread_Wait_release_default_critical(
+ the_thread,
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
+
+ if ( queue_context->Lock_context.Wait.queue == NULL ) {
+ _Thread_Wait_release_queue_critical(
+ queue,
+ &queue_context->Lock_context
+ );
+ _Thread_Wait_acquire_default_critical(
+ the_thread,
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_Wait_remove_request_locked(
+ the_thread,
+ &queue_context->Lock_context
+ );
+ _Assert( the_thread->Wait.queue == NULL );
+ }
+ }
+#else
+ (void) the_thread;
+ (void) queue_context;
+#endif
+}
+
+/**
+ * @brief Acquires the thread wait lock and disables interrupts.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] queue_context The thread queue context for the corresponding
+ * _Thread_Wait_release().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+ _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
+ _Thread_Wait_acquire_critical( the_thread, queue_context );
+}
+
+/**
+ * @brief Releases the thread wait lock inside a critical section (interrupts
+ * disabled).
+ *
+ * The previous interrupt status is not restored.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] queue_context The thread queue context used for corresponding
+ * _Thread_Wait_acquire_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+#if defined(RTEMS_SMP)
+ Thread_queue_Queue *queue;
+
+ queue = queue_context->Lock_context.Wait.queue;
+
+ if ( queue != NULL ) {
+ _Thread_Wait_release_queue_critical(
+ queue, &queue_context->Lock_context
+ );
+ _Thread_Wait_acquire_default_critical(
+ the_thread,
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_Wait_remove_request_locked(
+ the_thread,
+ &queue_context->Lock_context
+ );
+ }
+
+ _Thread_Wait_release_default_critical(
+ the_thread,
+ &queue_context->Lock_context.Lock_context
+ );
+#else
+ (void) the_thread;
+ (void) queue_context;
+#endif
+}
+
+/**
+ * @brief Releases the thread wait lock and restores the previous interrupt
+ * status.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] queue_context The thread queue context used for corresponding
+ * _Thread_Wait_acquire().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_Wait_release_critical( the_thread, queue_context );
+ _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
+}
+
+/**
+ * @brief Claims the thread wait queue.
+ *
+ * The caller must not be the owner of the default thread wait lock. The
+ * caller must be the owner of the corresponding thread queue lock. The
+ * registration of the corresponding thread queue operations is deferred and
+ * done after the deadlock detection. This is crucial to support timeouts on
+ * SMP configurations.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] queue The new thread queue.
+ *
+ * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
+ Thread_Control *the_thread,
+ Thread_queue_Queue *queue
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
+
+ _Assert( the_thread->Wait.queue == NULL );
+
+#if defined(RTEMS_SMP)
+ _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
+ _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
+ _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
+#endif
+
+ the_thread->Wait.queue = queue;
+
+ _Thread_Wait_release_default_critical( the_thread, &lock_context );
+}
+
+/**
+ * @brief Finalizes the thread wait queue claim via registration of the
+ * corresponding thread queue operations.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] operations The corresponding thread queue operations.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
+ Thread_Control *the_thread,
+ const Thread_queue_Operations *operations
+)
+{
+ the_thread->Wait.operations = operations;
+}
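+
+/*
+ * Protocol sketch (illustrative only, derived from the documentation
+ * above): the enqueue path of a thread queue claims the wait queue first,
+ * runs the deadlock detection, and only then registers the operations.
+ *
+ *   _Thread_Wait_claim( executing, queue );
+ *   ... deadlock detection ...
+ *   _Thread_Wait_claim_finalize( executing, operations );
+ *   ... block the thread ...
+ *   _Thread_Wait_restore_default( executing );
+ *   _Thread_Wait_tranquilize( executing );
+ */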
+
+/**
+ * @brief Removes a thread wait lock request.
+ *
+ * On SMP configurations, removes a thread wait lock request.
+ *
+ * On other configurations, this function does nothing.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] queue_lock_context The thread queue lock context used for
+ * corresponding _Thread_Wait_acquire().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
+ Thread_Control *the_thread,
+ Thread_queue_Lock_context *queue_lock_context
+)
+{
+#if defined(RTEMS_SMP)
+ ISR_lock_Context lock_context;
+
+ _Thread_Wait_acquire_default( the_thread, &lock_context );
+ _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
+ _Thread_Wait_release_default( the_thread, &lock_context );
+#else
+ (void) the_thread;
+ (void) queue_lock_context;
+#endif
+}
+
+/**
+ * @brief Restores the default thread wait queue and operations.
+ *
+ * The caller must be the owner of the current thread wait queue lock.
+ *
+ * On SMP configurations, the pending requests are updated to use the stale
+ * thread queue operations.
+ *
+ * @param[in] the_thread The thread.
+ *
+ * @see _Thread_Wait_claim().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
+ Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ ISR_lock_Context lock_context;
+ Chain_Node *node;
+ const Chain_Node *tail;
+
+ _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
+
+ node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
+ tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
+
+ if ( node != tail ) {
+ do {
+ Thread_queue_Context *queue_context;
+
+ queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
+ queue_context->Lock_context.Wait.queue = NULL;
+
+ node = _Chain_Next( node );
+ } while ( node != tail );
+
+ _Thread_queue_Gate_add(
+ &the_thread->Wait.Lock.Pending_requests,
+ &the_thread->Wait.Lock.Tranquilizer
+ );
+ } else {
+ _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
+ }
+#endif
+
+ the_thread->Wait.queue = NULL;
+ the_thread->Wait.operations = &_Thread_queue_Operations_default;
+
+#if defined(RTEMS_SMP)
+ _Thread_Wait_release_default_critical( the_thread, &lock_context );
+#endif
+}
+
+/**
+ * @brief Tranquilizes the thread after a wait on a thread queue.
+ *
+ * After the violent blocking procedure this function makes the thread calm and
+ * peaceful again so that it can carry out its normal work.
+ *
+ * On SMP configurations, ensures that all pending thread wait lock requests
+ * have completed before the thread is able to begin a new thread wait
+ * procedure.
+ *
+ * On other configurations, this function does nothing.
+ *
+ * It must be called after a _Thread_Wait_claim() exactly once
+ * - after the corresponding thread queue lock was released, and
+ * - the default wait state is restored or some other processor is about to do
+ * this.
+ *
+ * @param[in] the_thread The thread.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
+ Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
+#else
+ (void) the_thread;
+#endif
+}
+
+/**
+ * @brief Cancels a thread wait on a thread queue.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] queue_context The thread queue context used for corresponding
+ * _Thread_Wait_acquire().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+ Thread_queue_Queue *queue;
+
+ queue = the_thread->Wait.queue;
+
+#if defined(RTEMS_SMP)
+ if ( queue != NULL ) {
+ _Assert( queue_context->Lock_context.Wait.queue == queue );
+#endif
+
+ ( *the_thread->Wait.operations->extract )(
+ queue,
+ the_thread,
+ queue_context
+ );
+ _Thread_Wait_restore_default( the_thread );
+
+#if defined(RTEMS_SMP)
+ _Assert( queue_context->Lock_context.Wait.queue == NULL );
+ queue_context->Lock_context.Wait.queue = queue;
+ }
+#endif
+}
+
+/**
+ * @brief The initial thread wait flags value set by _Thread_Initialize().
+ */
+#define THREAD_WAIT_FLAGS_INITIAL 0x0U
+
+/**
+ * @brief Mask to get the thread wait state flags.
+ */
+#define THREAD_WAIT_STATE_MASK 0xffU
+
+/**
+ * @brief Indicates that the thread begins with the blocking operation.
+ *
+ * A blocking operation consists of an optional watchdog initialization and the
+ * setting of the appropriate thread blocking state with the corresponding
+ * scheduler block operation.
+ */
+#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
+
+/**
+ * @brief Indicates that the thread completed the blocking operation.
+ */
+#define THREAD_WAIT_STATE_BLOCKED 0x2U
+
+/**
+ * @brief Indicates that a condition to end the thread wait occurred.
+ *
+ * This could be a timeout, a signal, an event or a resource availability.
+ */
+#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
+
+/**
+ * @brief Mask to get the thread wait class flags.
+ */
+#define THREAD_WAIT_CLASS_MASK 0xff00U
+
+/**
+ * @brief Indicates that the thread waits for an event.
+ */
+#define THREAD_WAIT_CLASS_EVENT 0x100U
+
+/**
+ * @brief Indicates that the thread waits for a system event.
+ */
+#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
+
+/**
+ * @brief Indicates that the thread waits for an object.
+ */
+#define THREAD_WAIT_CLASS_OBJECT 0x400U
+
+/**
+ * @brief Indicates that the thread waits for a period.
+ */
+#define THREAD_WAIT_CLASS_PERIOD 0x800U
+
+RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
+ Thread_Control *the_thread,
+ Thread_Wait_flags flags
+)
+{
+#if defined(RTEMS_SMP)
+ _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
+#else
+ the_thread->Wait.flags = flags;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
+#else
+ return the_thread->Wait.flags;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
+#else
+ return the_thread->Wait.flags;
+#endif
+}
+
+/**
+ * @brief Tries to change the thread wait flags with release semantics in case
+ * of success.
+ *
+ * Must be called inside a critical section (interrupts disabled).
+ *
+ * In case the wait flags are equal to the expected wait flags, then the wait
+ * flags are set to the desired wait flags.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] expected_flags The expected wait flags.
+ * @param[in] desired_flags The desired wait flags.
+ *
+ * @retval true The wait flags were equal to the expected wait flags.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
+ Thread_Control *the_thread,
+ Thread_Wait_flags expected_flags,
+ Thread_Wait_flags desired_flags
+)
+{
+ _Assert( _ISR_Get_level() != 0 );
+
+#if defined(RTEMS_SMP)
+ return _Atomic_Compare_exchange_uint(
+ &the_thread->Wait.flags,
+ &expected_flags,
+ desired_flags,
+ ATOMIC_ORDER_RELEASE,
+ ATOMIC_ORDER_RELAXED
+ );
+#else
+ bool success = ( the_thread->Wait.flags == expected_flags );
+
+ if ( success ) {
+ the_thread->Wait.flags = desired_flags;
+ }
+
+ return success;
+#endif
+}
+
+/**
+ * @brief Tries to change the thread wait flags with acquire semantics.
+ *
+ * In case the wait flags are equal to the expected wait flags, then the wait
+ * flags are set to the desired wait flags.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] expected_flags The expected wait flags.
+ * @param[in] desired_flags The desired wait flags.
+ *
+ * @retval true The wait flags were equal to the expected wait flags.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
+ Thread_Control *the_thread,
+ Thread_Wait_flags expected_flags,
+ Thread_Wait_flags desired_flags
+)
+{
+#if defined(RTEMS_SMP)
+ return _Atomic_Compare_exchange_uint(
+ &the_thread->Wait.flags,
+ &expected_flags,
+ desired_flags,
+ ATOMIC_ORDER_ACQUIRE,
+ ATOMIC_ORDER_ACQUIRE
+ );
+#else
+  bool      success;
+  ISR_Level level;
+
+ _ISR_Local_disable( level );
+
+ success = _Thread_Wait_flags_try_change_release(
+ the_thread,
+ expected_flags,
+ desired_flags
+ );
+
+ _ISR_Local_enable( level );
+
+  return success;
+#endif
+}
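+
+/*
+ * Protocol sketch (illustrative only): a blocking thread publishes its
+ * intention first; a condition synchronization, e.g. a timeout, then races
+ * to make it ready again.  The wait class shown is an assumed example.
+ *
+ *   blocking thread:
+ *     _Thread_Wait_flags_set(
+ *       executing,
+ *       THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
+ *     );
+ *     ... enqueue and block ...
+ *
+ *   timeout or satisfying condition:
+ *     if (
+ *       _Thread_Wait_flags_try_change_release(
+ *         the_thread,
+ *         THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
+ *         THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN
+ *       )
+ *     ) {
+ *       ... the blocking operation was cancelled in time ...
+ *     }
+ */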
+
+/**
+ * @brief Returns the object identifier of the object containing the current
+ * thread wait queue.
+ *
+ * This function may be used for debug and system information purposes. The
+ * caller must be the owner of the thread lock.
+ *
+ * @retval 0 The thread waits on no thread queue currently, the thread wait
+ * queue is not contained in an object, or the current thread state provides
+ * insufficient information, e.g. the thread is in the middle of a blocking
+ * operation.
+ * @retval other The object identifier of the object containing the thread wait
+ * queue.
+ */
+Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
+
+RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
+ const Thread_Control *the_thread
+)
+{
+ return (Status_Control) the_thread->Wait.return_code;
+}
+
+/**
+ * @brief Cancels a blocking operation so that the thread can continue its
+ * execution.
+ *
+ * In case this function actually cancelled the blocking operation, then the
+ * thread wait return code is set to the specified status.
+ *
+ * A specialization of this function is _Thread_Timeout().
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] status The thread wait status.
+ */
+void _Thread_Continue( Thread_Control *the_thread, Status_Control status );
+
+/**
+ * @brief General purpose thread wait timeout.
+ *
+ * @param[in] the_watchdog The thread timer watchdog.
+ */
+void _Thread_Timeout( Watchdog_Control *the_watchdog );
+
+RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
+ Thread_Timer_information *timer,
+ Per_CPU_Control *cpu
+)
+{
+ _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
+ timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
+ _Watchdog_Preinitialize( &timer->Watchdog, cpu );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu,
+ Watchdog_Interval ticks
+)
+{
+ ISR_lock_Context lock_context;
+
+ _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
+
+ the_thread->Timer.header =
+ &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
+ the_thread->Timer.Watchdog.routine = _Thread_Timeout;
+ _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );
+
+ _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu,
+ Watchdog_Service_routine_entry routine,
+ uint64_t expire
+)
+{
+ ISR_lock_Context lock_context;
+
+ _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
+
+ the_thread->Timer.header =
+ &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
+ the_thread->Timer.Watchdog.routine = routine;
+ _Watchdog_Per_CPU_insert_realtime( &the_thread->Timer.Watchdog, cpu, expire );
+
+ _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
+{
+ ISR_lock_Context lock_context;
+
+ _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
+
+ _Watchdog_Per_CPU_remove(
+ &the_thread->Timer.Watchdog,
+#if defined(RTEMS_SMP)
+ the_thread->Timer.Watchdog.cpu,
+#else
+ _Per_CPU_Get(),
+#endif
+ the_thread->Timer.header
+ );
+
+ _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
+ Thread_Control *the_thread,
+ Thread_queue_Queue *queue
+)
+{
+ _Thread_Wait_tranquilize( the_thread );
+ _Thread_Timer_remove( the_thread );
+
+#if defined(RTEMS_MULTIPROCESSING)
+ if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
+ _Thread_Unblock( the_thread );
+ } else {
+ _Thread_queue_Unblock_proxy( queue, the_thread );
+ }
+#else
+ (void) queue;
+ _Thread_Unblock( the_thread );
+#endif
+}
+
+Status_Control _Thread_Set_name(
+ Thread_Control *the_thread,
+ const char *name
+);
+
+size_t _Thread_Get_name(
+ const Thread_Control *the_thread,
+ char *buffer,
+ size_t buffer_size
+);
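+
+/*
+ * Usage sketch for the name accessors above; the thread pointer and the
+ * name are illustrative:
+ *
+ *   char buffer[ 32 ];
+ *
+ *   _Thread_Set_name( the_thread, "WORK" );
+ *   _Thread_Get_name( the_thread, buffer, sizeof( buffer ) );
+ */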
+
+/** @}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#if defined(RTEMS_MULTIPROCESSING)
+#include <rtems/score/threadmp.h>
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/threadmp.h b/cpukit/include/rtems/score/threadmp.h
new file mode 100644
index 0000000000..9cde35b649
--- /dev/null
+++ b/cpukit/include/rtems/score/threadmp.h
@@ -0,0 +1,113 @@
+/**
+ * @file rtems/score/threadmp.h
+ *
+ * @brief Multiprocessing Portion of the Thread Package
+ *
+ * This include file contains the specification for all routines
+ * and data specific to the multiprocessing portion of the thread package.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_THREADMP_H
+#define _RTEMS_SCORE_THREADMP_H
+
+#ifndef _RTEMS_SCORE_THREADIMPL_H
+# error "Never use <rtems/score/threadmp.h> directly; include <rtems/score/threadimpl.h> instead."
+#endif
+
+#include <rtems/score/mpciimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreThreadMP Thread Handler Multiprocessing Support
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which is related to managing
+ * threads in a multiprocessor system configuration. This handler must
+ * manage proxies which represent remote threads blocking on local
+ * operations.
+ */
+/**@{*/
+
+/**
+ * @brief Initialize MP thread handler.
+ *
+ * This routine initializes the multiprocessing portion of the Thread Handler.
+ */
+void _Thread_MP_Handler_initialization (
+ uint32_t maximum_proxies
+);
+
+/**
+ * @brief Allocate an MP proxy control block from
+ * the inactive chain of free proxy control blocks.
+ *
+ * This allocates a proxy control block from
+ * the inactive chain of free proxy control blocks.
+ *
+ * @note This function returns a thread control pointer
+ * because proxies are substitutes for remote threads.
+ */
+Thread_Control *_Thread_MP_Allocate_proxy (
+ States_Control the_state
+);
+
+/**
+ * @brief Removes the MP proxy control block for the specified
+ * id from the active red-black tree of proxy control blocks.
+ *
+ * This function removes the proxy control block for the specified
+ * id from the active red-black tree of proxy control blocks.
+ */
+Thread_Control *_Thread_MP_Find_proxy (
+ Objects_Id the_id
+);
+
+/**
+ * This function returns true if the thread in question is the
+ * multiprocessing receive thread.
+ *
+ * @note This is a macro to avoid needing a prototype for
+ * _MPCI_Receive_server_tcb until it is used.
+ */
+#define _Thread_MP_Is_receive(_the_thread) \
+ ((_the_thread) == _MPCI_Receive_server_tcb)
+
+/**
+ * This routine frees a proxy control block to the
+ * inactive chain of free proxy control blocks.
+ */
+void _Thread_MP_Free_proxy( Thread_Control *the_thread );
+
+RTEMS_INLINE_ROUTINE bool _Thread_MP_Is_remote( Objects_Id id )
+{
+ Objects_Information *information;
+
+ information = _Thread_Get_objects_information( id );
+ if ( information == NULL ) {
+ return false;
+ }
+
+ return _Objects_MP_Is_remote( id, information );
+}
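+
+/*
+ * Usage sketch, e.g. while dispatching an operation on a thread object:
+ *
+ *   if ( _Thread_MP_Is_remote( id ) ) {
+ *     ... the identifier refers to a thread on another node ...
+ *   }
+ */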
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/threadq.h b/cpukit/include/rtems/score/threadq.h
new file mode 100644
index 0000000000..3e618bf5af
--- /dev/null
+++ b/cpukit/include/rtems/score/threadq.h
@@ -0,0 +1,595 @@
+/**
+ * @file
+ *
+ * @brief Constants and Structures Needed to Declare a Thread Queue
+ *
+ * This include file contains all the constants and structures
+ * needed to declare a thread queue.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2014.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_THREADQ_H
+#define _RTEMS_SCORE_THREADQ_H
+
+#include <rtems/score/chain.h>
+#include <rtems/score/isrlock.h>
+#include <rtems/score/object.h>
+#include <rtems/score/priority.h>
+#include <rtems/score/rbtree.h>
+#include <rtems/score/states.h>
+#include <rtems/score/watchdog.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct Scheduler_Node;
+
+/**
+ * @defgroup ScoreThreadQueue Thread Queue Handler
+ *
+ * @ingroup Score
+ *
+ * This handler provides the capability to have threads block in
+ * ordered sets. The sets may be ordered using the FIFO or priority
+ * discipline.
+ */
+/**@{*/
+
+typedef struct _Thread_Control Thread_Control;
+
+typedef struct Thread_queue_Context Thread_queue_Context;
+
+typedef struct Thread_queue_Queue Thread_queue_Queue;
+
+typedef struct Thread_queue_Operations Thread_queue_Operations;
+
+/**
+ * @brief Thread queue enqueue callout.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] the_thread The thread to enqueue.
+ * @param[in] cpu_self The current processor.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ *
+ * @see _Thread_queue_Context_set_enqueue_callout().
+ */
+typedef void ( *Thread_queue_Enqueue_callout )(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ struct Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Thread queue deadlock callout.
+ *
+ * @param the_thread The thread that detected the deadlock.
+ *
+ * @see _Thread_queue_Context_set_deadlock_callout().
+ */
+typedef void ( *Thread_queue_Deadlock_callout )(
+ Thread_Control *the_thread
+);
+
+#if defined(RTEMS_MULTIPROCESSING)
+/**
+ * @brief Multiprocessing (MP) support callout for thread queue operations.
+ *
+ * @param the_proxy The thread proxy of the thread queue operation. A thread
+ * control is actually a thread proxy if and only if
+ * _Objects_Is_local_id( the_proxy->Object.id ) is false.
+ * @param mp_id Object identifier of the object containing the thread queue.
+ *
+ * @see _Thread_queue_Context_set_MP_callout().
+ */
+typedef void ( *Thread_queue_MP_callout )(
+ Thread_Control *the_proxy,
+ Objects_Id mp_id
+);
+#endif
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief The thread queue gate is an SMP synchronization means.
+ *
+ * The gates are added to a list of requests. A busy wait is performed to make
+ * sure that preceding requests are carried out.  Each predecessor notifies
+ * its successor upon request completion.
+ *
+ * @see _Thread_queue_Gate_add(), _Thread_queue_Gate_wait(), and
+ * _Thread_queue_Gate_open().
+ */
+typedef struct {
+ Chain_Node Node;
+
+ Atomic_Uint go_ahead;
+} Thread_queue_Gate;
+#endif
+
+typedef struct {
+ /**
+ * @brief The lock context for the thread queue acquire and release
+ * operations.
+ */
+ ISR_lock_Context Lock_context;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Data to support thread queue enqueue operations.
+ */
+ struct {
+ /**
+ * @brief Gate to synchronize thread wait lock requests.
+ *
+ * @see _Thread_Wait_acquire_critical() and _Thread_Wait_tranquilize().
+ */
+ Thread_queue_Gate Gate;
+
+ /**
+ * @brief The thread queue in case the thread is blocked on a thread queue.
+ */
+ Thread_queue_Queue *queue;
+ } Wait;
+#endif
+} Thread_queue_Lock_context;
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief A thread queue link from one thread to another specified by the
+ * thread queue owner and thread wait queue relationships.
+ */
+typedef struct {
+ /**
+ * @brief Node to register this link in the global thread queue links lookup
+ * tree.
+ */
+ RBTree_Node Registry_node;
+
+ /**
+ * @brief The source thread queue determined by the thread queue owner.
+ */
+ Thread_queue_Queue *source;
+
+ /**
+ * @brief The target thread queue determined by the thread wait queue of the
+ * source owner.
+ */
+ Thread_queue_Queue *target;
+
+ /**
+ * @brief Node to add this link to a thread queue path.
+ */
+ Chain_Node Path_node;
+
+ /**
+ * @brief The owner of this thread queue link.
+ */
+ Thread_Control *owner;
+
+ /**
+ * @brief The queue lock context used to acquire the thread wait lock of the
+ * owner.
+ */
+ Thread_queue_Lock_context Lock_context;
+} Thread_queue_Link;
+#endif
+
+/**
+ * @brief Thread queue context for the thread queue methods.
+ *
+ * @see _Thread_queue_Context_initialize().
+ */
+struct Thread_queue_Context {
+ /**
+ * @brief The lock context for the thread queue acquire and release
+ * operations.
+ */
+ Thread_queue_Lock_context Lock_context;
+
+ /**
+ * @brief The thread state for _Thread_queue_Enqueue().
+ */
+ States_Control thread_state;
+
+ /**
+ * @brief The enqueue callout for _Thread_queue_Enqueue().
+ *
+ * The callout is invoked after the release of the thread queue lock with
+ * thread dispatching disabled. Afterwards the thread is blocked. This
+ * callout must be used to install the thread watchdog for timeout handling.
+ *
+   * @see _Thread_queue_Enqueue_do_nothing_extra(),
+ * _Thread_queue_Add_timeout_ticks(), and
+ * _Thread_queue_Add_timeout_realtime_timespec().
+ */
+ Thread_queue_Enqueue_callout enqueue_callout;
+
+ /**
+ * @brief Interval to wait.
+ *
+ * May be used by the enqueue callout to register a timeout handler.
+ */
+ union {
+ /**
+ * @brief The timeout in ticks.
+ */
+ Watchdog_Interval ticks;
+
+ /**
+ * @brief The timeout argument, e.g. pointer to struct timespec.
+ */
+ const void *arg;
+ } Timeout;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Representation of a thread queue path from a start thread queue to
+ * the terminal thread queue.
+ *
+ * The start thread queue is determined by the object on which a thread intends
+ * to block. The terminal thread queue is the thread queue reachable via
+ * thread queue links whose owner is not blocked on a thread queue. The thread
+ * queue links are determined by the thread queue owner and thread wait queue
+ * relationships.
+ */
+ struct {
+ /**
+ * @brief The chain of thread queue links defining the thread queue path.
+ */
+ Chain_Control Links;
+
+ /**
+ * @brief The start of a thread queue path.
+ */
+ Thread_queue_Link Start;
+
+ /**
+ * @brief In case of a deadlock, a link for the first thread on the path
+ * that tries to enqueue on a thread queue.
+ */
+ Thread_queue_Link Deadlock;
+ } Path;
+#endif
+
+ /**
+ * @brief Block to manage thread priority changes due to a thread queue
+ * operation.
+ */
+ struct {
+ /**
+ * @brief A priority action list.
+ */
+ Priority_Actions Actions;
+
+ /**
+ * @brief Count of threads to update the priority via
+ * _Thread_Priority_update().
+ */
+ size_t update_count;
+
+ /**
+ * @brief Threads to update the priority via _Thread_Priority_update().
+ *
+     * Currently, a maximum of two threads need an update at once, for
+ * example the thread of the thread queue operation and the owner of the
+ * thread queue.
+ */
+ Thread_Control *update[ 2 ];
+ } Priority;
+
+ /**
+ * @brief Invoked in case of a detected deadlock.
+ *
+ * Must be initialized for _Thread_queue_Enqueue() in case the
+ * thread queue may have an owner, e.g. for mutex objects.
+ *
+ * @see _Thread_queue_Context_set_deadlock_callout().
+ */
+ Thread_queue_Deadlock_callout deadlock_callout;
+
+#if defined(RTEMS_MULTIPROCESSING)
+ /**
+ * @brief Callout to unblock the thread in case it is actually a thread
+ * proxy.
+ *
+ * This field is only used on multiprocessing configurations. Used by
+ * thread queue extract and unblock methods for objects with multiprocessing
+ * (MP) support.
+ *
+ * @see _Thread_queue_Context_set_MP_callout().
+ */
+ Thread_queue_MP_callout mp_callout;
+#endif
+};
+
+/**
+ * @brief Thread priority queue.
+ */
+typedef struct {
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Node to enqueue this queue in the FIFO chain of the corresponding
+ * heads structure.
+ *
+ * @see Thread_queue_Heads::Heads::Fifo.
+ */
+ Chain_Node Node;
+#endif
+
+ /**
+ * @brief The actual thread priority queue.
+ */
+ Priority_Aggregation Queue;
+
+ /**
+ * @brief This priority queue is added to a scheduler node of the owner in
+ * case of priority inheritance.
+ */
+ struct Scheduler_Node *scheduler_node;
+} Thread_queue_Priority_queue;
+
+/**
+ * @brief Thread queue heads.
+ *
+ * Each thread is equipped with spare thread queue heads in case it is not
+ * enqueued on a thread queue. The first thread enqueued on a thread queue
+ * will give its spare thread queue heads to that thread queue. The threads
+ * arriving at the queue will add their thread queue heads to the free chain of
+ * the queue heads provided by the first thread enqueued.  Once a thread is
+ * dequeued, it uses the free chain to get new spare thread queue heads.
+ *
+ * Uses a leading underscore in the structure name to allow forward
+ * declarations in standard header files provided by Newlib and GCC.
+ */
+typedef struct _Thread_queue_Heads {
+ /** This union contains the data structures used to manage the blocked
+ * set of tasks which varies based upon the discipline.
+ */
+ union {
+ /**
+ * @brief This is the FIFO discipline list.
+ *
+ * On SMP configurations this FIFO is used to enqueue the per scheduler
+ * instance priority queues of this structure. This ensures FIFO fairness
+ * among the highest priority thread of each scheduler instance.
+ */
+ Chain_Control Fifo;
+
+#if !defined(RTEMS_SMP)
+ /**
+ * @brief This is the set of threads for priority discipline waiting.
+ */
+ Thread_queue_Priority_queue Priority;
+#endif
+ } Heads;
+
+ /**
+ * @brief A chain with free thread queue heads providing the spare thread
+ * queue heads for a thread once it is dequeued.
+ */
+ Chain_Control Free_chain;
+
+ /**
+ * @brief A chain node to add these thread queue heads to the free chain of
+ * the thread queue heads dedicated to the thread queue of an object.
+ */
+ Chain_Node Free_node;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief One priority queue per scheduler instance.
+ */
+ Thread_queue_Priority_queue Priority[ RTEMS_ZERO_LENGTH_ARRAY ];
+#endif
+} Thread_queue_Heads;
+
+#if defined(RTEMS_SMP)
+ #define THREAD_QUEUE_HEADS_SIZE( scheduler_count ) \
+ ( sizeof( Thread_queue_Heads ) \
+ + ( scheduler_count ) * sizeof( Thread_queue_Priority_queue ) )
+#else
+ #define THREAD_QUEUE_HEADS_SIZE( scheduler_count ) \
+ sizeof( Thread_queue_Heads )
+#endif
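+
+/*
+ * A sketch of a heads allocation that honours the per scheduler instance
+ * priority queues on SMP.  _Workspace_Allocate() is used here only as an
+ * illustrative allocator; any allocator works:
+ *
+ *   Thread_queue_Heads *heads;
+ *
+ *   heads = _Workspace_Allocate(
+ *     THREAD_QUEUE_HEADS_SIZE( _Scheduler_Count )
+ *   );
+ */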
+
+struct Thread_queue_Queue {
+ /**
+ * @brief Lock to protect this thread queue.
+ *
+ * It may be used to protect additional state of the object embedding this
+ * thread queue.
+ *
+ * Must be the first component of this structure to be able to re-use
+ * implementation parts for structures defined by Newlib <sys/lock.h>.
+ *
+ * @see _Thread_queue_Acquire(), _Thread_queue_Acquire_critical() and
+ * _Thread_queue_Release().
+ */
+#if defined(RTEMS_SMP)
+ SMP_ticket_lock_Control Lock;
+#endif
+
+ /**
+ * @brief The thread queue heads.
+ *
+ * This pointer is NULL, if and only if no threads are enqueued. The first
+ * thread to enqueue will give its spare thread queue heads to this thread
+ * queue.
+ */
+ Thread_queue_Heads *heads;
+
+ /**
+ * @brief The thread queue owner.
+ */
+ Thread_Control *owner;
+
+ /**
+ * @brief The thread queue name.
+ */
+ const char *name;
+};
+
+/**
+ * @brief Thread queue action operation.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in, out] priority_actions The priority action set to perform.
+ *   Returns the priority action set to perform on the thread queue owner or
+ *   the empty set in case there is nothing to do.
+ */
+typedef void ( *Thread_queue_Priority_actions_operation )(
+ Thread_queue_Queue *queue,
+ Priority_Actions *priority_actions
+);
+
+/**
+ * @brief Thread queue enqueue operation.
+ *
+ * A potential thread to update the priority due to priority inheritance is
+ * returned via the thread queue context. This thread is handed over to
+ * _Thread_Priority_update().
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] the_thread The thread to enqueue on the queue.
+ * @param[in] queue_context The thread queue context.
+ */
+typedef void ( *Thread_queue_Enqueue_operation )(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Thread queue extract operation.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] the_thread The thread to extract from the thread queue.
+ * @param[in] queue_context The thread queue context.
+ */
+typedef void ( *Thread_queue_Extract_operation )(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Thread queue surrender operation.
+ *
+ * This operation must dequeue and return the first thread on the queue.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] heads The thread queue heads. It must not be NULL.
+ * @param[in] previous_owner The previous owner of the thread queue.
+ * @param[in] queue_context The thread queue context.
+ *
+ * @return The previous first thread on the queue.
+ */
+typedef Thread_Control *( *Thread_queue_Surrender_operation )(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Thread queue first operation.
+ *
+ * @param[in] heads The thread queue heads.
+ *
+ * @retval NULL No thread is present on the thread queue.
+ * @retval first The first thread of the thread queue according to the insert
+ * order. This thread remains on the thread queue.
+ */
+typedef Thread_Control *( *Thread_queue_First_operation )(
+ Thread_queue_Heads *heads
+);
+
+/**
+ * @brief Thread queue operations.
+ *
+ * @see _Thread_wait_Set_operations().
+ */
+struct Thread_queue_Operations {
+ /**
+ * @brief Thread queue priority actions operation.
+ */
+ Thread_queue_Priority_actions_operation priority_actions;
+
+ /**
+ * @brief Thread queue enqueue operation.
+ *
+ * Called by object routines to enqueue the thread.
+ */
+ Thread_queue_Enqueue_operation enqueue;
+
+ /**
+ * @brief Thread queue extract operation.
+ *
+ * Called by object routines to extract a thread from a thread queue.
+ */
+ Thread_queue_Extract_operation extract;
+
+ /**
+ * @brief Thread queue surrender operation.
+ */
+ Thread_queue_Surrender_operation surrender;
+
+ /**
+ * @brief Thread queue first operation.
+ */
+ Thread_queue_First_operation first;
+};
+
+/**
+ * This is the structure used to manage sets of tasks which are blocked
+ * waiting to acquire a resource.
+ */
+typedef struct {
+#if defined(RTEMS_SMP)
+#if defined(RTEMS_DEBUG)
+ /**
+ * @brief The index of the owning processor of the thread queue lock.
+ *
+ * The thread queue lock may be acquired via the thread lock also. This path
+ * is not covered by this field. In case the lock is not owned directly via
+ * _Thread_queue_Acquire(), then the value of this field is
+ * SMP_LOCK_NO_OWNER.
+ *
+ * Must be before the queue component of this structure to be able to re-use
+ * implementation parts for structures defined by Newlib <sys/lock.h>.
+ */
+ uint32_t owner;
+#endif
+
+#if defined(RTEMS_PROFILING)
+ /**
+ * @brief SMP lock statistics in case SMP and profiling are enabled.
+ *
+ * Must be before the queue component of this structure to be able to re-use
+ * implementation parts for structures defined by Newlib <sys/lock.h>.
+ */
+ SMP_lock_Stats Lock_stats;
+#endif
+#endif
+
+ /**
+ * @brief The actual thread queue.
+ */
+ Thread_queue_Queue Queue;
+} Thread_queue_Control;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/threadqimpl.h b/cpukit/include/rtems/score/threadqimpl.h
new file mode 100644
index 0000000000..ecbd8fd42f
--- /dev/null
+++ b/cpukit/include/rtems/score/threadqimpl.h
@@ -0,0 +1,1265 @@
+/**
+ * @file rtems/score/threadqimpl.h
+ *
+ * Constants and Structures Needed for the Thread Queue Implementation
+ *
+ * This include file contains all the constants and structures associated
+ * with the manipulation of thread queues.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2014.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_THREADQIMPL_H
+#define _RTEMS_SCORE_THREADQIMPL_H
+
+#include <rtems/score/threadq.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/priorityimpl.h>
+#include <rtems/score/scheduler.h>
+#include <rtems/score/smp.h>
+#include <rtems/score/status.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threaddispatch.h>
+
+#if defined(RTEMS_DEBUG)
+#include <string.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreThreadQueue
+ */
+/**@{*/
+
+#define THREAD_QUEUE_LINK_OF_PATH_NODE( node ) \
+  RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node )
+
+/**
+ * @brief Thread queue with a layout compatible to struct _Thread_queue_Queue
+ * defined in Newlib <sys/lock.h>.
+ */
+typedef struct {
+#if !defined(RTEMS_SMP)
+ /*
+ * The struct _Thread_queue_Queue definition is independent of the RTEMS
+ * build configuration. Thus, the storage space for the SMP lock is always
+ * present. In SMP configurations, the SMP lock is contained in the
+ * Thread_queue_Queue.
+ */
+ unsigned int reserved[2];
+#endif
+
+ Thread_queue_Queue Queue;
+} Thread_queue_Syslock_queue;
+
+void _Thread_queue_Enqueue_do_nothing_extra(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+);
+
+void _Thread_queue_Add_timeout_ticks(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+);
+
+void _Thread_queue_Add_timeout_monotonic_timespec(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+);
+
+void _Thread_queue_Add_timeout_realtime_timespec(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Sets the thread wait return code to STATUS_DEADLOCK.
+ */
+void _Thread_queue_Deadlock_status( Thread_Control *the_thread );
+
+/**
+ * @brief Results in an INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK fatal error.
+ */
+void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread );
+
+/**
+ * @brief Initializes a thread queue context.
+ *
+ * @param queue_context The thread queue context to initialize.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
+ Thread_queue_Context *queue_context
+)
+{
+#if defined(RTEMS_DEBUG)
+ memset( queue_context, 0x7f, sizeof( *queue_context ) );
+#if defined(RTEMS_SMP)
+ _Chain_Initialize_node( &queue_context->Lock_context.Wait.Gate.Node );
+#endif
+ queue_context->enqueue_callout = NULL;
+ queue_context->deadlock_callout = NULL;
+#else
+ (void) queue_context;
+#endif
+}
+
+/**
+ * @brief Sets the thread state for the thread to enqueue in the thread queue
+ * context.
+ *
+ * @param queue_context The thread queue context.
+ * @param thread_state The thread state.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_thread_state(
+ Thread_queue_Context *queue_context,
+ States_Control thread_state
+)
+{
+ queue_context->thread_state = thread_state;
+}
+
+/**
+ * @brief Sets the timeout ticks in the thread queue context.
+ *
+ * @param queue_context The thread queue context.
+ * @param ticks The timeout in ticks.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_timeout_ticks(
+ Thread_queue_Context *queue_context,
+ Watchdog_Interval ticks
+)
+{
+ queue_context->Timeout.ticks = ticks;
+}
+
+/**
+ * @brief Sets the timeout argument in the thread queue context.
+ *
+ * @param queue_context The thread queue context.
+ * @param arg The timeout argument.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_timeout_argument(
+ Thread_queue_Context *queue_context,
+ const void *arg
+)
+{
+ queue_context->Timeout.arg = arg;
+}
+
+/**
+ * @brief Sets the enqueue callout in the thread queue context.
+ *
+ * @param queue_context The thread queue context.
+ * @param enqueue_callout The enqueue callout.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_enqueue_callout(
+ Thread_queue_Context *queue_context,
+ Thread_queue_Enqueue_callout enqueue_callout
+)
+{
+ queue_context->enqueue_callout = enqueue_callout;
+}
+
+/**
+ * @brief Sets the do nothing enqueue callout in the thread queue context.
+ *
+ * @param queue_context The thread queue context.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_enqueue_do_nothing_extra(
+ Thread_queue_Context *queue_context
+)
+{
+ queue_context->enqueue_callout = _Thread_queue_Enqueue_do_nothing_extra;
+}
+
+/**
+ * @brief Sets the enqueue callout to add a relative monotonic timeout in
+ * ticks.
+ *
+ * @param queue_context The thread queue context.
+ * @param ticks The timeout in ticks.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_enqueue_timeout_ticks(
+ Thread_queue_Context *queue_context,
+ Watchdog_Interval ticks
+)
+{
+ queue_context->Timeout.ticks = ticks;
+ queue_context->enqueue_callout = _Thread_queue_Add_timeout_ticks;
+}
+
+/**
+ * @brief Sets the enqueue callout to add an absolute monotonic timeout in
+ * timespec format.
+ *
+ * @param queue_context The thread queue context.
+ * @param abstime The absolute monotonic timeout.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_enqueue_timeout_monotonic_timespec(
+ Thread_queue_Context *queue_context,
+ const struct timespec *abstime
+)
+{
+ queue_context->Timeout.arg = abstime;
+ queue_context->enqueue_callout =
+ _Thread_queue_Add_timeout_monotonic_timespec;
+}
+
+/**
+ * @brief Sets the enqueue callout to add an absolute realtime timeout in
+ * timespec format.
+ *
+ * @param queue_context The thread queue context.
+ * @param abstime The absolute realtime timeout.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
+ Thread_queue_Context *queue_context,
+ const struct timespec *abstime
+)
+{
+ queue_context->Timeout.arg = abstime;
+ queue_context->enqueue_callout = _Thread_queue_Add_timeout_realtime_timespec;
+}
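+
+/*
+ * A typical context setup for a timed blocking operation, sketched for a
+ * ten tick timeout.  The thread state constant is an illustrative choice
+ * from <rtems/score/statesimpl.h>:
+ *
+ *   Thread_queue_Context queue_context;
+ *
+ *   _Thread_queue_Context_initialize( &queue_context );
+ *   _Thread_queue_Context_set_thread_state(
+ *     &queue_context,
+ *     STATES_WAITING_FOR_SEMAPHORE
+ *   );
+ *   _Thread_queue_Context_set_enqueue_timeout_ticks( &queue_context, 10 );
+ */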
+
+/**
+ * @brief Sets the deadlock callout in the thread queue
+ * context.
+ *
+ * A deadlock callout must be provided for _Thread_queue_Enqueue()
+ * operations that operate on thread queues which may have an owner, e.g. mutex
+ * objects. Available deadlock callouts are _Thread_queue_Deadlock_status()
+ * and _Thread_queue_Deadlock_fatal().
+ *
+ * @param queue_context The thread queue context.
+ * @param deadlock_callout The deadlock callout.
+ *
+ * @see _Thread_queue_Enqueue().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout(
+ Thread_queue_Context *queue_context,
+ Thread_queue_Deadlock_callout deadlock_callout
+)
+{
+ queue_context->deadlock_callout = deadlock_callout;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_clear_priority_updates(
+ Thread_queue_Context *queue_context
+)
+{
+ queue_context->Priority.update_count = 0;
+}
+
+RTEMS_INLINE_ROUTINE size_t _Thread_queue_Context_save_priority_updates(
+ Thread_queue_Context *queue_context
+)
+{
+ return queue_context->Priority.update_count;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_restore_priority_updates(
+ Thread_queue_Context *queue_context,
+ size_t update_count
+)
+{
+ queue_context->Priority.update_count = update_count;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_add_priority_update(
+ Thread_queue_Context *queue_context,
+ Thread_Control *the_thread
+)
+{
+ size_t n;
+
+ n = queue_context->Priority.update_count;
+ _Assert( n < RTEMS_ARRAY_SIZE( queue_context->Priority.update ) );
+
+ queue_context->Priority.update_count = n + 1;
+ queue_context->Priority.update[ n ] = the_thread;
+}
+
+#define _Thread_queue_Context_ISR_disable( queue_context, level ) \
+ do { \
+ _ISR_Local_disable( level ); \
+ _ISR_lock_ISR_disable_profile( \
+ &( queue_context )->Lock_context.Lock_context \
+ ) \
+ } while ( 0 )
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_ISR_level(
+ Thread_queue_Context *queue_context,
+ ISR_Level level
+)
+{
+ _ISR_lock_Context_set_level(
+ &queue_context->Lock_context.Lock_context,
+ level
+ );
+}
+
+RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_queue_Dispatch_disable(
+ Thread_queue_Context *queue_context
+)
+{
+ return _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+}
+
+/**
+ * @brief Sets the MP callout in the thread queue context.
+ *
+ * @param queue_context The thread queue context.
+ * @param mp_callout Callout to unblock the thread in case it is actually a
+ * thread proxy. This parameter is only used on multiprocessing
+ * configurations. Used by thread queue extract and unblock methods for
+ * objects with multiprocessing (MP) support.
+ */
+#if defined(RTEMS_MULTIPROCESSING)
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_MP_callout(
+ Thread_queue_Context *queue_context,
+ Thread_queue_MP_callout mp_callout
+)
+{
+ queue_context->mp_callout = mp_callout;
+}
+#else
+#define _Thread_queue_Context_set_MP_callout( queue_context, mp_callout ) \
+ do { \
+ (void) queue_context; \
+ } while ( 0 )
+#endif
+
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_close(
+ Thread_queue_Gate *gate
+)
+{
+ _Atomic_Store_uint( &gate->go_ahead, 0, ATOMIC_ORDER_RELAXED );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_add(
+ Chain_Control *chain,
+ Thread_queue_Gate *gate
+)
+{
+ _Chain_Append_unprotected( chain, &gate->Node );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_open(
+ Thread_queue_Gate *gate
+)
+{
+ _Atomic_Store_uint( &gate->go_ahead, 1, ATOMIC_ORDER_RELAXED );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_wait(
+ Thread_queue_Gate *gate
+)
+{
+ while ( _Atomic_Load_uint( &gate->go_ahead, ATOMIC_ORDER_RELAXED ) == 0 ) {
+ /* Wait */
+ }
+}
+#endif
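+
+/*
+ * A simplified sketch of the gate handshake.  A requesting context closes
+ * its gate, registers it, and busy waits until a completing context opens
+ * it; pending_requests is an illustrative chain of gates:
+ *
+ * Requester:
+ *
+ *   _Thread_queue_Gate_close( &gate );
+ *   _Thread_queue_Gate_add( &pending_requests, &gate );
+ *   ... release locks so that the completer can make progress ...
+ *   _Thread_queue_Gate_wait( &gate );
+ *
+ * Completer:
+ *
+ *   gate = (Thread_queue_Gate *) _Chain_Get_unprotected( &pending_requests );
+ *   _Thread_queue_Gate_open( gate );
+ */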
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
+ Thread_queue_Heads *heads
+)
+{
+#if defined(RTEMS_SMP)
+ size_t i;
+
+ for ( i = 0; i < _Scheduler_Count; ++i ) {
+ _Chain_Initialize_node( &heads->Priority[ i ].Node );
+ _Priority_Initialize_empty( &heads->Priority[ i ].Queue );
+ heads->Priority[ i ].Queue.scheduler = &_Scheduler_Table[ i ];
+ }
+#endif
+
+ _Chain_Initialize_empty( &heads->Free_chain );
+ _Chain_Initialize_node( &heads->Free_node );
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_initialize(
+ Thread_queue_Queue *queue,
+ const char *name
+)
+{
+#if defined(RTEMS_SMP)
+ _SMP_ticket_lock_Initialize( &queue->Lock );
+#endif
+ queue->heads = NULL;
+ queue->owner = NULL;
+ queue->name = name;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_do_acquire_critical(
+ Thread_queue_Queue *queue,
+#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
+ SMP_lock_Stats *lock_stats,
+#endif
+ ISR_lock_Context *lock_context
+)
+{
+#if defined(RTEMS_SMP)
+ _SMP_ticket_lock_Acquire(
+ &queue->Lock,
+ lock_stats,
+ &lock_context->Lock_context.Stats_context
+ );
+#else
+ (void) queue;
+ (void) lock_context;
+#endif
+}
+
+#if defined(RTEMS_SMP) && defined( RTEMS_PROFILING )
+ #define \
+ _Thread_queue_Queue_acquire_critical( queue, lock_stats, lock_context ) \
+ _Thread_queue_Queue_do_acquire_critical( queue, lock_stats, lock_context )
+#else
+ #define \
+ _Thread_queue_Queue_acquire_critical( queue, lock_stats, lock_context ) \
+ _Thread_queue_Queue_do_acquire_critical( queue, lock_context )
+#endif
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release_critical(
+ Thread_queue_Queue *queue,
+ ISR_lock_Context *lock_context
+)
+{
+#if defined(RTEMS_SMP)
+ _SMP_ticket_lock_Release(
+ &queue->Lock,
+ &lock_context->Lock_context.Stats_context
+ );
+#else
+ (void) queue;
+ (void) lock_context;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release(
+ Thread_queue_Queue *queue,
+ ISR_lock_Context *lock_context
+)
+{
+ _Thread_queue_Queue_release_critical( queue, lock_context );
+ _ISR_lock_ISR_enable( lock_context );
+}
+
+/**
+ * @brief Copies the thread queue name to the specified buffer.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] buffer The buffer for the thread queue name copy.
+ * @param[in] buffer_size The buffer size in characters.
+ * @param[out] id Returns the object identifier in case the thread queue is
+ *   embedded in an object with an identifier, otherwise it is set to 0.
+ *
+ * @return The length of the thread queue name.  May be greater than or equal
+ *   to the buffer size if truncation occurred.
+ */
+size_t _Thread_queue_Queue_get_name_and_id(
+ const Thread_queue_Queue *queue,
+ char *buffer,
+ size_t buffer_size,
+ Objects_Id *id
+);
+
+#if defined(RTEMS_SMP)
+void _Thread_queue_Do_acquire_critical(
+ Thread_queue_Control *the_thread_queue,
+ ISR_lock_Context *lock_context
+);
+#else
+RTEMS_INLINE_ROUTINE void _Thread_queue_Do_acquire_critical(
+ Thread_queue_Control *the_thread_queue,
+ ISR_lock_Context *lock_context
+)
+{
+ (void) the_thread_queue;
+ (void) lock_context;
+}
+#endif
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire_critical(
+ Thread_queue_Control *the_thread_queue,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Do_acquire_critical(
+ the_thread_queue,
+ &queue_context->Lock_context.Lock_context
+ );
+}
+
+#if defined(RTEMS_SMP)
+void _Thread_queue_Acquire(
+ Thread_queue_Control *the_thread_queue,
+ Thread_queue_Context *queue_context
+);
+#else
+RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire(
+ Thread_queue_Control *the_thread_queue,
+ Thread_queue_Context *queue_context
+)
+{
+ (void) the_thread_queue;
+ _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
+}
+#endif
+
+#if defined(RTEMS_DEBUG)
+RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_lock_owner(
+ const Thread_queue_Control *the_thread_queue
+)
+{
+#if defined(RTEMS_SMP)
+ return the_thread_queue->owner == _SMP_lock_Who_am_I();
+#else
+ return _ISR_Get_level() != 0;
+#endif
+}
+#endif
+
+#if defined(RTEMS_SMP)
+void _Thread_queue_Do_release_critical(
+ Thread_queue_Control *the_thread_queue,
+ ISR_lock_Context *lock_context
+);
+#else
+RTEMS_INLINE_ROUTINE void _Thread_queue_Do_release_critical(
+ Thread_queue_Control *the_thread_queue,
+ ISR_lock_Context *lock_context
+)
+{
+ (void) the_thread_queue;
+ (void) lock_context;
+ _Assert( _Thread_queue_Is_lock_owner( the_thread_queue ) );
+}
+#endif
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Release_critical(
+ Thread_queue_Control *the_thread_queue,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Do_release_critical(
+ the_thread_queue,
+ &queue_context->Lock_context.Lock_context
+ );
+}
+
+#if defined(RTEMS_SMP)
+void _Thread_queue_Release(
+ Thread_queue_Control *the_thread_queue,
+ Thread_queue_Context *queue_context
+);
+#else
+RTEMS_INLINE_ROUTINE void _Thread_queue_Release(
+ Thread_queue_Control *the_thread_queue,
+ Thread_queue_Context *queue_context
+)
+{
+ (void) the_thread_queue;
+ _Assert( _Thread_queue_Is_lock_owner( the_thread_queue ) );
+ _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
+}
+#endif
+
+Thread_Control *_Thread_queue_Do_dequeue(
+ Thread_queue_Control *the_thread_queue,
+ const Thread_queue_Operations *operations
+#if defined(RTEMS_MULTIPROCESSING)
+ ,
+ Thread_queue_MP_callout mp_callout
+#endif
+);
+
+/**
+ * @brief Gets a pointer to a thread waiting on the_thread_queue.
+ *
+ * This function returns a pointer to a thread waiting on
+ * the_thread_queue. The selection of this thread is based on
+ * the discipline of the_thread_queue. If no threads are waiting
+ * on the_thread_queue, then NULL is returned.
+ *
+ * - INTERRUPT LATENCY:
+ * + single case
+ */
+#if defined(RTEMS_MULTIPROCESSING)
+ #define _Thread_queue_Dequeue( \
+ the_thread_queue, \
+ operations, \
+ mp_callout \
+ ) \
+ _Thread_queue_Do_dequeue( \
+ the_thread_queue, \
+ operations, \
+ mp_callout \
+ )
+#else
+ #define _Thread_queue_Dequeue( \
+ the_thread_queue, \
+ operations, \
+ mp_callout \
+ ) \
+ _Thread_queue_Do_dequeue( \
+ the_thread_queue, \
+ operations \
+ )
+#endif
+
+/**
+ * @brief Blocks the thread and places it on the thread queue.
+ *
+ * This enqueues the thread on the thread queue, blocks the thread, and
+ * optionally starts the thread timer via the enqueue callout set in the
+ * queue_context, see _Thread_queue_Context_set_enqueue_callout().
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock and register it as the new thread lock.
+ * Thread dispatching is disabled before the thread queue lock is released.
+ * Thread dispatching is enabled once the sequence to block the thread is
+ * complete. The operation to enqueue the thread on the queue is protected by
+ * the thread queue lock. This makes it possible to use the thread queue lock
+ * to protect the state of objects embedding the thread queue and directly
+ * enter _Thread_queue_Enqueue() in case the thread must block.
+ *
+ * The thread queue context must be set up with the following functions,
+ * otherwise the behaviour is unpredictable:
+ *
+ * - _Thread_queue_Context_set_thread_state(),
+ *
+ * - _Thread_queue_Context_set_enqueue_callout() or
+ * _Thread_queue_Context_set_enqueue_do_nothing_extra() or
+ * _Thread_queue_Context_set_enqueue_timeout_ticks() or
+ * _Thread_queue_Context_set_enqueue_timeout_monotonic_timespec() or
+ * _Thread_queue_Context_set_enqueue_timeout_realtime_timespec(),
+ *
+ * - _Thread_queue_Context_set_deadlock_callout().
+ *
+ * @code
+ * #include <rtems/score/threadqimpl.h>
+ * #include <rtems/score/statesimpl.h>
+ *
+ * #define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority
+ *
+ * typedef struct {
+ * Thread_queue_Control Queue;
+ * } Mutex;
+ *
+ * void _Mutex_Obtain( Mutex *mutex )
+ * {
+ * Thread_queue_Context queue_context;
+ * Thread_Control *executing;
+ *
+ * _Thread_queue_Context_initialize( &queue_context );
+ *   _Thread_queue_Acquire( &mutex->Queue, &queue_context );
+ *
+ * executing = _Thread_Executing;
+ *
+ *   if ( mutex->Queue.Queue.owner == NULL ) {
+ *     mutex->Queue.Queue.owner = executing;
+ *     _Thread_queue_Release( &mutex->Queue, &queue_context );
+ * } else {
+ * _Thread_queue_Context_set_thread_state(
+ * &queue_context,
+ * STATES_WAITING_FOR_MUTEX
+ * );
+ * _Thread_queue_Context_set_enqueue_do_nothing_extra( &queue_context );
+ * _Thread_queue_Context_set_deadlock_callout(
+ *       &queue_context,
+ * _Thread_queue_Deadlock_fatal
+ * );
+ * _Thread_queue_Enqueue(
+ * &mutex->Queue.Queue,
+ * MUTEX_TQ_OPERATIONS,
+ * executing,
+ * &queue_context
+ * );
+ * }
+ * }
+ * @endcode
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] operations The thread queue operations.
+ * @param[in] the_thread The thread to enqueue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ */
+void _Thread_queue_Enqueue(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief Enqueues the thread on the thread queue and busy waits for dequeue.
+ *
+ * Optionally starts the thread timer via the enqueue callout set in the
+ * queue_context, see _Thread_queue_Context_set_enqueue_callout().
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock and register it as the new thread lock.
+ *
+ * The thread priorities of the owner and the thread are updated with respect
+ * to the scheduler.  The sticky level of the thread is incremented.  A thread
+ * dispatch is performed if necessary.
+ *
+ * Afterwards, the thread busy waits on the thread wait flags until a timeout
+ * occurs or the thread queue is surrendered to this thread. So, it sticks to
+ * the processor instead of blocking with respect to the scheduler.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] operations The thread queue operations.
+ * @param[in] the_thread The thread to enqueue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ */
+Status_Control _Thread_queue_Enqueue_sticky(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+#endif
+
+/**
+ * @brief Extracts the thread from the thread queue, restores the default wait
+ * operations and restores the default thread lock.
+ *
+ * The caller must be the owner of the thread queue lock. The thread queue
+ * lock is not released.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] operations The thread queue operations.
+ * @param[in] the_thread The thread to extract.
+ * @param[in] queue_context The thread queue context.
+ *
+ * @return Returns the unblock indicator for _Thread_queue_Unblock_critical().
+ * True indicates that this thread must be unblocked by the scheduler later in
+ * _Thread_queue_Unblock_critical(), and false otherwise. In case false is
+ * returned, then the thread queue enqueue procedure was interrupted. Thus it
+ * will unblock itself and the thread wait information is no longer accessible,
+ * since this thread may already block on another resource in an SMP
+ * configuration.
+ */
+bool _Thread_queue_Extract_locked(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Unblocks the thread which was on the thread queue before.
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock. Thread dispatching is disabled before the
+ * thread queue lock is released and an unblock is necessary. Thread
+ * dispatching is enabled once the sequence to unblock the thread is complete.
+ *
+ * @param[in] unblock The unblock indicator returned by
+ * _Thread_queue_Extract_locked().
+ * @param[in] queue The actual thread queue.
+ * @param[in] the_thread The thread to extract.
+ * @param[in] lock_context The lock context of the lock acquire.
+ */
+void _Thread_queue_Unblock_critical(
+ bool unblock,
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+);
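+
+/*
+ * The two functions above are intended to be used as a pair while the
+ * thread queue lock is held; a condensed sketch:
+ *
+ *   unblock = _Thread_queue_Extract_locked(
+ *     queue,
+ *     operations,
+ *     the_thread,
+ *     &queue_context
+ *   );
+ *   _Thread_queue_Unblock_critical(
+ *     unblock,
+ *     queue,
+ *     the_thread,
+ *     &queue_context.Lock_context.Lock_context
+ *   );
+ */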
+
+/**
+ * @brief Extracts the thread from the thread queue and unblocks it.
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock and restore the default thread lock. Thread
+ * dispatching is disabled before the thread queue lock is released and an
+ * unblock is necessary. Thread dispatching is enabled once the sequence to
+ * unblock the thread is complete. This makes it possible to use the thread
+ * queue lock to protect the state of objects embedding the thread queue and
+ * directly enter _Thread_queue_Extract_critical() to finalize an operation in
+ * case a waiting thread exists.
+ *
+ * @code
+ * #include <rtems/score/threadqimpl.h>
+ *
+ * #define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority
+ *
+ * typedef struct {
+ * Thread_queue_Control Queue;
+ * Thread_Control *owner;
+ * } Mutex;
+ *
+ * void _Mutex_Release( Mutex *mutex )
+ * {
+ * Thread_queue_Context queue_context;
+ * Thread_Control *first;
+ *
+ *   _Thread_queue_Context_initialize( &queue_context );
+ *   _Thread_queue_Acquire( &mutex->Queue, &queue_context );
+ *
+ *   first = _Thread_queue_First_locked( &mutex->Queue, MUTEX_TQ_OPERATIONS );
+ *   mutex->owner = first;
+ *
+ *   if ( first != NULL ) {
+ *     _Thread_queue_Extract_critical(
+ *       &mutex->Queue.Queue,
+ *       MUTEX_TQ_OPERATIONS,
+ *       first,
+ *       &queue_context
+ *     );
+ *   } else {
+ *     _Thread_queue_Release( &mutex->Queue, &queue_context );
+ *   }
+ * }
+ * @endcode
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] operations The thread queue operations.
+ * @param[in] the_thread The thread to extract.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ */
+void _Thread_queue_Extract_critical(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Extracts thread from thread queue.
+ *
+ * This routine removes @a the_thread from its thread queue
+ * and cancels any timeouts associated with this blocking.
+ *
+ * @param[in] the_thread is the pointer to a thread control block that
+ * is to be removed
+ */
+void _Thread_queue_Extract( Thread_Control *the_thread );
+
+/**
+ * @brief Extracts the_thread from the_thread_queue.
+ *
+ * This routine extracts the_thread from the_thread_queue
+ * and ensures that if there is a proxy for this task on
+ * another node, it is also dealt with.
+ */
+void _Thread_queue_Extract_with_proxy(
+ Thread_Control *the_thread
+);
+
+/**
+ * @brief Surrenders the thread queue previously owned by the thread to the
+ * first enqueued thread.
+ *
+ * The owner of the thread queue must be set to NULL by the caller.
+ *
+ * This function releases the thread queue lock. In addition it performs a
+ * thread dispatch if necessary.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] heads The thread queue heads. It must not be NULL.
+ * @param[in] previous_owner The previous owner thread surrendering the thread
+ * queue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ * @param[in] operations The thread queue operations.
+ */
+void _Thread_queue_Surrender(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
+);
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief Surrenders the thread queue previously owned by the thread to the
+ * first enqueued thread.
+ *
+ * The owner of the thread queue must be set to NULL by the caller.
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock.
+ *
+ * The thread priorities of the previous owner and the new owner are updated. The
+ * sticky level of the previous owner is decremented. A thread dispatch is
+ * performed if necessary.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] heads The thread queue heads. It must not be NULL.
+ * @param[in] previous_owner The previous owner thread surrendering the thread
+ * queue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ * @param[in] operations The thread queue operations.
+ */
+void _Thread_queue_Surrender_sticky(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
+);
+#endif
+
+RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_empty(
+ const Thread_queue_Queue *queue
+)
+{
+ return queue->heads == NULL;
+}
+
+/**
+ * @brief Returns the first thread on the thread queue if it exists, otherwise
+ * @c NULL.
+ *
+ * The caller must be the owner of the thread queue lock. The thread queue
+ * lock is not released.
+ *
+ * @param[in] the_thread_queue The thread queue.
+ * @param[in] operations The thread queue operations.
+ *
+ * @retval NULL No thread is present on the thread queue.
+ * @retval first The first thread on the thread queue according to the enqueue
+ * order.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_queue_First_locked(
+ Thread_queue_Control *the_thread_queue,
+ const Thread_queue_Operations *operations
+)
+{
+ Thread_queue_Heads *heads = the_thread_queue->Queue.heads;
+
+ if ( heads != NULL ) {
+ return ( *operations->first )( heads );
+ } else {
+ return NULL;
+ }
+}
+
+/**
+ * @brief Returns the first thread on the thread queue if it exists, otherwise
+ * @c NULL.
+ *
+ * @param[in] the_thread_queue The thread queue.
+ *
+ * @retval NULL No thread is present on the thread queue.
+ * @retval first The first thread on the thread queue according to the enqueue
+ * order.
+ */
+Thread_Control *_Thread_queue_First(
+ Thread_queue_Control *the_thread_queue,
+ const Thread_queue_Operations *operations
+);
+
+/**
+ * @brief Thread queue flush filter function.
+ *
+ * Called under protection of the thread queue lock by
+ * _Thread_queue_Flush_critical() to optionally alter the thread wait
+ * information and control the iteration.
+ *
+ * @param the_thread The thread to extract. This is the first parameter to
+ * optimize for architectures that use the same register for the first
+ * parameter and the return value.
+ * @param queue The actual thread queue.
+ * @param queue_context The thread queue context of the lock acquire. May be
+ * used to pass additional data to the filter function via an overlay
+ * structure. The filter function should not release or acquire the thread
+ * queue lock.
+ *
+ * @retval the_thread Extract this thread.
+ * @retval NULL Do not extract this thread and stop the thread queue flush
+ * operation. Threads that are already extracted will complete the flush
+ * operation.
+ */
+typedef Thread_Control *( *Thread_queue_Flush_filter )(
+ Thread_Control *the_thread,
+ Thread_queue_Queue *queue,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Default thread queue flush filter function.
+ *
+ * @param the_thread The thread to extract.
+ * @param queue Unused.
+ * @param queue_context Unused.
+ *
+ * @retval the_thread Extract this thread.
+ */
+Thread_Control *_Thread_queue_Flush_default_filter(
+ Thread_Control *the_thread,
+ Thread_queue_Queue *queue,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Status unavailable thread queue flush filter function.
+ *
+ * Sets the thread wait return code of the thread to STATUS_UNAVAILABLE.
+ *
+ * @param the_thread The thread to extract.
+ * @param queue Unused.
+ * @param queue_context Unused.
+ *
+ * @retval the_thread Extract this thread.
+ */
+Thread_Control *_Thread_queue_Flush_status_unavailable(
+ Thread_Control *the_thread,
+ Thread_queue_Queue *queue,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Status object was deleted thread queue flush filter function.
+ *
+ * Sets the thread wait return code of the thread to STATUS_OBJECT_WAS_DELETED
+ *
+ * @param the_thread The thread to extract.
+ * @param queue Unused.
+ * @param queue_context Unused.
+ *
+ * @retval the_thread Extract this thread.
+ */
+Thread_Control *_Thread_queue_Flush_status_object_was_deleted(
+ Thread_Control *the_thread,
+ Thread_queue_Queue *queue,
+ Thread_queue_Context *queue_context
+);
+
+/**
+ * @brief Unblocks all threads enqueued on the thread queue.
+ *
+ * This function iteratively extracts the first enqueued thread of the thread
+ * queue until the thread queue is empty or the filter function indicates a
+ * stop. The thread timers of the extracted threads are cancelled. The
+ * extracted threads are unblocked.
+ *
+ * @param queue The actual thread queue.
+ * @param operations The thread queue operations.
+ * @param filter The filter function is called for each thread to extract from
+ * the thread queue. It may be used to alter the thread under protection of
+ * the thread queue lock, for example to set the thread wait return code.
+ * The return value of the filter function controls if the thread queue flush
+ * operation should stop or continue.
+ * @param queue_context The thread queue context of the lock acquire. May be
+ * used to pass additional data to the filter function via an overlay
+ * structure. The filter function should not release or acquire the thread
+ * queue lock.
+ *
+ * @return The count of extracted threads.
+ */
+size_t _Thread_queue_Flush_critical(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_queue_Flush_filter filter,
+ Thread_queue_Context *queue_context
+);
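+
+/*
+ * A sketch of a flush during object deletion, assuming the thread queue
+ * lock is held and SEM_TQ_OPERATIONS is a hypothetical operations constant
+ * of the object embedding the thread queue:
+ *
+ *   _Thread_queue_Flush_critical(
+ *     &sem->Queue.Queue,
+ *     SEM_TQ_OPERATIONS,
+ *     _Thread_queue_Flush_status_object_was_deleted,
+ *     &queue_context
+ *   );
+ */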
+
+void _Thread_queue_Initialize(
+ Thread_queue_Control *the_thread_queue,
+ const char *name
+);
+
+#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG) && defined(RTEMS_PROFILING)
+ #define THREAD_QUEUE_INITIALIZER( _name ) \
+ { \
+ .Lock_stats = SMP_LOCK_STATS_INITIALIZER( _name ), \
+ .owner = SMP_LOCK_NO_OWNER, \
+ .Queue = { \
+ .Lock = SMP_TICKET_LOCK_INITIALIZER, \
+ .heads = NULL, \
+ .owner = NULL, \
+ .name = _name \
+ } \
+ }
+#elif defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
+ #define THREAD_QUEUE_INITIALIZER( _name ) \
+ { \
+ .owner = SMP_LOCK_NO_OWNER, \
+ .Queue = { \
+ .Lock = SMP_TICKET_LOCK_INITIALIZER, \
+ .heads = NULL, \
+ .owner = NULL, \
+ .name = _name \
+ } \
+ }
+#elif defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
+ #define THREAD_QUEUE_INITIALIZER( _name ) \
+ { \
+ .Lock_stats = SMP_LOCK_STATS_INITIALIZER( _name ), \
+ .Queue = { \
+ .Lock = SMP_TICKET_LOCK_INITIALIZER, \
+ .heads = NULL, \
+ .owner = NULL, \
+ .name = _name \
+ } \
+ }
+#elif defined(RTEMS_SMP)
+ #define THREAD_QUEUE_INITIALIZER( _name ) \
+ { \
+ .Queue = { \
+ .Lock = SMP_TICKET_LOCK_INITIALIZER, \
+ .heads = NULL, \
+ .owner = NULL, \
+ .name = _name \
+ } \
+ }
+#else
+ #define THREAD_QUEUE_INITIALIZER( _name ) \
+ { \
+ .Queue = { \
+ .heads = NULL, \
+ .owner = NULL, \
+ .name = _name \
+ } \
+ }
+#endif
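+
+/*
+ * Usage sketch of the static initializer; the queue name is illustrative:
+ *
+ *   static Thread_queue_Control _My_queue =
+ *     THREAD_QUEUE_INITIALIZER( "My Queue" );
+ */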
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Destroy(
+ Thread_queue_Control *the_thread_queue
+)
+{
+#if defined(RTEMS_SMP)
+ _SMP_ticket_lock_Destroy( &the_thread_queue->Queue.Lock );
+ _SMP_lock_Stats_destroy( &the_thread_queue->Lock_stats );
+#endif
+}
+
+#if defined(RTEMS_MULTIPROCESSING)
+void _Thread_queue_MP_callout_do_nothing(
+ Thread_Control *the_proxy,
+ Objects_Id mp_id
+);
+
+void _Thread_queue_Unblock_proxy(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread
+);
+#endif
+
+#if defined(RTEMS_SMP)
+bool _Thread_queue_Path_acquire_critical(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+
+void _Thread_queue_Path_release_critical(
+ Thread_queue_Context *queue_context
+);
+#endif
+
+/**
+ * @brief Helper structure to ensure that all objects containing a thread queue
+ * have the right layout.
+ *
+ * @see _Thread_Wait_get_id() and THREAD_QUEUE_OBJECT_ASSERT().
+ */
+typedef struct {
+ Objects_Control Object;
+ Thread_queue_Control Wait_queue;
+} Thread_queue_Object;
+
+#define THREAD_QUEUE_OBJECT_ASSERT( object_type, wait_queue_member ) \
+ RTEMS_STATIC_ASSERT( \
+ offsetof( object_type, wait_queue_member ) \
+ == offsetof( Thread_queue_Object, Wait_queue ) \
+ && RTEMS_HAVE_MEMBER_SAME_TYPE( \
+ object_type, \
+ wait_queue_member, \
+ Thread_queue_Object, \
+ Wait_queue \
+ ), \
+ object_type \
+ )
+
+#define THREAD_QUEUE_QUEUE_TO_OBJECT( queue ) \
+ RTEMS_CONTAINER_OF( \
+ queue, \
+ Thread_queue_Object, \
+ Wait_queue.Queue \
+ )
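+/*
+ * Usage sketch (editorial illustration, hypothetical type): an object which
+ * embeds a thread queue can check the layout required by Thread_queue_Object
+ * at compile time.
+ *
+ *   typedef struct {
+ *     Objects_Control      Object;
+ *     Thread_queue_Control Wait_queue;
+ *   } Example_Control;
+ *
+ *   THREAD_QUEUE_OBJECT_ASSERT( Example_Control, Wait_queue );
+ */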
+
+extern const Thread_queue_Operations _Thread_queue_Operations_default;
+
+extern const Thread_queue_Operations _Thread_queue_Operations_FIFO;
+
+extern const Thread_queue_Operations _Thread_queue_Operations_priority;
+
+extern const Thread_queue_Operations _Thread_queue_Operations_priority_inherit;
+
+/**
+ * @brief The special thread queue name to indicate that the thread queue is
+ * embedded in an object with identifier.
+ *
+ * @see _Thread_queue_Object_initialize().
+ */
+extern const char _Thread_queue_Object_name[];
+
+/**
+ * @brief Initializes a thread queue embedded in an object with identifier.
+ *
+ * The object must have the layout specified by Thread_queue_Object. It should
+ * be ensured with the THREAD_QUEUE_OBJECT_ASSERT() static assertion.
+ *
+ * @param[in] the_thread_queue The thread queue.
+ */
+void _Thread_queue_Object_initialize(
+ Thread_queue_Control *the_thread_queue
+);
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/timecounter.h b/cpukit/include/rtems/score/timecounter.h
new file mode 100644
index 0000000000..79444de482
--- /dev/null
+++ b/cpukit/include/rtems/score/timecounter.h
@@ -0,0 +1,244 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreTimecounter
+ *
+ * @brief Timecounter API
+ */
+
+/*
+ * Copyright (c) 2015 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_TIMECOUNTER_H
+#define _RTEMS_SCORE_TIMECOUNTER_H
+
+#include <sys/time.h>
+#include <sys/timetc.h>
+#include <machine/_timecounter.h>
+
+#include <rtems/score/isrlock.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreTimecounter Timecounter Handler
+ *
+ * @ingroup Score
+ *
+ * @{
+ */
+
+/**
+ * @brief Returns the wall clock time in the bintime format.
+ *
+ * @param[out] bt Returns the wall clock time.
+ */
+void _Timecounter_Bintime( struct bintime *bt );
+
+/**
+ * @brief Returns the wall clock time in the timespec format.
+ *
+ * @param[out] ts Returns the wall clock time.
+ */
+void _Timecounter_Nanotime( struct timespec *ts );
+
+/**
+ * @brief Returns the wall clock time in the timeval format.
+ *
+ * @param[out] tv Returns the wall clock time.
+ */
+void _Timecounter_Microtime( struct timeval *tv );
+
+/**
+ * @brief Returns the uptime in the bintime format.
+ *
+ * @param[out] bt Returns the uptime.
+ */
+void _Timecounter_Binuptime( struct bintime *bt );
+
+/**
+ * @brief Returns the uptime in the sbintime_t format.
+ *
+ * @return Returns the uptime.
+ */
+sbintime_t _Timecounter_Sbinuptime( void );
+
+/**
+ * @brief Returns the uptime in the timespec format.
+ *
+ * @param[out] ts Returns the uptime.
+ */
+void _Timecounter_Nanouptime( struct timespec *ts );
+
+/**
+ * @brief Returns the uptime in the timeval format.
+ *
+ * @param[out] tv Returns the uptime.
+ */
+void _Timecounter_Microuptime( struct timeval *tv );
+
+/**
+ * @brief Returns the wall clock time in the bintime format.
+ *
+ * This function obtains the time with a lower overhead and lower accuracy
+ * compared to the _Timecounter_Bintime() variant.
+ *
+ * @param[out] bt Returns the wall clock time.
+ */
+void _Timecounter_Getbintime( struct bintime *bt );
+
+/**
+ * @brief Returns the wall clock time in the timespec format.
+ *
+ * This function obtains the time with a lower overhead and lower accuracy
+ * compared to the _Timecounter_Nanotime() variant.
+ *
+ * @param[out] ts Returns the wall clock time.
+ *
+ * @see _Timecounter_Getbintime().
+ */
+void _Timecounter_Getnanotime( struct timespec *ts );
+
+/**
+ * @brief Returns the wall clock time in the timeval format.
+ *
+ * This function obtains the time with a lower overhead and lower accuracy
+ * compared to the _Timecounter_Microtime() variant.
+ *
+ * @param[out] tv Returns the wall clock time.
+ *
+ * @see _Timecounter_Getbintime().
+ */
+void _Timecounter_Getmicrotime( struct timeval *tv );
+
+/**
+ * @brief Returns the uptime in the bintime format.
+ *
+ * This function obtains the time with a lower overhead and lower accuracy
+ * compared to the _Timecounter_Binuptime() variant.
+ *
+ * @param[out] bt Returns the uptime.
+ */
+void _Timecounter_Getbinuptime( struct bintime *bt );
+
+/**
+ * @brief Returns the uptime in the timespec format.
+ *
+ * This function obtains the time with a lower overhead and lower accuracy
+ * compared to the _Timecounter_Nanouptime() variant.
+ *
+ * @param[out] ts Returns the uptime.
+ */
+void _Timecounter_Getnanouptime( struct timespec *ts );
+
+/**
+ * @brief Returns the uptime in the timeval format.
+ *
+ * This function obtains the time with a lower overhead and lower accuracy
+ * compared to the _Timecounter_Microuptime() variant.
+ *
+ * @param[out] tv Returns the uptime.
+ */
+void _Timecounter_Getmicrouptime( struct timeval *tv );
+
+/**
+ * @brief Returns the boot time in the timeval format.
+ *
+ * @param[out] tv Returns the boot time.
+ */
+void _Timecounter_Getboottime( struct timeval *tv );
+
+/**
+ * @brief Returns the boot time in the bintime format.
+ *
+ * @param[out] bt Returns the boot time.
+ */
+void _Timecounter_Getboottimebin( struct bintime *bt );
+
+/**
+ * @brief Installs the timecounter.
+ *
+ * The timecounter structure must contain valid values in the fields
+ * tc_get_timecount, tc_counter_mask, tc_frequency and tc_quality. All other
+ * fields must be zero initialized.
+ *
+ * @param[in] tc The timecounter.
+ */
+void _Timecounter_Install( struct timecounter *tc );
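+/*
+ * Installation sketch (editorial illustration, hypothetical driver names):
+ * a clock driver fills in exactly the four required fields of a statically
+ * allocated, zero initialized structure and installs it. The quality value
+ * is assumed to be a relative rank among the available timecounters.
+ *
+ *   static uint32_t _Example_Get_timecount( struct timecounter *tc )
+ *   {
+ *     (void) tc;
+ *     return example_read_free_running_counter();
+ *   }
+ *
+ *   static struct timecounter _Example_tc;
+ *
+ *   void _Example_Install( uint32_t frequency_in_hz )
+ *   {
+ *     _Example_tc.tc_get_timecount = _Example_Get_timecount;
+ *     _Example_tc.tc_counter_mask  = 0xffffffff;
+ *     _Example_tc.tc_frequency     = frequency_in_hz;
+ *     _Example_tc.tc_quality       = 100;
+ *     _Timecounter_Install( &_Example_tc );
+ *   }
+ */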
+
+/**
+ * @brief Performs a timecounter tick.
+ */
+void _Timecounter_Tick( void );
+
+/**
+ * @brief Lock to protect the timecounter mechanic.
+ */
+ISR_LOCK_DECLARE( extern, _Timecounter_Lock )
+
+/**
+ * @brief Acquires the timecounter lock.
+ *
+ * @param[in] lock_context The lock context.
+ *
+ * See _Timecounter_Tick_simple().
+ */
+#define _Timecounter_Acquire( lock_context ) \
+ _ISR_lock_ISR_disable_and_acquire( &_Timecounter_Lock, lock_context )
+
+/**
+ * @brief Performs a simple timecounter tick.
+ *
+ * This is a special purpose tick function for simple timecounters to support
+ * legacy clock drivers.
+ *
+ * @param[in] delta The time in timecounter ticks elapsed since the last call
+ * to _Timecounter_Tick_simple().
+ * @param[in] offset The current value of the timecounter.
+ * @param[in] lock_context The lock context of the corresponding
+ * _Timecounter_Acquire().
+ */
+void _Timecounter_Tick_simple(
+ uint32_t delta,
+ uint32_t offset,
+ ISR_lock_Context *lock_context
+);
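+/*
+ * Usage sketch (editorial illustration, hypothetical names): a legacy clock
+ * driver interrupt handler pairs _Timecounter_Acquire() with
+ * _Timecounter_Tick_simple(), which is assumed to release the lock.
+ *
+ *   static void _Example_Clock_isr( void *arg )
+ *   {
+ *     ISR_lock_Context lock_context;
+ *
+ *     (void) arg;
+ *     _Timecounter_Acquire( &lock_context );
+ *     _Timecounter_Tick_simple(
+ *       example_interval_in_counter_ticks(),
+ *       example_read_counter(),
+ *       &lock_context
+ *     );
+ *   }
+ */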
+
+/**
+ * @brief The wall clock time in seconds.
+ */
+extern volatile time_t _Timecounter_Time_second;
+
+/**
+ * @brief The uptime in seconds.
+ *
+ * For compatibility with the FreeBSD network stack the initial value is one
+ * second.
+ */
+extern volatile int32_t _Timecounter_Time_uptime;
+
+/**
+ * @brief The current timecounter.
+ */
+extern struct timecounter *_Timecounter;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_TIMECOUNTER_H */
diff --git a/cpukit/include/rtems/score/timecounterimpl.h b/cpukit/include/rtems/score/timecounterimpl.h
new file mode 100644
index 0000000000..a48ac70683
--- /dev/null
+++ b/cpukit/include/rtems/score/timecounterimpl.h
@@ -0,0 +1,50 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreTimecounter
+ *
+ * @brief Timecounter Implementation
+ */
+
+/*
+ * Copyright (c) 2015 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_TIMECOUNTERIMPL_H
+#define _RTEMS_SCORE_TIMECOUNTERIMPL_H
+
+#include <rtems/score/timecounter.h>
+#include <sys/timetc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreTimecounter
+ *
+ * @{
+ */
+
+void _Timecounter_Set_clock(
+ const struct bintime *bt,
+ ISR_lock_Context *lock_context
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_TIMECOUNTERIMPL_H */
diff --git a/cpukit/include/rtems/score/timespec.h b/cpukit/include/rtems/score/timespec.h
new file mode 100644
index 0000000000..72a000177f
--- /dev/null
+++ b/cpukit/include/rtems/score/timespec.h
@@ -0,0 +1,272 @@
+/**
+ * @file rtems/score/timespec.h
+ *
+ * This include file contains helpers for manipulating timespecs.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_TIMESPEC_H
+#define _RTEMS_SCORE_TIMESPEC_H
+
+/**
+ * @defgroup Timespec Helpers
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality related to manipulating
+ * POSIX struct timespecs.
+ */
+/**@{*/
+
+#include <stdbool.h> /* bool */
+#include <stdint.h> /* uint32_t */
+#include <time.h> /* struct timespec */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Sets the timespec to the specified seconds and nanoseconds.
+ *
+ * This method sets the timespec to the specified seconds and nanoseconds
+ * value.
+ *
+ * @param[in] _time points to the timespec instance to set.
+ * @param[in] _seconds is the seconds portion of the timespec
+ * @param[in] _nanoseconds is the nanoseconds portion of the timespec
+ */
+#define _Timespec_Set( _time, _seconds, _nanoseconds ) \
+ do { \
+ (_time)->tv_sec = (_seconds); \
+ (_time)->tv_nsec = (_nanoseconds); \
+ } while (0)
+
+/**
+ * @brief Sets the timespec to zero.
+ *
+ * This method sets the timespec to zero.
+ *
+ * @param[in] _time points to the timespec instance to zero.
+ */
+#define _Timespec_Set_to_zero( _time ) \
+ do { \
+ (_time)->tv_sec = 0; \
+ (_time)->tv_nsec = 0; \
+ } while (0)
+
+/**
+ * @brief Get seconds portion of timespec.
+ *
+ * This method returns the seconds portion of the specified timespec
+ *
+ * @param[in] _time points to the timespec
+ *
+ * @retval The seconds portion of @a _time.
+ */
+#define _Timespec_Get_seconds( _time ) \
+ ((_time)->tv_sec)
+
+/**
+ * @brief Get nanoseconds portion of timespec.
+ *
+ * This method returns the nanoseconds portion of the specified timespec
+ *
+ * @param[in] _time points to the timespec
+ *
+ * @retval The nanoseconds portion of @a _time.
+ */
+#define _Timespec_Get_nanoseconds( _time ) \
+ ((_time)->tv_nsec)
+
+/**
+ * @brief Get the timespec as nanoseconds.
+ *
+ * This method returns the timespec value as nanoseconds.
+ *
+ * @param[in] time points to the timespec.
+ *
+ * @retval The time in nanoseconds.
+ */
+uint64_t _Timespec_Get_as_nanoseconds(
+ const struct timespec *time
+);
+
+/**
+ * @brief Check if timespec is valid.
+ *
+ * This method determines the validity of a timespec.
+ *
+ * @param[in] time is the timespec instance to validate.
+ *
+ * @retval This method returns true if @a time is valid and
+ * false otherwise.
+ */
+bool _Timespec_Is_valid(
+ const struct timespec *time
+);
+
+/**
+ * @brief The Timespec "less than" operator.
+ *
+ * This method is the less than operator for timespecs.
+ *
+ * @param[in] lhs is the left hand side timespec
+ * @param[in] rhs is the right hand side timespec
+ *
+ * @retval This method returns true if @a lhs is less than the @a rhs and
+ * false otherwise.
+ */
+bool _Timespec_Less_than(
+ const struct timespec *lhs,
+ const struct timespec *rhs
+);
+
+/**
+ * @brief The Timespec "greater than" operator.
+ *
+ * This method is the greater than operator for timespecs.
+ *
+ * @param[in] _lhs is the left hand side timespec
+ * @param[in] _rhs is the right hand side timespec
+ *
+ * @retval This method returns true if @a lhs is greater than the @a rhs and
+ * false otherwise.
+ */
+#define _Timespec_Greater_than( _lhs, _rhs ) \
+ _Timespec_Less_than( _rhs, _lhs )
+
+/**
+ * @brief The Timespec "equal to" operator.
+ *
+ * This method is the equal to operator for timespecs.
+ *
+ * @param[in] lhs is the left hand side timespec
+ * @param[in] rhs is the right hand side timespec
+ *
+ * @retval This method returns true if @a lhs is equal to @a rhs and
+ * false otherwise.
+ */
+#define _Timespec_Equal_to( lhs, rhs ) \
+ ( ((lhs)->tv_sec == (rhs)->tv_sec) && \
+ ((lhs)->tv_nsec == (rhs)->tv_nsec) \
+ )
+
+/**
+ * @brief Add two timespecs.
+ *
+ * This routine adds two timespecs. The second argument is added
+ * to the first.
+ *
+ * @param[in] time is the base time to be added to
+ * @param[in] add is the timespec to add to the first argument
+ *
+ * @retval This method returns the number of seconds @a time increased by.
+ */
+uint32_t _Timespec_Add_to(
+ struct timespec *time,
+ const struct timespec *add
+);
+
+/**
+ * @brief Convert timespec to number of ticks.
+ *
+ * This routine convert the @a time timespec to the corresponding number
+ * of clock ticks.
+ *
+ * @param[in] time is the time to be converted
+ *
+ * @retval This method returns the number of ticks computed.
+ */
+uint32_t _Timespec_To_ticks(
+ const struct timespec *time
+);
+
+/**
+ * @brief Convert ticks to timespec.
+ *
+ * This routine converts the @a ticks value to the corresponding
+ * timespec format @a time.
+ *
+ * @param[in] time is the timespec format time result
+ * @param[in] ticks is the number of ticks to convert
+ */
+void _Timespec_From_ticks(
+ uint32_t ticks,
+ struct timespec *time
+);
+
+/**
+ * @brief Subtract two timespec.
+ *
+ * This routine subtracts two timespecs. @a result is set to
+ * @a end - @a start.
+ *
+ * @param[in] start is the starting time
+ * @param[in] end is the ending time
+ * @param[in] result is the difference between starting and ending time.
+ *
+ * @retval This method fills in @a result.
+ */
+void _Timespec_Subtract(
+ const struct timespec *start,
+ const struct timespec *end,
+ struct timespec *result
+);
+
+/**
+ * @brief Divide timespec by an integer.
+ *
+ * This routine divides a timespec by an integer value. The expected
+ * use is to assist in benchmark calculations where you typically
+ * divide a duration by a number of iterations.
+ *
+ * @param[in] time is the total
+ * @param[in] iterations is the number of iterations
+ * @param[in] result is the average time.
+ *
+ * @retval This method fills in @a result.
+ */
+void _Timespec_Divide_by_integer(
+ const struct timespec *time,
+ uint32_t iterations,
+ struct timespec *result
+);
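+/*
+ * Usage sketch (editorial illustration): the benchmark pattern mentioned
+ * above combines _Timespec_Subtract() and _Timespec_Divide_by_integer() to
+ * average a measured duration over a number of iterations.
+ *
+ *   struct timespec begin, end, duration, average;
+ *
+ *   _Timespec_Subtract( &begin, &end, &duration );
+ *   _Timespec_Divide_by_integer( &duration, iterations, &average );
+ */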
+
+/**
+ * @brief Divide a timespec by another timespec.
+ *
+ * This routine divides a timespec by another timespec. The
+ * intended use is for calculating percentages to three decimal points.
+ *
+ * @param[in] lhs is the left hand number
+ * @param[in] rhs is the right hand number
+ * @param[in] ival_percentage is the integer portion of the percentage
+ * @param[in] fval_percentage is the thousandths of the percentage
+ *
+ * @retval This method fills in @a ival_percentage and @a fval_percentage.
+ */
+void _Timespec_Divide(
+ const struct timespec *lhs,
+ const struct timespec *rhs,
+ uint32_t *ival_percentage,
+ uint32_t *fval_percentage
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/timestamp.h b/cpukit/include/rtems/score/timestamp.h
new file mode 100644
index 0000000000..6fc17ced9c
--- /dev/null
+++ b/cpukit/include/rtems/score/timestamp.h
@@ -0,0 +1,323 @@
+/**
+ * @file rtems/score/timestamp.h
+ *
+ * @brief Helpers for Manipulating Timestamps
+ *
+ * This include file contains helpers for manipulating timestamps.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_TIMESTAMP_H
+#define _RTEMS_SCORE_TIMESTAMP_H
+
+/**
+ * @defgroup SuperCoreTimeStamp Score Timestamp
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality related to manipulating
+ * SuperCore Timestamps. SuperCore Timestamps may be used to
+ * represent time of day, uptime, or intervals.
+ *
+ * The key attribute of the SuperCore Timestamp handler is that it
+ * is a completely opaque handler. There can be multiple implementations
+ * of the required functionality and with a recompile, RTEMS can use
+ * any implementation. It is intended to be a simple wrapper.
+ *
+ * This handler can be implemented as either struct timespec or
+ * unsigned 64-bit numbers. The use of a wrapper class allows
+ * the implementation of timestamps to change on a per architecture
+ * basis. This is an important option as the performance of this
+ * handler is critical.
+ */
+/**@{*/
+
+#include <sys/time.h>
+
+#include <rtems/score/basedefs.h>
+#include <rtems/score/timespec.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Define the Timestamp control type.
+ */
+typedef sbintime_t Timestamp_Control;
+
+/**
+ * @brief Set timestamp to specified seconds and nanoseconds.
+ *
+ * This method sets the timestamp to the specified @a _seconds and @a _nanoseconds
+ * value.
+ *
+ * @param[in] _time points to the timestamp instance to set.
+ * @param[in] _seconds is the seconds portion of the timestamp
+ * @param[in] _nanoseconds is the nanoseconds portion of the timestamp
+ */
+RTEMS_INLINE_ROUTINE void _Timestamp_Set(
+ Timestamp_Control *_time,
+ time_t _seconds,
+ long _nanoseconds
+)
+{
+ struct timespec _ts;
+
+ _ts.tv_sec = _seconds;
+ _ts.tv_nsec = _nanoseconds;
+
+ *_time = tstosbt(_ts);
+}
+
+/**
+ * @brief Sets the timestamp to zero.
+ *
+ * This method sets the timestamp to zero.
+ *
+ * @param[in] _time points to the timestamp instance to zero.
+ */
+
+RTEMS_INLINE_ROUTINE void _Timestamp_Set_to_zero(
+ Timestamp_Control *_time
+)
+{
+ *_time = 0;
+}
+
+/**
+ * @brief Less than operator for timestamps.
+ *
+ * This method is the less than operator for timestamps.
+ *
+ * @param[in] _lhs points to the left hand side timestamp
+ * @param[in] _rhs points to the right hand side timestamp
+ *
+ * @retval This method returns true if @a _lhs is less than the @a _rhs and
+ * false otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Timestamp_Less_than(
+ const Timestamp_Control *_lhs,
+ const Timestamp_Control *_rhs
+)
+{
+ return *_lhs < *_rhs;
+}
+
+/**
+ * @brief Greater than operator for timestamps.
+ *
+ * This method is the greater than operator for timestamps.
+ *
+ * @param[in] _lhs points to the left hand side timestamp
+ * @param[in] _rhs points to the right hand side timestamp
+ *
+ * @retval This method returns true if @a _lhs is greater than the @a _rhs and
+ * false otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Timestamp_Greater_than(
+ const Timestamp_Control *_lhs,
+ const Timestamp_Control *_rhs
+)
+{
+ return *_lhs > *_rhs;
+}
+
+/**
+ * @brief Equal to operator for timestamps.
+ *
+ * This method is the equal to operator for timestamps.
+ *
+ * @param[in] _lhs points to the left hand side timestamp
+ * @param[in] _rhs points to the right hand side timestamp
+ *
+ * @retval This method returns true if @a _lhs is equal to @a _rhs and
+ * false otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Timestamp_Equal_to(
+ const Timestamp_Control *_lhs,
+ const Timestamp_Control *_rhs
+)
+{
+ return *_lhs == *_rhs;
+}
+
+/**
+ * @brief Adds two timestamps.
+ *
+ * This routine adds two timestamps. The second argument is added
+ * to the first.
+ *
+ * @param[in] _time points to the base time to be added to
+ * @param[in] _add points to the timestamp to add to the first argument
+ */
+RTEMS_INLINE_ROUTINE void _Timestamp_Add_to(
+ Timestamp_Control *_time,
+ const Timestamp_Control *_add
+)
+{
+ *_time += *_add;
+}
+
+/**
+ * @brief Subtracts two timestamps.
+ *
+ * This routine subtracts two timestamps. @a result is set to
+ * @a end - @a start.
+ *
+ * @param[in] _start points to the starting time
+ * @param[in] _end points to the ending time
+ * @param[in] _result points to the difference between
+ * starting and ending time.
+ *
+ * @retval This method fills in @a _result.
+ */
+RTEMS_INLINE_ROUTINE void _Timestamp_Subtract(
+ const Timestamp_Control *_start,
+ const Timestamp_Control *_end,
+ Timestamp_Control *_result
+)
+{
+ *_result = *_end - *_start;
+}
+
+/**
+ * @brief Divides a timestamp by another timestamp.
+ *
+ * This routine divides a timestamp by another timestamp. The
+ * intended use is for calculating percentages to three decimal points.
+ *
+ * @param[in] _lhs points to the left hand number
+ * @param[in] _rhs points to the right hand number
+ * @param[in] _ival_percentage points to the integer portion of the percentage
+ * @param[in] _fval_percentage points to the thousandths of the percentage
+ *
+ * @retval This method fills in @a _ival_percentage and @a _fval_percentage.
+ */
+RTEMS_INLINE_ROUTINE void _Timestamp_Divide(
+ const Timestamp_Control *_lhs,
+ const Timestamp_Control *_rhs,
+ uint32_t *_ival_percentage,
+ uint32_t *_fval_percentage
+)
+{
+ struct timespec _ts_left;
+ struct timespec _ts_right;
+
+ _ts_left = sbttots( *_lhs );
+ _ts_right = sbttots( *_rhs );
+
+ _Timespec_Divide(
+ &_ts_left,
+ &_ts_right,
+ _ival_percentage,
+ _fval_percentage
+ );
+}
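+/*
+ * Usage sketch (editorial illustration, hypothetical variables, assuming
+ * <stdio.h> and <inttypes.h>): computing the percentage of the total runtime
+ * consumed by one thread, with three decimal places, e.g. "12.345".
+ *
+ *   uint32_t ival;
+ *   uint32_t fval;
+ *
+ *   _Timestamp_Divide( &thread_cpu_time, &total_time, &ival, &fval );
+ *   printf( "%" PRIu32 ".%03" PRIu32 "%%\n", ival, fval );
+ */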
+
+/**
+ * @brief Get seconds portion of timestamp.
+ *
+ * This method returns the seconds portion of the specified timestamp
+ *
+ * @param[in] _time points to the timestamp
+ *
+ * @retval The seconds portion of @a _time.
+ */
+RTEMS_INLINE_ROUTINE time_t _Timestamp_Get_seconds(
+ const Timestamp_Control *_time
+)
+{
+ return (*_time >> 32);
+}
+
+/**
+ * @brief Get nanoseconds portion of timestamp.
+ *
+ * This method returns the nanoseconds portion of the specified timestamp
+ *
+ * @param[in] _time points to the timestamp
+ *
+ * @retval The nanoseconds portion of @a _time.
+ */
+RTEMS_INLINE_ROUTINE uint32_t _Timestamp_Get_nanoseconds(
+ const Timestamp_Control *_time
+)
+{
+ struct timespec _ts;
+
+ _ts = sbttots( *_time );
+
+ return (uint32_t) _ts.tv_nsec;
+}
+
+/**
+ * @brief Get the timestamp as nanoseconds.
+ *
+ * This method returns the timestamp as nanoseconds.
+ *
+ * @param[in] _time points to the timestamp
+ *
+ * @retval The time in nanoseconds.
+ */
+RTEMS_INLINE_ROUTINE uint64_t _Timestamp_Get_as_nanoseconds(
+ const Timestamp_Control *_time
+)
+{
+ struct timespec _ts;
+
+ _ts = sbttots( *_time );
+
+ return _Timespec_Get_as_nanoseconds( &_ts );
+}
+
+/**
+ * @brief Convert timestamp to struct timespec.
+ *
+ * This method converts the specified @a _timestamp to the struct timespec
+ * format and stores the result in @a _timespec.
+ *
+ * @param[in] _timestamp points to the timestamp
+ * @param[in] _timespec points to the timespec
+ */
+RTEMS_INLINE_ROUTINE void _Timestamp_To_timespec(
+ const Timestamp_Control *_timestamp,
+ struct timespec *_timespec
+)
+{
+ *_timespec = sbttots( *_timestamp );
+}
+
+/**
+ * @brief Convert timestamp to struct timeval.
+ *
+ * @param[in] _timestamp points to the timestamp
+ * @param[in] _timeval points to the timeval
+ */
+RTEMS_INLINE_ROUTINE void _Timestamp_To_timeval(
+ const Timestamp_Control *_timestamp,
+ struct timeval *_timeval
+)
+{
+ *_timeval = sbttotv( *_timestamp );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/tls.h b/cpukit/include/rtems/score/tls.h
new file mode 100644
index 0000000000..644e54e6f7
--- /dev/null
+++ b/cpukit/include/rtems/score/tls.h
@@ -0,0 +1,217 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreTLS
+ *
+ * @brief Thread-Local Storage (TLS)
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_TLS_H
+#define _RTEMS_SCORE_TLS_H
+
+#include <rtems/score/cpu.h>
+
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreTLS Thread-Local Storage (TLS)
+ *
+ * @ingroup Score
+ *
+ * @brief Thread-local storage (TLS) support.
+ *
+ * Variants I and II are according to Ulrich Drepper, "ELF Handling For
+ * Thread-Local Storage".
+ *
+ * @{
+ */
+
+extern char _TLS_Data_begin[];
+
+extern char _TLS_Data_end[];
+
+extern char _TLS_Data_size[];
+
+extern char _TLS_BSS_begin[];
+
+extern char _TLS_BSS_end[];
+
+extern char _TLS_BSS_size[];
+
+extern char _TLS_Size[];
+
+extern char _TLS_Alignment[];
+
+typedef struct {
+ /*
+ * FIXME: Not sure if the generation number type is correct for all
+ * architectures.
+ */
+ uint32_t generation_number;
+
+ void *tls_blocks[1];
+} TLS_Dynamic_thread_vector;
+
+typedef struct TLS_Thread_control_block {
+#ifdef __i386__
+ struct TLS_Thread_control_block *tcb;
+#else
+ TLS_Dynamic_thread_vector *dtv;
+ uintptr_t reserved;
+#endif
+} TLS_Thread_control_block;
+
+typedef struct {
+ uintptr_t module;
+ uintptr_t offset;
+} TLS_Index;
+
+static inline uintptr_t _TLS_Get_size( void )
+{
+ /*
+ * Do not use _TLS_Size here since this will lead GCC to assume that this
+ * symbol is not 0 and the tests for 0 will be optimized away.
+ */
+ return (uintptr_t) _TLS_BSS_end - (uintptr_t) _TLS_Data_begin;
+}
+
+static inline uintptr_t _TLS_Heap_align_up( uintptr_t val )
+{
+ uintptr_t msk = CPU_HEAP_ALIGNMENT - 1;
+
+ return (val + msk) & ~msk;
+}
+
+static inline uintptr_t _TLS_Get_thread_control_block_area_size(
+ uintptr_t alignment
+)
+{
+ return alignment <= sizeof(TLS_Thread_control_block) ?
+ sizeof(TLS_Thread_control_block) : alignment;
+}
+
+static inline uintptr_t _TLS_Get_allocation_size(
+ uintptr_t size,
+ uintptr_t alignment
+)
+{
+ uintptr_t allocation_size = 0;
+
+ allocation_size += _TLS_Heap_align_up( size );
+ allocation_size += _TLS_Get_thread_control_block_area_size( alignment );
+
+#ifndef __i386__
+ allocation_size += sizeof(TLS_Dynamic_thread_vector);
+#endif
+
+ return allocation_size;
+}
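+/*
+ * Usage sketch (editorial illustration): thread initialization code is
+ * expected to reserve the TLS area with this size, using the linker provided
+ * symbols as untyped size and alignment values.
+ *
+ *   uintptr_t tls_alloc_size = _TLS_Get_allocation_size(
+ *     _TLS_Get_size(),
+ *     (uintptr_t) _TLS_Alignment
+ *   );
+ */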
+
+static inline void *_TLS_Copy_and_clear( void *tls_area )
+{
+ tls_area = memcpy(
+ tls_area,
+ _TLS_Data_begin,
+ (size_t) ((uintptr_t)_TLS_Data_size)
+ );
+
+ memset(
+ (char *) tls_area + (size_t)((intptr_t) _TLS_BSS_begin) -
+ (size_t)((intptr_t) _TLS_Data_begin),
+ 0,
+ ((size_t) (intptr_t)_TLS_BSS_size)
+ );
+
+ return tls_area;
+}
+
+static inline void *_TLS_Initialize(
+ void *tls_block,
+ TLS_Thread_control_block *tcb,
+ TLS_Dynamic_thread_vector *dtv
+)
+{
+#ifdef __i386__
+ (void) dtv;
+ tcb->tcb = tcb;
+#else
+ tcb->dtv = dtv;
+ dtv->generation_number = 1;
+ dtv->tls_blocks[0] = tls_block;
+#endif
+
+ return _TLS_Copy_and_clear( tls_block );
+}
+
+/* Use Variant I, TLS offsets emitted by the linker take the TCB into account */
+static inline void *_TLS_TCB_at_area_begin_initialize( void *tls_area )
+{
+ void *tls_block = (char *) tls_area
+ + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment );
+ TLS_Thread_control_block *tcb = tls_area;
+ uintptr_t aligned_size = _TLS_Heap_align_up( (uintptr_t) _TLS_Size );
+ TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
+ ((char *) tls_block + aligned_size);
+
+ return _TLS_Initialize( tls_block, tcb, dtv );
+}
+
+/* Use Variant I, TLS offsets emitted by the linker neglect the TCB */
+static inline void *_TLS_TCB_before_TLS_block_initialize( void *tls_area )
+{
+ void *tls_block = (char *) tls_area
+ + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment );
+ TLS_Thread_control_block *tcb = (TLS_Thread_control_block *)
+ ((char *) tls_block - sizeof(*tcb));
+ uintptr_t aligned_size = _TLS_Heap_align_up( (uintptr_t) _TLS_Size );
+ TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
+ ((char *) tls_block + aligned_size);
+
+ return _TLS_Initialize( tls_block, tcb, dtv );
+}
+
+/* Use Variant II */
+static inline void *_TLS_TCB_after_TLS_block_initialize( void *tls_area )
+{
+ uintptr_t size = (uintptr_t) _TLS_Size;
+ uintptr_t tls_align = (uintptr_t) _TLS_Alignment;
+ uintptr_t tls_mask = tls_align - 1;
+ uintptr_t heap_align = _TLS_Heap_align_up( tls_align );
+ uintptr_t heap_mask = heap_align - 1;
+ TLS_Thread_control_block *tcb = (TLS_Thread_control_block *)
+ ((char *) tls_area + ((size + heap_mask) & ~heap_mask));
+ void *tls_block = (char *) tcb - ((size + tls_mask) & ~tls_mask);
+ TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
+ ((char *) tcb + sizeof(*tcb));
+
+ _TLS_Initialize( tls_block, tcb, dtv );
+
+ return tcb;
+}
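+/*
+ * Usage sketch (editorial illustration, hypothetical allocator): which of
+ * the three variants applies is an architecture property; a port is assumed
+ * to select exactly one of them.
+ *
+ *   void *tls_area = example_allocate_aligned(
+ *     _TLS_Get_allocation_size( _TLS_Get_size(), (uintptr_t) _TLS_Alignment ),
+ *     (uintptr_t) _TLS_Alignment
+ *   );
+ *   void *thread_pointer = _TLS_TCB_before_TLS_block_initialize( tls_area );
+ */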
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_TLS_H */
diff --git a/cpukit/include/rtems/score/tod.h b/cpukit/include/rtems/score/tod.h
new file mode 100644
index 0000000000..c0ab5e795d
--- /dev/null
+++ b/cpukit/include/rtems/score/tod.h
@@ -0,0 +1,32 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreTOD
+ *
+ * @brief Time of Day Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_TOD_H
+#define _RTEMS_SCORE_TOD_H
+
+#include <rtems/score/basedefs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/todimpl.h b/cpukit/include/rtems/score/todimpl.h
new file mode 100644
index 0000000000..b00ab6cca2
--- /dev/null
+++ b/cpukit/include/rtems/score/todimpl.h
@@ -0,0 +1,304 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreTOD
+ *
+ * @brief Time of Day Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_TODIMPL_H
+#define _RTEMS_SCORE_TODIMPL_H
+
+#include <rtems/score/tod.h>
+#include <rtems/score/timestamp.h>
+#include <rtems/score/timecounterimpl.h>
+#include <rtems/score/watchdog.h>
+
+#include <sys/time.h>
+#include <time.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreTOD Time of Day Handler
+ *
+ * @ingroup Score
+ *
+ * The following constants are related to the time of day and are
+ * independent of RTEMS.
+ */
+/**@{*/
+
+/**
+ * This constant represents the number of seconds in a minute.
+ */
+#define TOD_SECONDS_PER_MINUTE (uint32_t)60
+
+/**
+ * This constant represents the number of minutes per hour.
+ */
+#define TOD_MINUTES_PER_HOUR (uint32_t)60
+
+/**
+ * This constant represents the number of months in a year.
+ */
+#define TOD_MONTHS_PER_YEAR (uint32_t)12
+
+/**
+ * This constant represents the number of days in a non-leap year.
+ */
+#define TOD_DAYS_PER_YEAR (uint32_t)365
+
+/**
+ * This constant represents the number of hours per day.
+ */
+#define TOD_HOURS_PER_DAY (uint32_t)24
+
+/**
+ * This constant represents the number of seconds in a day which does
+ * not include a leap second.
+ */
+#define TOD_SECONDS_PER_DAY (uint32_t) (TOD_SECONDS_PER_MINUTE * \
+ TOD_MINUTES_PER_HOUR * \
+ TOD_HOURS_PER_DAY)
+
+/**
+ * This constant represents the number of seconds in a non-leap year.
+ */
+#define TOD_SECONDS_PER_NON_LEAP_YEAR (365 * TOD_SECONDS_PER_DAY)
+
+/**
+ * This constant represents the number of milliseconds in a second.
+ */
+#define TOD_MILLISECONDS_PER_SECOND (uint32_t)1000
+
+/**
+ * This constant represents the number of microseconds in a second.
+ */
+#define TOD_MICROSECONDS_PER_SECOND (uint32_t)1000000
+
+/**
+ * This constant represents the number of nanoseconds in a second.
+ */
+#define TOD_NANOSECONDS_PER_SECOND (uint32_t)1000000000
+
+/**
+ * This constant represents the number of nanoseconds in a microsecond.
+ */
+#define TOD_NANOSECONDS_PER_MICROSECOND (uint32_t)1000
+
+/**@}*/
+
+/**
+ * Seconds from January 1, 1970 to January 1, 1988. Used to account for
+ * differences between POSIX API and RTEMS core. The timespec format time
+ * is kept in POSIX compliant form. The four extra days cover the leap days
+ * of 1972, 1976, 1980, and 1984.
+ */
+#define TOD_SECONDS_1970_THROUGH_1988 \
+ (((1987 - 1970 + 1) * TOD_SECONDS_PER_NON_LEAP_YEAR) + \
+ (4 * TOD_SECONDS_PER_DAY))
+
+/**
+ * @brief Earliest year to which a time of day can be initialized.
+ *
+ * The following constant defines the earliest year to which a
+ * time of day can be initialized. This is considered the
+ * epoch.
+ */
+#define TOD_BASE_YEAR 1988
+
+/**
+ * @addtogroup ScoreTOD
+ *
+ * This handler encapsulates functionality used to manage time of day.
+ */
+/**@{*/
+
+/**
+ * @brief TOD control.
+ */
+typedef struct {
+ /**
+ * @brief Indicates if the time of day is set.
+ *
+ * This is true if the application has set the current
+ * time of day, and false otherwise.
+ */
+ bool is_set;
+} TOD_Control;
+
+extern TOD_Control _TOD;
+
+void _TOD_Lock( void );
+
+void _TOD_Unlock( void );
+
+#if defined(RTEMS_DEBUG)
+bool _TOD_Is_owner( void );
+#endif
+
+static inline void _TOD_Acquire( ISR_lock_Context *lock_context )
+{
+ _Timecounter_Acquire( lock_context );
+}
+
+/**
+ * @brief Sets the time of day.
+ *
+ * The caller must be the owner of the TOD lock.
+ *
+ * @param tod The new time of day in timespec format representing
+ * the time since UNIX Epoch.
+ * @param lock_context The ISR lock context used for the corresponding
+ * _TOD_Acquire(). The caller must be the owner of the TOD lock. This
+ * function will release the TOD lock.
+ */
+void _TOD_Set(
+ const struct timespec *tod,
+ ISR_lock_Context *lock_context
+);
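+/*
+ * Usage sketch (editorial illustration, hypothetical value): setting the
+ * time of day follows the lock, acquire, set, unlock sequence implied by the
+ * documentation above.
+ *
+ *   ISR_lock_Context lock_context;
+ *   struct timespec  tod;
+ *
+ *   tod.tv_sec = example_seconds_since_unix_epoch;
+ *   tod.tv_nsec = 0;
+ *
+ *   _TOD_Lock();
+ *   _TOD_Acquire( &lock_context );
+ *   _TOD_Set( &tod, &lock_context );
+ *   _TOD_Unlock();
+ */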
+
+/**
+ * @brief Gets the current time in the timespec format.
+ *
+ * @param[out] tod is the value gathered by the request
+ */
+static inline void _TOD_Get(
+ struct timespec *tod
+)
+{
+ _Timecounter_Nanotime( tod );
+}
+
+/**
+ * @brief Gets the system uptime with potential accuracy to the nanosecond.
+ *
+ * This routine returns the system uptime with potential accuracy
+ * to the nanosecond.
+ *
+ * The initial uptime value is undefined.
+ *
+ * @param[in] time is a pointer to the uptime to be returned
+ */
+static inline void _TOD_Get_uptime(
+ Timestamp_Control *time
+)
+{
+ *time = _Timecounter_Sbinuptime();
+}
+
+/**
+ * @brief Gets the system uptime with potential accuracy to the nanosecond.
+ *
+ * The initial uptime value is zero.
+ *
+ * @param[in] time is a pointer to the uptime to be returned
+ */
+static inline void _TOD_Get_zero_based_uptime(
+ Timestamp_Control *time
+)
+{
+ *time = _Timecounter_Sbinuptime() - SBT_1S;
+}
+
+/**
+ * @brief Gets the system uptime with potential accuracy to the nanosecond.
+ *
+ * The initial uptime value is zero.
+ *
+ * @param[in] time is a pointer to the uptime to be returned
+ */
+static inline void _TOD_Get_zero_based_uptime_as_timespec(
+ struct timespec *time
+)
+{
+ _Timecounter_Nanouptime( time );
+ --time->tv_sec;
+}
+
+/**
+ * @brief Number of seconds since RTEMS epoch.
+ *
+ * The following contains the number of seconds from 00:00:00
+ * January 1, TOD_BASE_YEAR until the current time of day.
+ */
+static inline uint32_t _TOD_Seconds_since_epoch( void )
+{
+ return (uint32_t) _Timecounter_Time_second;
+}
+
+/**
+ * @brief Gets number of ticks in a second.
+ *
+ * This method returns the number of ticks in a second.
+ *
+ * @note If the clock tick value does not divide evenly into a second, then
+ * the duration of this number of ticks will be slightly shorter than a
+ * second.
+ */
+uint32_t TOD_TICKS_PER_SECOND_method(void);
+
+/**
+ * @brief Gets number of ticks in a second.
+ *
+ * This method exists to hide the fact that TOD_TICKS_PER_SECOND cannot
+ * be implemented as a macro in a .h file due to visibility issues.
+ * The Configuration Table is not available to SuperCore .h files but
+ * is available to their .c files.
+ */
+#define TOD_TICKS_PER_SECOND TOD_TICKS_PER_SECOND_method()
+
+/**
+ * This routine returns a timeval based upon the internal timespec format TOD.
+ */
+
+RTEMS_INLINE_ROUTINE void _TOD_Get_timeval(
+ struct timeval *time
+)
+{
+ _Timecounter_Microtime( time );
+}
+
+/**
+ * @brief Adjust the Time of Day
+ *
+ * This method is used to adjust the current time of day by the
+ * specified amount.
+ *
+ * @param[in] delta is the amount to adjust
+ */
+void _TOD_Adjust(
+ const struct timespec *delta
+);
+
+/**
+ * @brief Check if the TOD is Set
+ *
+ * @return TRUE if the time is set, FALSE otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _TOD_Is_set( void )
+{
+ return _TOD.is_set;
+}
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/userext.h b/cpukit/include/rtems/score/userext.h
new file mode 100644
index 0000000000..5af5824808
--- /dev/null
+++ b/cpukit/include/rtems/score/userext.h
@@ -0,0 +1,273 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreUserExt
+ *
+ * @brief User Extension Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_USEREXT_H
+#define _RTEMS_SCORE_USEREXT_H
+
+#include <rtems/score/interr.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void User_extensions_routine RTEMS_DEPRECATED;
+
+/**
+ * @defgroup ScoreUserExt User Extension Handler
+ *
+ * @ingroup Score
+ *
+ * @brief The User Extension Handler provides invocation of application
+ * dependent routines at critical points in the life of each thread and the
+ * system as a whole.
+ */
+/**@{**/
+
+/**
+ * @brief Task create extension.
+ *
+ * It corresponds to _Thread_Initialize() (used by the rtems_task_create()
+ * directive and pthread_create()).
+ *
+ * It is invoked after the new thread has been completely initialized, but
+ * before it is placed on a ready chain.
+ *
+ * Thread dispatching may be disabled. This depends on the context of the
+ * _Thread_Initialize() call. Thread dispatch is disabled during the creation
+ * of the idle thread and the initialization threads. It is considered invalid
+ * API usage if the application calls _Thread_Initialize() with
+ * disabled thread dispatching. Disabled thread dispatching is different from
+ * disabled preemption.
+ *
+ * It can be assumed that the executing thread locked the allocator mutex.
+ * The only exception is the creation of the idle thread. In this case the
+ * allocator mutex is not locked. Since the allocator mutex allows nesting,
+ * the normal memory allocation routines can be used.
+ *
+ * @param[in] executing The executing thread.
+ * @param[in] created The created thread.
+ *
+ * @retval true Successful operation.
+ * @retval false A thread create user extension will frequently attempt to
+ * allocate resources. If this allocation fails, then the extension should
+ * return @a false and the entire thread create operation will fail.
+ */
+typedef bool ( *User_extensions_thread_create_extension )(
+ Thread_Control *executing,
+ Thread_Control *created
+);
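+/*
+ * Illustrative sketch (editorial example, hypothetical names): a create
+ * extension which allocates a per-thread buffer and vetoes the create
+ * operation when the allocation fails. Since the allocator mutex allows
+ * nesting, the normal memory allocation routines may be used here.
+ *
+ *   static bool _Example_Thread_create(
+ *     Thread_Control *executing,
+ *     Thread_Control *created
+ *   )
+ *   {
+ *     void *buffer = malloc( EXAMPLE_BUFFER_SIZE );
+ *
+ *     (void) executing;
+ *     example_attach_thread_data( created, buffer );
+ *     return buffer != NULL;
+ *   }
+ */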
+
+/**
+ * @brief Task delete extension.
+ *
+ * It corresponds to _Thread_Close() (used by the rtems_task_delete()
+ * directive, pthread_exit() and pthread_cancel()).
+ *
+ * It is invoked before all resources of the thread are deleted. The executing
+ * and deleted arguments are never equal.
+ *
+ * Thread dispatching is enabled. The executing thread locked the allocator
+ * mutex.
+ *
+ * @param[in] executing The executing thread.
+ * @param[in] deleted The deleted thread.
+ */
+typedef void( *User_extensions_thread_delete_extension )(
+ Thread_Control *executing,
+ Thread_Control *deleted
+);
+
+/**
+ * @brief Task start extension.
+ *
+ * It corresponds to _Thread_Start() (used by the rtems_task_start()
+ * directive).
+ *
+ * It is invoked after the environment of the thread has been loaded and the
+ * thread has been made ready.
+ *
+ * Thread dispatching is disabled. The executing thread is not the holder of
+ * the allocator mutex.
+ *
+ * @param[in] executing The executing thread.
+ * @param[in] started The started thread.
+ */
+typedef void( *User_extensions_thread_start_extension )(
+ Thread_Control *executing,
+ Thread_Control *started
+);
+
+/**
+ * @brief Task restart extension.
+ *
+ * It corresponds to _Thread_Restart() (used by the rtems_task_restart()
+ * directive).
+ *
+ * It is invoked in the context of the restarted thread right before the
+ * execution context is reloaded. The executing and restarted arguments are
+ * always equal. The thread stack reflects the previous execution context.
+ *
+ * Thread dispatching is enabled. The thread is not the holder of the
+ * allocator mutex. The thread life is protected. Thread restart and delete
+ * requests issued by restart extensions lead to recursion.
+ *
+ * @param[in] executing The executing thread.
+ * @param[in] restarted The executing thread. Yes, the executing thread.
+ */
+typedef void( *User_extensions_thread_restart_extension )(
+ Thread_Control *executing,
+ Thread_Control *restarted
+);
+
+/**
+ * @brief Task switch extension.
+ *
+ * It corresponds to _Thread_Dispatch().
+ *
+ * It is invoked before the context switch from the executing to the heir
+ * thread.
+ *
+ * Thread dispatching is disabled. The state of the allocator mutex is
+ * arbitrary. Interrupts are disabled and the per-CPU lock is acquired on SMP
+ * configurations.
+ *
+ * The context switches initiated through _Thread_Start_multitasking() are not
+ * covered by this extension.
+ *
+ * @param[in] executing The executing thread.
+ * @param[in] heir The heir thread.
+ */
+typedef void( *User_extensions_thread_switch_extension )(
+ Thread_Control *executing,
+ Thread_Control *heir
+);
+
+/**
+ * @brief Task begin extension.
+ *
+ * It corresponds to _Thread_Handler().
+ *
+ * Thread dispatching is disabled. The executing thread is not the holder of
+ * the allocator mutex.
+ *
+ * @param[in] executing The executing thread.
+ */
+typedef void( *User_extensions_thread_begin_extension )(
+ Thread_Control *executing
+);
+
+/**
+ * @brief Task exitted extension.
+ *
+ * It corresponds to _Thread_Handler() after a return of the entry function.
+ *
+ * Thread dispatching is disabled. The state of the allocator mutex is
+ * arbitrary.
+ *
+ * @param[in] executing The executing thread.
+ */
+typedef void( *User_extensions_thread_exitted_extension )(
+ Thread_Control *executing
+);
+
+/**
+ * @brief Fatal error extension.
+ *
+ * It corresponds to _Terminate() (used by the rtems_fatal() directive).
+ *
+ * This extension should not call any RTEMS directives.
+ *
+ * @param[in] source The fatal source indicating the subsystem the fatal
+ * condition originated in.
+ * @param[in] always_set_to_false This parameter is always set to false and
+ * provided only for backward compatibility reasons.
+ * @param[in] code The fatal error code. This value must be interpreted with
+ * respect to the source.
+ */
+typedef void( *User_extensions_fatal_extension )(
+ Internal_errors_Source source,
+ bool always_set_to_false,
+ Internal_errors_t code
+);
+
+/**
+ * @brief Task termination extension.
+ *
+ * This extension is invoked by _Thread_Life_action_handler() in case a
+ * termination request is recognized.
+ *
+ * It is invoked in the context of the terminated thread right before the
+ * thread dispatch to the heir thread. The POSIX cleanup and key destructors
+ * execute in this context.
+ *
+ * Thread dispatching is enabled. The thread is not the holder of the
+ * allocator mutex. The thread life is protected. Thread restart and delete
+ * requests issued by terminate extensions lead to recursion.
+ *
+ * @param[in] terminated The terminated thread.
+ */
+typedef void( *User_extensions_thread_terminate_extension )(
+ Thread_Control *terminated
+);
+
+/**
+ * @brief User extension table.
+ */
+typedef struct {
+ User_extensions_thread_create_extension thread_create;
+ User_extensions_thread_start_extension thread_start;
+ User_extensions_thread_restart_extension thread_restart;
+ User_extensions_thread_delete_extension thread_delete;
+ User_extensions_thread_switch_extension thread_switch;
+ User_extensions_thread_begin_extension thread_begin;
+ User_extensions_thread_exitted_extension thread_exitted;
+ User_extensions_fatal_extension fatal;
+ User_extensions_thread_terminate_extension thread_terminate;
+} User_extensions_Table;
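+/*
+ * Usage sketch (editorial illustration, hypothetical handlers): a table is
+ * typically defined with designated initializers; entries left out are NULL,
+ * which is assumed to disable the corresponding callout.
+ *
+ *   static const User_extensions_Table _Example_extensions = {
+ *     .thread_create = _Example_Thread_create,
+ *     .fatal         = _Example_Fatal
+ *   };
+ */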
+
+/**
+ * @brief Manages the switch callouts.
+ *
+ * They are managed separately from other extensions for performance reasons.
+ */
+typedef struct {
+ Chain_Node Node;
+ User_extensions_thread_switch_extension thread_switch;
+} User_extensions_Switch_control;
+
+/**
+ * @brief Manages each user extension set.
+ *
+ * The switch control is part of the extensions control even if not used due to
+ * the extension not having a switch handler.
+ */
+typedef struct {
+ Chain_Node Node;
+ User_extensions_Switch_control Switch;
+ User_extensions_Table Callouts;
+} User_extensions_Control;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/userextimpl.h b/cpukit/include/rtems/score/userextimpl.h
new file mode 100644
index 0000000000..5ad2c63765
--- /dev/null
+++ b/cpukit/include/rtems/score/userextimpl.h
@@ -0,0 +1,369 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreUserExt
+ *
+ * @brief User Extension Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_USEREXTIMPL_H
+#define _RTEMS_SCORE_USEREXTIMPL_H
+
+#include <rtems/score/userext.h>
+#include <rtems/score/isrlock.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/percpu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreUserExt
+ */
+/**@{**/
+
+/**
+ * @brief Chain iterator for dynamic user extensions.
+ *
+ * Since user extensions may delete or restart the executing thread, we must
+ * clean up registered iterators.
+ *
+ * @see _User_extensions_Iterate(), _User_extensions_Destroy_iterators() and
+ * Thread_Control::last_user_extensions_iterator.
+ */
+typedef struct User_extensions_Iterator {
+ Chain_Iterator Iterator;
+ struct User_extensions_Iterator *previous;
+} User_extensions_Iterator;
+
+typedef struct {
+ /**
+ * @brief Active dynamically added user extensions.
+ */
+ Chain_Control Active;
+
+ /**
+ * @brief Chain iterator registration.
+ */
+ Chain_Iterator_registry Iterators;
+
+ /**
+ * @brief Lock to protect User_extensions_List::Active and
+ * User_extensions_List::Iterators.
+ */
+ ISR_LOCK_MEMBER( Lock )
+} User_extensions_List;
+
+/**
+ * @brief List of active extensions.
+ */
+extern User_extensions_List _User_extensions_List;
+
+/**
+ * @brief List of active task switch extensions.
+ */
+extern Chain_Control _User_extensions_Switches_list;
+
+/**
+ * @name Extension Maintenance
+ */
+/**@{**/
+
+void _User_extensions_Handler_initialization( void );
+
+void _User_extensions_Add_set(
+ User_extensions_Control *extension
+);
+
+RTEMS_INLINE_ROUTINE void _User_extensions_Add_API_set(
+ User_extensions_Control *extension
+)
+{
+ _User_extensions_Add_set( extension );
+}
+
+RTEMS_INLINE_ROUTINE void _User_extensions_Add_set_with_table(
+ User_extensions_Control *extension,
+ const User_extensions_Table *extension_table
+)
+{
+ extension->Callouts = *extension_table;
+
+ _User_extensions_Add_set( extension );
+}
+
+void _User_extensions_Remove_set(
+ User_extensions_Control *extension
+);
+
+/**
+ * @brief User extension visitor.
+ *
+ * @param[in, out] executing The currently executing thread.
+ * @param[in, out] arg The argument passed to _User_extensions_Iterate().
+ * @param[in] callouts The current callouts.
+ */
+typedef void (*User_extensions_Visitor)(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+typedef struct {
+ Thread_Control *created;
+ bool ok;
+} User_extensions_Thread_create_context;
+
+void _User_extensions_Thread_create_visitor(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+void _User_extensions_Thread_delete_visitor(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+void _User_extensions_Thread_start_visitor(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+void _User_extensions_Thread_restart_visitor(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+void _User_extensions_Thread_begin_visitor(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+void _User_extensions_Thread_exitted_visitor(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+typedef struct {
+ Internal_errors_Source source;
+ Internal_errors_t error;
+} User_extensions_Fatal_context;
+
+void _User_extensions_Fatal_visitor(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+void _User_extensions_Thread_terminate_visitor(
+ Thread_Control *executing,
+ void *arg,
+ const User_extensions_Table *callouts
+);
+
+/**
+ * @brief Iterates through all user extensions and calls the visitor for each.
+ *
+ * @param[in, out] arg The argument passed to the visitor.
+ * @param[in] visitor The visitor for each extension.
+ * @param[in] direction The iteration direction for dynamic extensions.
+ */
+void _User_extensions_Iterate(
+ void *arg,
+ User_extensions_Visitor visitor,
+ Chain_Iterator_direction direction
+);
+
+/** @} */
+
+/**
+ * @name Extension Callout Dispatcher
+ */
+/**@{**/
+
+static inline bool _User_extensions_Thread_create( Thread_Control *created )
+{
+ User_extensions_Thread_create_context ctx = { created, true };
+
+ _User_extensions_Iterate(
+ &ctx,
+ _User_extensions_Thread_create_visitor,
+ CHAIN_ITERATOR_FORWARD
+ );
+
+ return ctx.ok;
+}
+
+static inline void _User_extensions_Thread_delete( Thread_Control *deleted )
+{
+ _User_extensions_Iterate(
+ deleted,
+ _User_extensions_Thread_delete_visitor,
+ CHAIN_ITERATOR_BACKWARD
+ );
+}
+
+static inline void _User_extensions_Thread_start( Thread_Control *started )
+{
+ _User_extensions_Iterate(
+ started,
+ _User_extensions_Thread_start_visitor,
+ CHAIN_ITERATOR_FORWARD
+ );
+}
+
+static inline void _User_extensions_Thread_restart( Thread_Control *restarted )
+{
+ _User_extensions_Iterate(
+ restarted,
+ _User_extensions_Thread_restart_visitor,
+ CHAIN_ITERATOR_FORWARD
+ );
+}
+
+static inline void _User_extensions_Thread_begin( Thread_Control *executing )
+{
+ _User_extensions_Iterate(
+ executing,
+ _User_extensions_Thread_begin_visitor,
+ CHAIN_ITERATOR_FORWARD
+ );
+}
+
+static inline void _User_extensions_Thread_switch(
+ Thread_Control *executing,
+ Thread_Control *heir
+)
+{
+ const Chain_Control *chain = &_User_extensions_Switches_list;
+ const Chain_Node *tail = _Chain_Immutable_tail( chain );
+ const Chain_Node *node = _Chain_Immutable_first( chain );
+
+ if ( node != tail ) {
+ Per_CPU_Control *cpu_self;
+#if defined(RTEMS_SMP)
+ ISR_Level level;
+#endif
+
+ cpu_self = _Per_CPU_Get();
+
+#if defined(RTEMS_SMP)
+ _ISR_Local_disable( level );
+#endif
+ _Per_CPU_Acquire( cpu_self );
+
+ while ( node != tail ) {
+ const User_extensions_Switch_control *extension =
+ (const User_extensions_Switch_control *) node;
+
+ (*extension->thread_switch)( executing, heir );
+
+ node = _Chain_Immutable_next( node );
+ }
+
+ _Per_CPU_Release( cpu_self );
+#if defined(RTEMS_SMP)
+ _ISR_Local_enable( level );
+#endif
+ }
+}
+
+static inline void _User_extensions_Thread_exitted( Thread_Control *executing )
+{
+ _User_extensions_Iterate(
+ executing,
+ _User_extensions_Thread_exitted_visitor,
+ CHAIN_ITERATOR_FORWARD
+ );
+}
+
+static inline void _User_extensions_Fatal(
+ Internal_errors_Source source,
+ Internal_errors_t error
+)
+{
+ User_extensions_Fatal_context ctx = { source, error };
+
+ _User_extensions_Iterate(
+ &ctx,
+ _User_extensions_Fatal_visitor,
+ CHAIN_ITERATOR_FORWARD
+ );
+}
+
+static inline void _User_extensions_Thread_terminate(
+ Thread_Control *executing
+)
+{
+ _User_extensions_Iterate(
+ executing,
+ _User_extensions_Thread_terminate_visitor,
+ CHAIN_ITERATOR_BACKWARD
+ );
+}
+
+static inline void _User_extensions_Acquire( ISR_lock_Context *lock_context )
+{
+ _ISR_lock_ISR_disable_and_acquire(
+ &_User_extensions_List.Lock,
+ lock_context
+ );
+}
+
+static inline void _User_extensions_Release( ISR_lock_Context *lock_context )
+{
+ _ISR_lock_Release_and_ISR_enable(
+ &_User_extensions_List.Lock,
+ lock_context
+ );
+}
+
+static inline void _User_extensions_Destroy_iterators(
+ Thread_Control *the_thread
+)
+{
+ ISR_lock_Context lock_context;
+ User_extensions_Iterator *iter;
+
+ _User_extensions_Acquire( &lock_context );
+
+ iter = the_thread->last_user_extensions_iterator;
+
+ while ( iter != NULL ) {
+ _Chain_Iterator_destroy( &iter->Iterator );
+ iter = iter->previous;
+ }
+
+ _User_extensions_Release( &lock_context );
+}
+
+/** @} */
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/watchdog.h b/cpukit/include/rtems/score/watchdog.h
new file mode 100644
index 0000000000..dbb092bbef
--- /dev/null
+++ b/cpukit/include/rtems/score/watchdog.h
@@ -0,0 +1,166 @@
+/**
+ * @file rtems/score/watchdog.h
+ *
+ * @brief Constants and Structures Associated with Watchdog Timers
+ *
+ * This include file contains all the constants and structures associated
+ * with watchdog timers. This Handler provides mechanisms which can be
+ * used to initialize and manipulate watchdog timers.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_WATCHDOG_H
+#define _RTEMS_SCORE_WATCHDOG_H
+
+#include <rtems/score/basedefs.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/rbtree.h>
+
+struct Per_CPU_Control;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreWatchdog Watchdog Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality related to the scheduling of
+ * watchdog functions to be called at specific times in the future.
+ *
+ * @note This handler does not have anything to do with hardware watchdog
+ * timers.
+ */
+/**@{*/
+
+typedef struct Watchdog_Control Watchdog_Control;
+
+/**
+ * @brief Type used to specify the length of intervals.
+ *
+ * This type is used to specify the length of intervals.
+ */
+typedef uint32_t Watchdog_Interval;
+
+/**
+ * @brief Special watchdog ticks value to indicate an infinite wait.
+ */
+#define WATCHDOG_NO_TIMEOUT 0
+
+/**
+ * @brief Return type from a Watchdog Service Routine.
+ *
+ * This type defines the return type from a Watchdog Service Routine.
+ */
+typedef void Watchdog_Service_routine;
+
+/**
+ * @brief Pointer to a watchdog service routine.
+ *
+ * This type defines a pointer to a watchdog service routine.
+ */
+typedef Watchdog_Service_routine
+ ( *Watchdog_Service_routine_entry )( Watchdog_Control * );
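+
+/*
+ * A watchdog service routine therefore has the following shape
+ * (illustration only, the handler name and body are hypothetical):
+ *
+ *   static Watchdog_Service_routine my_timeout_handler(
+ *     Watchdog_Control *the_watchdog
+ *   )
+ *   {
+ *     ... invoked during watchdog tick processing once the watchdog
+ *     expired; must not block ...
+ *   }
+ */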
+
+/**
+ * @brief The watchdog header to manage scheduled watchdogs.
+ */
+typedef struct {
+ /**
+ * @brief Red-black tree of scheduled watchdogs sorted by expiration time.
+ */
+ RBTree_Control Watchdogs;
+
+ /**
+ * @brief The scheduled watchdog with the earliest expiration time or NULL in
+ * case no watchdog is scheduled.
+ */
+ RBTree_Node *first;
+} Watchdog_Header;
+
+/**
+ * @brief The control block used to manage each watchdog timer.
+ *
+ * The following record defines the control block used
+ * to manage each watchdog timer.
+ */
+struct Watchdog_Control {
+ /**
+ * @brief Nodes for the watchdog.
+ */
+ union {
+    /**
+     * @brief This field is a red-black tree node which allows the watchdog
+     * to be placed on the red-black tree used to manage the scheduled
+     * watchdogs.
+     */
+ RBTree_Node RBTree;
+
+    /**
+     * @brief This field is a chain node which allows the watchdog to be
+     * placed on a chain used to manage pending watchdogs, e.g. by the timer
+     * server.
+     */
+ Chain_Node Chain;
+ } Node;
+
+#if defined(RTEMS_SMP)
+ /** @brief This field references the processor of this watchdog control. */
+ struct Per_CPU_Control *cpu;
+#endif
+
+ /** @brief This field is the function to invoke. */
+ Watchdog_Service_routine_entry routine;
+
+ /** @brief This field is the expiration time point. */
+ uint64_t expire;
+};
+
+/**
+ * @brief The watchdog ticks counter.
+ *
+ * With a 1ms watchdog tick, this counter overflows after 2**32 ms since
+ * boot, i.e. after about 49.7 days.
+ */
+extern volatile Watchdog_Interval _Watchdog_Ticks_since_boot;
+
+/**
+ * @brief The watchdog nanoseconds per tick.
+ *
+ * This constant is defined by the application configuration via
+ * <rtems/confdefs.h>.
+ */
+extern const uint32_t _Watchdog_Nanoseconds_per_tick;
+
+/**
+ * @brief The watchdog ticks per second.
+ *
+ * This constant is defined by the application configuration via
+ * <rtems/confdefs.h>.
+ */
+extern const uint32_t _Watchdog_Ticks_per_second;
+
+/**
+ * @brief The maximum number of seconds representable in the monotonic watchdog
+ * format.
+ *
+ * This constant is defined by the application configuration via
+ * <rtems/confdefs.h>.
+ */
+extern const uint64_t _Watchdog_Monotonic_max_seconds;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/watchdogimpl.h b/cpukit/include/rtems/score/watchdogimpl.h
new file mode 100644
index 0000000000..f219a70768
--- /dev/null
+++ b/cpukit/include/rtems/score/watchdogimpl.h
@@ -0,0 +1,574 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines in the Watchdog Handler
+ *
+ * This file contains the static inline implementation of all inlined
+ * routines in the Watchdog Handler.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2004.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_WATCHDOGIMPL_H
+#define _RTEMS_SCORE_WATCHDOGIMPL_H
+
+#include <rtems/score/watchdog.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/isrlock.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/rbtreeimpl.h>
+
+#include <sys/types.h>
+#include <sys/timespec.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreWatchdog
+ * @{
+ */
+
+/**
+ * @brief Watchdog states.
+ */
+typedef enum {
+ /**
+ * @brief The watchdog is scheduled and a black node in the red-black tree.
+ */
+ WATCHDOG_SCHEDULED_BLACK,
+
+ /**
+ * @brief The watchdog is scheduled and a red node in the red-black tree.
+ */
+ WATCHDOG_SCHEDULED_RED,
+
+ /**
+ * @brief The watchdog is inactive.
+ */
+ WATCHDOG_INACTIVE,
+
+ /**
+ * @brief The watchdog is on a chain of pending watchdogs.
+ *
+   * This state is used, for example, by the timer server.
+ */
+ WATCHDOG_PENDING
+} Watchdog_State;
+
+/**
+ * @brief Watchdog initializer for static initialization.
+ *
+ * The processor of this watchdog is set to the processor with index zero.
+ *
+ * @see _Watchdog_Preinitialize().
+ */
+#if defined(RTEMS_SMP)
+ #define WATCHDOG_INITIALIZER( routine ) \
+ { \
+ { { { NULL, NULL, NULL, WATCHDOG_INACTIVE } } }, \
+ &_Per_CPU_Information[ 0 ].per_cpu, \
+ ( routine ), \
+ 0 \
+ }
+#else
+ #define WATCHDOG_INITIALIZER( routine ) \
+ { \
+ { { { NULL, NULL, NULL, WATCHDOG_INACTIVE } } }, \
+ ( routine ), \
+ 0 \
+ }
+#endif
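+
+/*
+ * Usage sketch for static initialization (the handler name is
+ * hypothetical):
+ *
+ *   static Watchdog_Control _My_watchdog =
+ *     WATCHDOG_INITIALIZER( my_timeout_handler );
+ */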
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Header_initialize(
+ Watchdog_Header *header
+)
+{
+ _RBTree_Initialize_empty( &header->Watchdogs );
+ header->first = NULL;
+}
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Header_destroy(
+ Watchdog_Header *header
+)
+{
+ /* Do nothing */
+ (void) header;
+}
+
+/**
+ * @brief Performs a watchdog tick.
+ *
+ * @param cpu The processor for this watchdog tick.
+ */
+void _Watchdog_Tick( struct Per_CPU_Control *cpu );
+
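+/**
+ * @brief Gets the state of the watchdog.
+ *
+ * The state is stored in the color field of the red-black tree node, which
+ * is why the scheduled states distinguish red and black nodes.
+ */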
+RTEMS_INLINE_ROUTINE Watchdog_State _Watchdog_Get_state(
+ const Watchdog_Control *the_watchdog
+)
+{
+ return RB_COLOR( &the_watchdog->Node.RBTree, Node );
+}
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Set_state(
+ Watchdog_Control *the_watchdog,
+ Watchdog_State state
+)
+{
+ RB_COLOR( &the_watchdog->Node.RBTree, Node ) = state;
+}
+
+RTEMS_INLINE_ROUTINE Per_CPU_Control *_Watchdog_Get_CPU(
+ const Watchdog_Control *the_watchdog
+)
+{
+#if defined(RTEMS_SMP)
+ return the_watchdog->cpu;
+#else
+ return _Per_CPU_Get_by_index( 0 );
+#endif
+}
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Set_CPU(
+ Watchdog_Control *the_watchdog,
+ Per_CPU_Control *cpu
+)
+{
+#if defined(RTEMS_SMP)
+ the_watchdog->cpu = cpu;
+#else
+ (void) cpu;
+#endif
+}
+
+/**
+ * @brief Pre-initializes a watchdog.
+ *
+ * This routine must be called before a watchdog is used in any way.  The
+ * exception is a watchdog statically initialized via WATCHDOG_INITIALIZER().
+ *
+ * @param[in] the_watchdog The uninitialized watchdog.
+ * @param[in] cpu The processor for this watchdog.
+ */
+RTEMS_INLINE_ROUTINE void _Watchdog_Preinitialize(
+ Watchdog_Control *the_watchdog,
+ Per_CPU_Control *cpu
+)
+{
+ _Watchdog_Set_CPU( the_watchdog, cpu );
+ _Watchdog_Set_state( the_watchdog, WATCHDOG_INACTIVE );
+
+#if defined(RTEMS_DEBUG)
+ the_watchdog->routine = NULL;
+ the_watchdog->expire = 0;
+#endif
+}
+
+/**
+ * @brief Initializes a watchdog with a new service routine.
+ *
+ * The watchdog must be inactive.
+ */
+RTEMS_INLINE_ROUTINE void _Watchdog_Initialize(
+ Watchdog_Control *the_watchdog,
+ Watchdog_Service_routine_entry routine
+)
+{
+ _Assert( _Watchdog_Get_state( the_watchdog ) == WATCHDOG_INACTIVE );
+ the_watchdog->routine = routine;
+}
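+
+/*
+ * Usage sketch of the run-time initialization sequence implied by the rules
+ * above (watchdog and handler names are hypothetical):
+ *
+ *   Watchdog_Control timer;
+ *
+ *   _Watchdog_Preinitialize( &timer, _Per_CPU_Get_by_index( 0 ) );
+ *   _Watchdog_Initialize( &timer, my_timeout_handler );
+ *
+ * The watchdog is now inactive and may be scheduled, for example via
+ * _Watchdog_Per_CPU_insert_ticks().
+ */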
+
+void _Watchdog_Do_tickle(
+ Watchdog_Header *header,
+ uint64_t now,
+#if defined(RTEMS_SMP)
+ ISR_lock_Control *lock,
+#endif
+ ISR_lock_Context *lock_context
+);
+
+#if defined(RTEMS_SMP)
+ #define _Watchdog_Tickle( header, now, lock, lock_context ) \
+ _Watchdog_Do_tickle( header, now, lock, lock_context )
+#else
+ #define _Watchdog_Tickle( header, now, lock, lock_context ) \
+ _Watchdog_Do_tickle( header, now, lock_context )
+#endif
+
+/**
+ * @brief Inserts a watchdog into the set of scheduled watchdogs according to
+ * the specified expiration time.
+ *
+ * The watchdog must be inactive.
+ */
+void _Watchdog_Insert(
+ Watchdog_Header *header,
+ Watchdog_Control *the_watchdog,
+ uint64_t expire
+);
+
+/**
+ * @brief In case the watchdog is scheduled, then it is removed from the set of
+ * scheduled watchdogs.
+ *
+ * The watchdog must be initialized before this call.
+ */
+void _Watchdog_Remove(
+ Watchdog_Header *header,
+ Watchdog_Control *the_watchdog
+);
+
+/**
+ * @brief In case the watchdog is scheduled, then it is removed from the set
+ * of scheduled watchdogs and the remaining time until expiration is
+ * returned.
+ *
+ * The watchdog must be initialized before this call.
+ *
+ * @retval 0 The now time is greater than or equal to the expiration time of
+ * the watchdog.
+ * @retval other The difference of the expiration time and the now time.
+ */
+RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Cancel(
+ Watchdog_Header *header,
+ Watchdog_Control *the_watchdog,
+ uint64_t now
+)
+{
+ uint64_t expire;
+ uint64_t remaining;
+
+ expire = the_watchdog->expire;
+
+ if ( now < expire ) {
+ remaining = expire - now;
+ } else {
+ remaining = 0;
+ }
+
+ _Watchdog_Remove( header, the_watchdog );
+
+ return remaining;
+}
+
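+/**
+ * @brief Returns true if the watchdog is scheduled, and false otherwise.
+ *
+ * This relies on the ordering of Watchdog_State: the two scheduled states
+ * precede WATCHDOG_INACTIVE.
+ */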
+RTEMS_INLINE_ROUTINE bool _Watchdog_Is_scheduled(
+ const Watchdog_Control *the_watchdog
+)
+{
+ return _Watchdog_Get_state( the_watchdog ) < WATCHDOG_INACTIVE;
+}
+
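+/**
+ * @brief Sets the first (earliest) watchdog of the header to the in-order
+ * successor of the specified watchdog.
+ *
+ * This is intended for the removal of the current first watchdog: its
+ * successor is the leftmost node of its right subtree, or otherwise its
+ * parent.
+ */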
+RTEMS_INLINE_ROUTINE void _Watchdog_Next_first(
+ Watchdog_Header *header,
+ Watchdog_Control *the_watchdog
+)
+{
+ RBTree_Node *node = _RBTree_Right( &the_watchdog->Node.RBTree );
+
+ if ( node != NULL ) {
+ RBTree_Node *left;
+
+ while ( ( left = _RBTree_Left( node ) ) != NULL ) {
+ node = left;
+ }
+
+ header->first = node;
+ } else {
+ header->first = _RBTree_Parent( &the_watchdog->Node.RBTree );
+ }
+}
+
+/**
+ * @brief The maximum watchdog ticks value for the far future.
+ */
+#define WATCHDOG_MAXIMUM_TICKS UINT64_MAX
+
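+/**
+ * @brief The number of nanoseconds in one second.
+ */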
+#define WATCHDOG_NANOSECONDS_PER_SECOND 1000000000
+
+/**
+ * @brief The number of bits necessary to store 1000000000
+ * (= WATCHDOG_NANOSECONDS_PER_SECOND) nanoseconds.
+ *
+ * The expiration time is an unsigned 64-bit integer.  To store absolute
+ * timeouts, the lower 30 bits (2**30 == 1073741824) hold the nanoseconds
+ * and the upper 34 bits hold the seconds since the UNIX Epoch.  This leads
+ * to a year 2514 problem.
+ */
+#define WATCHDOG_BITS_FOR_1E9_NANOSECONDS 30
+
+/**
+ * @brief The maximum number of seconds representable in the realtime
+ * watchdog format.
+ *
+ * We have 34 bits for the seconds part, i.e. 2**34 - 1 == 0x3ffffffff.
+ */
+#define WATCHDOG_REALTIME_MAX_SECONDS 0x3ffffffff
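+
+/*
+ * Back-of-the-envelope check of the claims above (assuming 365.25-day
+ * years): 2**34 s / ( 86400 * 365.25 s/yr ) is roughly 544 years, and
+ * 1970 + 544 lands in the year 2514.  The 30-bit nanoseconds field cannot
+ * carry into the seconds part, since 999999999 < 2**30 == 1073741824.
+ */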
+
+RTEMS_INLINE_ROUTINE bool _Watchdog_Is_valid_timespec(
+ const struct timespec *ts
+)
+{
+ return ts != NULL
+ && (unsigned long) ts->tv_nsec < WATCHDOG_NANOSECONDS_PER_SECOND;
+}
+
+RTEMS_INLINE_ROUTINE bool _Watchdog_Is_valid_interval_timespec(
+ const struct timespec *ts
+)
+{
+ return _Watchdog_Is_valid_timespec( ts ) && ts->tv_sec >= 0;
+}
+
+RTEMS_INLINE_ROUTINE const struct timespec * _Watchdog_Future_timespec(
+ struct timespec *now,
+ const struct timespec *delta
+)
+{
+ uint64_t sec;
+
+ if ( !_Watchdog_Is_valid_interval_timespec( delta ) ) {
+ return NULL;
+ }
+
+ sec = (uint64_t) now->tv_sec;
+ sec += (uint64_t) delta->tv_sec;
+ now->tv_nsec += delta->tv_nsec;
+
+  /*
+   * Both seconds values are non-negative, and since
+   * 2 * (2**63 - 1) + 1 == UINT64_MAX, neither the sum nor the carry below
+   * can overflow the unsigned 64-bit sec.
+   */
+ if ( now->tv_nsec >= WATCHDOG_NANOSECONDS_PER_SECOND ) {
+ now->tv_nsec -= WATCHDOG_NANOSECONDS_PER_SECOND;
+ ++sec;
+ }
+
+ if ( sec <= INT64_MAX ) {
+ now->tv_sec = sec;
+ } else {
+ now->tv_sec = INT64_MAX;
+ }
+
+ return now;
+}
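+
+/*
+ * Worked example of the carry handling above (values chosen for
+ * illustration):
+ *
+ *   struct timespec now = { 100, 900000000 };
+ *   const struct timespec delta = { 1, 200000000 };
+ *
+ *   _Watchdog_Future_timespec( &now, &delta );
+ *
+ * The nanoseconds sum to 1100000000 and carry one second, so the result is
+ * { 102, 100000000 }.
+ */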
+
+RTEMS_INLINE_ROUTINE bool _Watchdog_Is_far_future_monotonic_timespec(
+ const struct timespec *ts
+)
+{
+ return ts->tv_sec >= _Watchdog_Monotonic_max_seconds;
+}
+
+RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Monotonic_from_timespec(
+ const struct timespec *ts
+)
+{
+ uint64_t ticks;
+
+ _Assert( _Watchdog_Is_valid_timespec( ts ) );
+ _Assert( ts->tv_sec >= 0 );
+ _Assert( !_Watchdog_Is_far_future_monotonic_timespec( ts ) );
+
+ ticks = (uint64_t) ts->tv_sec * _Watchdog_Ticks_per_second;
+ ticks += (unsigned long) ts->tv_nsec / _Watchdog_Nanoseconds_per_tick;
+
+ return ticks;
+}
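+
+/*
+ * For example, with a hypothetical 1 ms clock tick
+ * (_Watchdog_Nanoseconds_per_tick == 1000000 and
+ * _Watchdog_Ticks_per_second == 1000), { 2, 500000000 } converts to
+ * 2 * 1000 + 500000000 / 1000000 == 2500 ticks.
+ */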
+
+RTEMS_INLINE_ROUTINE bool _Watchdog_Is_far_future_realtime_timespec(
+ const struct timespec *ts
+)
+{
+ return ts->tv_sec > WATCHDOG_REALTIME_MAX_SECONDS;
+}
+
+RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Realtime_from_seconds(
+ uint32_t seconds
+)
+{
+ uint64_t ticks = seconds;
+
+ ticks <<= WATCHDOG_BITS_FOR_1E9_NANOSECONDS;
+
+ return ticks;
+}
+
+RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Realtime_from_timespec(
+ const struct timespec *ts
+)
+{
+ uint64_t ticks;
+
+ _Assert( _Watchdog_Is_valid_timespec( ts ) );
+ _Assert( ts->tv_sec >= 0 );
+ _Assert( !_Watchdog_Is_far_future_realtime_timespec( ts ) );
+
+ ticks = (uint64_t) ts->tv_sec;
+ ticks <<= WATCHDOG_BITS_FOR_1E9_NANOSECONDS;
+ ticks |= (uint32_t) ts->tv_nsec;
+
+ return ticks;
+}
+
+RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Realtime_from_sbintime(
+ sbintime_t sbt
+)
+{
+ uint64_t ticks = ( sbt >> 32 ) << WATCHDOG_BITS_FOR_1E9_NANOSECONDS;
+
+ ticks |= ( (uint64_t) 1000000000 * (uint32_t) sbt ) >> 32;
+
+ return ticks;
+}
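+
+/*
+ * An sbintime_t is a signed 32.32 fixed-point value, so the conversion
+ * above shifts the integer seconds into the upper 34 bits of the watchdog
+ * format and rescales the binary fraction to nanoseconds.  Worked example
+ * (value chosen for illustration): 1.5 s is
+ * ( INT64_C( 1 ) << 32 ) | 0x80000000, and the fraction maps to
+ * 0x80000000 * 1000000000 / 2**32 == 500000000 ns, which yields
+ * ( UINT64_C( 1 ) << 30 ) | 500000000.
+ */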
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_acquire_critical(
+ Per_CPU_Control *cpu,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_Acquire( &cpu->Watchdog.Lock, lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_release_critical(
+ Per_CPU_Control *cpu,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_Release( &cpu->Watchdog.Lock, lock_context );
+}
+
+RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Per_CPU_insert_ticks(
+ Watchdog_Control *the_watchdog,
+ Per_CPU_Control *cpu,
+ Watchdog_Interval ticks
+)
+{
+ ISR_lock_Context lock_context;
+ Watchdog_Header *header;
+ uint64_t expire;
+
+ header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
+
+ _Watchdog_Set_CPU( the_watchdog, cpu );
+
+ _Watchdog_Per_CPU_acquire_critical( cpu, &lock_context );
+ expire = ticks + cpu->Watchdog.ticks;
+  _Watchdog_Insert( header, the_watchdog, expire );
+ _Watchdog_Per_CPU_release_critical( cpu, &lock_context );
+ return expire;
+}
+
+RTEMS_INLINE_ROUTINE bool _Watchdog_Per_CPU_lazy_insert_monotonic(
+ Watchdog_Control *the_watchdog,
+ Per_CPU_Control *cpu,
+ uint64_t expire
+)
+{
+ ISR_lock_Context lock_context;
+ Watchdog_Header *header;
+ bool insert;
+
+ header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
+
+ _Watchdog_Set_CPU( the_watchdog, cpu );
+
+ _Watchdog_Per_CPU_acquire_critical( cpu, &lock_context );
+ insert = ( expire > cpu->Watchdog.ticks );
+
+ if ( insert ) {
+    _Watchdog_Insert( header, the_watchdog, expire );
+ }
+
+ _Watchdog_Per_CPU_release_critical( cpu, &lock_context );
+ return insert;
+}
+
+RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Per_CPU_insert_realtime(
+ Watchdog_Control *the_watchdog,
+ Per_CPU_Control *cpu,
+ uint64_t expire
+)
+{
+ ISR_lock_Context lock_context;
+ Watchdog_Header *header;
+
+ header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
+
+ _Watchdog_Set_CPU( the_watchdog, cpu );
+
+ _Watchdog_Per_CPU_acquire_critical( cpu, &lock_context );
+  _Watchdog_Insert( header, the_watchdog, expire );
+ _Watchdog_Per_CPU_release_critical( cpu, &lock_context );
+ return expire;
+}
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_remove(
+ Watchdog_Control *the_watchdog,
+ Per_CPU_Control *cpu,
+ Watchdog_Header *header
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Watchdog_Per_CPU_acquire_critical( cpu, &lock_context );
+  _Watchdog_Remove( header, the_watchdog );
+ _Watchdog_Per_CPU_release_critical( cpu, &lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_remove_monotonic(
+ Watchdog_Control *the_watchdog
+)
+{
+ Per_CPU_Control *cpu;
+
+ cpu = _Watchdog_Get_CPU( the_watchdog );
+ _Watchdog_Per_CPU_remove(
+ the_watchdog,
+ cpu,
+ &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ]
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_remove_realtime(
+ Watchdog_Control *the_watchdog
+)
+{
+ Per_CPU_Control *cpu;
+
+ cpu = _Watchdog_Get_CPU( the_watchdog );
+ _Watchdog_Per_CPU_remove(
+ the_watchdog,
+ cpu,
+ &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ]
+ );
+}
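+
+/*
+ * Usage sketch of the resulting arm/disarm lifecycle for a tick-based
+ * watchdog (watchdog and handler names are hypothetical):
+ *
+ *   Watchdog_Control timer;
+ *   Per_CPU_Control *cpu = _Per_CPU_Get_by_index( 0 );
+ *
+ *   _Watchdog_Preinitialize( &timer, cpu );
+ *   _Watchdog_Initialize( &timer, my_timeout_handler );
+ *   (void) _Watchdog_Per_CPU_insert_ticks( &timer, cpu, 100 );
+ *
+ *   ... my_timeout_handler() fires after 100 ticks unless ...
+ *
+ *   _Watchdog_Per_CPU_remove_monotonic( &timer );
+ */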
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_tickle_realtime(
+ Per_CPU_Control *cpu,
+ uint64_t now
+)
+{
+ ISR_lock_Context lock_context;
+
+ _ISR_lock_ISR_disable_and_acquire( &cpu->Watchdog.Lock, &lock_context );
+ _Watchdog_Tickle(
+ &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ],
+ now,
+ &cpu->Watchdog.Lock,
+ &lock_context
+ );
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/cpukit/include/rtems/score/wkspace.h b/cpukit/include/rtems/score/wkspace.h
new file mode 100644
index 0000000000..3676ff28c4
--- /dev/null
+++ b/cpukit/include/rtems/score/wkspace.h
@@ -0,0 +1,138 @@
+/**
+ * @file rtems/score/wkspace.h
+ *
+ * @brief Information Related to the RAM Workspace
+ *
+ * This include file contains information related to the
+ * RAM Workspace. This Handler provides mechanisms which can be used to
+ * define, initialize and manipulate the workspace.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2009.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_WKSPACE_H
+#define _RTEMS_SCORE_WKSPACE_H
+
+#include <rtems/score/heap.h>
+#include <rtems/score/interr.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreWorkspace Workspace Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality related to the management of
+ * the RTEMS Executive Workspace.
+ */
+/**@{*/
+
+/**
+ * @brief Executive workspace control.
+ *
+ * This is the heap control structure used to manage the RTEMS Executive
+ * Workspace.
+ */
+extern Heap_Control _Workspace_Area;
+
+/**
+ * @brief Initilize workspace handler.
+ *
+ * This routine performs the initialization necessary for this handler.
+ */
+void _Workspace_Handler_initialization(
+ Heap_Area *areas,
+ size_t area_count,
+ Heap_Initialization_or_extend_handler extend
+);
+
+/**
+ * @brief Allocate memory from workspace.
+ *
+ * This routine returns the address of a block of memory of @a size
+ * bytes.  If a block of the appropriate size cannot be allocated
+ * from the workspace, then NULL is returned.
+ *
+ * @param size is the requested size
+ *
+ * @retval NULL Not enough resources.
+ * @retval other A pointer to the requested memory.
+ */
+void *_Workspace_Allocate(
+ size_t size
+);
+
+/**
+ * @brief Allocate aligned memory from workspace.
+ *
+ * @param[in] size The size of the requested memory.
+ * @param[in] alignment The alignment of the requested memory.
+ *
+ * @retval NULL Not enough resources.
+ * @retval other The memory area begin.
+ */
+void *_Workspace_Allocate_aligned( size_t size, size_t alignment );
+
+/**
+ * @brief Free memory to the workspace.
+ *
+ * This function frees the specified block of memory. If the block
+ * belongs to the Workspace and can be successfully freed, then
+ * true is returned. Otherwise false is returned.
+ *
+ * @param block is the memory to free
+ *
+ * @note If @a block is equal to NULL, then the request is ignored.
+ * This allows the caller to not worry about whether or not
+ * a pointer is NULL.
+ */
+void _Workspace_Free(
+ void *block
+);
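+
+/*
+ * Usage sketch of the allocate/free contract described above (the size is
+ * illustrative):
+ *
+ *   void *block = _Workspace_Allocate( 128 );
+ *
+ *   if ( block != NULL ) {
+ *     ... use the block ...
+ *     _Workspace_Free( block );
+ *   }
+ *
+ * Note that _Workspace_Free( NULL ) is allowed and ignored.
+ */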
+
+/**
+ * @brief Workspace allocate or fail with fatal error.
+ *
+ * This routine returns the address of a block of memory of @a size
+ * bytes.  If a block of the appropriate size cannot be allocated
+ * from the workspace, then the internal error handler is invoked and this
+ * routine does not return.
+ *
+ * @param[in] size is the desired number of bytes to allocate
+ *
+ * @return The starting address of the allocated memory.
+ */
+void *_Workspace_Allocate_or_fatal_error(
+ size_t size
+);
+
+/**
+ * @brief Duplicates string with memory from the workspace.
+ *
+ * @param[in] string is the pointer to a zero terminated string.
+ * @param[in] len is the length of the string (equal to strlen(string)).
+ *
+ * @retval NULL Not enough memory.
+ * @retval other Duplicated string.
+ */
+char *_Workspace_String_duplicate(
+ const char *string,
+ size_t len
+);
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */