Diffstat (limited to 'cpukit/include/rtems/score')
-rw-r--r--  cpukit/include/rtems/score/address.h | 14
-rw-r--r--  cpukit/include/rtems/score/assert.h | 19
-rw-r--r--  cpukit/include/rtems/score/atomic.h | 962
-rw-r--r--  cpukit/include/rtems/score/basedefs.h | 151
-rw-r--r--  cpukit/include/rtems/score/chain.h | 24
-rw-r--r--  cpukit/include/rtems/score/chainimpl.h | 84
-rw-r--r--  cpukit/include/rtems/score/copyrt.h | 2
-rw-r--r--  cpukit/include/rtems/score/corebarrierimpl.h | 10
-rw-r--r--  cpukit/include/rtems/score/coremsgbuffer.h | 2
-rw-r--r--  cpukit/include/rtems/score/coremsgimpl.h | 28
-rw-r--r--  cpukit/include/rtems/score/coremuteximpl.h | 38
-rw-r--r--  cpukit/include/rtems/score/corerwlockimpl.h | 6
-rw-r--r--  cpukit/include/rtems/score/coresemimpl.h | 12
-rw-r--r--  cpukit/include/rtems/score/cpustdatomic.h | 984
-rw-r--r--  cpukit/include/rtems/score/freechainimpl.h | 8
-rw-r--r--  cpukit/include/rtems/score/hash.h | 10
-rw-r--r--  cpukit/include/rtems/score/heap.h | 8
-rw-r--r--  cpukit/include/rtems/score/heapimpl.h | 54
-rw-r--r--  cpukit/include/rtems/score/interr.h | 5
-rw-r--r--  cpukit/include/rtems/score/io.h | 150
-rw-r--r--  cpukit/include/rtems/score/isr.h | 14
-rw-r--r--  cpukit/include/rtems/score/isrlock.h | 4
-rw-r--r--  cpukit/include/rtems/score/memory.h | 40
-rw-r--r--  cpukit/include/rtems/score/mpci.h | 3
-rw-r--r--  cpukit/include/rtems/score/mpciimpl.h | 5
-rw-r--r--  cpukit/include/rtems/score/mrsp.h | 2
-rw-r--r--  cpukit/include/rtems/score/mrspimpl.h | 36
-rw-r--r--  cpukit/include/rtems/score/muteximpl.h | 2
-rw-r--r--  cpukit/include/rtems/score/object.h | 8
-rw-r--r--  cpukit/include/rtems/score/objectdata.h | 4
-rw-r--r--  cpukit/include/rtems/score/objectimpl.h | 77
-rw-r--r--  cpukit/include/rtems/score/onceimpl.h | 2
-rw-r--r--  cpukit/include/rtems/score/percpu.h | 16
-rw-r--r--  cpukit/include/rtems/score/percpudata.h | 2
-rw-r--r--  cpukit/include/rtems/score/priority.h | 13
-rw-r--r--  cpukit/include/rtems/score/prioritybitmapimpl.h | 22
-rw-r--r--  cpukit/include/rtems/score/priorityimpl.h | 66
-rw-r--r--  cpukit/include/rtems/score/processormask.h | 381
-rw-r--r--  cpukit/include/rtems/score/processormaskimpl.h | 437
-rw-r--r--  cpukit/include/rtems/score/profiling.h | 7
-rw-r--r--  cpukit/include/rtems/score/protectedheap.h | 75
-rw-r--r--  cpukit/include/rtems/score/rbtree.h | 38
-rw-r--r--  cpukit/include/rtems/score/schedulercbsimpl.h | 6
-rw-r--r--  cpukit/include/rtems/score/scheduleredfimpl.h | 39
-rw-r--r--  cpukit/include/rtems/score/scheduleredfsmp.h | 2
-rw-r--r--  cpukit/include/rtems/score/schedulerimpl.h | 129
-rw-r--r--  cpukit/include/rtems/score/schedulernode.h | 2
-rw-r--r--  cpukit/include/rtems/score/schedulernodeimpl.h | 22
-rw-r--r--  cpukit/include/rtems/score/schedulerpriority.h | 8
-rw-r--r--  cpukit/include/rtems/score/schedulerpriorityimpl.h | 56
-rw-r--r--  cpukit/include/rtems/score/schedulerprioritysmp.h | 2
-rw-r--r--  cpukit/include/rtems/score/schedulerprioritysmpimpl.h | 2
-rw-r--r--  cpukit/include/rtems/score/schedulersimpleimpl.h | 30
-rw-r--r--  cpukit/include/rtems/score/schedulersimplesmp.h | 2
-rw-r--r--  cpukit/include/rtems/score/schedulersmp.h | 2
-rw-r--r--  cpukit/include/rtems/score/schedulersmpimpl.h | 2
-rw-r--r--  cpukit/include/rtems/score/schedulerstrongapa.h | 2
-rw-r--r--  cpukit/include/rtems/score/scheduleruniimpl.h | 221
-rw-r--r--  cpukit/include/rtems/score/semaphoreimpl.h | 2
-rw-r--r--  cpukit/include/rtems/score/smpbarrier.h | 2
-rw-r--r--  cpukit/include/rtems/score/smpimpl.h | 21
-rw-r--r--  cpukit/include/rtems/score/smplock.h | 2
-rw-r--r--  cpukit/include/rtems/score/smplockmcs.h | 2
-rw-r--r--  cpukit/include/rtems/score/smplockseq.h | 2
-rw-r--r--  cpukit/include/rtems/score/smplockstats.h | 6
-rw-r--r--  cpukit/include/rtems/score/smplockticket.h | 2
-rw-r--r--  cpukit/include/rtems/score/stack.h | 75
-rw-r--r--  cpukit/include/rtems/score/stackimpl.h | 10
-rw-r--r--  cpukit/include/rtems/score/statesimpl.h | 18
-rw-r--r--  cpukit/include/rtems/score/status.h | 2
-rw-r--r--  cpukit/include/rtems/score/sysstate.h | 12
-rw-r--r--  cpukit/include/rtems/score/thread.h | 110
-rw-r--r--  cpukit/include/rtems/score/threadcpubudget.h | 2
-rw-r--r--  cpukit/include/rtems/score/threaddispatch.h | 23
-rw-r--r--  cpukit/include/rtems/score/threadidledata.h | 2
-rw-r--r--  cpukit/include/rtems/score/threadimpl.h | 250
-rw-r--r--  cpukit/include/rtems/score/threadmp.h | 2
-rw-r--r--  cpukit/include/rtems/score/threadqimpl.h | 70
-rw-r--r--  cpukit/include/rtems/score/threadqops.h | 2
-rw-r--r--  cpukit/include/rtems/score/timecounter.h | 2
-rw-r--r--  cpukit/include/rtems/score/timecounterimpl.h | 2
-rw-r--r--  cpukit/include/rtems/score/timespec.h | 2
-rw-r--r--  cpukit/include/rtems/score/timestampimpl.h | 26
-rw-r--r--  cpukit/include/rtems/score/tls.h | 286
-rw-r--r--  cpukit/include/rtems/score/todimpl.h | 19
-rw-r--r--  cpukit/include/rtems/score/userextimpl.h | 4
-rw-r--r--  cpukit/include/rtems/score/watchdogimpl.h | 52
-rw-r--r--  cpukit/include/rtems/score/wkspacedata.h | 2
-rw-r--r--  cpukit/include/rtems/score/wkspaceinitmulti.h | 4
-rw-r--r--  cpukit/include/rtems/score/wkspaceinitone.h | 4
90 files changed, 2639 insertions, 2733 deletions
diff --git a/cpukit/include/rtems/score/address.h b/cpukit/include/rtems/score/address.h
index 02e7c5f954..e6a06f5996 100644
--- a/cpukit/include/rtems/score/address.h
+++ b/cpukit/include/rtems/score/address.h
@@ -69,7 +69,7 @@ extern "C" {
*
* @return This method returns the resulting address.
*/
-RTEMS_INLINE_ROUTINE void *_Addresses_Add_offset (
+static inline void *_Addresses_Add_offset (
const void *base,
uintptr_t offset
)
@@ -90,7 +90,7 @@ RTEMS_INLINE_ROUTINE void *_Addresses_Add_offset (
* @return This method returns the resulting address.
*/
-RTEMS_INLINE_ROUTINE void *_Addresses_Subtract_offset (
+static inline void *_Addresses_Subtract_offset (
const void *base,
uintptr_t offset
)
@@ -109,7 +109,7 @@ RTEMS_INLINE_ROUTINE void *_Addresses_Subtract_offset (
*
* @return This method returns the resulting address.
*/
-RTEMS_INLINE_ROUTINE intptr_t _Addresses_Subtract(
+static inline intptr_t _Addresses_Subtract(
const void *left,
const void *right
)
@@ -129,7 +129,7 @@ RTEMS_INLINE_ROUTINE intptr_t _Addresses_Subtract(
* @retval true The @a address is aligned.
* @retval false The @a address is not aligned.
*/
-RTEMS_INLINE_ROUTINE bool _Addresses_Is_aligned(
+static inline bool _Addresses_Is_aligned(
const void *address
)
{
@@ -152,7 +152,7 @@ RTEMS_INLINE_ROUTINE bool _Addresses_Is_aligned(
* @retval true The @a address is within the memory range specified
* @retval false The @a address is not within the memory range specified.
*/
-RTEMS_INLINE_ROUTINE bool _Addresses_Is_in_range (
+static inline bool _Addresses_Is_in_range (
const void *address,
const void *base,
const void *limit
@@ -174,7 +174,7 @@ RTEMS_INLINE_ROUTINE bool _Addresses_Is_in_range (
*
* @return Returns the aligned address.
*/
-RTEMS_INLINE_ROUTINE void *_Addresses_Align_up(
+static inline void *_Addresses_Align_up(
void *address,
size_t alignment
)
@@ -196,7 +196,7 @@ RTEMS_INLINE_ROUTINE void *_Addresses_Align_up(
*
* @return Returns the aligned address.
*/
-RTEMS_INLINE_ROUTINE void *_Addresses_Align_down(
+static inline void *_Addresses_Align_down(
void *address,
size_t alignment
)
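
For illustration, a minimal sketch of how these address helpers might be used after the conversion to static inline; the function name below is hypothetical and CPU_ALIGNMENT is assumed to be available through the header's existing includes:

#include <rtems/score/address.h>

/* Hypothetical helper, not part of this change. */
static void *example_next_block( void *base, uintptr_t offset )
{
  void *p = _Addresses_Add_offset( base, offset );

  /* Round the result up to the CPU alignment if necessary. */
  if ( !_Addresses_Is_aligned( p ) ) {
    p = _Addresses_Align_up( p, CPU_ALIGNMENT );
  }

  return p;
}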
diff --git a/cpukit/include/rtems/score/assert.h b/cpukit/include/rtems/score/assert.h
index 9eeccacf76..ad92a585fd 100644
--- a/cpukit/include/rtems/score/assert.h
+++ b/cpukit/include/rtems/score/assert.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2013, 2014 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -71,22 +71,7 @@ extern "C" {
* @note This is based on the code in newlib's assert.h.
*/
#ifndef __RTEMS_ASSERT_FUNCTION
- /* Use g++'s demangled names in C++. */
- #if defined __cplusplus && defined __GNUC__
- #define __RTEMS_ASSERT_FUNCTION __PRETTY_FUNCTION__
-
- /* C99 requires the use of __func__. */
- #elif __STDC_VERSION__ >= 199901L
- #define __RTEMS_ASSERT_FUNCTION __func__
-
- /* Older versions of gcc don't have __func__ but can use __FUNCTION__. */
- #elif __GNUC__ >= 2
- #define __RTEMS_ASSERT_FUNCTION __FUNCTION__
-
- /* failed to detect __func__ support. */
- #else
- #define __RTEMS_ASSERT_FUNCTION ((char *) 0)
- #endif
+ #define __RTEMS_ASSERT_FUNCTION RTEMS_FUNCTION_NAME
#endif /* !__RTEMS_ASSERT_FUNCTION */
#if !defined( RTEMS_SCHEDSIM )
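
A brief illustration of the effect of this simplification, assuming the _Assert() macro provided by this header on RTEMS_DEBUG builds (the function below is hypothetical):

#include <rtems/score/assert.h>

static void example_check( int value )
{
  /* In an RTEMS_DEBUG build a failed check reports __FILE__, __LINE__ and
   * __RTEMS_ASSERT_FUNCTION, which now simply expands to RTEMS_FUNCTION_NAME
   * (__PRETTY_FUNCTION__ for C++ with GCC, __func__ otherwise). */
  _Assert( value >= 0 );
}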
diff --git a/cpukit/include/rtems/score/atomic.h b/cpukit/include/rtems/score/atomic.h
index 161b0ec03e..9ef1779e60 100644
--- a/cpukit/include/rtems/score/atomic.h
+++ b/cpukit/include/rtems/score/atomic.h
@@ -10,6 +10,7 @@
*/
/*
+ * Copyright (C) 2015 embedded brains GmbH & Co. KG
* COPYRIGHT (c) 2012-2013 Deng Hengyi.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,7 +38,7 @@
#ifndef _RTEMS_SCORE_ATOMIC_H
#define _RTEMS_SCORE_ATOMIC_H
-#include <rtems/score/cpuatomic.h>
+#include <rtems/score/basedefs.h>
/**
* @defgroup RTEMSScoreAtomic Atomic Operations
@@ -54,122 +55,935 @@
* @{
*/
-typedef CPU_atomic_Uint Atomic_Uint;
+#ifdef RTEMS_SMP
+ #if defined(__cplusplus) \
+ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9))
+ /*
+ * The GCC 4.9 ships its own <stdatomic.h> which is not C++ compatible. The
+ * suggested solution was to include <atomic> in case C++ is used. This works
+ * at least with GCC 4.9. See also:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60932
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60940
+ */
+ #include <atomic>
+ #define _RTEMS_SCORE_ATOMIC_USE_ATOMIC
+ #else
+ #include <stdatomic.h>
+ #define _RTEMS_SCORE_ATOMIC_USE_STDATOMIC
+ #endif
+#else
+ #include <rtems/score/isrlevel.h>
+#endif
-typedef CPU_atomic_Ulong Atomic_Ulong;
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
-typedef CPU_atomic_Uintptr Atomic_Uintptr;
+typedef std::atomic_uint Atomic_Uint;
-typedef CPU_atomic_Flag Atomic_Flag;
+typedef std::atomic_ulong Atomic_Ulong;
-typedef CPU_atomic_Order Atomic_Order;
+typedef std::atomic_uintptr_t Atomic_Uintptr;
-#define ATOMIC_ORDER_RELAXED CPU_ATOMIC_ORDER_RELAXED
+typedef std::atomic_flag Atomic_Flag;
-#define ATOMIC_ORDER_ACQUIRE CPU_ATOMIC_ORDER_ACQUIRE
+typedef std::memory_order Atomic_Order;
-#define ATOMIC_ORDER_RELEASE CPU_ATOMIC_ORDER_RELEASE
+#define ATOMIC_ORDER_RELAXED std::memory_order_relaxed
-#define ATOMIC_ORDER_ACQ_REL CPU_ATOMIC_ORDER_ACQ_REL
+#define ATOMIC_ORDER_ACQUIRE std::memory_order_acquire
-#define ATOMIC_ORDER_SEQ_CST CPU_ATOMIC_ORDER_SEQ_CST
+#define ATOMIC_ORDER_RELEASE std::memory_order_release
-#define ATOMIC_INITIALIZER_UINT( value ) CPU_ATOMIC_INITIALIZER_UINT( value )
+#define ATOMIC_ORDER_ACQ_REL std::memory_order_acq_rel
-#define ATOMIC_INITIALIZER_ULONG( value ) CPU_ATOMIC_INITIALIZER_ULONG( value )
+#define ATOMIC_ORDER_SEQ_CST std::memory_order_seq_cst
-#define ATOMIC_INITIALIZER_UINTPTR( value ) CPU_ATOMIC_INITIALIZER_UINTPTR( value )
+#define ATOMIC_INITIALIZER_UINT( value ) ATOMIC_VAR_INIT( value )
-#define ATOMIC_INITIALIZER_FLAG CPU_ATOMIC_INITIALIZER_FLAG
+#define ATOMIC_INITIALIZER_ULONG( value ) ATOMIC_VAR_INIT( value )
-#define _Atomic_Fence( order ) _CPU_atomic_Fence( order )
+#define ATOMIC_INITIALIZER_UINTPTR( value ) ATOMIC_VAR_INIT( value )
-#define _Atomic_Init_uint( obj, desired ) \
- _CPU_atomic_Init_uint( obj, desired )
+#define ATOMIC_INITIALIZER_FLAG ATOMIC_FLAG_INIT
-#define _Atomic_Init_ulong( obj, desired ) \
- _CPU_atomic_Init_ulong( obj, desired )
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
-#define _Atomic_Init_uintptr( obj, desired ) \
- _CPU_atomic_Init_uintptr( obj, desired )
+typedef atomic_uint Atomic_Uint;
-#define _Atomic_Load_uint( obj, order ) \
- _CPU_atomic_Load_uint( obj, order )
+typedef atomic_ulong Atomic_Ulong;
-#define _Atomic_Load_ulong( obj, order ) \
- _CPU_atomic_Load_ulong( obj, order )
+typedef atomic_uintptr_t Atomic_Uintptr;
-#define _Atomic_Load_uintptr( obj, order ) \
- _CPU_atomic_Load_uintptr( obj, order )
+typedef atomic_flag Atomic_Flag;
-#define _Atomic_Store_uint( obj, desr, order ) \
- _CPU_atomic_Store_uint( obj, desr, order )
+typedef memory_order Atomic_Order;
-#define _Atomic_Store_ulong( obj, desr, order ) \
- _CPU_atomic_Store_ulong( obj, desr, order )
+#define ATOMIC_ORDER_RELAXED memory_order_relaxed
-#define _Atomic_Store_uintptr( obj, desr, order ) \
- _CPU_atomic_Store_uintptr( obj, desr, order )
+#define ATOMIC_ORDER_ACQUIRE memory_order_acquire
-#define _Atomic_Fetch_add_uint( obj, arg, order ) \
- _CPU_atomic_Fetch_add_uint( obj, arg, order )
+#define ATOMIC_ORDER_RELEASE memory_order_release
-#define _Atomic_Fetch_add_ulong( obj, arg, order ) \
- _CPU_atomic_Fetch_add_ulong( obj, arg, order )
+#define ATOMIC_ORDER_ACQ_REL memory_order_acq_rel
-#define _Atomic_Fetch_add_uintptr( obj, arg, order ) \
- _CPU_atomic_Fetch_add_uintptr( obj, arg, order )
+#define ATOMIC_ORDER_SEQ_CST memory_order_seq_cst
-#define _Atomic_Fetch_sub_uint( obj, arg, order ) \
- _CPU_atomic_Fetch_sub_uint( obj, arg, order )
+#define ATOMIC_INITIALIZER_UINT( value ) ATOMIC_VAR_INIT( value )
-#define _Atomic_Fetch_sub_ulong( obj, arg, order ) \
- _CPU_atomic_Fetch_sub_ulong( obj, arg, order )
+#define ATOMIC_INITIALIZER_ULONG( value ) ATOMIC_VAR_INIT( value )
-#define _Atomic_Fetch_sub_uintptr( obj, arg, order ) \
- _CPU_atomic_Fetch_sub_uintptr( obj, arg, order )
+#define ATOMIC_INITIALIZER_UINTPTR( value ) ATOMIC_VAR_INIT( value )
-#define _Atomic_Fetch_or_uint( obj, arg, order ) \
- _CPU_atomic_Fetch_or_uint( obj, arg, order )
+#define ATOMIC_INITIALIZER_FLAG ATOMIC_FLAG_INIT
-#define _Atomic_Fetch_or_ulong( obj, arg, order ) \
- _CPU_atomic_Fetch_or_ulong( obj, arg, order )
+#else
-#define _Atomic_Fetch_or_uintptr( obj, arg, order ) \
- _CPU_atomic_Fetch_or_uintptr( obj, arg, order )
+typedef unsigned int Atomic_Uint;
-#define _Atomic_Fetch_and_uint( obj, arg, order ) \
- _CPU_atomic_Fetch_and_uint( obj, arg, order )
+typedef unsigned long Atomic_Ulong;
-#define _Atomic_Fetch_and_ulong( obj, arg, order ) \
- _CPU_atomic_Fetch_and_ulong( obj, arg, order )
+typedef uintptr_t Atomic_Uintptr;
-#define _Atomic_Fetch_and_uintptr( obj, arg, order ) \
- _CPU_atomic_Fetch_and_uintptr( obj, arg, order )
+typedef bool Atomic_Flag;
-#define _Atomic_Exchange_uint( obj, desr, order ) \
- _CPU_atomic_Exchange_uint( obj, desr, order )
+typedef int Atomic_Order;
-#define _Atomic_Exchange_ulong( obj, desr, order ) \
- _CPU_atomic_Exchange_ulong( obj, desr, order )
+#define ATOMIC_ORDER_RELAXED 0
-#define _Atomic_Exchange_uintptr( obj, desr, order ) \
- _CPU_atomic_Exchange_uintptr( obj, desr, order )
+#define ATOMIC_ORDER_ACQUIRE 2
-#define _Atomic_Compare_exchange_uint( obj, expected, desired, succ, fail ) \
- _CPU_atomic_Compare_exchange_uint( obj, expected, desired, succ, fail )
+#define ATOMIC_ORDER_RELEASE 3
-#define _Atomic_Compare_exchange_ulong( obj, expected, desired, succ, fail ) \
- _CPU_atomic_Compare_exchange_ulong( obj, expected, desired, succ, fail )
+#define ATOMIC_ORDER_ACQ_REL 4
-#define _Atomic_Compare_exchange_uintptr( obj, expected, desired, succ, fail ) \
- _CPU_atomic_Compare_exchange_uintptr( obj, expected, desired, succ, fail )
+#define ATOMIC_ORDER_SEQ_CST 5
-#define _Atomic_Flag_clear( obj, order ) \
- _CPU_atomic_Flag_clear( obj, order )
+#define ATOMIC_INITIALIZER_UINT( value ) ( value )
-#define _Atomic_Flag_test_and_set( obj, order ) \
- _CPU_atomic_Flag_test_and_set( obj, order )
+#define ATOMIC_INITIALIZER_ULONG( value ) ( value )
+
+#define ATOMIC_INITIALIZER_UINTPTR( value ) ( value )
+
+#define ATOMIC_INITIALIZER_FLAG false
+
+#endif
+
+/**
+ * @brief Sets up a CPU fence.
+ *
+ * @param order The order for the fence.
+ */
+static inline void _Atomic_Fence( Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ std::atomic_thread_fence( order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ atomic_thread_fence( order );
+#else
+ (void) order;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+#endif
+}
+
+/**
+ * @brief Initializes Uint.
+ *
+ * @param[out] obj The CPU atomic Uint to initialize.
+ * @param desired The desired value for @a obj.
+ */
+static inline void _Atomic_Init_uint( Atomic_Uint *obj, unsigned int desired )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ atomic_init( obj, desired );
+#else
+ *obj = desired;
+#endif
+}
+
+/**
+ * @brief Initializes Ulong.
+ *
+ * @param[out] obj The CPU atomic Ulong to initialize.
+ * @param desired The desired value for @a obj.
+ */
+static inline void _Atomic_Init_ulong( Atomic_Ulong *obj, unsigned long desired )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ atomic_init( obj, desired );
+#else
+ *obj = desired;
+#endif
+}
+
+/**
+ * @brief Initializes Uintptr.
+ *
+ * @param[out] obj The CPU atomic Uintptr to initialize.
+ * @param desired The desired value for @a obj.
+ */
+static inline void _Atomic_Init_uintptr( Atomic_Uintptr *obj, uintptr_t desired )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ obj->store( desired );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ atomic_init( obj, desired );
+#else
+ *obj = desired;
+#endif
+}
+
+/**
+ * @brief Loads value of Uint considering the order.
+ *
+ * @param obj The CPU atomic Uint to get the value from.
+ * @param order The atomic order for getting the value.
+ *
+ * @return The value of @a obj considering the @a order.
+ */
+static inline unsigned int _Atomic_Load_uint( const Atomic_Uint *obj, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->load( order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_load_explicit( obj, order );
+#else
+ unsigned int val;
+
+ (void) order;
+ val = *obj;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Loads value of Ulong considering the order.
+ *
+ * @param obj The CPU atomic Ulong to get the value from.
+ * @param order The atomic order for getting the value.
+ *
+ * @return The value of @a obj considering the @a order.
+ */
+static inline unsigned long _Atomic_Load_ulong( const Atomic_Ulong *obj, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->load( order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_load_explicit( obj, order );
+#else
+ unsigned long val;
+
+ (void) order;
+ val = *obj;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Loads value of Uintptr considering the order.
+ *
+ * @param obj The CPU atomic Uintptr to get the value from.
+ * @param order The atomic order for getting the value.
+ *
+ * @return The value of @a obj considering the @a order.
+ */
+static inline uintptr_t _Atomic_Load_uintptr( const Atomic_Uintptr *obj, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->load( order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_load_explicit( obj, order );
+#else
+ uintptr_t val;
+
+ (void) order;
+ val = *obj;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Stores a value to Uint considering the order.
+ *
+ * @param[out] obj The CPU atomic Uint to store a value in.
+ * @param desired The desired value for @a obj.
+ * @param order The atomic order for storing the value.
+ */
+static inline void _Atomic_Store_uint( Atomic_Uint *obj, unsigned int desired, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ obj->store( desired, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ atomic_store_explicit( obj, desired, order );
+#else
+ (void) order;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ *obj = desired;
+#endif
+}
+
+/**
+ * @brief Stores a value to Ulong considering the order.
+ *
+ * @param[out] obj The CPU atomic Ulong to store a value in.
+ * @param desired The desired value for @a obj.
+ * @param order The atomic order for storing the value.
+ */
+static inline void _Atomic_Store_ulong( Atomic_Ulong *obj, unsigned long desired, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ obj->store( desired, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ atomic_store_explicit( obj, desired, order );
+#else
+ (void) order;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ *obj = desired;
+#endif
+}
+
+/**
+ * @brief Stores a value to Uintptr considering the order.
+ *
+ * @param[out] obj The CPU atomic Uintptr to store a value in.
+ * @param desired The desired value for @a obj.
+ * @param order The atomic order for storing the value.
+ */
+static inline void _Atomic_Store_uintptr( Atomic_Uintptr *obj, uintptr_t desired, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ obj->store( desired, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ atomic_store_explicit( obj, desired, order );
+#else
+ (void) order;
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ *obj = desired;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uint and adds a value to the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Uint to get the value from and add @a arg to.
+ * @param arg The value to add to @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the addition of @a arg.
+ */
+static inline unsigned int _Atomic_Fetch_add_uint( Atomic_Uint *obj, unsigned int arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_add( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_add_explicit( obj, arg, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val + arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Ulong and adds a value to the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Ulong to get the value from and add @a arg to.
+ * @param arg The value to add to @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the addition of @a arg.
+ */
+static inline unsigned long _Atomic_Fetch_add_ulong( Atomic_Ulong *obj, unsigned long arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_add( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_add_explicit( obj, arg, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val + arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uintptr and adds a value to the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Uintptr to get the value from and add @a arg to.
+ * @param arg The value to add to @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the addition of @a arg.
+ */
+static inline uintptr_t _Atomic_Fetch_add_uintptr( Atomic_Uintptr *obj, uintptr_t arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_add( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_add_explicit( obj, arg, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val + arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uint and subtracts a value from the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Uint to get the value from and subtract @a arg from.
+ * @param arg The value to subtract from @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the subtraction of @a arg.
+ */
+static inline unsigned int _Atomic_Fetch_sub_uint( Atomic_Uint *obj, unsigned int arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_sub( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_sub_explicit( obj, arg, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val - arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Ulong and subtracts a value from the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Ulong to get the value from and subtract @a arg from.
+ * @param arg The value to subtract from @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the subtraction of @a arg.
+ */
+static inline unsigned long _Atomic_Fetch_sub_ulong( Atomic_Ulong *obj, unsigned long arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_sub( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_sub_explicit( obj, arg, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val - arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uintptr and subtracts a value from the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Uintptr to get the value from and subtract @a arg from.
+ * @param arg The value to subtract from @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the subtraction of @a arg.
+ */
+static inline uintptr_t _Atomic_Fetch_sub_uintptr( Atomic_Uintptr *obj, uintptr_t arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_sub( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_sub_explicit( obj, arg, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val - arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uint and ORs a value with the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Uint to get the value from and OR @a arg to.
+ * @param arg The value to OR with @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the OR operation with @a arg.
+ */
+static inline unsigned int _Atomic_Fetch_or_uint( Atomic_Uint *obj, unsigned int arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_or( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_or_explicit( obj, arg, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val | arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Ulong and ORs a value with the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Ulong to get the value from and OR @a arg to.
+ * @param arg The value to OR with @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the OR operation with @a arg.
+ */
+static inline unsigned long _Atomic_Fetch_or_ulong( Atomic_Ulong *obj, unsigned long arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_or( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_or_explicit( obj, arg, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val | arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uintptr and ORs a value with the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Uintptr to get the value from and OR @a arg to.
+ * @param arg The value to OR with @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the OR operation with @a arg.
+ */
+static inline uintptr_t _Atomic_Fetch_or_uintptr( Atomic_Uintptr *obj, uintptr_t arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_or( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_or_explicit( obj, arg, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val | arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uint and ANDs a value with the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Uint to get the value from and AND @a arg to.
+ * @param arg The value to AND with @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the AND operation with @a arg.
+ */
+static inline unsigned int _Atomic_Fetch_and_uint( Atomic_Uint *obj, unsigned int arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_and( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_and_explicit( obj, arg, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val & arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Ulong and ANDs a value with the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Ulong to get the value from and AND @a arg to.
+ * @param arg The value to AND with @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the AND operation with @a arg.
+ */
+static inline unsigned long _Atomic_Fetch_and_ulong( Atomic_Ulong *obj, unsigned long arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_and( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_and_explicit( obj, arg, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val & arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uintptr and ANDs a value with the stored value.
+ *
+ * @param[in, out] obj The CPU atomic Uintptr to get the value from and AND @a arg to.
+ * @param arg The value to AND with @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the AND operation with @a arg.
+ */
+static inline uintptr_t _Atomic_Fetch_and_uintptr( Atomic_Uintptr *obj, uintptr_t arg, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->fetch_and( arg, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_fetch_and_explicit( obj, arg, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = val & arg;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uint and sets its value.
+ *
+ * @param[in, out] obj The CPU atomic Uint to get the value from and set the value to @a desired.
+ * @param desired The value to set for @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the exchange with @a desired.
+ */
+static inline unsigned int _Atomic_Exchange_uint( Atomic_Uint *obj, unsigned int desired, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->exchange( desired, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_exchange_explicit( obj, desired, order );
+#else
+ unsigned int val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = desired;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Ulong and sets its value.
+ *
+ * @param[in, out] obj The CPU atomic Ulong to get the value from and set the value to @a desired.
+ * @param desired The value to set for @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the exchange with @a desired.
+ */
+static inline unsigned long _Atomic_Exchange_ulong( Atomic_Ulong *obj, unsigned long desired, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->exchange( desired, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_exchange_explicit( obj, desired, order );
+#else
+ unsigned long val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = desired;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Fetches current value of Uintptr and sets its value.
+ *
+ * @param[in, out] obj The CPU atomic Uintptr to get the value from and set the value to @a desired.
+ * @param desired The value to set for @a obj.
+ * @param order The atomic order for the operation.
+ *
+ * @return The value of @a obj prior to the exchange with @a desired.
+ */
+static inline uintptr_t _Atomic_Exchange_uintptr( Atomic_Uintptr *obj, uintptr_t desired, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->exchange( desired, order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_exchange_explicit( obj, desired, order );
+#else
+ uintptr_t val;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ val = *obj;
+ *obj = desired;
+ _ISR_Local_enable( level );
+
+ return val;
+#endif
+}
+
+/**
+ * @brief Checks if value of Uint is as expected.
+ *
+ * This method checks if the value of @a obj is equal to the value of @a expected. If
+ * this is the case, the value of @a obj is changed to @a desired. Otherwise, the value
+ * of @a expected is changed to the actual value of @a obj.
+ *
+ * @param[in, out] obj The CPU atomic Uint to operate upon.
+ * @param[in, out] expected The expected value of @a obj. If @a obj has a different
+ * value, @a expected is changed to the actual value of @a obj.
+ * @param desired The new value of @a obj if the old value of @a obj was as expected.
+ * @param succ The order if it is successful.
+ * @param fail The order if it fails.
+ *
+ * @retval true The old value of @a obj was as expected.
+ * @retval false The old value of @a obj was not as expected.
+ */
+static inline bool _Atomic_Compare_exchange_uint( Atomic_Uint *obj, unsigned int *expected, unsigned int desired, Atomic_Order succ, Atomic_Order fail )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->compare_exchange_strong( *expected, desired, succ, fail );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
+#else
+ bool success;
+ ISR_Level level;
+ unsigned int actual;
+
+ (void) succ;
+ (void) fail;
+ _ISR_Local_disable( level );
+ actual = *obj;
+ success = ( actual == *expected );
+ if ( success ) {
+ *obj = desired;
+ } else {
+ *expected = actual;
+ }
+ _ISR_Local_enable( level );
+
+ return success;
+#endif
+}
+
+/**
+ * @brief Checks if value of Ulong is as expected.
+ *
+ * This method checks if the value of @a obj is equal to the value of @a expected. If
+ * this is the case, the value of @a obj is changed to @a desired. Otherwise, the value
+ * of @a expected is changed to the actual value of @a obj.
+ *
+ * @param[in, out] obj The CPU atomic Ulong to operate upon.
+ * @param[in, out] expected The expected value of @a obj. If @a obj has a different
+ * value, @a expected is changed to the actual value of @a obj.
+ * @param desired The new value of @a obj if the old value of @a obj was as expected.
+ * @param succ The order if it is successful.
+ * @param fail The order if it fails.
+ *
+ * @retval true The old value of @a obj was as expected.
+ * @retval false The old value of @a obj was not as expected.
+ */
+static inline bool _Atomic_Compare_exchange_ulong( Atomic_Ulong *obj, unsigned long *expected, unsigned long desired, Atomic_Order succ, Atomic_Order fail )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->compare_exchange_strong( *expected, desired, succ, fail );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
+#else
+ bool success;
+ ISR_Level level;
+ unsigned long actual;
+
+ (void) succ;
+ (void) fail;
+ _ISR_Local_disable( level );
+ actual = *obj;
+ success = ( actual == *expected );
+ if ( success ) {
+ *obj = desired;
+ } else {
+ *expected = actual;
+ }
+ _ISR_Local_enable( level );
+
+ return success;
+#endif
+}
+
+/**
+ * @brief Checks if value of Uintptr is as expected.
+ *
+ * This method checks if the value of @a obj is equal to the value of @a expected. If
+ * this is the case, the value of @a obj is changed to @a desired. Otherwise, the value
+ * of @a expected is changed to the actual value of @a obj.
+ *
+ * @param[in, out] obj The CPU atomic Uintptr to operate upon.
+ * @param[in, out] expected The expected value of @a obj. If @a obj has a different
+ * value, @a expected is changed to the actual value of @a obj.
+ * @param desired The new value of @a obj if the old value of @a obj was as expected.
+ * @param succ The order if it is successful.
+ * @param fail The order if it fails.
+ *
+ * @retval true The old value of @a obj was as expected.
+ * @retval false The old value of @a obj was not as expected.
+ */
+static inline bool _Atomic_Compare_exchange_uintptr( Atomic_Uintptr *obj, uintptr_t *expected, uintptr_t desired, Atomic_Order succ, Atomic_Order fail )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->compare_exchange_strong( *expected, desired, succ, fail );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
+#else
+ bool success;
+ ISR_Level level;
+ uintptr_t actual;
+
+ (void) succ;
+ (void) fail;
+ _ISR_Local_disable( level );
+ actual = *obj;
+ success = ( actual == *expected );
+ if ( success ) {
+ *obj = desired;
+ } else {
+ *expected = actual;
+ }
+ _ISR_Local_enable( level );
+
+ return success;
+#endif
+}
+
+/**
+ * @brief Clears the atomic flag.
+ *
+ * @param[out] obj The atomic flag to be cleared.
+ * @param order The atomic order for the operation.
+ */
+static inline void _Atomic_Flag_clear( Atomic_Flag *obj, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ obj->clear( order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ atomic_flag_clear_explicit( obj, order );
+#else
+ (void) order;
+ *obj = false;
+#endif
+}
+
+/**
+ * @brief Returns current flag state and sets it.
+ *
+ * @param[in, out] obj The atomic flag to be set.
+ * @param order The atomic order for the operation.
+ *
+ * @retval true @a obj was set prior to this operation.
+ * @retval false @a obj was not set prior to this operation.
+ */
+static inline bool _Atomic_Flag_test_and_set( Atomic_Flag *obj, Atomic_Order order )
+{
+#if defined(_RTEMS_SCORE_ATOMIC_USE_ATOMIC)
+ return obj->test_and_set( order );
+#elif defined(_RTEMS_SCORE_ATOMIC_USE_STDATOMIC)
+ return atomic_flag_test_and_set_explicit( obj, order );
+#else
+ bool flag;
+ ISR_Level level;
+
+ (void) order;
+ _ISR_Local_disable( level );
+ flag = *obj;
+ *obj = true;
+ _ISR_Local_enable( level );
+
+ return flag;
+#endif
+}
/** @} */
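
A minimal usage sketch of the consolidated API (hypothetical reference-counter helpers, not part of this change); the same code builds against the C++11 <atomic>, C11 <stdatomic.h>, and uniprocessor ISR-level variants selected above:

#include <stdbool.h>
#include <rtems/score/atomic.h>

static Atomic_Uint example_ref_count = ATOMIC_INITIALIZER_UINT( 1 );

static void example_ref_acquire( void )
{
  /* Relaxed order is sufficient to take an additional reference. */
  _Atomic_Fetch_add_uint( &example_ref_count, 1, ATOMIC_ORDER_RELAXED );
}

static bool example_ref_release( void )
{
  /* Release order pairs with the acquire fence before the final cleanup. */
  unsigned int previous =
    _Atomic_Fetch_sub_uint( &example_ref_count, 1, ATOMIC_ORDER_RELEASE );

  if ( previous == 1 ) {
    _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );
    return true;
  }

  return false;
}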
diff --git a/cpukit/include/rtems/score/basedefs.h b/cpukit/include/rtems/score/basedefs.h
index 7a37299eee..010728d795 100644
--- a/cpukit/include/rtems/score/basedefs.h
+++ b/cpukit/include/rtems/score/basedefs.h
@@ -3,14 +3,16 @@
/**
* @file
*
+ * @ingroup RTEMSScore
+ *
* @brief This header file provides basic definitions used by the API and the
* implementation.
*/
/*
- * Copyright (C) 2014 Paval Pisa
+ * Copyright (C) 2014 Pavel Pisa
* Copyright (C) 2011, 2013 On-Line Applications Research Corporation (OAR)
- * Copyright (C) 2009, 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2009, 2023 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -73,9 +75,8 @@ extern "C" {
/**
* @defgroup RTEMSAPI API
*
- * @brief API
- *
- * This group contains the RTEMS Application Programming Interface (API).
+ * @brief This group contains the RTEMS Application Programming Interface
+ * (API).
*/
/* Generated from spec:/rtems/basedefs/if/group */
@@ -167,9 +168,9 @@ extern "C" {
*
* @return Returns the alignment requirement of the type.
*/
-#if __cplusplus >= 201103L
+#if defined( __cplusplus ) && __cplusplus >= 201103L
#define RTEMS_ALIGNOF( _type_name ) alignof( _type_name )
-#elif __STDC_VERSION__ >= 201112L
+#elif defined( __STDC_VERSION__ ) && __STDC_VERSION__ >= 201112L
#define RTEMS_ALIGNOF( _type_name ) _Alignof( _type_name )
#else
#define RTEMS_ALIGNOF( _type_name ) sizeof( _type_name )
@@ -353,6 +354,47 @@ extern "C" {
*/
#define RTEMS_EXPAND( _token ) _token
+/* Generated from spec:/rtems/basedefs/if/function-name */
+
+/**
+ * @ingroup RTEMSAPIBaseDefs
+ *
+ * @brief Expands to the name of the function containing the use of this
+ * define.
+ */
+#if defined(__cplusplus) && defined(__GNUC__)
+ #define RTEMS_FUNCTION_NAME __PRETTY_FUNCTION__
+#else
+ #define RTEMS_FUNCTION_NAME __func__
+#endif
+
+/* Generated from spec:/rtems/basedefs/if/no-return */
+
+/**
+ * @ingroup RTEMSAPIBaseDefs
+ *
+ * @brief Tells the compiler in a function declaration that this function does
+ * not return.
+ */
+#if defined( __cplusplus ) && __cplusplus >= 201103L
+ #define RTEMS_NO_RETURN [[noreturn]]
+#elif defined( __STDC_VERSION__ ) && __STDC_VERSION__ >= 201112L
+ #define RTEMS_NO_RETURN _Noreturn
+#elif defined(__GNUC__)
+ #define RTEMS_NO_RETURN __attribute__(( __noreturn__ ))
+#else
+ #define RTEMS_NO_RETURN
+#endif
+
+/* Generated from spec:/rtems/basedefs/if/compiler-no-return-attribute */
+
+/**
+ * @ingroup RTEMSAPIBaseDefs
+ *
+ * @brief Provided for backward compatibility.
+ */
+#define RTEMS_COMPILER_NO_RETURN_ATTRIBUTE RTEMS_NO_RETURN
+
/* Generated from spec:/rtems/basedefs/if/section */
/**
@@ -390,7 +432,7 @@ extern "C" {
*
* @brief Gets the pointer reference type.
*
- * @param _level is the pointer indirection level expressed in *.
+ * @param _level is the pointer indirection level expressed in ``*``.
*
* @param _target is the reference target type.
*
@@ -423,21 +465,33 @@ extern "C" {
*/
#define RTEMS_XCONCAT( _x, _y ) RTEMS_CONCAT( _x, _y )
-/* Generated from spec:/score/if/assert-unreachable */
+#if !defined(ASM) && defined(RTEMS_DEBUG)
+ /* Generated from spec:/score/basedefs/if/debug-unreachable */
-/**
- * @brief Asserts that this program point is unreachable.
- */
-#if defined(RTEMS_DEBUG)
- #define _Assert_Unreachable() _Assert( 0 )
-#else
- #define _Assert_Unreachable() do { } while ( 0 )
+ /**
+ * @ingroup RTEMSScore
+ *
+ * @brief Terminates the program with a failed assertion.
+ *
+ * @param file is the file name.
+ *
+ * @param line is the line of the file.
+ *
+ * @param func is the function name.
+ */
+ RTEMS_NO_RETURN void _Debug_Unreachable(
+ const char *file,
+ int line,
+ const char *func
+ );
#endif
#if !defined(ASM)
- /* Generated from spec:/score/if/dequalify-types-not-compatible */
+ /* Generated from spec:/score/basedefs/if/dequalify-types-not-compatible */
/**
+ * @ingroup RTEMSScore
+ *
* @brief A not implemented function to trigger compile time errors with an
* error message.
*/
@@ -457,7 +511,7 @@ extern "C" {
* @brief Performs a type cast which removes qualifiers without warnings to the
* type for the variable.
*
- * @param _ptr_level is the pointer indirection level expressed in *.
+ * @param _ptr_level is the pointer indirection level expressed in ``*``.
*
* @param _type is the target type of the cast.
*
@@ -468,12 +522,18 @@ extern "C" {
( const_cast<_type>( _var ) )
#elif defined(__GNUC__)
#define RTEMS_DEQUALIFY_DEPTHX( _ptr_level, _type, _var ) \
- __builtin_choose_expr( __builtin_types_compatible_p( \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p( \
RTEMS_TYPEOF_REFX( _ptr_level, _var ), \
RTEMS_TYPEOF_REFX( _ptr_level, _type ) \
- ) || __builtin_types_compatible_p( _type, void * ), \
- (_type) ( _var ), \
- RTEMS_DEQUALIFY_types_not_compatible() )
+ ) || \
+ __builtin_types_compatible_p( \
+ _type, \
+ void * \
+ ), \
+ (_type) ( _var ), \
+ RTEMS_DEQUALIFY_types_not_compatible() \
+ )
#else
#define RTEMS_DEQUALIFY_DEPTHX( _ptr_level, _type, _var ) \
( (_type) (uintptr_t) (const volatile void *)( _var ) )
@@ -602,33 +662,6 @@ extern "C" {
#define RTEMS_NO_INLINE
#endif
-/* Generated from spec:/rtems/basedefs/if/no-return */
-
-/**
- * @ingroup RTEMSAPIBaseDefs
- *
- * @brief Tells the compiler in a function declaration that this function does
- * not return.
- */
-#if __cplusplus >= 201103L
- #define RTEMS_NO_RETURN [[noreturn]]
-#elif __STDC_VERSION__ >= 201112L
- #define RTEMS_NO_RETURN _Noreturn
-#elif defined(__GNUC__)
- #define RTEMS_NO_RETURN __attribute__(( __noreturn__ ))
-#else
- #define RTEMS_NO_RETURN
-#endif
-
-/* Generated from spec:/rtems/basedefs/if/compiler-no-return-attribute */
-
-/**
- * @ingroup RTEMSAPIBaseDefs
- *
- * @brief Provided for backward compatibility.
- */
-#define RTEMS_COMPILER_NO_RETURN_ATTRIBUTE RTEMS_NO_RETURN
-
/* Generated from spec:/rtems/basedefs/if/noinit */
/**
@@ -800,9 +833,9 @@ extern "C" {
*
* @param _msg is the error message in case the static assertion fails.
*/
-#if __cplusplus >= 201103L
+#if defined( __cplusplus ) && __cplusplus >= 201103L
#define RTEMS_STATIC_ASSERT( _cond, _msg ) static_assert( _cond, # _msg )
-#elif __STDC_VERSION__ >= 201112L
+#elif defined( __STDC_VERSION__ ) && __STDC_VERSION__ >= 201112L
#define RTEMS_STATIC_ASSERT( _cond, _msg ) _Static_assert( _cond, # _msg )
#else
#define RTEMS_STATIC_ASSERT( _cond, _msg ) \
@@ -846,14 +879,13 @@ extern "C" {
*
* @brief Tells the compiler that this program point is unreachable.
*/
-#if defined(__GNUC__)
+#if defined(RTEMS_DEBUG)
#define RTEMS_UNREACHABLE() \
- do { \
- __builtin_unreachable(); \
- _Assert_Unreachable(); \
- } while ( 0 )
+ _Debug_Unreachable( __FILE__, __LINE__, RTEMS_FUNCTION_NAME )
+#elif defined(__GNUC__)
+ #define RTEMS_UNREACHABLE() __builtin_unreachable()
#else
- #define RTEMS_UNREACHABLE() _Assert_Unreachable()
+ #define RTEMS_UNREACHABLE() do { } while ( 0 )
#endif
/* Generated from spec:/rtems/basedefs/if/unused */
@@ -967,11 +999,12 @@ extern "C" {
*
* @param _value is the value of the symbol. On the value a macro expansion is
* performed and afterwards it is stringified. It shall expand to an integer
- * expression understood by the assembler.
+ * expression understood by the assembler. The value shall be representable
+ * in the code model of the target architecture.
*
* This macro shall be placed at file scope.
*/
-#if defined(__USER_LABEL_PREFIX__)
+#if defined(__GNUC__)
#define RTEMS_DEFINE_GLOBAL_SYMBOL( _name, _value ) \
__asm__( \
"\t.globl " RTEMS_XSTRING( RTEMS_SYMBOL_NAME( _name ) ) \
diff --git a/cpukit/include/rtems/score/chain.h b/cpukit/include/rtems/score/chain.h
index 7414fdb697..0b1ede75cf 100644
--- a/cpukit/include/rtems/score/chain.h
+++ b/cpukit/include/rtems/score/chain.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2010 embedded brains GmbH.
+ * Copyright (c) 2010 embedded brains GmbH & Co. KG
*
* COPYRIGHT (c) 1989-2006.
* On-Line Applications Research Corporation (OAR).
@@ -61,17 +61,7 @@ extern "C" {
*/
/**
- * @typedef Chain_Node
- *
- * This type definition promotes the name for the Chain Node used by
- * all RTEMS code. It is a separate type definition because a forward
- * reference is required to define it. See @ref Chain_Node_struct for
- * detailed information.
- */
-typedef struct Chain_Node_struct Chain_Node;
-
-/**
- * @struct Chain_Node_struct
+ * @brief This structure represents a chain node.
*
* This is used to manage each element (node) which is placed
* on a chain.
@@ -85,15 +75,15 @@ typedef struct Chain_Node_struct Chain_Node;
* so the user can cast the pointers back and forth.
*
*/
-struct Chain_Node_struct {
+typedef struct Chain_Node {
/** This points to the node after this one on this chain. */
- Chain_Node *next;
+ struct Chain_Node *next;
/** This points to the node immediate prior to this one on this chain. */
- Chain_Node *previous;
-};
+ struct Chain_Node *previous;
+} Chain_Node;
/**
- * @struct Chain_Control
+ * @brief This union represents a chain control block.
*
* This is used to manage a chain. A chain consists of a doubly
* linked list of zero or more nodes.
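
For illustration, a minimal sketch of embedding the reworked Chain_Node in a user structure (names hypothetical); the operations come from <rtems/score/chainimpl.h>, whose diff follows:

#include <stddef.h>
#include <rtems/score/chain.h>
#include <rtems/score/chainimpl.h>

typedef struct {
  Chain_Node Node;   /* first member, so the node pointer can be cast back */
  int        value;
} Example_Item;

static void example_enqueue( Chain_Control *queue, Example_Item *item )
{
  _Chain_Initialize_node( &item->Node );
  _Chain_Append_unprotected( queue, &item->Node );
}

static Example_Item *example_dequeue( Chain_Control *queue )
{
  if ( _Chain_Is_empty( queue ) ) {
    return NULL;
  }

  return (Example_Item *) _Chain_Get_first_unprotected( queue );
}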
diff --git a/cpukit/include/rtems/score/chainimpl.h b/cpukit/include/rtems/score/chainimpl.h
index 4b53fa266e..a2ea5e2645 100644
--- a/cpukit/include/rtems/score/chainimpl.h
+++ b/cpukit/include/rtems/score/chainimpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2010 embedded brains GmbH.
+ * Copyright (c) 2010 embedded brains GmbH & Co. KG
*
* COPYRIGHT (c) 1989-2014.
* On-Line Applications Research Corporation (OAR).
@@ -121,7 +121,7 @@ size_t _Chain_Node_count_unprotected( const Chain_Control *chain );
*
* @param[out] node The node to set off chain.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Set_off_chain(
+static inline void _Chain_Set_off_chain(
Chain_Node *node
)
{
@@ -139,7 +139,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Set_off_chain(
*
* @param[out] the_node The chain node to initialize.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Initialize_node( Chain_Node *the_node )
+static inline void _Chain_Initialize_node( Chain_Node *the_node )
{
#if defined(RTEMS_DEBUG)
_Chain_Set_off_chain( the_node );
@@ -159,7 +159,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Initialize_node( Chain_Node *the_node )
* @retval true The @a node is off chain.
* @retval false The @a node is not off chain.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Is_node_off_chain(
+static inline bool _Chain_Is_node_off_chain(
const Chain_Node *node
)
{
@@ -178,7 +178,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Is_node_off_chain(
* @retval true @a left and @a right are equal.
* @retval false @a left and @a right are not equal.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Are_nodes_equal(
+static inline bool _Chain_Are_nodes_equal(
const Chain_Node *left,
const Chain_Node *right
)
@@ -195,7 +195,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Are_nodes_equal(
*
* @return This method returns the permanent head node of the chain.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Head(
+static inline Chain_Node *_Chain_Head(
Chain_Control *the_chain
)
{
@@ -211,7 +211,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Head(
*
* @return This method returns the permanent head node of the chain.
*/
-RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_head(
+static inline const Chain_Node *_Chain_Immutable_head(
const Chain_Control *the_chain
)
{
@@ -227,7 +227,7 @@ RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_head(
*
* @return This method returns the permanent tail node of the chain.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Tail(
+static inline Chain_Node *_Chain_Tail(
Chain_Control *the_chain
)
{
@@ -243,7 +243,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Tail(
*
* @return This method returns the permanent tail node of the chain.
*/
-RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_tail(
+static inline const Chain_Node *_Chain_Immutable_tail(
const Chain_Control *the_chain
)
{
@@ -260,7 +260,7 @@ RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_tail(
*
* @return This method returns the first node of the chain.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_First(
+static inline Chain_Node *_Chain_First(
const Chain_Control *the_chain
)
{
@@ -277,7 +277,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_First(
*
* @return This method returns the first node of the chain.
*/
-RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_first(
+static inline const Chain_Node *_Chain_Immutable_first(
const Chain_Control *the_chain
)
{
@@ -294,7 +294,7 @@ RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_first(
*
* @return This method returns the last node of the chain.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Last(
+static inline Chain_Node *_Chain_Last(
const Chain_Control *the_chain
)
{
@@ -311,7 +311,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Last(
*
* @return This method returns the last node of the chain.
*/
-RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_last(
+static inline const Chain_Node *_Chain_Immutable_last(
const Chain_Control *the_chain
)
{
@@ -327,7 +327,7 @@ RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_last(
*
* @return This method returns the next node on the chain.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Next(
+static inline Chain_Node *_Chain_Next(
const Chain_Node *the_node
)
{
@@ -343,7 +343,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Next(
*
* @return This method returns the next node on the chain.
*/
-RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_next(
+static inline const Chain_Node *_Chain_Immutable_next(
const Chain_Node *the_node
)
{
@@ -359,7 +359,7 @@ RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_next(
*
* @return This method returns the previous node on the chain.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Previous(
+static inline Chain_Node *_Chain_Previous(
const Chain_Node *the_node
)
{
@@ -375,7 +375,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Previous(
*
* @return This method returns the previous node on the chain.
*/
-RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_previous(
+static inline const Chain_Node *_Chain_Immutable_previous(
const Chain_Node *the_node
)
{
@@ -393,7 +393,7 @@ RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_previous(
* @retval true There are no nodes on @a the_chain.
* @retval false There are nodes on @a the_chain.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Is_empty(
+static inline bool _Chain_Is_empty(
const Chain_Control *the_chain
)
{
@@ -413,7 +413,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Is_empty(
* @retval true @a the_node is the first node on a chain.
* @retval false @a the_node is not the first node on a chain.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Is_first(
+static inline bool _Chain_Is_first(
const Chain_Node *the_node
)
{
@@ -432,7 +432,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Is_first(
* @retval true @a the_node is the last node on a chain.
* @retval false @a the_node is not the last node on a chain.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Is_last(
+static inline bool _Chain_Is_last(
const Chain_Node *the_node
)
{
@@ -450,7 +450,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Is_last(
* @retval true There is only one node on @a the_chain.
* @retval false There is more than one node on @a the_chain.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Has_only_one_node(
+static inline bool _Chain_Has_only_one_node(
const Chain_Control *the_chain
)
{
@@ -470,7 +470,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Has_only_one_node(
* @retval true @a the_node is the head of @a the_chain.
* @retval false @a the_node is not the head of @a the_chain.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Is_head(
+static inline bool _Chain_Is_head(
const Chain_Control *the_chain,
const Chain_Node *the_node
)
@@ -490,7 +490,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Is_head(
* @retval true @a the_node is the tail of @a the_chain.
* @retval false @a the_node is not the tail of @a the_chain.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Is_tail(
+static inline bool _Chain_Is_tail(
const Chain_Control *the_chain,
const Chain_Node *the_node
)
@@ -505,7 +505,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Is_tail(
*
* @param[out] the_chain The chain to be initialized.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Initialize_empty(
+static inline void _Chain_Initialize_empty(
Chain_Control *the_chain
)
{
@@ -528,7 +528,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Initialize_empty(
* @param[out] the_chain The chain to be initialized to contain exactly the specified node.
* @param[out] the_node The one and only node of the chain to be initialized.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Initialize_one(
+static inline void _Chain_Initialize_one(
Chain_Control *the_chain,
Chain_Node *the_node
)
@@ -558,7 +558,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Initialize_one(
*
* @param[out] the_node The node to be extracted.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Extract_unprotected(
+static inline void _Chain_Extract_unprotected(
Chain_Node *the_node
)
{
@@ -592,7 +592,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Extract_unprotected(
* @note This routine assumes that there is at least one node on the chain
* and always returns a node even if it is the Chain Tail.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_first_unprotected(
+static inline Chain_Node *_Chain_Get_first_unprotected(
Chain_Control *the_chain
)
{
@@ -630,7 +630,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_first_unprotected(
* @note It does NOT disable interrupts to ensure the atomicity of the
* get operation.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_unprotected(
+static inline Chain_Node *_Chain_Get_unprotected(
Chain_Control *the_chain
)
{
@@ -653,7 +653,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_unprotected(
* @note It does NOT disable interrupts to ensure the atomicity
 * of the insert operation.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Insert_unprotected(
+static inline void _Chain_Insert_unprotected(
Chain_Node *after_node,
Chain_Node *the_node
)
@@ -680,7 +680,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Insert_unprotected(
* @note It does NOT disable interrupts to ensure the atomicity of the
* append operation.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Append_unprotected(
+static inline void _Chain_Append_unprotected(
Chain_Control *the_chain,
Chain_Node *the_node
)
@@ -711,7 +711,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Append_unprotected(
*
* @see _Chain_Append_unprotected() and _Chain_Is_node_off_chain().
*/
-RTEMS_INLINE_ROUTINE void _Chain_Append_if_is_off_chain_unprotected(
+static inline void _Chain_Append_if_is_off_chain_unprotected(
Chain_Control *the_chain,
Chain_Node *the_node
)
@@ -732,7 +732,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Append_if_is_off_chain_unprotected(
* @note It does NOT disable interrupts to ensure the atomicity of the
* prepend operation.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Prepend_unprotected(
+static inline void _Chain_Prepend_unprotected(
Chain_Control *the_chain,
Chain_Node *the_node
)
@@ -754,7 +754,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Prepend_unprotected(
* @retval true The chain was empty before.
* @retval false The chain contained at least one node before.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Append_with_empty_check_unprotected(
+static inline bool _Chain_Append_with_empty_check_unprotected(
Chain_Control *the_chain,
Chain_Node *the_node
)
@@ -780,7 +780,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Append_with_empty_check_unprotected(
* @retval true The chain was empty before.
* @retval false The chain contained at least one node before.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Prepend_with_empty_check_unprotected(
+static inline bool _Chain_Prepend_with_empty_check_unprotected(
Chain_Control *the_chain,
Chain_Node *the_node
)
@@ -810,7 +810,7 @@ RTEMS_INLINE_ROUTINE bool _Chain_Prepend_with_empty_check_unprotected(
* @retval true The chain is empty now.
* @retval false The chain contains at least one node now.
*/
-RTEMS_INLINE_ROUTINE bool _Chain_Get_with_empty_check_unprotected(
+static inline bool _Chain_Get_with_empty_check_unprotected(
Chain_Control *the_chain,
Chain_Node **the_node
)
@@ -865,7 +865,7 @@ typedef bool ( *Chain_Node_order )(
* variable.
* @param order The order relation.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Insert_ordered_unprotected(
+static inline void _Chain_Insert_ordered_unprotected(
Chain_Control *the_chain,
Chain_Node *to_insert,
const void *key,
@@ -954,7 +954,7 @@ typedef struct {
*
* @param[out] the_registry The chain iterator registry to be initialized.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Iterator_registry_initialize(
+static inline void _Chain_Iterator_registry_initialize(
Chain_Iterator_registry *the_registry
)
{
@@ -973,7 +973,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Iterator_registry_initialize(
* @param[in, out] the_registry the chain iterator registry.
* @param[out] the_node_to_extract The node that will be extracted.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Iterator_registry_update(
+static inline void _Chain_Iterator_registry_update(
Chain_Iterator_registry *the_registry,
Chain_Node *the_node_to_extract
)
@@ -1060,7 +1060,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Iterator_registry_update(
* implementation is unfit for use in performance relevant components, due to
* the linear time complexity in _Chain_Iterator_registry_update().
*/
-RTEMS_INLINE_ROUTINE void _Chain_Iterator_initialize(
+static inline void _Chain_Iterator_initialize(
Chain_Control *the_chain,
Chain_Iterator_registry *the_registry,
Chain_Iterator *the_iterator,
@@ -1092,7 +1092,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Iterator_initialize(
*
* @return The next node in the iterator direction
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Iterator_next(
+static inline Chain_Node *_Chain_Iterator_next(
const Chain_Iterator *the_iterator
)
{
@@ -1109,7 +1109,7 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Iterator_next(
* @param[out] the_iterator The chain iterator.
* @param[out] the_node The new iterator position.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Iterator_set_position(
+static inline void _Chain_Iterator_set_position(
Chain_Iterator *the_iterator,
Chain_Node *the_node
)
@@ -1124,7 +1124,7 @@ RTEMS_INLINE_ROUTINE void _Chain_Iterator_set_position(
*
* @param[out] the_iterator The chain iterator.
*/
-RTEMS_INLINE_ROUTINE void _Chain_Iterator_destroy(
+static inline void _Chain_Iterator_destroy(
Chain_Iterator *the_iterator
)
{
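
Taken together, the unprotected operations changed above form a small intrusive list API. The following is a minimal usage sketch, not taken from the tree; item_t is a made-up type and the caller is assumed to provide its own mutual exclusion, since the _unprotected variants do not disable interrupts:

#include <rtems/score/chainimpl.h>

/* Hypothetical node type; the Chain_Node is the first member so the
   returned node pointer can be cast back to the containing item. */
typedef struct {
  Chain_Node Node;
  int        payload;
} item_t;

static void chain_usage_sketch( void )
{
  Chain_Control queue;
  item_t        a;
  item_t        b;

  _Chain_Initialize_empty( &queue );
  _Chain_Append_unprotected( &queue, &a.Node );   /* a becomes the last node */
  _Chain_Prepend_unprotected( &queue, &b.Node );  /* b becomes the first node */

  while ( !_Chain_Is_empty( &queue ) ) {
    item_t *item = (item_t *) _Chain_Get_first_unprotected( &queue );

    (void) item;  /* process item->payload here */
  }
}
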
diff --git a/cpukit/include/rtems/score/copyrt.h b/cpukit/include/rtems/score/copyrt.h
index 17067a26cf..21c4bec635 100644
--- a/cpukit/include/rtems/score/copyrt.h
+++ b/cpukit/include/rtems/score/copyrt.h
@@ -3,7 +3,7 @@
/**
* @file
*
- * @ingroup RTEMSSuperCoreCopyright
+ * @ingroup RTEMSScoreCopyright
*
* @brief This header file provides the interfaces of the
* @ref RTEMSScoreCopyright.
diff --git a/cpukit/include/rtems/score/corebarrierimpl.h b/cpukit/include/rtems/score/corebarrierimpl.h
index 707d9cb3aa..b58bc44b4c 100644
--- a/cpukit/include/rtems/score/corebarrierimpl.h
+++ b/cpukit/include/rtems/score/corebarrierimpl.h
@@ -89,7 +89,7 @@ void _CORE_barrier_Initialize(
*
* @param[out] the_barrier The barrier to destroy.
*/
-RTEMS_INLINE_ROUTINE void _CORE_barrier_Destroy(
+static inline void _CORE_barrier_Destroy(
CORE_barrier_Control *the_barrier
)
{
@@ -102,7 +102,7 @@ RTEMS_INLINE_ROUTINE void _CORE_barrier_Destroy(
* @param[in, out] the_barrier The barrier to acquire.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_barrier_Acquire_critical(
+static inline void _CORE_barrier_Acquire_critical(
CORE_barrier_Control *the_barrier,
Thread_queue_Context *queue_context
)
@@ -116,7 +116,7 @@ RTEMS_INLINE_ROUTINE void _CORE_barrier_Acquire_critical(
* @param[in, out] the_barrier The barrier to release.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_barrier_Release(
+static inline void _CORE_barrier_Release(
CORE_barrier_Control *the_barrier,
Thread_queue_Context *queue_context
)
@@ -157,7 +157,7 @@ Status_Control _CORE_barrier_Seize(
*
* @return The number of unblocked threads.
*/
-RTEMS_INLINE_ROUTINE uint32_t _CORE_barrier_Surrender(
+static inline uint32_t _CORE_barrier_Surrender(
CORE_barrier_Control *the_barrier,
Thread_queue_Context *queue_context
)
@@ -176,7 +176,7 @@ RTEMS_INLINE_ROUTINE uint32_t _CORE_barrier_Surrender(
* @param[in, out] the_barrier The barrier to flush.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_barrier_Flush(
+static inline void _CORE_barrier_Flush(
CORE_barrier_Control *the_barrier,
Thread_queue_Context *queue_context
)
diff --git a/cpukit/include/rtems/score/coremsgbuffer.h b/cpukit/include/rtems/score/coremsgbuffer.h
index 330a480423..cceb80bdf5 100644
--- a/cpukit/include/rtems/score/coremsgbuffer.h
+++ b/cpukit/include/rtems/score/coremsgbuffer.h
@@ -11,7 +11,7 @@
*/
/*
- * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2020 embedded brains GmbH & Co. KG
* Copyright (C) 1989, 2009 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
diff --git a/cpukit/include/rtems/score/coremsgimpl.h b/cpukit/include/rtems/score/coremsgimpl.h
index fff3f3e159..a11beef938 100644
--- a/cpukit/include/rtems/score/coremsgimpl.h
+++ b/cpukit/include/rtems/score/coremsgimpl.h
@@ -372,7 +372,7 @@ void _CORE_message_queue_Insert_message(
* @retval STATUS_MESSAGE_QUEUE_WAIT_IN_ISR The caller is in an ISR, do not block!
* @retval STATUS_TIMEOUT A timeout occurred.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_message_queue_Send(
+static inline Status_Control _CORE_message_queue_Send(
CORE_message_queue_Control *the_message_queue,
const void *buffer,
size_t size,
@@ -408,7 +408,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_message_queue_Send(
* @retval STATUS_MESSAGE_QUEUE_WAIT_IN_ISR The caller is in an ISR, do not block!
* @retval STATUS_TIMEOUT A timeout occurred.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_message_queue_Urgent(
+static inline Status_Control _CORE_message_queue_Urgent(
CORE_message_queue_Control *the_message_queue,
const void *buffer,
size_t size,
@@ -430,10 +430,10 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_message_queue_Urgent(
/**
* @brief Acquires the message queue.
*
- * @param[in, out] the_message_queue Rhe message queue to acquire.
+ * @param[in, out] the_message_queue The message queue to acquire.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire(
+static inline void _CORE_message_queue_Acquire(
CORE_message_queue_Control *the_message_queue,
Thread_queue_Context *queue_context
)
@@ -447,7 +447,7 @@ RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire(
* @param[in, out] the_message_queue The message queue to acquire critical.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire_critical(
+static inline void _CORE_message_queue_Acquire_critical(
CORE_message_queue_Control *the_message_queue,
Thread_queue_Context *queue_context
)
@@ -461,7 +461,7 @@ RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire_critical(
* @param[in, out] the_message_queue The message queue to release.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_message_queue_Release(
+static inline void _CORE_message_queue_Release(
CORE_message_queue_Control *the_message_queue,
Thread_queue_Context *queue_context
)
@@ -479,7 +479,7 @@ RTEMS_INLINE_ROUTINE void _CORE_message_queue_Release(
 * @param[out] destination The destination message buffer to copy the source to.
* @param size The size of the source buffer.
*/
-RTEMS_INLINE_ROUTINE void _CORE_message_queue_Copy_buffer (
+static inline void _CORE_message_queue_Copy_buffer (
const void *source,
void *destination,
size_t size
@@ -499,7 +499,7 @@ RTEMS_INLINE_ROUTINE void _CORE_message_queue_Copy_buffer (
* @retval pointer The allocated message buffer.
* @retval NULL The inactive message buffer chain is empty.
*/
-RTEMS_INLINE_ROUTINE CORE_message_queue_Buffer *
+static inline CORE_message_queue_Buffer *
_CORE_message_queue_Allocate_message_buffer (
CORE_message_queue_Control *the_message_queue
)
@@ -517,7 +517,7 @@ _CORE_message_queue_Allocate_message_buffer (
* @param[in, out] the_message_queue The message queue to free the message buffer to.
* @param[out] the_message The message to be freed.
*/
-RTEMS_INLINE_ROUTINE void _CORE_message_queue_Free_message_buffer (
+static inline void _CORE_message_queue_Free_message_buffer (
CORE_message_queue_Control *the_message_queue,
CORE_message_queue_Buffer *the_message
)
@@ -538,7 +538,7 @@ RTEMS_INLINE_ROUTINE void _CORE_message_queue_Free_message_buffer (
* @note It encapsulates the optional behavior that message priority is
* disabled if no API requires it.
*/
-RTEMS_INLINE_ROUTINE int _CORE_message_queue_Get_message_priority (
+static inline int _CORE_message_queue_Get_message_priority (
const CORE_message_queue_Buffer *the_message
)
{
@@ -560,7 +560,7 @@ RTEMS_INLINE_ROUTINE int _CORE_message_queue_Get_message_priority (
* @retval pointer The first message if the message queue is not empty.
* @retval NULL The message queue is empty.
*/
-RTEMS_INLINE_ROUTINE
+static inline
CORE_message_queue_Buffer *_CORE_message_queue_Get_pending_message (
CORE_message_queue_Control *the_message_queue
)
@@ -581,7 +581,7 @@ RTEMS_INLINE_ROUTINE
* @retval true Notification is enabled on this message queue.
* @retval false Notification is not enabled on this message queue.
*/
- RTEMS_INLINE_ROUTINE bool _CORE_message_queue_Is_notify_enabled (
+ static inline bool _CORE_message_queue_Is_notify_enabled (
CORE_message_queue_Control *the_message_queue
)
{
@@ -599,7 +599,7 @@ RTEMS_INLINE_ROUTINE
* @param[out] the_handler The notification information for the message queue.
*/
#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
- RTEMS_INLINE_ROUTINE void _CORE_message_queue_Set_notify (
+ static inline void _CORE_message_queue_Set_notify (
CORE_message_queue_Control *the_message_queue,
CORE_message_queue_Notify_Handler the_handler
)
@@ -627,7 +627,7 @@ RTEMS_INLINE_ROUTINE
* @retval thread The Thread_Control for the first locked thread, if there is a locked thread.
* @retval NULL There are pending messages or no thread waiting to receive.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_CORE_message_queue_Dequeue_receiver(
+static inline Thread_Control *_CORE_message_queue_Dequeue_receiver(
CORE_message_queue_Control *the_message_queue,
const void *buffer,
size_t size,
diff --git a/cpukit/include/rtems/score/coremuteximpl.h b/cpukit/include/rtems/score/coremuteximpl.h
index d489a0d946..bcdd1adceb 100644
--- a/cpukit/include/rtems/score/coremuteximpl.h
+++ b/cpukit/include/rtems/score/coremuteximpl.h
@@ -65,7 +65,7 @@ extern "C" {
*
* @param[out] the_mutex The mutex to initialize.
*/
-RTEMS_INLINE_ROUTINE void _CORE_mutex_Initialize(
+static inline void _CORE_mutex_Initialize(
CORE_mutex_Control *the_mutex
)
{
@@ -77,7 +77,7 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Initialize(
*
* @param[out] the_mutex the mutex to destroy.
*/
-RTEMS_INLINE_ROUTINE void _CORE_mutex_Destroy( CORE_mutex_Control *the_mutex )
+static inline void _CORE_mutex_Destroy( CORE_mutex_Control *the_mutex )
{
_Thread_queue_Destroy( &the_mutex->Wait_queue );
}
@@ -88,7 +88,7 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Destroy( CORE_mutex_Control *the_mutex )
* @param[in, out] the_mutex The mutex to acquire critical.
* @param queue_context The queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_mutex_Acquire_critical(
+static inline void _CORE_mutex_Acquire_critical(
CORE_mutex_Control *the_mutex,
Thread_queue_Context *queue_context
)
@@ -102,7 +102,7 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Acquire_critical(
* @param[in, out] the_mutex The mutex to release.
* @param queue_context The queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_mutex_Release(
+static inline void _CORE_mutex_Release(
CORE_mutex_Control *the_mutex,
Thread_queue_Context *queue_context
)
@@ -117,7 +117,7 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Release(
*
* @return The owner of the mutex.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_CORE_mutex_Get_owner(
+static inline Thread_Control *_CORE_mutex_Get_owner(
const CORE_mutex_Control *the_mutex
)
{
@@ -135,7 +135,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_CORE_mutex_Get_owner(
* @retval true The mutex is locked.
* @retval false The mutex is not locked.
*/
-RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_locked(
+static inline bool _CORE_mutex_Is_locked(
const CORE_mutex_Control *the_mutex
)
{
@@ -168,7 +168,7 @@ Status_Control _CORE_mutex_Seize_slow(
* @param[out] the_mutex The mutex to set the owner from.
* @param owner The new owner of the mutex.
*/
-RTEMS_INLINE_ROUTINE void _CORE_mutex_Set_owner(
+static inline void _CORE_mutex_Set_owner(
CORE_mutex_Control *the_mutex,
Thread_Control *owner
)
@@ -185,7 +185,7 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Set_owner(
* @retval true @a the_thread is the owner of @a the_mutex.
* @retval false @a the_thread is not the owner of @a the_mutex.
*/
-RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_owner(
+static inline bool _CORE_mutex_Is_owner(
const CORE_mutex_Control *the_mutex,
const Thread_Control *the_thread
)
@@ -198,7 +198,7 @@ RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_owner(
*
* @param[out] the_mutex The recursive mutex to initialize.
*/
-RTEMS_INLINE_ROUTINE void _CORE_recursive_mutex_Initialize(
+static inline void _CORE_recursive_mutex_Initialize(
CORE_recursive_mutex_Control *the_mutex
)
{
@@ -213,7 +213,7 @@ RTEMS_INLINE_ROUTINE void _CORE_recursive_mutex_Initialize(
*
* @return STATUS_SUCCESSFUL, this method is always successful.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize_nested(
+static inline Status_Control _CORE_recursive_mutex_Seize_nested(
CORE_recursive_mutex_Control *the_mutex
)
{
@@ -236,7 +236,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize_nested(
* @retval _Thread_Wait_get_status The status of the executing thread.
* @retval STATUS_UNAVAILABLE The calling thread is not willing to wait.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize(
+static inline Status_Control _CORE_recursive_mutex_Seize(
CORE_recursive_mutex_Control *the_mutex,
const Thread_queue_Operations *operations,
Thread_Control *executing,
@@ -286,7 +286,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize(
* @retval STATUS_SUCCESSFUL @a the_mutex is successfully surrendered.
* @retval STATUS_NOT_OWNER The executing thread does not own @a the_mutex.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender(
+static inline Status_Control _CORE_recursive_mutex_Surrender(
CORE_recursive_mutex_Control *the_mutex,
const Thread_queue_Operations *operations,
Thread_Control *executing,
@@ -339,7 +339,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender(
* Only needed if RTEMS_SMP is defined
* @param priority_ceiling The priority ceiling for the initialized mutex.
*/
-RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Initialize(
+static inline void _CORE_ceiling_mutex_Initialize(
CORE_ceiling_mutex_Control *the_mutex,
const Scheduler_Control *scheduler,
Priority_Control priority_ceiling
@@ -359,7 +359,7 @@ RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Initialize(
*
* @return The scheduler of the mutex. If RTEMS_SMP is not defined, the first entry of the _Scheduler_Table is returned.
*/
-RTEMS_INLINE_ROUTINE const Scheduler_Control *
+static inline const Scheduler_Control *
_CORE_ceiling_mutex_Get_scheduler(
const CORE_ceiling_mutex_Control *the_mutex
)
@@ -377,7 +377,7 @@ _CORE_ceiling_mutex_Get_scheduler(
* @param[out] the_mutex The ceiling mutex to set the priority of.
* @param priority_ceiling The new priority ceiling of the mutex.
*/
-RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Set_priority(
+static inline void _CORE_ceiling_mutex_Set_priority(
CORE_ceiling_mutex_Control *the_mutex,
Priority_Control priority_ceiling
)
@@ -412,7 +412,7 @@ RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Set_priority(
*
* @return The priority ceiling of @a the_mutex.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _CORE_ceiling_mutex_Get_priority(
+static inline Priority_Control _CORE_ceiling_mutex_Get_priority(
const CORE_ceiling_mutex_Control *the_mutex
)
{
@@ -430,7 +430,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _CORE_ceiling_mutex_Get_priority(
 * @retval STATUS_MUTEX_CEILING_VIOLATED The owner's wait priority
* is smaller than the priority of the ceiling mutex.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Set_owner(
+static inline Status_Control _CORE_ceiling_mutex_Set_owner(
CORE_ceiling_mutex_Control *the_mutex,
Thread_Control *owner,
Thread_queue_Context *queue_context
@@ -484,7 +484,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Set_owner(
* is smaller than the priority of the ceiling mutex.
* @retval other Return value of @a nested.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Seize(
+static inline Status_Control _CORE_ceiling_mutex_Seize(
CORE_ceiling_mutex_Control *the_mutex,
Thread_Control *executing,
bool wait,
@@ -544,7 +544,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Seize(
 * @retval STATUS_SUCCESSFUL The ceiling mutex was successfully surrendered.
* @retval STATUS_NOT_OWNER The executing thread is not the owner of @a the_mutex.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
+static inline Status_Control _CORE_ceiling_mutex_Surrender(
CORE_ceiling_mutex_Control *the_mutex,
Thread_Control *executing,
Thread_queue_Context *queue_context
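
The ownership helpers touched above are the building blocks of the seize and surrender paths. Below is a hedged sketch of how they combine; release_if_owner is a made-up helper and the thread queue handling is deliberately left out:

/* Sketch only: a real surrender additionally needs a Thread_queue_Context
   and the matching thread queue operations to wake up waiting threads. */
static bool release_if_owner(
  CORE_mutex_Control   *the_mutex,
  const Thread_Control *executing
)
{
  if ( !_CORE_mutex_Is_locked( the_mutex ) ) {
    return false;  /* nothing to release */
  }

  if ( !_CORE_mutex_Is_owner( the_mutex, executing ) ) {
    return false;  /* a full surrender would return STATUS_NOT_OWNER here */
  }

  _CORE_mutex_Set_owner( the_mutex, NULL );
  return true;
}
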
diff --git a/cpukit/include/rtems/score/corerwlockimpl.h b/cpukit/include/rtems/score/corerwlockimpl.h
index d59abd6f81..0cb2965199 100644
--- a/cpukit/include/rtems/score/corerwlockimpl.h
+++ b/cpukit/include/rtems/score/corerwlockimpl.h
@@ -124,7 +124,7 @@ void _CORE_RWLock_Initialize(
*
* @param[out] the_rwlock is the RWLock to destroy.
*/
-RTEMS_INLINE_ROUTINE void _CORE_RWLock_Destroy(
+static inline void _CORE_RWLock_Destroy(
CORE_RWLock_Control *the_rwlock
)
{
@@ -139,7 +139,7 @@ RTEMS_INLINE_ROUTINE void _CORE_RWLock_Destroy(
*
* @return The executing thread.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_CORE_RWLock_Acquire(
+static inline Thread_Control *_CORE_RWLock_Acquire(
CORE_RWLock_Control *the_rwlock,
Thread_queue_Context *queue_context
)
@@ -165,7 +165,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_CORE_RWLock_Acquire(
* @param[in, out] the_rwlock The RWlock to release.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_RWLock_Release(
+static inline void _CORE_RWLock_Release(
CORE_RWLock_Control *the_rwlock,
Thread_queue_Context *queue_context
)
diff --git a/cpukit/include/rtems/score/coresemimpl.h b/cpukit/include/rtems/score/coresemimpl.h
index f7168d6f63..ca952a4570 100644
--- a/cpukit/include/rtems/score/coresemimpl.h
+++ b/cpukit/include/rtems/score/coresemimpl.h
@@ -81,7 +81,7 @@ void _CORE_semaphore_Initialize(
* @param[in, out] the_semaphore The semaphore to acquire.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_semaphore_Acquire_critical(
+static inline void _CORE_semaphore_Acquire_critical(
CORE_semaphore_Control *the_semaphore,
Thread_queue_Context *queue_context
)
@@ -97,7 +97,7 @@ RTEMS_INLINE_ROUTINE void _CORE_semaphore_Acquire_critical(
* @param[in, out] the_semaphore The semaphore to release.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_semaphore_Release(
+static inline void _CORE_semaphore_Release(
CORE_semaphore_Control *the_semaphore,
Thread_queue_Context *queue_context
)
@@ -114,7 +114,7 @@ RTEMS_INLINE_ROUTINE void _CORE_semaphore_Release(
* @param operations The thread queue operations.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _CORE_semaphore_Destroy(
+static inline void _CORE_semaphore_Destroy(
CORE_semaphore_Control *the_semaphore,
const Thread_queue_Operations *operations,
Thread_queue_Context *queue_context
@@ -145,7 +145,7 @@ RTEMS_INLINE_ROUTINE void _CORE_semaphore_Destroy(
* @retval STATUS_SUCCESSFUL The unit was successfully freed to the semaphore.
* @retval STATUS_MAXIMUM_COUNT_EXCEEDED The maximum number of units was exceeded.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Surrender(
+static inline Status_Control _CORE_semaphore_Surrender(
CORE_semaphore_Control *the_semaphore,
const Thread_queue_Operations *operations,
uint32_t maximum_count,
@@ -187,7 +187,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Surrender(
*
* @return the current count of this semaphore.
*/
-RTEMS_INLINE_ROUTINE uint32_t _CORE_semaphore_Get_count(
+static inline uint32_t _CORE_semaphore_Get_count(
const CORE_semaphore_Control *the_semaphore
)
{
@@ -214,7 +214,7 @@ RTEMS_INLINE_ROUTINE uint32_t _CORE_semaphore_Get_count(
* calling thread not willing to wait.
* @retval STATUS_TIMEOUT A timeout occurred.
*/
-RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Seize(
+static inline Status_Control _CORE_semaphore_Seize(
CORE_semaphore_Control *the_semaphore,
const Thread_queue_Operations *operations,
Thread_Control *executing,
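
As a hedged illustration of the count accessor changed above, a made-up helper; an advisory check like this is no substitute for a proper seize under the queue lock:

static bool semaphore_has_units( const CORE_semaphore_Control *the_semaphore )
{
  /* Sketch: the count may change immediately after this returns. */
  return _CORE_semaphore_Get_count( the_semaphore ) > 0;
}
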
diff --git a/cpukit/include/rtems/score/cpustdatomic.h b/cpukit/include/rtems/score/cpustdatomic.h
deleted file mode 100644
index 899f52cd83..0000000000
--- a/cpukit/include/rtems/score/cpustdatomic.h
+++ /dev/null
@@ -1,984 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-
-/**
- * @file
- *
- * @brief This header file provides the interfaces of the
- * @ref RTEMSScoreAtomicCPU.
- */
-
-/*
- * COPYRIGHT (c) 2013 Deng Hengyi.
- * Copyright (c) 2015 embedded brains GmbH.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTEMS_SCORE_CPUSTDATOMIC_H
-#define _RTEMS_SCORE_CPUSTDATOMIC_H
-
-#include <rtems/score/basedefs.h>
-
-/**
- * @defgroup RTEMSScoreAtomicCPU C11/C++11 Atomic Operations
- *
- * @ingroup RTEMSScoreAtomic
- *
- * @brief This group contains the atomic operations implementation using
- * functions provided by the C11/C++11.
- *
- * @{
- */
-
-#ifdef RTEMS_SMP
- #if defined(__cplusplus) \
- && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9))
- /*
- * The GCC 4.9 ships its own <stdatomic.h> which is not C++ compatible. The
- * suggested solution was to include <atomic> in case C++ is used. This works
- * at least with GCC 4.9. See also:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60932
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60940
- */
- #include <atomic>
- #define _RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC
- #else
- #include <stdatomic.h>
- #define _RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC
- #endif
-#else
- #include <rtems/score/isrlevel.h>
-#endif
-
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
-
-typedef std::atomic_uint CPU_atomic_Uint;
-
-typedef std::atomic_ulong CPU_atomic_Ulong;
-
-typedef std::atomic_uintptr_t CPU_atomic_Uintptr;
-
-typedef std::atomic_flag CPU_atomic_Flag;
-
-typedef std::memory_order CPU_atomic_Order;
-
-#define CPU_ATOMIC_ORDER_RELAXED std::memory_order_relaxed
-
-#define CPU_ATOMIC_ORDER_ACQUIRE std::memory_order_acquire
-
-#define CPU_ATOMIC_ORDER_RELEASE std::memory_order_release
-
-#define CPU_ATOMIC_ORDER_ACQ_REL std::memory_order_acq_rel
-
-#define CPU_ATOMIC_ORDER_SEQ_CST std::memory_order_seq_cst
-
-#define CPU_ATOMIC_INITIALIZER_UINT( value ) ATOMIC_VAR_INIT( value )
-
-#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ATOMIC_VAR_INIT( value )
-
-#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ATOMIC_VAR_INIT( value )
-
-#define CPU_ATOMIC_INITIALIZER_FLAG ATOMIC_FLAG_INIT
-
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
-
-typedef atomic_uint CPU_atomic_Uint;
-
-typedef atomic_ulong CPU_atomic_Ulong;
-
-typedef atomic_uintptr_t CPU_atomic_Uintptr;
-
-typedef atomic_flag CPU_atomic_Flag;
-
-typedef memory_order CPU_atomic_Order;
-
-#define CPU_ATOMIC_ORDER_RELAXED memory_order_relaxed
-
-#define CPU_ATOMIC_ORDER_ACQUIRE memory_order_acquire
-
-#define CPU_ATOMIC_ORDER_RELEASE memory_order_release
-
-#define CPU_ATOMIC_ORDER_ACQ_REL memory_order_acq_rel
-
-#define CPU_ATOMIC_ORDER_SEQ_CST memory_order_seq_cst
-
-#define CPU_ATOMIC_INITIALIZER_UINT( value ) ATOMIC_VAR_INIT( value )
-
-#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ATOMIC_VAR_INIT( value )
-
-#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ATOMIC_VAR_INIT( value )
-
-#define CPU_ATOMIC_INITIALIZER_FLAG ATOMIC_FLAG_INIT
-
-#else
-
-typedef unsigned int CPU_atomic_Uint;
-
-typedef unsigned long CPU_atomic_Ulong;
-
-typedef uintptr_t CPU_atomic_Uintptr;
-
-typedef bool CPU_atomic_Flag;
-
-typedef int CPU_atomic_Order;
-
-#define CPU_ATOMIC_ORDER_RELAXED 0
-
-#define CPU_ATOMIC_ORDER_ACQUIRE 2
-
-#define CPU_ATOMIC_ORDER_RELEASE 3
-
-#define CPU_ATOMIC_ORDER_ACQ_REL 4
-
-#define CPU_ATOMIC_ORDER_SEQ_CST 5
-
-#define CPU_ATOMIC_INITIALIZER_UINT( value ) ( value )
-
-#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ( value )
-
-#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ( value )
-
-#define CPU_ATOMIC_INITIALIZER_FLAG false
-
-#endif
-
-/**
- * @brief Sets up a cpu fence.
- *
- * @param[out] order The order for the fence.
- */
-static inline void _CPU_atomic_Fence( CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- std::atomic_thread_fence( order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- atomic_thread_fence( order );
-#else
- (void) order;
- RTEMS_COMPILER_MEMORY_BARRIER();
-#endif
-}
-
-/**
- * @brief Initializes Uint.
- *
- * @param[out] obj The CPU atomic Uint to initialize.
- * @param desired The desired value for @a obj.
- */
-static inline void _CPU_atomic_Init_uint( CPU_atomic_Uint *obj, unsigned int desired )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- obj->store( desired );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- atomic_init( obj, desired );
-#else
- *obj = desired;
-#endif
-}
-
-/**
- * @brief Initializes Ulong.
- *
- * @param[out] obj The CPU atomic Ulong to initialize.
- * @param desired The desired value for @a obj.
- */
-static inline void _CPU_atomic_Init_ulong( CPU_atomic_Ulong *obj, unsigned long desired )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- obj->store( desired );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- atomic_init( obj, desired );
-#else
- *obj = desired;
-#endif
-}
-
-/**
- * @brief Initializes Uintptr.
- *
- * @param[out] obj The CPU atomic Uintptr to initialize.
- * @param desired The desired value for @a obj.
- */
-static inline void _CPU_atomic_Init_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- obj->store( desired );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- atomic_init( obj, desired );
-#else
- *obj = desired;
-#endif
-}
-
-/**
- * @brief Loads value of Uint considering the order.
- *
- * @param obj The CPU atomic Uint to get the value from.
- * @param order The atomic order for getting the value.
- *
- * @return The value of @a obj considering the @a order.
- */
-static inline unsigned int _CPU_atomic_Load_uint( const CPU_atomic_Uint *obj, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->load( order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_load_explicit( obj, order );
-#else
- unsigned int val;
-
- (void) order;
- val = *obj;
- RTEMS_COMPILER_MEMORY_BARRIER();
-
- return val;
-#endif
-}
-
-/**
- * @brief Loads value of Ulong considering the order.
- *
- * @param obj The CPU atomic Ulong to get the value from.
- * @param order The atomic order for getting the value.
- *
- * @return The value of @a obj considering the @a order.
- */
-static inline unsigned long _CPU_atomic_Load_ulong( const CPU_atomic_Ulong *obj, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->load( order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_load_explicit( obj, order );
-#else
- unsigned long val;
-
- (void) order;
- val = *obj;
- RTEMS_COMPILER_MEMORY_BARRIER();
-
- return val;
-#endif
-}
-
-/**
- * @brief Loads value of Uintptr considering the order.
- *
- * @param obj The CPU atomic Uintptr to get the value from.
- * @param order The atomic order for getting the value.
- *
- * @return The value of @a obj considering the @a order.
- */
-static inline uintptr_t _CPU_atomic_Load_uintptr( const CPU_atomic_Uintptr *obj, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->load( order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_load_explicit( obj, order );
-#else
- uintptr_t val;
-
- (void) order;
- val = *obj;
- RTEMS_COMPILER_MEMORY_BARRIER();
-
- return val;
-#endif
-}
-
-/**
- * @brief Stores a value to Uint considering the order.
- *
- * @param[out] obj The CPU atomic Uint to store a value in.
- * @param desired The desired value for @a obj.
- * @param order The atomic order for storing the value.
- */
-static inline void _CPU_atomic_Store_uint( CPU_atomic_Uint *obj, unsigned int desired, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- obj->store( desired, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- atomic_store_explicit( obj, desired, order );
-#else
- (void) order;
- RTEMS_COMPILER_MEMORY_BARRIER();
- *obj = desired;
-#endif
-}
-
-/**
- * @brief Stores a value to Ulong considering the order.
- *
- * @param[out] obj The CPU atomic Ulong to store a value in.
- * @param desired The desired value for @a obj.
- * @param order The atomic order for storing the value.
- */
-static inline void _CPU_atomic_Store_ulong( CPU_atomic_Ulong *obj, unsigned long desired, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- obj->store( desired, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- atomic_store_explicit( obj, desired, order );
-#else
- (void) order;
- RTEMS_COMPILER_MEMORY_BARRIER();
- *obj = desired;
-#endif
-}
-
-/**
- * @brief Stores a value to Uintptr considering the order.
- *
- * @param[out] obj The CPU atomic Uintptr to store a value in.
- * @param desired The desired value for @a obj.
- * @param order The atomic order for storing the value.
- */
-static inline void _CPU_atomic_Store_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- obj->store( desired, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- atomic_store_explicit( obj, desired, order );
-#else
- (void) order;
- RTEMS_COMPILER_MEMORY_BARRIER();
- *obj = desired;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uint and adds a value to the stored value.
- *
- * @param[in, out] obj The CPU atomic Uint to get the value from and add @a arg to.
- * @param arg The value to add to @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the addition of @a arg.
- */
-static inline unsigned int _CPU_atomic_Fetch_add_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_add( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_add_explicit( obj, arg, order );
-#else
- unsigned int val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val + arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Ulong and adds a value to the stored value.
- *
- * @param[in, out] obj The CPU atomic Ulong to get the value from and add @a arg to.
- * @param arg The value to add to @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the addition of @a arg.
- */
-static inline unsigned long _CPU_atomic_Fetch_add_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_add( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_add_explicit( obj, arg, order );
-#else
- unsigned long val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val + arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uintptr and adds a value to the stored value.
- *
- * @param[in, out] obj The CPU atomic Uintptr to get the value from and add @a arg to.
- * @param arg The value to add to @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the addition of @a arg.
- */
-static inline uintptr_t _CPU_atomic_Fetch_add_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_add( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_add_explicit( obj, arg, order );
-#else
- uintptr_t val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val + arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uint and subtracts a value from the stored value.
- *
- * @param[in, out] obj The CPU atomic Uint to get the value from and subtract @a arg from.
- * @param arg The value to subtract from @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the subtraction of @a arg.
- */
-static inline unsigned int _CPU_atomic_Fetch_sub_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_sub( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_sub_explicit( obj, arg, order );
-#else
- unsigned int val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val - arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Ulong and subtracts a value from the stored value.
- *
- * @param[in, out] obj The CPU atomic Ulong to get the value from and subtract @a arg from.
- * @param arg The value to subtract from @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the subtraction of @a arg.
- */
-static inline unsigned long _CPU_atomic_Fetch_sub_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_sub( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_sub_explicit( obj, arg, order );
-#else
- unsigned long val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val - arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uintptr and subtracts a value from the stored value.
- *
- * @param[in, out] obj The CPU atomic Uintptr to get the value from and subtract @a arg from.
- * @param arg The value to subtract from @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the subtraction of @a arg.
- */
-static inline uintptr_t _CPU_atomic_Fetch_sub_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_sub( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_sub_explicit( obj, arg, order );
-#else
- uintptr_t val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val - arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uint and ORs a value with the stored value.
- *
- * @param[in, out] obj The CPU atomic Uint to get the value from and OR @a arg to.
- * @param arg The value to OR with @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the OR operation with @a arg.
- */
-static inline unsigned int _CPU_atomic_Fetch_or_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_or( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_or_explicit( obj, arg, order );
-#else
- unsigned int val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val | arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Ulong and ORs a value with the stored value.
- *
- * @param[in, out] obj The CPU atomic Ulong to get the value from and OR @a arg to.
- * @param arg The value to OR with @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the OR operation with @a arg.
- */
-static inline unsigned long _CPU_atomic_Fetch_or_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_or( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_or_explicit( obj, arg, order );
-#else
- unsigned long val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val | arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uintptr and ORs a value with the stored value.
- *
- * @param[in, out] obj The CPU atomic Uintptr to get the value from and OR @a arg to.
- * @param arg The value to OR with @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the OR operation with @a arg.
- */
-static inline uintptr_t _CPU_atomic_Fetch_or_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_or( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_or_explicit( obj, arg, order );
-#else
- uintptr_t val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val | arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uint and ANDs a value with the stored value.
- *
- * @param[in, out] obj The CPU atomic Uint to get the value from and AND @a arg to.
- * @param arg The value to AND with @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the AND operation with @a arg.
- */
-static inline unsigned int _CPU_atomic_Fetch_and_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_and( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_and_explicit( obj, arg, order );
-#else
- unsigned int val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val & arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Ulong and ANDs a value with the stored value.
- *
- * @param[in, out] obj The CPU atomic Ulong to get the value from and AND @a arg to.
- * @param arg The value to AND with @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the AND operation with @a arg.
- */
-static inline unsigned long _CPU_atomic_Fetch_and_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_and( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_and_explicit( obj, arg, order );
-#else
- unsigned long val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val & arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uintptr and ANDs a value with the stored value.
- *
- * @param[in, out] obj The CPU atomic Uintptr to get the value from and AND @a arg to.
- * @param arg The value to AND with @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the AND operation with @a arg.
- */
-static inline uintptr_t _CPU_atomic_Fetch_and_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->fetch_and( arg, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_fetch_and_explicit( obj, arg, order );
-#else
- uintptr_t val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = val & arg;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uint and sets its value.
- *
- * @param[in, out] obj The CPU atomic Uint to get the value from and set the value to @a desired.
- * @param arg The value to set for @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the exchange with @a desired.
- */
-static inline unsigned int _CPU_atomic_Exchange_uint( CPU_atomic_Uint *obj, unsigned int desired, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->exchange( desired, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_exchange_explicit( obj, desired, order );
-#else
- unsigned int val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = desired;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Ulong and sets its value.
- *
- * @param[in, out] obj The CPU atomic Ulong to get the value from and set the value to @a desired.
- * @param arg The value to set for @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the exchange with @a desired.
- */
-static inline unsigned long _CPU_atomic_Exchange_ulong( CPU_atomic_Ulong *obj, unsigned long desired, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->exchange( desired, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_exchange_explicit( obj, desired, order );
-#else
- unsigned long val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = desired;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Fetches current value of Uintptr and sets its value.
- *
- * @param[in, out] obj The CPU atomic Uintptr to get the value from and set the value to @a desired.
- * @param arg The value to set for @a obj.
- * @param order The atomic order for the operation.
- *
- * @return The value of @a obj prior to the exchange with @a desired.
- */
-static inline uintptr_t _CPU_atomic_Exchange_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->exchange( desired, order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_exchange_explicit( obj, desired, order );
-#else
- uintptr_t val;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- val = *obj;
- *obj = desired;
- _ISR_Local_enable( level );
-
- return val;
-#endif
-}
-
-/**
- * @brief Checks if value of Uint is as expected.
- *
- * This method checks if the value of @a obj is equal to the value of @a expected. If
- * this is the case, the value of @a obj is changed to @a desired. Otherwise, the value
- * of @a obj is changed to @a expected.
- *
- * @param[in, out] obj The CPU atomic Uint to operate upon.
- * @param[in, out] expected The expected value of @a obj. If @a obj has a different
- * value, @a expected is changed to the actual value of @a obj.
- * @param desired The new value of @a obj if the old value of @a obj was as expected.
- * @param succ The order if it is successful.
- * @param fail The order if it fails.
- *
- * @retval true The old value of @a obj was as expected.
- * @retval false The old value of @a obj was not as expected.
- */
-static inline bool _CPU_atomic_Compare_exchange_uint( CPU_atomic_Uint *obj, unsigned int *expected, unsigned int desired, CPU_atomic_Order succ, CPU_atomic_Order fail )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->compare_exchange_strong( *expected, desired, succ, fail );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
-#else
- bool success;
- ISR_Level level;
- unsigned int actual;
-
- (void) succ;
- (void) fail;
- _ISR_Local_disable( level );
- actual = *obj;
- success = ( actual == *expected );
- if ( success ) {
- *obj = desired;
- } else {
- *expected = actual;
- }
- _ISR_Local_enable( level );
-
- return success;
-#endif
-}
-
-/**
- * @brief Checks if value of Ulong is as expected.
- *
- * This method checks if the value of @a obj is equal to the value of @a expected. If
- * this is the case, the value of @a obj is changed to @a desired. Otherwise, the value
- * of @a obj is changed to @a expected.
- *
- * @param[in, out] obj The CPU atomic Ulong to operate upon.
- * @param[in, out] expected The expected value of @a obj. If @a obj has a different
- * value, @a expected is changed to the actual value of @a obj.
- * @param desired The new value of @a obj if the old value of @a obj was as expected.
- * @param succ The order if it is successful.
- * @param fail The order if it fails.
- *
- * @retval true The old value of @a obj was as expected.
- * @retval false The old value of @a obj was not as expected.
- */
-static inline bool _CPU_atomic_Compare_exchange_ulong( CPU_atomic_Ulong *obj, unsigned long *expected, unsigned long desired, CPU_atomic_Order succ, CPU_atomic_Order fail )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->compare_exchange_strong( *expected, desired, succ, fail );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
-#else
- bool success;
- ISR_Level level;
- unsigned long actual;
-
- (void) succ;
- (void) fail;
- _ISR_Local_disable( level );
- actual = *obj;
- success = ( actual == *expected );
- if ( success ) {
- *obj = desired;
- } else {
- *expected = actual;
- }
- _ISR_Local_enable( level );
-
- return success;
-#endif
-}
-
-/**
- * @brief Checks if value of Uintptr is as expected.
- *
- * This method checks if the value of @a obj is equal to the value of @a expected. If
- * this is the case, the value of @a obj is changed to @a desired. Otherwise, the value
- * of @a obj is changed to @a expected.
- *
- * @param[in, out] obj The CPU atomic Uintptr to operate upon.
- * @param[in, out] expected The expected value of @a obj. If @a obj has a different
- * value, @a expected is changed to the actual value of @a obj.
- * @param desired The new value of @a obj if the old value of @a obj was as expected.
- * @param succ The order if it is successful.
- * @param fail The order if it fails.
- *
- * @retval true The old value of @a obj was as expected.
- * @retval false The old value of @a obj was not as expected.
- */
-static inline bool _CPU_atomic_Compare_exchange_uintptr( CPU_atomic_Uintptr *obj, uintptr_t *expected, uintptr_t desired, CPU_atomic_Order succ, CPU_atomic_Order fail )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->compare_exchange_strong( *expected, desired, succ, fail );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail );
-#else
- bool success;
- ISR_Level level;
- uintptr_t actual;
-
- (void) succ;
- (void) fail;
- _ISR_Local_disable( level );
- actual = *obj;
- success = ( actual == *expected );
- if ( success ) {
- *obj = desired;
- } else {
- *expected = actual;
- }
- _ISR_Local_enable( level );
-
- return success;
-#endif
-}
-
-/**
- * @brief Clears the atomic flag.
- *
- * @param[out] obj The atomic flag to be cleared.
- * @param order The atomic order for the operation.
- */
-static inline void _CPU_atomic_Flag_clear( CPU_atomic_Flag *obj, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- obj->clear( order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- atomic_flag_clear_explicit( obj, order );
-#else
- (void) order;
- *obj = false;
-#endif
-}
-
-/**
- * @brief Returns current flag state and sets it.
- *
- * @param[in, out] obj The atomic flag to be set.
- * @param order The atomic order for the operation.
- *
- * @retval true @a obj was set prior to this operation.
- * @retval false @a obj was not set prior to this operation.
- */
-static inline bool _CPU_atomic_Flag_test_and_set( CPU_atomic_Flag *obj, CPU_atomic_Order order )
-{
-#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC)
- return obj->test_and_set( order );
-#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC)
- return atomic_flag_test_and_set_explicit( obj, order );
-#else
- bool flag;
- ISR_Level level;
-
- (void) order;
- _ISR_Local_disable( level );
- flag = *obj;
- *obj = true;
- _ISR_Local_enable( level );
-
- return flag;
-#endif
-}
-
-/** @} */
-
-#endif /* _RTEMS_SCORE_CPUSTDATOMIC_H */
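
Although this patch deletes the header, the removed text above fully documents the compare-and-swap semantics, so a usage sketch is still instructive. The loop below is a hedged example of a lock-free increment; it is not part of the tree and assumes the same operations remain available from whichever header replaces this one:

static unsigned int atomic_increment_sketch( CPU_atomic_Uint *counter )
{
  unsigned int expected;

  expected = _CPU_atomic_Load_uint( counter, CPU_ATOMIC_ORDER_RELAXED );

  /* Retry until no other processor changed the value in between; on each
     failure the compare-exchange updates expected to the actual value. */
  while (
    !_CPU_atomic_Compare_exchange_uint(
      counter,
      &expected,
      expected + 1,
      CPU_ATOMIC_ORDER_ACQ_REL,
      CPU_ATOMIC_ORDER_RELAXED
    )
  ) {
    /* expected now holds the current value; try again */
  }

  return expected;  /* value prior to the successful increment */
}
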
diff --git a/cpukit/include/rtems/score/freechainimpl.h b/cpukit/include/rtems/score/freechainimpl.h
index 8eeebcc728..3f02472961 100644
--- a/cpukit/include/rtems/score/freechainimpl.h
+++ b/cpukit/include/rtems/score/freechainimpl.h
@@ -68,7 +68,7 @@ typedef void *( *Freechain_Allocator )( size_t size );
* @param number_nodes The initial number of nodes.
* @param node_size The node size.
*/
-RTEMS_INLINE_ROUTINE void _Freechain_Initialize(
+static inline void _Freechain_Initialize(
Freechain_Control *freechain,
void *initial_nodes,
size_t number_nodes,
@@ -88,7 +88,7 @@ RTEMS_INLINE_ROUTINE void _Freechain_Initialize(
*
* @param freechain The freechain control.
*/
-RTEMS_INLINE_ROUTINE bool _Freechain_Is_empty(
+static inline bool _Freechain_Is_empty(
const Freechain_Control *freechain
)
{
@@ -102,7 +102,7 @@ RTEMS_INLINE_ROUTINE bool _Freechain_Is_empty(
*
* @param freechain The freechain control.
*/
-RTEMS_INLINE_ROUTINE void *_Freechain_Pop( Freechain_Control *freechain )
+static inline void *_Freechain_Pop( Freechain_Control *freechain )
{
return _Chain_Get_first_unprotected( &freechain->Free );
}
@@ -113,7 +113,7 @@ RTEMS_INLINE_ROUTINE void *_Freechain_Pop( Freechain_Control *freechain )
* @param freechain The freechain control.
* @param node The node to push back. The node shall not be @c NULL.
*/
-void RTEMS_INLINE_ROUTINE _Freechain_Push(
+static inline void _Freechain_Push(
Freechain_Control *freechain,
void *node
)
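
A minimal sketch of how the freechain interfaces above fit together; the pooled object type, its size, and the static storage are made up for illustration:

#include <rtems/score/freechainimpl.h>

#define POOL_SIZE 16

/* Hypothetical pooled object; it starts with a Chain_Node so that it can
   live on the free chain while unused. */
typedef struct {
  Chain_Node Node;
  int        data;
} pool_item_t;

static Freechain_Control pool;
static pool_item_t       pool_storage[ POOL_SIZE ];

static void freechain_usage_sketch( void )
{
  pool_item_t *item;

  _Freechain_Initialize(
    &pool,
    pool_storage,
    POOL_SIZE,
    sizeof( pool_storage[ 0 ] )
  );

  if ( !_Freechain_Is_empty( &pool ) ) {
    item = _Freechain_Pop( &pool );   /* take one node from the pool */
    item->data = 123;
    _Freechain_Push( &pool, item );   /* and hand it back when done */
  }
}
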
diff --git a/cpukit/include/rtems/score/hash.h b/cpukit/include/rtems/score/hash.h
index c6f9ebf463..666407a791 100644
--- a/cpukit/include/rtems/score/hash.h
+++ b/cpukit/include/rtems/score/hash.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2021 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -84,7 +84,7 @@ typedef struct {
*
* @return Returns the hash value as a NUL-terminated string.
*/
-RTEMS_INLINE_ROUTINE const char *_Hash_Get_string( const Hash_Control *hash )
+static inline const char *_Hash_Get_string( const Hash_Control *hash )
{
return &hash->chars[ 0 ];
}
@@ -114,7 +114,7 @@ typedef struct {
*
* @param[out] context is the hash context to initialize.
*/
-RTEMS_INLINE_ROUTINE void _Hash_Initialize( Hash_Context *context )
+static inline void _Hash_Initialize( Hash_Context *context )
{
SHA256_Init( &context->Context );
}
@@ -128,7 +128,7 @@ RTEMS_INLINE_ROUTINE void _Hash_Initialize( Hash_Context *context )
*
* @param size is the size of the data in bytes.
*/
-RTEMS_INLINE_ROUTINE void _Hash_Add_data(
+static inline void _Hash_Add_data(
Hash_Context *context,
const void *begin,
size_t size
@@ -144,7 +144,7 @@ RTEMS_INLINE_ROUTINE void _Hash_Add_data(
*
* @param str is the string to add.
*/
-RTEMS_INLINE_ROUTINE void _Hash_Add_string(
+static inline void _Hash_Add_string(
Hash_Context *context,
const char *str
)
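
A hedged sketch of feeding data into the hash context declared above; the step that turns the context into a finished Hash_Control lies outside this excerpt and is therefore omitted:

#include <rtems/score/hash.h>

static void hash_usage_sketch( const void *blob, size_t blob_size )
{
  Hash_Context context;

  _Hash_Initialize( &context );
  _Hash_Add_data( &context, blob, blob_size );
  _Hash_Add_string( &context, "rtems" );

  /* ... finalize the context into a Hash_Control elsewhere ... */
}
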
diff --git a/cpukit/include/rtems/score/heap.h b/cpukit/include/rtems/score/heap.h
index 963c31d4aa..8777e8b548 100644
--- a/cpukit/include/rtems/score/heap.h
+++ b/cpukit/include/rtems/score/heap.h
@@ -431,7 +431,7 @@ uintptr_t _Heap_No_extend(
*
* @return The @a value aligned to the given @a alignment, rounded up.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
+static inline uintptr_t _Heap_Align_up(
uintptr_t value,
uintptr_t alignment
)
@@ -452,7 +452,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
*
* @return The minimal Heap Block size for the given @a page_size.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min_block_size( uintptr_t page_size )
+static inline uintptr_t _Heap_Min_block_size( uintptr_t page_size )
{
return _Heap_Align_up( sizeof( Heap_Block ), page_size );
}
@@ -464,7 +464,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min_block_size( uintptr_t page_size )
*
* @return The worst case overhead to manage a memory area.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Area_overhead(
+static inline uintptr_t _Heap_Area_overhead(
uintptr_t page_size
)
{
@@ -493,7 +493,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Area_overhead(
*
* @return The size with administration and alignment overhead for one allocation.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Size_with_overhead(
+static inline uintptr_t _Heap_Size_with_overhead(
uintptr_t page_size,
uintptr_t size,
uintptr_t alignment
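
Editorial note: the alignment helpers above define the rounding used throughout the heap. A small worked illustration follows, assuming an RTEMS build where <rtems/score/heap.h> is available; the concrete numbers and the function name are made up.

    #include <rtems/score/heap.h>
    #include <assert.h>

    static void example_heap_alignment( void )
    {
      /* _Heap_Align_up() rounds up to the next alignment boundary. */
      assert( _Heap_Align_up( 13, 8 ) == 16 );
      assert( _Heap_Align_up( 16, 8 ) == 16 );

      /* The minimal block size is the block header rounded up to the page size. */
      assert(
        _Heap_Min_block_size( 8 ) == _Heap_Align_up( sizeof( Heap_Block ), 8 )
      );
    }
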
diff --git a/cpukit/include/rtems/score/heapimpl.h b/cpukit/include/rtems/score/heapimpl.h
index 2b1ef21c83..91326c6577 100644
--- a/cpukit/include/rtems/score/heapimpl.h
+++ b/cpukit/include/rtems/score/heapimpl.h
@@ -156,7 +156,7 @@ void *_Heap_Allocate_aligned_with_boundary(
* @retval pointer The starting address of the allocated memory area.
 * @retval NULL No memory is available or the parameters are inconsistent.
*/
-RTEMS_INLINE_ROUTINE void *_Heap_Allocate_aligned(
+static inline void *_Heap_Allocate_aligned(
Heap_Control *heap,
uintptr_t size,
uintptr_t alignment
@@ -177,7 +177,7 @@ RTEMS_INLINE_ROUTINE void *_Heap_Allocate_aligned(
* @retval pointer The starting address of the allocated memory area.
 * @retval NULL No memory is available or the parameters are inconsistent.
*/
-RTEMS_INLINE_ROUTINE void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
+static inline void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
{
return _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 );
}
@@ -440,7 +440,7 @@ Heap_Block *_Heap_Block_allocate(
* @param[in, out] heap The heap control.
* @param fraction The fraction is one divided by this fraction value.
*/
-RTEMS_INLINE_ROUTINE void _Heap_Protection_set_delayed_free_fraction(
+static inline void _Heap_Protection_set_delayed_free_fraction(
Heap_Control *heap,
uintptr_t fraction
)
@@ -460,7 +460,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Protection_set_delayed_free_fraction(
*
* @return The head of the free list.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_head( Heap_Control *heap )
+static inline Heap_Block *_Heap_Free_list_head( Heap_Control *heap )
{
return &heap->free_list;
}
@@ -472,7 +472,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_head( Heap_Control *heap )
*
* @return The tail of the free list.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
+static inline Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
{
return &heap->free_list;
}
@@ -484,7 +484,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
*
* @return The first block of the free list.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
+static inline Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
{
return _Heap_Free_list_head(heap)->next;
}
@@ -496,7 +496,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
*
* @return The last block of the free list.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_last( Heap_Control *heap )
+static inline Heap_Block *_Heap_Free_list_last( Heap_Control *heap )
{
return _Heap_Free_list_tail(heap)->prev;
}
@@ -506,7 +506,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_last( Heap_Control *heap )
*
* @param block The block to be removed.
*/
-RTEMS_INLINE_ROUTINE void _Heap_Free_list_remove( Heap_Block *block )
+static inline void _Heap_Free_list_remove( Heap_Block *block )
{
Heap_Block *next = block->next;
Heap_Block *prev = block->prev;
@@ -521,7 +521,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Free_list_remove( Heap_Block *block )
* @param old_block The block in the free list to replace.
* @param new_block The block that should replace @a old_block.
*/
-RTEMS_INLINE_ROUTINE void _Heap_Free_list_replace(
+static inline void _Heap_Free_list_replace(
Heap_Block *old_block,
Heap_Block *new_block
)
@@ -542,7 +542,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Free_list_replace(
* @param block_before The block that is already in the free list.
* @param new_block The block to be inserted after @a block_before.
*/
-RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_after(
+static inline void _Heap_Free_list_insert_after(
Heap_Block *block_before,
Heap_Block *new_block
)
@@ -561,7 +561,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_after(
* @param block_before The block that is already in the free list.
* @param new_block The block to be inserted before @a block_before.
*/
-RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_before(
+static inline void _Heap_Free_list_insert_before(
Heap_Block *block_next,
Heap_Block *new_block
)
@@ -583,7 +583,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_before(
* @retval true The value is aligned to the given alignment.
* @retval false The value is not aligned to the given alignment.
*/
-RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
+static inline bool _Heap_Is_aligned(
uintptr_t value,
uintptr_t alignment
)
@@ -599,7 +599,7 @@ RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
*
* @return The aligned value, truncated.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
+static inline uintptr_t _Heap_Align_down(
uintptr_t value,
uintptr_t alignment
)
@@ -615,7 +615,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
*
* @return The address of the block which is @a offset away from @a block.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
+static inline Heap_Block *_Heap_Block_at(
const Heap_Block *block,
uintptr_t offset
)
@@ -630,7 +630,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
*
* @return The address of the previous block.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Prev_block(
+static inline Heap_Block *_Heap_Prev_block(
const Heap_Block *block
)
{
@@ -644,7 +644,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Prev_block(
*
* @return The first address after the heap header.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
+static inline uintptr_t _Heap_Alloc_area_of_block(
const Heap_Block *block
)
{
@@ -659,7 +659,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
*
 * @return The starting address of the corresponding block of the allocatable area.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
+static inline Heap_Block *_Heap_Block_of_alloc_area(
uintptr_t alloc_begin,
uintptr_t page_size
)
@@ -675,7 +675,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
*
* @return The block size.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
+static inline uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
}
@@ -686,7 +686,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
* @param[in, out] block The block of which the size shall be set.
* @param size The new size of the block.
*/
-RTEMS_INLINE_ROUTINE void _Heap_Block_set_size(
+static inline void _Heap_Block_set_size(
Heap_Block *block,
uintptr_t size
)
@@ -705,7 +705,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Block_set_size(
* @retval true The previous block is used.
* @retval false The previous block is not used.
*/
-RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
+static inline bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
}
@@ -718,7 +718,7 @@ RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
* @retval true The block is used.
* @retval false The block is not used.
*/
-RTEMS_INLINE_ROUTINE bool _Heap_Is_used(
+static inline bool _Heap_Is_used(
const Heap_Block *block
)
{
@@ -736,7 +736,7 @@ RTEMS_INLINE_ROUTINE bool _Heap_Is_used(
* @retval true The block is free.
* @retval false The block is not free.
*/
-RTEMS_INLINE_ROUTINE bool _Heap_Is_free(
+static inline bool _Heap_Is_free(
const Heap_Block *block
)
{
@@ -752,7 +752,7 @@ RTEMS_INLINE_ROUTINE bool _Heap_Is_free(
* @retval true The block is part of the heap.
* @retval false The block is not part of the heap.
*/
-RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
+static inline bool _Heap_Is_block_in_heap(
const Heap_Control *heap,
const Heap_Block *block
)
@@ -774,7 +774,7 @@ RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
*
* @param[in, out] heap The heap to set the last block size of.
*/
-RTEMS_INLINE_ROUTINE void _Heap_Set_last_block_size( Heap_Control *heap )
+static inline void _Heap_Set_last_block_size( Heap_Control *heap )
{
_Heap_Block_set_size(
heap->last_block,
@@ -791,7 +791,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Set_last_block_size( Heap_Control *heap )
*
* @return The size of the allocatable area in @a heap in bytes.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap )
+static inline uintptr_t _Heap_Get_size( const Heap_Control *heap )
{
return heap->stats.size;
}
@@ -805,7 +805,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap )
* @retval a If a > b.
 * @retval b If b >= a.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Max( uintptr_t a, uintptr_t b )
+static inline uintptr_t _Heap_Max( uintptr_t a, uintptr_t b )
{
return a > b ? a : b;
}
@@ -819,7 +819,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Max( uintptr_t a, uintptr_t b )
* @retval a If a < b.
 * @retval b If b <= a.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min( uintptr_t a, uintptr_t b )
+static inline uintptr_t _Heap_Min( uintptr_t a, uintptr_t b )
{
return a < b ? a : b;
}
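
Editorial note: the free list accessors above show that the heap free list is a circular doubly linked list anchored in the heap control itself: head and tail are the same node, the first free block is head->next, and the last is tail->prev. The sketch below is an emptiness check consistent with these accessors, for illustration only; the function name is made up and the heap implementation has its own checks.

    #include <rtems/score/heapimpl.h>
    #include <stdbool.h>

    static bool example_heap_free_list_is_empty( Heap_Control *heap )
    {
      /* With no free blocks, the anchor node points back to itself. */
      return _Heap_Free_list_first( heap ) == _Heap_Free_list_head( heap );
    }
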
diff --git a/cpukit/include/rtems/score/interr.h b/cpukit/include/rtems/score/interr.h
index d3fb370ec3..003e80e0bd 100644
--- a/cpukit/include/rtems/score/interr.h
+++ b/cpukit/include/rtems/score/interr.h
@@ -175,7 +175,7 @@ typedef enum {
* This enum value ensures that the enum type needs at least 32-bits for
* architectures with short enums.
*/
- RTEMS_FATAL_SOURCE_LAST = 0xffffffff
+ RTEMS_FATAL_SOURCE_LAST = 0x7fffffff
} Internal_errors_Source;
/**
@@ -229,6 +229,9 @@ typedef enum {
INTERNAL_ERROR_NO_MEMORY_FOR_PER_CPU_DATA = 40,
INTERNAL_ERROR_TOO_LARGE_TLS_SIZE = 41,
INTERNAL_ERROR_RTEMS_INIT_TASK_CONSTRUCT_FAILED = 42,
+ INTERNAL_ERROR_IDLE_THREAD_CREATE_FAILED = 43,
+ INTERNAL_ERROR_NO_MEMORY_FOR_IDLE_TASK_STORAGE = 44,
+ INTERNAL_ERROR_IDLE_THREAD_STACK_TOO_SMALL = 45
} Internal_errors_Core_list;
typedef CPU_Uint32ptr Internal_errors_t;
diff --git a/cpukit/include/rtems/score/io.h b/cpukit/include/rtems/score/io.h
deleted file mode 100644
index 106418f185..0000000000
--- a/cpukit/include/rtems/score/io.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-
-/**
- * @file
- *
- * @ingroup RTEMSScoreIO
- *
- * @brief This header file provides the interfaces of the
- * @ref RTEMSScoreIO.
- */
-
-/*
- * Copyright (c) 2017 embedded brains GmbH. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTEMS_SCORE_IO_H
-#define _RTEMS_SCORE_IO_H
-
-#include <rtems/score/basedefs.h>
-
-#include <stdarg.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/**
- * @defgroup RTEMSScoreIO IO Handler
- *
- * @ingroup RTEMSScore
- *
- * @brief This group contains the IO Handler implementation.
- *
- * @{
- */
-
-typedef void ( *IO_Put_char )( int c, void *arg );
-
-int _IO_Printf(
- IO_Put_char put_char,
- void *arg,
- char const *fmt,
- ...
-) RTEMS_PRINTFLIKE( 3, 4 );
-
-int _IO_Vprintf(
- IO_Put_char put_char,
- void *arg,
- char const *fmt,
- va_list ap
-);
-
-/**
- * @brief Outputs the source buffer in base64 encoding.
- *
- * After word length of output characters produced by the encoding a word break
- * is produced.
- *
- * @param put_char is the put character function used to output the encoded
- * source buffer.
- *
- * @param arg is the argument passed to the put character function.
- *
- * @param src is the pointer to the source buffer begin.
- *
- * @param srclen is the length of the source buffer in bytes.
- *
- * @param wordbreak is the word break string.
- *
- * @param wordlen is the word length in bytes. If the word length is less than
- * four, then a word length of four will be used.
- *
- * @return Returns the count of output characters.
- */
-int _IO_Base64(
- IO_Put_char put_char,
- void *arg,
- const void *src,
- size_t len,
- const char *wordbreak,
- int wordlen
-);
-
-/**
- * @brief Outputs the source buffer in base64url encoding.
- *
- * After word length of output characters produced by the encoding a word break
- * is produced.
- *
- * @param put_char is the put character function used to output the encoded
- * source buffer.
- *
- * @param arg is the argument passed to the put character function.
- *
- * @param src is the pointer to the source buffer begin.
- *
- * @param srclen is the length of the source buffer in bytes.
- *
- * @param wordbreak is the word break string.
- *
- * @param wordlen is the word length in bytes. If the word length is less than
- * four, then a word length of four will be used.
- *
- * @return Returns the count of output characters.
- */
-int _IO_Base64url(
- IO_Put_char put_char,
- void *arg,
- const void *src,
- size_t len,
- const char *wordbreak,
- int wordlen
-);
-
-/**
- * @brief Issues a couple of no-operation instructions.
- *
- * This function may be used to burn a couple of processor cycles with minimum
- * impact on the system bus. It may be used in busy wait loops.
- */
-void _IO_Relax( void );
-
-/** @} */
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* _RTEMS_SCORE_IO_H */
diff --git a/cpukit/include/rtems/score/isr.h b/cpukit/include/rtems/score/isr.h
index bb1f3cee50..96ad816245 100644
--- a/cpukit/include/rtems/score/isr.h
+++ b/cpukit/include/rtems/score/isr.h
@@ -98,7 +98,11 @@ extern ISR_Handler_entry _ISR_Vector_table[ CPU_INTERRUPT_NUMBER_OF_VECTORS ];
#endif
/**
- * @brief Global symbol with a value equal to the configure interrupt stack size.
+ * @brief Provides the configured interrupt stack size through an address.
+ *
+ * The address of this global symbol is equal to the configured interrupt stack
+ * size. The address of this symbol has an arbitrary value and may not be
+ * representable in the code model used by the compiler.
*
* This global symbol is defined by the application configuration option
 * CONFIGURE_INTERRUPT_STACK_SIZE via <rtems/confdefs.h>.
@@ -106,6 +110,14 @@ extern ISR_Handler_entry _ISR_Vector_table[ CPU_INTERRUPT_NUMBER_OF_VECTORS ];
RTEMS_DECLARE_GLOBAL_SYMBOL( _ISR_Stack_size );
/**
+ * @brief Provides the configured interrupt stack size through an object.
+ *
+ * This object is provided to avoid issues with the _ISR_Stack_size symbol
+ * address and the code model used by the compiler.
+ */
+extern const char * const volatile _ISR_Stack_size_object;
+
+/**
* @brief The interrupt stack area begin.
*
* The interrupt stack area is defined by the application configuration via
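
Editorial note: the interrupt stack size is therefore published in two forms, a link-time symbol whose address encodes the value and an object added to avoid code model limits on that address. Below is a minimal sketch of reading the value through the object, assuming (this is not part of the diff) that _ISR_Stack_size_object is defined elsewhere to hold the address of _ISR_Stack_size; the function name is made up.

    #include <rtems/score/isr.h>
    #include <stdint.h>

    static uintptr_t example_get_isr_stack_size( void )
    {
      /* The pointer value itself, not the pointed-to data, carries the size. */
      return (uintptr_t) _ISR_Stack_size_object;
    }
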
diff --git a/cpukit/include/rtems/score/isrlock.h b/cpukit/include/rtems/score/isrlock.h
index 7dbb7aa938..7586624f9d 100644
--- a/cpukit/include/rtems/score/isrlock.h
+++ b/cpukit/include/rtems/score/isrlock.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2013, 2019 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2013, 2019 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -172,7 +172,7 @@ typedef struct {
* @param[out] context The ISR lock context.
* @param level The ISR level.
*/
-RTEMS_INLINE_ROUTINE void _ISR_lock_Context_set_level(
+static inline void _ISR_lock_Context_set_level(
ISR_lock_Context *context,
ISR_Level level
)
diff --git a/cpukit/include/rtems/score/memory.h b/cpukit/include/rtems/score/memory.h
index 5761402711..a593d98d76 100644
--- a/cpukit/include/rtems/score/memory.h
+++ b/cpukit/include/rtems/score/memory.h
@@ -10,7 +10,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (C) 2019 embedded brains GmbH
+ * Copyright (C) 2019, 2022 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -115,7 +115,7 @@ typedef struct {
*
* @return The memory area count.
*/
-RTEMS_INLINE_ROUTINE size_t _Memory_Get_count(
+static inline size_t _Memory_Get_count(
const Memory_Information *information
)
{
@@ -130,7 +130,7 @@ RTEMS_INLINE_ROUTINE size_t _Memory_Get_count(
*
* @return The memory area of the specified index.
*/
-RTEMS_INLINE_ROUTINE Memory_Area *_Memory_Get_area(
+static inline Memory_Area *_Memory_Get_area(
const Memory_Information *information,
size_t index
)
@@ -146,7 +146,7 @@ RTEMS_INLINE_ROUTINE Memory_Area *_Memory_Get_area(
* @param begin The begin of the memory area.
* @param end The end of the memory area.
*/
-RTEMS_INLINE_ROUTINE void _Memory_Initialize(
+static inline void _Memory_Initialize(
Memory_Area *area,
void *begin,
void *end
@@ -164,7 +164,7 @@ RTEMS_INLINE_ROUTINE void _Memory_Initialize(
* @param begin The begin of the memory area.
* @param size The size of the memory area in bytes.
*/
-RTEMS_INLINE_ROUTINE void _Memory_Initialize_by_size(
+static inline void _Memory_Initialize_by_size(
Memory_Area *area,
void *begin,
uintptr_t size
@@ -182,7 +182,7 @@ RTEMS_INLINE_ROUTINE void _Memory_Initialize_by_size(
*
* @return The memory area begin.
*/
-RTEMS_INLINE_ROUTINE const void *_Memory_Get_begin( const Memory_Area *area )
+static inline const void *_Memory_Get_begin( const Memory_Area *area )
{
return area->begin;
}
@@ -193,7 +193,7 @@ RTEMS_INLINE_ROUTINE const void *_Memory_Get_begin( const Memory_Area *area )
* @param area The memory area.
* @param begin The memory area begin.
*/
-RTEMS_INLINE_ROUTINE void _Memory_Set_begin(
+static inline void _Memory_Set_begin(
Memory_Area *area,
const void *begin
)
@@ -208,7 +208,7 @@ RTEMS_INLINE_ROUTINE void _Memory_Set_begin(
*
* @return The memory area end.
*/
-RTEMS_INLINE_ROUTINE const void *_Memory_Get_end( const Memory_Area *area )
+static inline const void *_Memory_Get_end( const Memory_Area *area )
{
return area->end;
}
@@ -219,7 +219,7 @@ RTEMS_INLINE_ROUTINE const void *_Memory_Get_end( const Memory_Area *area )
* @param area The memory area.
* @param end The memory area end.
*/
-RTEMS_INLINE_ROUTINE void _Memory_Set_end(
+static inline void _Memory_Set_end(
Memory_Area *area,
const void *end
)
@@ -234,7 +234,7 @@ RTEMS_INLINE_ROUTINE void _Memory_Set_end(
*
* @return The memory area size in bytes.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Memory_Get_size( const Memory_Area *area )
+static inline uintptr_t _Memory_Get_size( const Memory_Area *area )
{
return (uintptr_t) area->end - (uintptr_t) area->begin;
}
@@ -246,7 +246,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Memory_Get_size( const Memory_Area *area )
*
 * @return The free memory area begin of the memory area.
*/
-RTEMS_INLINE_ROUTINE void *_Memory_Get_free_begin( const Memory_Area *area )
+static inline void *_Memory_Get_free_begin( const Memory_Area *area )
{
return area->free;
}
@@ -257,7 +257,7 @@ RTEMS_INLINE_ROUTINE void *_Memory_Get_free_begin( const Memory_Area *area )
* @param area The memory area.
 * @param begin The free memory area begin of the memory area.
*/
-RTEMS_INLINE_ROUTINE void _Memory_Set_free_begin(
+static inline void _Memory_Set_free_begin(
Memory_Area *area,
void *begin
)
@@ -272,7 +272,7 @@ RTEMS_INLINE_ROUTINE void _Memory_Set_free_begin(
*
* @return The free memory area size in bytes of the memory area.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Memory_Get_free_size( const Memory_Area *area )
+static inline uintptr_t _Memory_Get_free_size( const Memory_Area *area )
{
return (uintptr_t) area->end - (uintptr_t) area->free;
}
@@ -285,7 +285,7 @@ RTEMS_INLINE_ROUTINE uintptr_t _Memory_Get_free_size( const Memory_Area *area )
* @param consume The bytes to consume from the free memory area of the memory
* area.
*/
-RTEMS_INLINE_ROUTINE void _Memory_Consume(
+static inline void _Memory_Consume(
Memory_Area *area,
uintptr_t consume
)
@@ -352,6 +352,18 @@ void _Memory_Zero_free_areas( void );
*/
void _Memory_Dirty_free_areas( void );
+/**
+ * @brief This symbol marks the begin of the non-initialized section used by
+ * RTEMS.
+ */
+extern char _Memory_Noinit_begin[];
+
+/**
+ * @brief This symbol marks the end of the non-initialized section used by
+ * RTEMS.
+ */
+extern char _Memory_Noinit_end[];
+
/** @} */
#ifdef __cplusplus
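
Editorial note: the memory area accessors above are enough for a simple first-fit carve-out. The sketch below is for illustration only; it assumes the Memory_Information pointer is obtained elsewhere (how it is obtained is not part of this diff), ignores alignment, and uses a made-up function name.

    #include <rtems/score/memory.h>
    #include <stddef.h>

    static void *example_memory_take(
      const Memory_Information *information,
      uintptr_t                 size
    )
    {
      size_t i;

      for ( i = 0; i < _Memory_Get_count( information ); ++i ) {
        Memory_Area *area = _Memory_Get_area( information, i );

        if ( _Memory_Get_free_size( area ) >= size ) {
          void *begin = _Memory_Get_free_begin( area );

          /* Move the free begin of the area past the carved-out bytes. */
          _Memory_Consume( area, size );
          return begin;
        }
      }

      return NULL;
    }
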
diff --git a/cpukit/include/rtems/score/mpci.h b/cpukit/include/rtems/score/mpci.h
index 874c195e95..796c881929 100644
--- a/cpukit/include/rtems/score/mpci.h
+++ b/cpukit/include/rtems/score/mpci.h
@@ -40,9 +40,6 @@
#define _RTEMS_SCORE_MPCI_H
#include <rtems/score/mppkt.h>
-#include <rtems/score/thread.h>
-#include <rtems/score/threadq.h>
-#include <rtems/score/watchdog.h>
#ifdef __cplusplus
extern "C" {
diff --git a/cpukit/include/rtems/score/mpciimpl.h b/cpukit/include/rtems/score/mpciimpl.h
index 5ee819fb32..d0c2d0558a 100644
--- a/cpukit/include/rtems/score/mpciimpl.h
+++ b/cpukit/include/rtems/score/mpciimpl.h
@@ -39,6 +39,9 @@
#define _RTEMS_SCORE_MPCIIMPL_H
#include <rtems/score/mpci.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+#include <rtems/score/watchdog.h>
#include <rtems/score/status.h>
#ifdef __cplusplus
@@ -337,7 +340,7 @@ MPCI_Internal_packet *_MPCI_Internal_packets_Get_packet ( void );
* because this enum starts at lower bound of zero.
*/
-RTEMS_INLINE_ROUTINE bool _Mp_packet_Is_valid_packet_class (
+static inline bool _Mp_packet_Is_valid_packet_class (
MP_packet_Classes the_packet_class
)
{
diff --git a/cpukit/include/rtems/score/mrsp.h b/cpukit/include/rtems/score/mrsp.h
index 266e52fc60..cd9b0a046d 100644
--- a/cpukit/include/rtems/score/mrsp.h
+++ b/cpukit/include/rtems/score/mrsp.h
@@ -11,7 +11,7 @@
*/
/*
- * Copyright (c) 2014, 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2014, 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/mrspimpl.h b/cpukit/include/rtems/score/mrspimpl.h
index 50f63f22e0..fd783bf2a0 100644
--- a/cpukit/include/rtems/score/mrspimpl.h
+++ b/cpukit/include/rtems/score/mrspimpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2014, 2019 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2014, 2019 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -64,7 +64,7 @@ extern "C" {
* @param mrsp The MrsP control for the operation.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _MRSP_Acquire_critical(
+static inline void _MRSP_Acquire_critical(
MRSP_Control *mrsp,
Thread_queue_Context *queue_context
)
@@ -78,7 +78,7 @@ RTEMS_INLINE_ROUTINE void _MRSP_Acquire_critical(
* @param mrsp The MrsP control for the operation.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _MRSP_Release(
+static inline void _MRSP_Release(
MRSP_Control *mrsp,
Thread_queue_Context *queue_context
)
@@ -93,7 +93,7 @@ RTEMS_INLINE_ROUTINE void _MRSP_Release(
*
* @return The owner of the Mrsp control.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_MRSP_Get_owner(
+static inline Thread_Control *_MRSP_Get_owner(
const MRSP_Control *mrsp
)
{
@@ -106,7 +106,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_MRSP_Get_owner(
* @param[out] mrsp The MrsP control to set the owner of.
* @param owner The desired new owner for @a mrsp.
*/
-RTEMS_INLINE_ROUTINE void _MRSP_Set_owner(
+static inline void _MRSP_Set_owner(
MRSP_Control *mrsp,
Thread_Control *owner
)
@@ -122,7 +122,7 @@ RTEMS_INLINE_ROUTINE void _MRSP_Set_owner(
*
* @return The priority of the MrsP control.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
+static inline Priority_Control _MRSP_Get_priority(
const MRSP_Control *mrsp,
const Scheduler_Control *scheduler
)
@@ -130,6 +130,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
uint32_t scheduler_index;
scheduler_index = _Scheduler_Get_index( scheduler );
+ _Assert( scheduler_index < _Scheduler_Count );
return mrsp->ceiling_priorities[ scheduler_index ];
}
@@ -140,7 +141,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
 * @param scheduler The corresponding scheduler.
 * @param new_priority The new priority for the MrsP control.
*/
-RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
+static inline void _MRSP_Set_priority(
MRSP_Control *mrsp,
const Scheduler_Control *scheduler,
Priority_Control new_priority
@@ -149,6 +150,7 @@ RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
uint32_t scheduler_index;
scheduler_index = _Scheduler_Get_index( scheduler );
+ _Assert( scheduler_index < _Scheduler_Count );
mrsp->ceiling_priorities[ scheduler_index ] = new_priority;
}
@@ -165,7 +167,7 @@ RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
* @retval STATUS_MUTEX_CEILING_VIOLATED The wait priority of the thread
* exceeds the ceiling priority.
*/
-RTEMS_INLINE_ROUTINE Status_Control _MRSP_Raise_priority(
+static inline Status_Control _MRSP_Raise_priority(
MRSP_Control *mrsp,
Thread_Control *thread,
Priority_Node *priority_node,
@@ -207,7 +209,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Raise_priority(
 * @param priority_node The priority node to remove from the thread.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _MRSP_Remove_priority(
+static inline void _MRSP_Remove_priority(
Thread_Control *thread,
Priority_Node *priority_node,
Thread_queue_Context *queue_context
@@ -229,7 +231,7 @@ RTEMS_INLINE_ROUTINE void _MRSP_Remove_priority(
* @param[out] thread The thread to replace the priorities.
* @param ceiling_priority The node to be replaced.
*/
-RTEMS_INLINE_ROUTINE void _MRSP_Replace_priority(
+static inline void _MRSP_Replace_priority(
MRSP_Control *mrsp,
Thread_Control *thread,
Priority_Node *ceiling_priority
@@ -257,7 +259,7 @@ RTEMS_INLINE_ROUTINE void _MRSP_Replace_priority(
* @retval STATUS_MUTEX_CEILING_VIOLATED The wait priority of the executing
* thread exceeds the ceiling priority.
*/
-RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
+static inline Status_Control _MRSP_Claim_ownership(
MRSP_Control *mrsp,
Thread_Control *executing,
Thread_queue_Context *queue_context
@@ -300,7 +302,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
* @retval STATUS_INVALID_NUMBER The MrsP control is initially locked.
* @retval STATUS_NO_MEMORY There is not enough memory to allocate.
*/
-RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
+static inline Status_Control _MRSP_Initialize(
MRSP_Control *mrsp,
const Scheduler_Control *scheduler,
Priority_Control ceiling_priority,
@@ -361,7 +363,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
* @retval STATUS_DEADLOCK A deadlock occurred.
* @retval STATUS_TIMEOUT A timeout occurred.
*/
-RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
+static inline Status_Control _MRSP_Wait_for_ownership(
MRSP_Control *mrsp,
Thread_Control *executing,
Thread_queue_Context *queue_context
@@ -439,7 +441,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
* @retval STATUS_MUTEX_CEILING_VIOLATED The current priority of the executing
* thread exceeds the ceiling priority of the mutex.
*/
-RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
+static inline Status_Control _MRSP_Seize(
MRSP_Control *mrsp,
Thread_Control *executing,
bool wait,
@@ -478,7 +480,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
* @retval STATUS_SUCCESSFUL The operation succeeded.
* @retval STATUS_NOT_OWNER The executing thread does not own the MrsP control.
*/
-RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
+static inline Status_Control _MRSP_Surrender(
MRSP_Control *mrsp,
Thread_Control *executing,
Thread_queue_Context *queue_context
@@ -530,7 +532,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
* @retval STATUS_RESOURCE_IN_USE The MrsP control is in use,
* it cannot be destroyed.
*/
-RTEMS_INLINE_ROUTINE Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
+static inline Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
{
if ( _MRSP_Get_owner( mrsp ) != NULL ) {
return STATUS_RESOURCE_IN_USE;
@@ -545,7 +547,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
 * @param[in, out] mrsp The MrsP control that is about to be destroyed.
* @param queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _MRSP_Destroy(
+static inline void _MRSP_Destroy(
MRSP_Control *mrsp,
Thread_queue_Context *queue_context
)
diff --git a/cpukit/include/rtems/score/muteximpl.h b/cpukit/include/rtems/score/muteximpl.h
index c024a00060..aa76b7e7b8 100644
--- a/cpukit/include/rtems/score/muteximpl.h
+++ b/cpukit/include/rtems/score/muteximpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2015, 2017 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2015, 2017 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/object.h b/cpukit/include/rtems/score/object.h
index 867e1766ab..cdb554b39c 100644
--- a/cpukit/include/rtems/score/object.h
+++ b/cpukit/include/rtems/score/object.h
@@ -273,7 +273,7 @@ typedef enum {
*
* @return An object Id constructed from the arguments.
*/
-RTEMS_INLINE_ROUTINE Objects_APIs _Objects_Get_API(
+static inline Objects_APIs _Objects_Get_API(
Objects_Id id
)
{
@@ -287,7 +287,7 @@ RTEMS_INLINE_ROUTINE Objects_APIs _Objects_Get_API(
*
* @return The class portion of the ID.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Objects_Get_class(
+static inline uint32_t _Objects_Get_class(
Objects_Id id
)
{
@@ -302,7 +302,7 @@ RTEMS_INLINE_ROUTINE uint32_t _Objects_Get_class(
*
* @return Returns the node portion of an object ID.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Objects_Get_node(
+static inline uint32_t _Objects_Get_node(
Objects_Id id
)
{
@@ -316,7 +316,7 @@ RTEMS_INLINE_ROUTINE uint32_t _Objects_Get_node(
*
* @return Returns the index portion of the specified object ID.
*/
-RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Get_index(
+static inline Objects_Maximum _Objects_Get_index(
Objects_Id id
)
{
diff --git a/cpukit/include/rtems/score/objectdata.h b/cpukit/include/rtems/score/objectdata.h
index d624f182f9..4bdd30f2c6 100644
--- a/cpukit/include/rtems/score/objectdata.h
+++ b/cpukit/include/rtems/score/objectdata.h
@@ -449,7 +449,9 @@ Objects_Information name##_Information = { \
#define OBJECTS_INFORMATION_DEFINE( name, api, cls, type, max, nl, ex ) \
static Objects_Control * \
name##_Local_table[ _Objects_Maximum_per_allocation( max ) ]; \
-static type name##_Objects[ _Objects_Maximum_per_allocation( max ) ]; \
+static RTEMS_SECTION( ".noinit.rtems.content.objects." #name ) \
+type \
+name##_Objects[ _Objects_Maximum_per_allocation( max ) ]; \
Objects_Information name##_Information = { \
_Objects_Build_id( api, cls, 1, _Objects_Maximum_per_allocation( max ) ), \
name##_Local_table, \
diff --git a/cpukit/include/rtems/score/objectimpl.h b/cpukit/include/rtems/score/objectimpl.h
index 72a5f6b126..a1a87b5ccb 100644
--- a/cpukit/include/rtems/score/objectimpl.h
+++ b/cpukit/include/rtems/score/objectimpl.h
@@ -396,7 +396,7 @@ Objects_Information *_Objects_Get_information_id(
* @retval true The object has a string name.
* @retval false The object does not have a string name.
*/
-RTEMS_INLINE_ROUTINE bool _Objects_Has_string_name(
+static inline bool _Objects_Has_string_name(
const Objects_Information *information
)
{
@@ -471,7 +471,7 @@ Status_Control _Objects_Set_name(
* @param information The corresponding object information table.
* @param[out] the_object The object to operate upon.
*/
-RTEMS_INLINE_ROUTINE void _Objects_Namespace_remove_u32(
+static inline void _Objects_Namespace_remove_u32(
const Objects_Information *information,
Objects_Control *the_object
)
@@ -523,7 +523,7 @@ Objects_Maximum _Objects_Active_count(
*
* @return The number of objects per block of @a information.
*/
-RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Extend_size(
+static inline Objects_Maximum _Objects_Extend_size(
const Objects_Information *information
)
{
@@ -538,13 +538,11 @@ RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Extend_size(
* @retval true The specified api value is valid.
* @retval false The specified api value is not valid.
*/
-RTEMS_INLINE_ROUTINE bool _Objects_Is_api_valid(
+static inline bool _Objects_Is_api_valid(
uint32_t the_api
)
{
- if ( !the_api || the_api > OBJECTS_APIS_LAST )
- return false;
- return true;
+ return ( 1 <= the_api && the_api <= OBJECTS_APIS_LAST );
}
/**
@@ -556,7 +554,7 @@ RTEMS_INLINE_ROUTINE bool _Objects_Is_api_valid(
* @retval true The specified node is the local node.
* @retval false The specified node is not the local node.
*/
-RTEMS_INLINE_ROUTINE bool _Objects_Is_local_node(
+static inline bool _Objects_Is_local_node(
uint32_t node
)
{
@@ -573,7 +571,7 @@ RTEMS_INLINE_ROUTINE bool _Objects_Is_local_node(
*
* @note On a single processor configuration, this always returns true.
*/
-RTEMS_INLINE_ROUTINE bool _Objects_Is_local_id(
+static inline bool _Objects_Is_local_id(
#if defined(RTEMS_MULTIPROCESSING)
Objects_Id id
#else
@@ -597,7 +595,7 @@ RTEMS_INLINE_ROUTINE bool _Objects_Is_local_id(
* @retval true The specified object IDs are equal.
* @retval false The specified object IDs are not equal.
*/
-RTEMS_INLINE_ROUTINE bool _Objects_Are_ids_equal(
+static inline bool _Objects_Are_ids_equal(
Objects_Id left,
Objects_Id right
)
@@ -614,7 +612,7 @@ RTEMS_INLINE_ROUTINE bool _Objects_Are_ids_equal(
*
* @return The corresponding ID with the minimum index.
*/
-RTEMS_INLINE_ROUTINE Objects_Id _Objects_Get_minimum_id( Objects_Id id )
+static inline Objects_Id _Objects_Get_minimum_id( Objects_Id id )
{
id &= ~OBJECTS_INDEX_MASK;
id += (Objects_Id) OBJECTS_INDEX_MINIMUM << OBJECTS_INDEX_START_BIT;
@@ -628,7 +626,7 @@ RTEMS_INLINE_ROUTINE Objects_Id _Objects_Get_minimum_id( Objects_Id id )
*
* @return The maximum index of the specified object class.
*/
-RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Get_maximum_index(
+static inline Objects_Maximum _Objects_Get_maximum_index(
const Objects_Information *information
)
{
@@ -641,7 +639,7 @@ RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Get_maximum_index(
* @retval NULL No inactive object is available.
* @retval object An inactive object.
*/
-RTEMS_INLINE_ROUTINE Objects_Control *_Objects_Get_inactive(
+static inline Objects_Control *_Objects_Get_inactive(
Objects_Information *information
)
{
@@ -657,7 +655,7 @@ RTEMS_INLINE_ROUTINE Objects_Control *_Objects_Get_inactive(
* @retval true The automatic object extension (unlimited objects) is enabled.
* @retval false The automatic object extension (unlimited objects) is not enabled.
*/
-RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Is_auto_extend(
+static inline Objects_Maximum _Objects_Is_auto_extend(
const Objects_Information *information
)
{
@@ -678,7 +676,7 @@ RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Is_auto_extend(
* or delete/destroy operations.
*/
-RTEMS_INLINE_ROUTINE void _Objects_Set_local_object(
+static inline void _Objects_Set_local_object(
const Objects_Information *information,
uint32_t index,
Objects_Control *the_object
@@ -711,7 +709,7 @@ RTEMS_INLINE_ROUTINE void _Objects_Set_local_object(
* or delete/destroy operations.
*/
-RTEMS_INLINE_ROUTINE void _Objects_Invalidate_Id(
+static inline void _Objects_Invalidate_Id(
const Objects_Information *information,
Objects_Control *the_object
)
@@ -738,7 +736,7 @@ RTEMS_INLINE_ROUTINE void _Objects_Invalidate_Id(
*
* @return Returns the identifier of the object which is now valid.
*/
-RTEMS_INLINE_ROUTINE Objects_Id _Objects_Open_u32(
+static inline Objects_Id _Objects_Open_u32(
const Objects_Information *information,
Objects_Control *the_object,
uint32_t name
@@ -769,7 +767,7 @@ RTEMS_INLINE_ROUTINE Objects_Id _Objects_Open_u32(
*
* @param name is the name of the object to open.
*/
-RTEMS_INLINE_ROUTINE void _Objects_Open_string(
+static inline void _Objects_Open_string(
const Objects_Information *information,
Objects_Control *the_object,
const char *name
@@ -802,7 +800,7 @@ RTEMS_INLINE_ROUTINE void _Objects_Open_string(
*
* @see _Objects_Allocator_unlock() and _Objects_Allocate().
*/
-RTEMS_INLINE_ROUTINE void _Objects_Allocator_lock( void )
+static inline void _Objects_Allocator_lock( void )
{
_RTEMS_Lock_allocator();
}
@@ -814,7 +812,7 @@ RTEMS_INLINE_ROUTINE void _Objects_Allocator_lock( void )
* previous thread life protection state and thus may not return if the
* executing thread was restarted or deleted in the mean-time.
*/
-RTEMS_INLINE_ROUTINE void _Objects_Allocator_unlock( void )
+static inline void _Objects_Allocator_unlock( void )
{
_RTEMS_Unlock_allocator();
}
@@ -825,7 +823,7 @@ RTEMS_INLINE_ROUTINE void _Objects_Allocator_unlock( void )
* @retval true The allocator is the owner of the object allocator mutex.
 * @retval false The allocator is not the owner of the object allocator mutex.
*/
-RTEMS_INLINE_ROUTINE bool _Objects_Allocator_is_owner( void )
+static inline bool _Objects_Allocator_is_owner( void )
{
return _RTEMS_Allocator_is_owner();
}
@@ -845,7 +843,7 @@ RTEMS_INLINE_ROUTINE bool _Objects_Allocator_is_owner( void )
*
* @see _Objects_Allocate() and _Objects_Free().
*/
-RTEMS_INLINE_ROUTINE Objects_Control *_Objects_Allocate_unprotected(
+static inline Objects_Control *_Objects_Allocate_unprotected(
Objects_Information *information
)
{
@@ -901,7 +899,7 @@ RTEMS_INLINE_ROUTINE Objects_Control *_Objects_Allocate_unprotected(
* }
* @endcode
*/
-RTEMS_INLINE_ROUTINE void _Objects_Free(
+static inline void _Objects_Free(
Objects_Information *information,
Objects_Control *the_object
)
@@ -912,6 +910,25 @@ RTEMS_INLINE_ROUTINE void _Objects_Free(
}
/**
+ * @brief Returns true, if the object associated with the zero-based index is
+ * contained in an allocated block of objects, otherwise false.
+ *
+ * @param index is the zero-based object index.
+ * @param objects_per_block is the object count per block.
+ *
+ * @retval true The object associated with the zero-based index is in an
+ * allocated block of objects.
+ * @retval false Otherwise.
+ */
+static inline bool _Objects_Is_in_allocated_block(
+ Objects_Maximum index,
+ Objects_Maximum objects_per_block
+)
+{
+ return index >= objects_per_block;
+}
+
+/**
* @brief Activate the object.
*
* This function must be only used in case this objects information supports
@@ -920,21 +937,23 @@ RTEMS_INLINE_ROUTINE void _Objects_Free(
* @param information The object information block.
* @param the_object The object to activate.
*/
-RTEMS_INLINE_ROUTINE void _Objects_Activate_unlimited(
+static inline void _Objects_Activate_unlimited(
Objects_Information *information,
Objects_Control *the_object
)
{
Objects_Maximum objects_per_block;
- Objects_Maximum block;
+ Objects_Maximum index;
_Assert( _Objects_Is_auto_extend( information ) );
objects_per_block = information->objects_per_block;
- block = _Objects_Get_index( the_object->id ) - OBJECTS_INDEX_MINIMUM;
+ index = _Objects_Get_index( the_object->id ) - OBJECTS_INDEX_MINIMUM;
+
+ if ( _Objects_Is_in_allocated_block( index, objects_per_block ) ) {
+ Objects_Maximum block;
- if ( block > objects_per_block ) {
- block /= objects_per_block;
+ block = index / objects_per_block;
information->inactive_per_block[ block ]--;
information->inactive--;
@@ -950,7 +969,7 @@ RTEMS_INLINE_ROUTINE void _Objects_Activate_unlimited(
* @param information The object information block.
* @param extend The object information extend handler.
*/
-RTEMS_INLINE_ROUTINE Objects_Control *_Objects_Allocate_with_extend(
+static inline Objects_Control *_Objects_Allocate_with_extend(
Objects_Information *information,
void ( *extend )( Objects_Information * )
)
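
Editorial note: the rework of _Objects_Activate_unlimited() above replaces the former block computation, which compared the raw index against the block size and then divided it in place, with the explicit helper _Objects_Is_in_allocated_block() and a separately computed block number. A small worked illustration of the new arithmetic follows; the block size, index values, and function name are made up.

    #include <rtems/score/objectimpl.h>
    #include <assert.h>

    static void example_object_block_arithmetic( void )
    {
      Objects_Maximum objects_per_block = 8;

      /* Indices below the block size are not in an allocated block. */
      assert( !_Objects_Is_in_allocated_block( 3, objects_per_block ) );

      /* Index 11 is in an allocated block, namely block 11 / 8 == 1. */
      assert( _Objects_Is_in_allocated_block( 11, objects_per_block ) );
      assert( 11 / objects_per_block == 1 );
    }
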
diff --git a/cpukit/include/rtems/score/onceimpl.h b/cpukit/include/rtems/score/onceimpl.h
index 2060a376a2..9552cc0a67 100644
--- a/cpukit/include/rtems/score/onceimpl.h
+++ b/cpukit/include/rtems/score/onceimpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2014, 2019 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2014, 2019 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index 24086cde86..288445bc6f 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -13,7 +13,7 @@
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2012, 2018 embedded brains GmbH
+ * Copyright (C) 2012, 2018 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -262,6 +262,13 @@ typedef struct Per_CPU_Job {
/**
* @brief Per-CPU statistics.
*/
+
+/*
+ * This was added to address the following warning:
+ * warning: struct has no members
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
typedef struct {
#if defined( RTEMS_PROFILING )
/**
@@ -330,6 +337,7 @@ typedef struct {
uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
+#pragma GCC diagnostic pop
/**
* @brief Per-CPU watchdog header index.
@@ -747,7 +755,7 @@ static inline bool _Per_CPU_Is_boot_processor(
#endif
}
-RTEMS_INLINE_ROUTINE void _Per_CPU_Acquire_all(
+static inline void _Per_CPU_Acquire_all(
ISR_lock_Context *lock_context
)
{
@@ -774,7 +782,7 @@ RTEMS_INLINE_ROUTINE void _Per_CPU_Acquire_all(
#endif
}
-RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
+static inline void _Per_CPU_Release_all(
ISR_lock_Context *lock_context
)
{
@@ -951,7 +959,7 @@ void _Per_CPU_Wait_for_job(
*
* @return The thread control block of the executing thread.
*/
-RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
+static inline struct _Thread_Control *_Thread_Get_executing( void )
{
struct _Thread_Control *executing;
diff --git a/cpukit/include/rtems/score/percpudata.h b/cpukit/include/rtems/score/percpudata.h
index 07045525bc..817adde232 100644
--- a/cpukit/include/rtems/score/percpudata.h
+++ b/cpukit/include/rtems/score/percpudata.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2018 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2018 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/priority.h b/cpukit/include/rtems/score/priority.h
index 568e3c4e3e..bbb8fd03f2 100644
--- a/cpukit/include/rtems/score/priority.h
+++ b/cpukit/include/rtems/score/priority.h
@@ -14,7 +14,7 @@
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2016, 2017 embedded brains GmbH.
+ * Copyright (C) 2016, 2017 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,12 +45,12 @@
#include <rtems/score/cpu.h>
#include <rtems/score/rbtree.h>
-struct _Scheduler_Control;
-
#ifdef __cplusplus
extern "C" {
#endif
+struct _Scheduler_Control;
+
/**
* @defgroup RTEMSScorePriority Priority Handler
*
@@ -96,13 +96,6 @@ typedef uint64_t Priority_Control;
#define PRIORITY_MINIMUM 0
/**
- * @brief The priority value of pseudo-ISR threads.
- *
- * Examples are the MPCI and timer server threads.
- */
-#define PRIORITY_PSEUDO_ISR PRIORITY_MINIMUM
-
-/**
* @brief The default lowest (least important) thread priority value.
*
* This value is CPU port dependent.
diff --git a/cpukit/include/rtems/score/prioritybitmapimpl.h b/cpukit/include/rtems/score/prioritybitmapimpl.h
index 7c98126f8d..eda19357c4 100644
--- a/cpukit/include/rtems/score/prioritybitmapimpl.h
+++ b/cpukit/include/rtems/score/prioritybitmapimpl.h
@@ -72,7 +72,7 @@ extern const unsigned char _Bitfield_Leading_zeros[256];
*
* @see _Priority_Bits_index() and _Priority_Mask().
*/
-RTEMS_INLINE_ROUTINE unsigned int _Bitfield_Find_first_bit(
+static inline unsigned int _Bitfield_Find_first_bit(
unsigned int value
)
{
@@ -102,7 +102,7 @@ RTEMS_INLINE_ROUTINE unsigned int _Bitfield_Find_first_bit(
*
* @return The priority bit mask.
*/
-RTEMS_INLINE_ROUTINE Priority_bit_map_Word _Priority_Mask(
+static inline Priority_bit_map_Word _Priority_Mask(
unsigned int bit_number
)
{
@@ -121,7 +121,7 @@ RTEMS_INLINE_ROUTINE Priority_bit_map_Word _Priority_Mask(
*
* @return The corresponding array index into the priority bit map.
*/
-RTEMS_INLINE_ROUTINE unsigned int _Priority_Bits_index(
+static inline unsigned int _Priority_Bits_index(
unsigned int bit_number
)
{
@@ -139,7 +139,7 @@ RTEMS_INLINE_ROUTINE unsigned int _Priority_Bits_index(
*
* @return The major portion of the priority.
*/
-RTEMS_INLINE_ROUTINE unsigned int _Priority_Major( unsigned int the_priority )
+static inline unsigned int _Priority_Major( unsigned int the_priority )
{
return the_priority / 16;
}
@@ -151,7 +151,7 @@ RTEMS_INLINE_ROUTINE unsigned int _Priority_Major( unsigned int the_priority )
*
* @return The minor portion of the priority.
*/
-RTEMS_INLINE_ROUTINE unsigned int _Priority_Minor( unsigned int the_priority )
+static inline unsigned int _Priority_Minor( unsigned int the_priority )
{
return the_priority % 16;
}
@@ -161,7 +161,7 @@ RTEMS_INLINE_ROUTINE unsigned int _Priority_Minor( unsigned int the_priority )
*
* @param[out] bit_map The bit map to initialize.
*/
-RTEMS_INLINE_ROUTINE void _Priority_bit_map_Initialize(
+static inline void _Priority_bit_map_Initialize(
Priority_bit_map_Control *bit_map
)
{
@@ -176,7 +176,7 @@ RTEMS_INLINE_ROUTINE void _Priority_bit_map_Initialize(
* @param[out] bit_map The bit map to be altered by @a bit_map_info.
* @param bit_map_info The information with which to alter @a bit_map.
*/
-RTEMS_INLINE_ROUTINE void _Priority_bit_map_Add (
+static inline void _Priority_bit_map_Add (
Priority_bit_map_Control *bit_map,
Priority_bit_map_Information *bit_map_info
)
@@ -193,7 +193,7 @@ RTEMS_INLINE_ROUTINE void _Priority_bit_map_Add (
* @param[out] bit_map The bit map to be altered by @a bit_map_info.
* @param bit_map_info The information with which to alter @a bit_map.
*/
-RTEMS_INLINE_ROUTINE void _Priority_bit_map_Remove (
+static inline void _Priority_bit_map_Remove (
Priority_bit_map_Control *bit_map,
Priority_bit_map_Information *bit_map_info
)
@@ -210,7 +210,7 @@ RTEMS_INLINE_ROUTINE void _Priority_bit_map_Remove (
*
* @return The highest portion of the bitmap.
*/
-RTEMS_INLINE_ROUTINE unsigned int _Priority_bit_map_Get_highest(
+static inline unsigned int _Priority_bit_map_Get_highest(
const Priority_bit_map_Control *bit_map
)
{
@@ -232,7 +232,7 @@ RTEMS_INLINE_ROUTINE unsigned int _Priority_bit_map_Get_highest(
 * @retval true The Priority queue bit map is empty.
* @retval false The Priority queue bit map is not empty.
*/
-RTEMS_INLINE_ROUTINE bool _Priority_bit_map_Is_empty(
+static inline bool _Priority_bit_map_Is_empty(
const Priority_bit_map_Control *bit_map
)
{
@@ -248,7 +248,7 @@ RTEMS_INLINE_ROUTINE bool _Priority_bit_map_Is_empty(
* @param new_priority The new priority for the initialization
* of the bit map information.
*/
-RTEMS_INLINE_ROUTINE void _Priority_bit_map_Initialize_information(
+static inline void _Priority_bit_map_Initialize_information(
Priority_bit_map_Control *bit_map,
Priority_bit_map_Information *bit_map_info,
unsigned int new_priority
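
Editorial note: the division and modulo in _Priority_Major() and _Priority_Minor() above split a priority into its 16-wide group and the position within that group. A small worked illustration follows; the priority value and function name are made up.

    #include <rtems/score/prioritybitmapimpl.h>
    #include <assert.h>

    static void example_priority_split( void )
    {
      /* Priority 37 is in group 2 (37 / 16) at position 5 (37 % 16). */
      assert( _Priority_Major( 37 ) == 2 );
      assert( _Priority_Minor( 37 ) == 5 );
    }
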
diff --git a/cpukit/include/rtems/score/priorityimpl.h b/cpukit/include/rtems/score/priorityimpl.h
index 55cddf53be..2a95ea605c 100644
--- a/cpukit/include/rtems/score/priorityimpl.h
+++ b/cpukit/include/rtems/score/priorityimpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -78,7 +78,7 @@ typedef enum {
*
* @param[out] actions The actions to be initialized empty.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_empty(
+static inline void _Priority_Actions_initialize_empty(
Priority_Actions *actions
)
{
@@ -93,7 +93,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_empty(
* @param node The action node for the @a actions to be initialized.
* @param type The action type for the @a actions to be initialized.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_one(
+static inline void _Priority_Actions_initialize_one(
Priority_Actions *actions,
Priority_Aggregation *aggregation,
Priority_Node *node,
@@ -117,7 +117,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_one(
* @retval true The priority actions @a actions is empty.
 * @retval false The priority actions @a actions is not empty.
*/
-RTEMS_INLINE_ROUTINE bool _Priority_Actions_is_empty(
+static inline bool _Priority_Actions_is_empty(
const Priority_Actions *actions
)
{
@@ -131,7 +131,7 @@ RTEMS_INLINE_ROUTINE bool _Priority_Actions_is_empty(
*
* @return The former actions of @a actions that were moved.
*/
-RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Actions_move(
+static inline Priority_Aggregation *_Priority_Actions_move(
Priority_Actions *actions
)
{
@@ -149,7 +149,7 @@ RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Actions_move(
* @param[in, out] actions The priority actions to add actions to.
* @param[out] aggregation The actions to add to @a actions.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Actions_add(
+static inline void _Priority_Actions_add(
Priority_Actions *actions,
Priority_Aggregation *aggregation
)
@@ -170,7 +170,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Actions_add(
* @param[out] node The priority node to be initialized.
* @param priority The priority to initialize @a node to.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Node_initialize(
+static inline void _Priority_Node_initialize(
Priority_Node *node,
Priority_Control priority
)
@@ -185,7 +185,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Node_initialize(
* @param[out] node The priority node to set the priority of.
* @param priority The new priority for @a node.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Node_set_priority(
+static inline void _Priority_Node_set_priority(
Priority_Node *node,
Priority_Control priority
)
@@ -198,7 +198,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Node_set_priority(
*
* @param[in, out] node The priority node to set inactive.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Node_set_inactive(
+static inline void _Priority_Node_set_inactive(
Priority_Node *node
)
{
@@ -213,7 +213,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Node_set_inactive(
* @retval true The priority node is active.
* @retval false The priority node is inactive.
*/
-RTEMS_INLINE_ROUTINE bool _Priority_Node_is_active(
+static inline bool _Priority_Node_is_active(
const Priority_Node *node
)
{
@@ -225,7 +225,7 @@ RTEMS_INLINE_ROUTINE bool _Priority_Node_is_active(
*
 * @param[out] aggregation The priority aggregation to initialize empty.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Initialize_empty(
+static inline void _Priority_Initialize_empty(
Priority_Aggregation *aggregation
)
{
@@ -246,7 +246,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Initialize_empty(
 * @param[out] aggregation The priority aggregation to initialize.
* @param node The priority node to initialize @a aggregation with.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Initialize_one(
+static inline void _Priority_Initialize_one(
Priority_Aggregation *aggregation,
Priority_Node *node
)
@@ -270,7 +270,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Initialize_one(
* @retval true The priority aggregation is empty.
* @retval false The priority aggregation is not empty.
*/
-RTEMS_INLINE_ROUTINE bool _Priority_Is_empty(
+static inline bool _Priority_Is_empty(
const Priority_Aggregation *aggregation
)
{
@@ -284,7 +284,7 @@ RTEMS_INLINE_ROUTINE bool _Priority_Is_empty(
*
* @return The priority of @a aggregation.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Priority_Get_priority(
+static inline Priority_Control _Priority_Get_priority(
const Priority_Aggregation *aggregation
)
{
@@ -298,7 +298,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _Priority_Get_priority(
*
* @return The scheduler of @a aggregation.
*/
-RTEMS_INLINE_ROUTINE const Scheduler_Control *_Priority_Get_scheduler(
+static inline const Scheduler_Control *_Priority_Get_scheduler(
const Priority_Aggregation *aggregation
)
{
@@ -316,7 +316,7 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Priority_Get_scheduler(
*
* @return The minimum node of @a aggregation
*/
-RTEMS_INLINE_ROUTINE Priority_Node *_Priority_Get_minimum_node(
+static inline Priority_Node *_Priority_Get_minimum_node(
const Priority_Aggregation *aggregation
)
{
@@ -329,7 +329,7 @@ RTEMS_INLINE_ROUTINE Priority_Node *_Priority_Get_minimum_node(
* @param[out] aggregation The priority aggregation to set the action node of.
* @param node The new priority node for @a aggregation.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Set_action_node(
+static inline void _Priority_Set_action_node(
Priority_Aggregation *aggregation,
Priority_Node *node
)
@@ -343,7 +343,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Set_action_node(
* @param[out] aggregation The priority aggregation to set the action type of.
* @param type The new action type for @a aggregation.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Set_action_type(
+static inline void _Priority_Set_action_type(
Priority_Aggregation *aggregation,
Priority_Action_type type
)
@@ -359,7 +359,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Set_action_type(
* @param node The new action node for @a aggregation.
* @param type The new action type for @a aggregation.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Set_action(
+static inline void _Priority_Set_action(
Priority_Aggregation *aggregation,
Priority_Node *node,
Priority_Action_type type
@@ -378,7 +378,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Set_action(
* @return Returns the next action of the priority aggregation or NULL if there
* is no next action.
*/
-RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Get_next_action(
+static inline Priority_Aggregation *_Priority_Get_next_action(
const Priority_Aggregation *aggregation
)
{
@@ -395,7 +395,7 @@ RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Get_next_action(
* @retval true The priority on the left hand side of the comparison is smaller.
 * @retval false The priority on the left hand side of the comparison is greater than or equal.
*/
-RTEMS_INLINE_ROUTINE bool _Priority_Less(
+static inline bool _Priority_Less(
const void *left,
const RBTree_Node *right
)
@@ -403,7 +403,7 @@ RTEMS_INLINE_ROUTINE bool _Priority_Less(
const Priority_Control *the_left;
const Priority_Node *the_right;
- the_left = left;
+ the_left = (const Priority_Control *) left;
the_right = RTEMS_CONTAINER_OF( right, Priority_Node, Node.RBTree );
return *the_left < the_right->priority;
@@ -422,7 +422,7 @@ RTEMS_INLINE_ROUTINE bool _Priority_Less(
* @retval true The inserted node with its priority is the minimum of the RBTree.
* @retval false The inserted node with its priority is not the minimum of the RBTree.
*/
-RTEMS_INLINE_ROUTINE bool _Priority_Plain_insert(
+static inline bool _Priority_Plain_insert(
Priority_Aggregation *aggregation,
Priority_Node *node,
Priority_Control priority
@@ -444,7 +444,7 @@ RTEMS_INLINE_ROUTINE bool _Priority_Plain_insert(
* @param[in, out] aggregation The aggregation to extract the node from.
* @param node The node to be extracted.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Plain_extract(
+static inline void _Priority_Plain_extract(
Priority_Aggregation *aggregation,
Priority_Node *node
)
@@ -461,7 +461,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Plain_extract(
* @param[in, out] aggregation The aggregation to change the node in.
* @param node The node that has a new priority and will be reinserted in the aggregation.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Plain_changed(
+static inline void _Priority_Plain_changed(
Priority_Aggregation *aggregation,
Priority_Node *node
)
@@ -499,7 +499,7 @@ typedef void ( *Priority_Remove_handler )(
* @param actions Is ignored by the method.
* @param arg Is ignored by the method.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Change_nothing(
+static inline void _Priority_Change_nothing(
Priority_Aggregation *aggregation,
Priority_Group_order group_order,
Priority_Actions *actions,
@@ -521,7 +521,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Change_nothing(
* @param actions Is ignored by the method.
* @param arg Is ignored by the method.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Remove_nothing(
+static inline void _Priority_Remove_nothing(
Priority_Aggregation *aggregation,
Priority_Actions *actions,
void *arg
@@ -545,7 +545,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Remove_nothing(
* @param arg Arguments for @a change that is used if the node is the new
* minimum.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Non_empty_insert(
+static inline void _Priority_Non_empty_insert(
Priority_Aggregation *aggregation,
Priority_Node *node,
Priority_Actions *actions,
@@ -576,7 +576,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Non_empty_insert(
* insert and @a node is the new minimum of the aggregation.
* @param arg The arguments for @a change.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Insert(
+static inline void _Priority_Insert(
Priority_Aggregation *aggregation,
Priority_Node *node,
Priority_Actions *actions,
@@ -610,7 +610,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Insert(
* @param change Is called in the case that the minimal node was extracted.
* @param arg The arguments for @a remove and @a change.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Extract(
+static inline void _Priority_Extract(
Priority_Aggregation *aggregation,
Priority_Node *node,
Priority_Actions *actions,
@@ -650,7 +650,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Extract(
* @param change Is called in the case that the minimal node was extracted.
* @param arg The arguments for @a change.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Extract_non_empty(
+static inline void _Priority_Extract_non_empty(
Priority_Aggregation *aggregation,
Priority_Node *node,
Priority_Actions *actions,
@@ -685,7 +685,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Extract_non_empty(
* @param change Is called if the minimal priority is incorrectly set after the change.
* @param arg The arguments for @a change.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Changed(
+static inline void _Priority_Changed(
Priority_Aggregation *aggregation,
Priority_Node *node,
Priority_Group_order group_order,
@@ -721,7 +721,7 @@ RTEMS_INLINE_ROUTINE void _Priority_Changed(
* @param[out] replacement The node that replaces @a victim. It obtains its priority
* from @a victim.
*/
-RTEMS_INLINE_ROUTINE void _Priority_Replace(
+static inline void _Priority_Replace(
Priority_Aggregation *aggregation,
Priority_Node *victim,
Priority_Node *replacement
diff --git a/cpukit/include/rtems/score/processormask.h b/cpukit/include/rtems/score/processormask.h
index 40fb52a70f..71ed37cd0e 100644
--- a/cpukit/include/rtems/score/processormask.h
+++ b/cpukit/include/rtems/score/processormask.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2016, 2017 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2016, 2017 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,9 +39,7 @@
#include <rtems/score/cpu.h>
-#include <sys/cpuset.h>
-
-#include <strings.h>
+#include <sys/_bitset.h>
#ifdef __cplusplus
extern "C" {
@@ -123,381 +121,6 @@ extern "C" {
*/
typedef __BITSET_DEFINE( Processor_mask, CPU_MAXIMUM_PROCESSORS ) Processor_mask;
-/**
- * @brief Sets the bits of the mask to zero, also considers CPU_MAXIMUM_PROCESSORS.
- *
- * @param[out] mask The mask to set to zero.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_Zero( Processor_mask *mask )
-{
- __BIT_ZERO( CPU_MAXIMUM_PROCESSORS, mask );
-}
-
-/**
- * @brief Checks if the mask is zero, also considers CPU_MAXIMUM_PROCESSORS.
- *
- * @param mask The mask to check whether is is zero
- *
- * @retval true The mask is zero.
- * @retval false The mask is not zero.
- */
-RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_zero( const Processor_mask *mask )
-{
- return __BIT_EMPTY( CPU_MAXIMUM_PROCESSORS, mask );
-}
-
-/**
- * @brief Fills the mask, also considers CPU_MAXIMUM_PROCESSORS.
- *
- * @param[out] mask The mask to fill
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_Fill( Processor_mask *mask )
-{
- __BIT_FILL( CPU_MAXIMUM_PROCESSORS, mask );
-}
-
-/**
- * @brief Copies the mask to another mask, also considers CPU_MAXIMUM_PROCESSORS.
- *
- * @param[out] dst The mask to copy @a src to.
- * @param src The mask to copy to @a dst.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_Assign(
- Processor_mask *dst, const Processor_mask *src
-)
-{
- __BIT_COPY( CPU_MAXIMUM_PROCESSORS, src, dst );
-}
-
-/**
- * @brief Sets the specified index bit of the mask.
- *
- * @param[out] mask The mask to set the bit of.
- * @param index The index of the bit that shall be set.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_Set(
- Processor_mask *mask,
- uint32_t index
-)
-{
- __BIT_SET( CPU_MAXIMUM_PROCESSORS, index, mask );
-}
-
-/**
- * @brief Clears the specified index bit of the mask.
- *
- * @param[out] mask The mask to clear the bit of.
- * @param index The index of the bit that shall be cleared.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_Clear(
- Processor_mask *mask,
- uint32_t index
-)
-{
- __BIT_CLR( CPU_MAXIMUM_PROCESSORS, index, mask );
-}
-
-/**
- * @brief Checks if the specified index bit of the mask is set.
- *
- * @param mask The mask to check if the specified bit is set.
- * @param index The index of the bit that is checked.
- *
- * @retval true The specified index bit is set.
- * @retval false The specified index bit is not set.
- */
-RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_set(
- const Processor_mask *mask,
- uint32_t index
-)
-{
- return __BIT_ISSET( CPU_MAXIMUM_PROCESSORS, index, mask );
-}
-
-/**
- * @brief Checks if the processor sets a and b are equal.
- *
- * @param a The first processor set.
- * @param b The seconde processor set.
- *
- * @retval true The processor sets a and b are equal.
- * @retval false The processor sets a and b are not equal.
- */
-RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_equal(
- const Processor_mask *a,
- const Processor_mask *b
-)
-{
- return !__BIT_CMP( CPU_MAXIMUM_PROCESSORS, a, b );
-}
-
-/**
- * @brief Checks if the intersection of the processor sets a and b is
- * non-empty.
- *
- * @param a The first processor set.
- * @param b The second processor set.
- *
- * @retval true The intersection of the processor sets a and b is non-empty.
- * @retval false The intersection of the processor sets a and b is empty.
- */
-RTEMS_INLINE_ROUTINE bool _Processor_mask_Has_overlap(
- const Processor_mask *a,
- const Processor_mask *b
-)
-{
- return __BIT_OVERLAP( CPU_MAXIMUM_PROCESSORS, a, b );
-}
-
-/**
- * @brief Checks if the processor set small is a subset of processor set
- * big.
- *
- * @param big The bigger processor set.
- * @param small The smaller processor set.
- *
- * @retval true @a small is a subset of @a big.
- * @retval false @a small is not a subset of @a big.
- */
-RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_subset(
- const Processor_mask *big,
- const Processor_mask *small
-)
-{
- return __BIT_SUBSET( CPU_MAXIMUM_PROCESSORS, big, small );
-}
-
-/**
- * @brief Performs a bitwise a = b & c.
- *
- * @param[out] a The processor mask that is set by this operation.
- * @param b The first parameter of the AND-operation.
- * @param c The second parameter of the AND-operation.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_And(
- Processor_mask *a,
- const Processor_mask *b,
- const Processor_mask *c
-)
-{
- __BIT_AND2( CPU_MAXIMUM_PROCESSORS, a, b, c );
-}
-
-/**
- * @brief Performs a bitwise a = b | c.
- *
- * @param[out] a The processor mask that is set by this operation.
- * @param b The first parameter of the OR-operation.
- * @param c The second parameter of the OR-operation.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_Or(
- Processor_mask *a,
- const Processor_mask *b,
- const Processor_mask *c
-)
-{
- __BIT_OR2( CPU_MAXIMUM_PROCESSORS, a, b, c );
-}
-
-/**
- * @brief Performs a bitwise a = b ^ c.
- *
- * @param[out] a The processor mask that is set by this operation.
- * @param b The first parameter of the XOR-operation.
- * @param c The second parameter of the XOR-operation.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_Xor(
- Processor_mask *a,
- const Processor_mask *b,
- const Processor_mask *c
-)
-{
- __BIT_XOR2( CPU_MAXIMUM_PROCESSORS, a, b, c );
-}
-
-/**
- * @brief Gets the number of set bits in the processor mask.
- *
- * @param a The processor mask of which the set bits are counted.
- *
- * @return The number of set bits in @a a.
- */
-RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_Count( const Processor_mask *a )
-{
- return (uint32_t) __BIT_COUNT( CPU_MAXIMUM_PROCESSORS, a );
-}
-
-/**
- * @brief Finds the last set of the processor mask.
- *
- * @param a The processor mask wo find the last set of.
- *
- * @return The last set of @a a.
- */
-RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_Find_last_set( const Processor_mask *a )
-{
- return (uint32_t) __BIT_FLS( CPU_MAXIMUM_PROCESSORS, a );
-}
-
-/**
- * @brief Returns the subset of 32 processors containing the specified index as
- * an unsigned 32-bit integer.
- *
- * @param mask The processor mask.
- * @param index The specified index.
- *
- * @return The subset containing the specified index as an unsigned 32-bit integer.
- */
-RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_To_uint32_t(
- const Processor_mask *mask,
- uint32_t index
-)
-{
- long bits = mask->__bits[ __bitset_words( index ) ];
-
- return (uint32_t) (bits >> (32 * (index % _BITSET_BITS) / 32));
-}
-
-/**
- * @brief Creates a processor set from an unsigned 32-bit integer relative to
- * the specified index.
- *
- * @param[out] mask The mask that is created.
- * @param bits The bits for creating the mask.
- * @param index The index to which the mask is relative.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_From_uint32_t(
- Processor_mask *mask,
- uint32_t bits,
- uint32_t index
-)
-{
- _Processor_mask_Zero( mask );
- mask->__bits[ __bitset_words( index ) ] = ((long) bits) << (32 * (index % _BITSET_BITS) / 32);
-}
-
-/**
- * @brief Creates a processor set from the specified index.
- *
- * @param[out] The mask that is created.
- * @param index The specified index.
- */
-RTEMS_INLINE_ROUTINE void _Processor_mask_From_index(
- Processor_mask *mask,
- uint32_t index
-)
-{
- __BIT_SETOF( CPU_MAXIMUM_PROCESSORS, (int) index, mask );
-}
-
-typedef enum {
- PROCESSOR_MASK_COPY_LOSSLESS,
- PROCESSOR_MASK_COPY_PARTIAL_LOSS,
- PROCESSOR_MASK_COPY_COMPLETE_LOSS,
- PROCESSOR_MASK_COPY_INVALID_SIZE
-} Processor_mask_Copy_status;
-
-/**
- * @brief Checks if the copy status guarantees at most partial loss.
- *
- * @param status The copy status to check.
- *
- * @retval true At most partial loss can be guaranteed.
- * @retval false The status indicates more than partial loss.
- */
-RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_at_most_partial_loss(
- Processor_mask_Copy_status status
-)
-{
- return (unsigned int) status <= PROCESSOR_MASK_COPY_PARTIAL_LOSS;
-}
-
-/**
- * @brief Copies one mask to another.
- *
- * @param[out] dst The destination of the copy operation.
- * @param dst_size The size of @a dst.
- * @param src The source of the copy operation.
- * @param src_size The size of @a src.
- *
- * @retval PROCESSOR_MASK_COPY_LOSSLESS It is guaranteed that the copy
- * operation is lossless.
- * @retval PROCESSOR_MASK_COPY_PARTIAL_LOSS Partial loss happened due
- * to the sizes of @a src and @a dst.
- * @retval PROCESSOR_MASK_COPY_COMPLETE_LOSS Complete loss happened due
- * to the sizes of @a src and @a dst.
- * @retval PROCESSOR_MASK_COPY_INVALID_SIZE One of the arguments sizes
- * is invalid (bigger than the size of a long).
- */
-Processor_mask_Copy_status _Processor_mask_Copy(
- long *dst,
- size_t dst_size,
- const long *src,
- size_t src_size
-);
-
-/**
- * @brief Copies one mask to another.
- *
- * @param src The source for the copy operation.
- * @param dst_size The size of @a dst.
- * @param[out] dst The destination for the copy operation.
- *
- * @retval PROCESSOR_MASK_COPY_LOSSLESS It is guaranteed that the copy
- * operation is lossless.
- * @retval PROCESSOR_MASK_COPY_PARTIAL_LOSS Partial loss happened due
- * to the sizes of @a src and @a dst.
- * @retval PROCESSOR_MASK_COPY_COMPLETE_LOSS Complete loss happened due
- * to the sizes of @a src and @a dst.
- * @retval PROCESSOR_MASK_COPY_INVALID_SIZE One of the arguments sizes
- * is invalid (bigger than the size of a long).
- */
-RTEMS_INLINE_ROUTINE Processor_mask_Copy_status _Processor_mask_To_cpu_set_t(
- const Processor_mask *src,
- size_t dst_size,
- cpu_set_t *dst
-)
-{
- return _Processor_mask_Copy(
- &dst->__bits[ 0 ],
- dst_size,
- &src->__bits[ 0 ],
- sizeof( *src )
- );
-}
-
-/**
- * @brief Copies one mask to another.
- *
- * @param src The source for the copy operation.
- * @param src_size The size of @a src.
- * @param[out] dst The destination for the copy operation.
- *
- * @retval PROCESSOR_MASK_COPY_LOSSLESS It is guaranteed that the copy
- * operation is lossless.
- * @retval PROCESSOR_MASK_COPY_PARTIAL_LOSS Partial loss happened due
- * to the sizes of @a src and @a dst.
- * @retval PROCESSOR_MASK_COPY_COMPLETE_LOSS Complete loss happened due
- * to the sizes of @a src and @a dst.
- * @retval PROCESSOR_MASK_COPY_INVALID_SIZE One of the arguments sizes
- * is invalid (bigger than the size of a long).
- */
-RTEMS_INLINE_ROUTINE Processor_mask_Copy_status _Processor_mask_From_cpu_set_t(
- Processor_mask *dst,
- size_t src_size,
- const cpu_set_t *src
-)
-{
- return _Processor_mask_Copy(
- &dst->__bits[ 0 ],
- sizeof( *dst ),
- &src->__bits[ 0 ],
- src_size
- );
-}
-
-extern const Processor_mask _Processor_mask_The_one_and_only;
-
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/include/rtems/score/processormaskimpl.h b/cpukit/include/rtems/score/processormaskimpl.h
new file mode 100644
index 0000000000..bc997edfd4
--- /dev/null
+++ b/cpukit/include/rtems/score/processormaskimpl.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreProcessorMask
+ *
+ * @brief This header file provides the interfaces of the
+ * @ref RTEMSScoreProcessorMask.
+ */
+
+/*
+ * Copyright (C) 2016, 2017 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_PROCESSORMASKIMPL_H
+#define _RTEMS_SCORE_PROCESSORMASKIMPL_H
+
+#include <rtems/score/processormask.h>
+
+#include <sys/cpuset.h>
+
+#include <strings.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup RTEMSScoreProcessorMask
+ *
+ * @{
+ */
+
+/**
+ * @brief Sets the bits of the mask to zero, also considers CPU_MAXIMUM_PROCESSORS.
+ *
+ * @param[out] mask The mask to set to zero.
+ */
+static inline void _Processor_mask_Zero( Processor_mask *mask )
+{
+ __BIT_ZERO( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+/**
+ * @brief Checks if the mask is zero, also considers CPU_MAXIMUM_PROCESSORS.
+ *
+ * @param mask The mask to check whether it is zero.
+ *
+ * @retval true The mask is zero.
+ * @retval false The mask is not zero.
+ */
+static inline bool _Processor_mask_Is_zero( const Processor_mask *mask )
+{
+ return __BIT_EMPTY( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+/**
+ * @brief Fills the mask, also considers CPU_MAXIMUM_PROCESSORS.
+ *
+ * @param[out] mask The mask to fill
+ */
+static inline void _Processor_mask_Fill( Processor_mask *mask )
+{
+ __BIT_FILL( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+/**
+ * @brief Copies the mask to another mask, also considers CPU_MAXIMUM_PROCESSORS.
+ *
+ * @param[out] dst The mask to copy @a src to.
+ * @param src The mask to copy to @a dst.
+ */
+static inline void _Processor_mask_Assign(
+ Processor_mask *dst, const Processor_mask *src
+)
+{
+ __BIT_COPY( CPU_MAXIMUM_PROCESSORS, src, dst );
+}
+
+/**
+ * @brief Sets the specified index bit of the mask.
+ *
+ * @param[out] mask The mask to set the bit of.
+ * @param index The index of the bit that shall be set.
+ */
+static inline void _Processor_mask_Set(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ __BIT_SET( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+/**
+ * @brief Clears the specified index bit of the mask.
+ *
+ * @param[out] mask The mask to clear the bit of.
+ * @param index The index of the bit that shall be cleared.
+ */
+static inline void _Processor_mask_Clear(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ __BIT_CLR( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+/**
+ * @brief Checks if the specified index bit of the mask is set.
+ *
+ * @param mask The mask to check if the specified bit is set.
+ * @param index The index of the bit that is checked.
+ *
+ * @retval true The specified index bit is set.
+ * @retval false The specified index bit is not set.
+ */
+static inline bool _Processor_mask_Is_set(
+ const Processor_mask *mask,
+ uint32_t index
+)
+{
+ return __BIT_ISSET( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+/**
+ * @brief Checks if the processor sets a and b are equal.
+ *
+ * @param a The first processor set.
+ * @param b The second processor set.
+ *
+ * @retval true The processor sets a and b are equal.
+ * @retval false The processor sets a and b are not equal.
+ */
+static inline bool _Processor_mask_Is_equal(
+ const Processor_mask *a,
+ const Processor_mask *b
+)
+{
+ return !__BIT_CMP( CPU_MAXIMUM_PROCESSORS, a, b );
+}
+
+/**
+ * @brief Checks if the intersection of the processor sets a and b is
+ * non-empty.
+ *
+ * @param a The first processor set.
+ * @param b The second processor set.
+ *
+ * @retval true The intersection of the processor sets a and b is non-empty.
+ * @retval false The intersection of the processor sets a and b is empty.
+ */
+static inline bool _Processor_mask_Has_overlap(
+ const Processor_mask *a,
+ const Processor_mask *b
+)
+{
+ return __BIT_OVERLAP( CPU_MAXIMUM_PROCESSORS, a, b );
+}
+
+/**
+ * @brief Checks if the processor set small is a subset of processor set
+ * big.
+ *
+ * @param big The bigger processor set.
+ * @param small The smaller processor set.
+ *
+ * @retval true @a small is a subset of @a big.
+ * @retval false @a small is not a subset of @a big.
+ */
+static inline bool _Processor_mask_Is_subset(
+ const Processor_mask *big,
+ const Processor_mask *small
+)
+{
+ return __BIT_SUBSET( CPU_MAXIMUM_PROCESSORS, big, small );
+}
+
+/**
+ * @brief Performs a bitwise a = b & c.
+ *
+ * @param[out] a The processor mask that is set by this operation.
+ * @param b The first parameter of the AND-operation.
+ * @param c The second parameter of the AND-operation.
+ */
+static inline void _Processor_mask_And(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ __BIT_AND2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Performs a bitwise a = b | c.
+ *
+ * @param[out] a The processor mask that is set by this operation.
+ * @param b The first parameter of the OR-operation.
+ * @param c The second parameter of the OR-operation.
+ */
+static inline void _Processor_mask_Or(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ __BIT_OR2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Performs a bitwise a = b ^ c.
+ *
+ * @param[out] a The processor mask that is set by this operation.
+ * @param b The first parameter of the XOR-operation.
+ * @param c The second parameter of the XOR-operation.
+ */
+static inline void _Processor_mask_Xor(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ __BIT_XOR2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Gets the number of set bits in the processor mask.
+ *
+ * @param a The processor mask of which the set bits are counted.
+ *
+ * @return The number of set bits in @a a.
+ */
+static inline uint32_t _Processor_mask_Count( const Processor_mask *a )
+{
+ return (uint32_t) __BIT_COUNT( CPU_MAXIMUM_PROCESSORS, a );
+}
+
+/**
+ * @brief Finds the last set bit of the processor mask.
+ *
+ * @param a The processor mask to find the last set bit of.
+ *
+ * @return The last set bit of @a a.
+ */
+static inline uint32_t _Processor_mask_Find_last_set( const Processor_mask *a )
+{
+ return (uint32_t) __BIT_FLS( CPU_MAXIMUM_PROCESSORS, a );
+}
+
+/**
+ * @brief Returns the subset of 32 processors containing the specified index as
+ * an unsigned 32-bit integer.
+ *
+ * @param mask The processor mask.
+ * @param index The specified index.
+ *
+ * @return The subset containing the specified index as an unsigned 32-bit integer.
+ */
+static inline uint32_t _Processor_mask_To_uint32_t(
+ const Processor_mask *mask,
+ uint32_t index
+)
+{
+ long bits = mask->__bits[ index / _BITSET_BITS ];
+
+ return (uint32_t) ( bits >> ( 32 * ( ( index % _BITSET_BITS ) / 32 ) ) );
+}
+
+/**
+ * @brief Creates a processor set from an unsigned 32-bit integer relative to
+ * the specified index.
+ *
+ * @param[out] mask The mask that is created.
+ * @param bits The bits for creating the mask.
+ * @param index The index to which the mask is relative.
+ */
+static inline void _Processor_mask_From_uint32_t(
+ Processor_mask *mask,
+ uint32_t bits,
+ uint32_t index
+)
+{
+ _Processor_mask_Zero( mask );
+ mask->__bits[ __bitset_words( index ) ] = ((long) bits) << (32 * (index % _BITSET_BITS) / 32);
+}
+
+/**
+ * @brief Creates a processor set from the specified index.
+ *
+ * @param[out] mask The mask that is created.
+ * @param index The specified index.
+ */
+static inline void _Processor_mask_From_index(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ __BIT_SETOF( CPU_MAXIMUM_PROCESSORS, (int) index, mask );
+}
+
+typedef enum {
+ PROCESSOR_MASK_COPY_LOSSLESS,
+ PROCESSOR_MASK_COPY_PARTIAL_LOSS,
+ PROCESSOR_MASK_COPY_COMPLETE_LOSS,
+ PROCESSOR_MASK_COPY_INVALID_SIZE
+} Processor_mask_Copy_status;
+
+/**
+ * @brief Checks if the copy status guarantees at most partial loss.
+ *
+ * @param status The copy status to check.
+ *
+ * @retval true At most partial loss can be guaranteed.
+ * @retval false The status indicates more than partial loss.
+ */
+static inline bool _Processor_mask_Is_at_most_partial_loss(
+ Processor_mask_Copy_status status
+)
+{
+ return (unsigned int) status <= PROCESSOR_MASK_COPY_PARTIAL_LOSS;
+}
+
+/**
+ * @brief Copies one mask to another.
+ *
+ * @param[out] dst The destination of the copy operation.
+ * @param dst_size The size of @a dst.
+ * @param src The source of the copy operation.
+ * @param src_size The size of @a src.
+ *
+ * @retval PROCESSOR_MASK_COPY_LOSSLESS It is guaranteed that the copy
+ * operation is lossless.
+ * @retval PROCESSOR_MASK_COPY_PARTIAL_LOSS Partial loss happened due
+ * to the sizes of @a src and @a dst.
+ * @retval PROCESSOR_MASK_COPY_COMPLETE_LOSS Complete loss happened due
+ * to the sizes of @a src and @a dst.
+ * @retval PROCESSOR_MASK_COPY_INVALID_SIZE One of the argument sizes
+ * is invalid (bigger than the size of a long).
+ */
+Processor_mask_Copy_status _Processor_mask_Copy(
+ long *dst,
+ size_t dst_size,
+ const long *src,
+ size_t src_size
+);
+
+/**
+ * @brief Copies one mask to another.
+ *
+ * @param src The source for the copy operation.
+ * @param dst_size The size of @a dst.
+ * @param[out] dst The destination for the copy operation.
+ *
+ * @retval PROCESSOR_MASK_COPY_LOSSLESS It is guaranteed that the copy
+ * operation is lossless.
+ * @retval PROCESSOR_MASK_COPY_PARTIAL_LOSS Partial loss happened due
+ * to the sizes of @a src and @a dst.
+ * @retval PROCESSOR_MASK_COPY_COMPLETE_LOSS Complete loss happened due
+ * to the sizes of @a src and @a dst.
+ * @retval PROCESSOR_MASK_COPY_INVALID_SIZE One of the argument sizes
+ * is invalid (bigger than the size of a long).
+ */
+static inline Processor_mask_Copy_status _Processor_mask_To_cpu_set_t(
+ const Processor_mask *src,
+ size_t dst_size,
+ cpu_set_t *dst
+)
+{
+ return _Processor_mask_Copy(
+ &dst->__bits[ 0 ],
+ dst_size,
+ &src->__bits[ 0 ],
+ sizeof( *src )
+ );
+}
+
+/**
+ * @brief Copies one mask to another.
+ *
+ * @param src The source for the copy operation.
+ * @param src_size The size of @a src.
+ * @param[out] dst The destination for the copy operation.
+ *
+ * @retval PROCESSOR_MASK_COPY_LOSSLESS It is guaranteed that the copy
+ * operation is lossless.
+ * @retval PROCESSOR_MASK_COPY_PARTIAL_LOSS Partial loss happened due
+ * to the sizes of @a src and @a dst.
+ * @retval PROCESSOR_MASK_COPY_COMPLETE_LOSS Complete loss happened due
+ * to the sizes of @a src and @a dst.
+ * @retval PROCESSOR_MASK_COPY_INVALID_SIZE One of the argument sizes
+ * is invalid (bigger than the size of a long).
+ */
+static inline Processor_mask_Copy_status _Processor_mask_From_cpu_set_t(
+ Processor_mask *dst,
+ size_t src_size,
+ const cpu_set_t *src
+)
+{
+ return _Processor_mask_Copy(
+ &dst->__bits[ 0 ],
+ sizeof( *dst ),
+ &src->__bits[ 0 ],
+ src_size
+ );
+}
+
+extern const Processor_mask _Processor_mask_The_one_and_only;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_PROCESSORMASKIMPL_H */
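The inline routines moved into processormaskimpl.h keep their semantics; only the header changed. A minimal usage sketch of the mask API follows, assuming a caller that already holds two masks (the worker_cpu_count name and its parameters are illustrative and not part of the patch):

#include <rtems/score/processormaskimpl.h>
#include <stdint.h>

/* Illustrative only: count the processors present in both masks. */
static uint32_t worker_cpu_count(
  const Processor_mask *online,
  const Processor_mask *allowed
)
{
  Processor_mask usable;

  /* usable = online & allowed */
  _Processor_mask_And( &usable, online, allowed );

  if ( _Processor_mask_Is_zero( &usable ) ) {
    return 0;
  }

  return _Processor_mask_Count( &usable );
}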
diff --git a/cpukit/include/rtems/score/profiling.h b/cpukit/include/rtems/score/profiling.h
index 90e470a775..af26970dcd 100644
--- a/cpukit/include/rtems/score/profiling.h
+++ b/cpukit/include/rtems/score/profiling.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2014 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -129,10 +129,7 @@ static inline void _Profiling_Thread_dispatch_enable(
if ( new_thread_dispatch_disable_level == 0 ) {
Per_CPU_Stats *stats = &cpu->Stats;
CPU_Counter_ticks now = _CPU_Counter_read();
- CPU_Counter_ticks delta = _CPU_Counter_difference(
- now,
- stats->thread_dispatch_disabled_instant
- );
+ CPU_Counter_ticks delta = now - stats->thread_dispatch_disabled_instant;
stats->total_thread_dispatch_disabled_time += delta;
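Dropping _CPU_Counter_difference() in favour of plain subtraction relies on CPU_Counter_ticks being an unsigned integer type, so the delta is computed modulo 2^N and stays correct across a single counter wrap. A small self-contained sketch of that property, with plain uint32_t as a stand-in for the tick type:

#include <assert.h>
#include <stdint.h>

int main( void )
{
  /* Illustrative only: the counter wrapped between the two samples. */
  uint32_t before = UINT32_MAX - 5;
  uint32_t after  = 10;

  /* Unsigned subtraction wraps modulo 2^32 and still yields the elapsed
     ticks: 6 increments reach zero, 10 more reach the second sample. */
  assert( (uint32_t) ( after - before ) == 16 );

  return 0;
}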
diff --git a/cpukit/include/rtems/score/protectedheap.h b/cpukit/include/rtems/score/protectedheap.h
index d165c4ddec..287108568a 100644
--- a/cpukit/include/rtems/score/protectedheap.h
+++ b/cpukit/include/rtems/score/protectedheap.h
@@ -65,7 +65,7 @@ extern "C" {
* @param area_size The size of the heap area.
* @param page_size The page size for the heap.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Protected_heap_Initialize(
+static inline uintptr_t _Protected_heap_Initialize(
Heap_Control *heap,
void *area_begin,
uintptr_t area_size,
@@ -131,7 +131,7 @@ void *_Protected_heap_Allocate_aligned_with_boundary(
* @retval pointer The starting address of the allocated memory area.
* @retval NULL No memory is available of the parameters are inconsistent.
*/
-RTEMS_INLINE_ROUTINE void *_Protected_heap_Allocate_aligned(
+static inline void *_Protected_heap_Allocate_aligned(
Heap_Control *heap,
uintptr_t size,
uintptr_t alignment
@@ -154,7 +154,7 @@ RTEMS_INLINE_ROUTINE void *_Protected_heap_Allocate_aligned(
* @retval pointer The starting address of the allocated memory area.
* @retval NULL No memory is available of the parameters are inconsistent.
*/
-RTEMS_INLINE_ROUTINE void *_Protected_heap_Allocate(
+static inline void *_Protected_heap_Allocate(
Heap_Control *heap,
uintptr_t size
)
@@ -163,59 +163,6 @@ RTEMS_INLINE_ROUTINE void *_Protected_heap_Allocate(
}
/**
- * @brief Returns the size of the allocatable memory area.
- *
- * The size value may be greater than the initially requested size in
- * _Heap_Allocate_aligned_with_boundary().
- *
- * Inappropriate values for @a addr will not corrupt the heap, but may yield
- * invalid size values.
- *
- * This method first locks the allocator and after the operation, unlocks it again.
- *
- * @param heap The heap to operate upon.
- * @param addr The starting address of the allocatable memory area.
- * @param[out] size Stores the size of the allocatable memory area after the method call.
- *
- * @retval true The operation was successful.
- * @retval false The operation was not successful.
- */
-bool _Protected_heap_Get_block_size(
- Heap_Control *heap,
- void *addr,
- uintptr_t *size
-);
-
-/**
- * @brief Resizes the block of the allocated memory area.
- *
- * Inappropriate values for @a addr may corrupt the heap.
- *
- * This method first locks the allocator and after the resize, unlocks it again.
- *
- * @param[in, out] heap The heap to operate upon.
- * @param addr The starting address of the allocated memory area to be resized.
- * @param size The least possible size for the new memory area. Resize may be
- * impossible and depends on the current heap usage.
- * @param[out] old_size Stores the size available for allocation in the current
- * block before the resize after the method call.
- * @param[out] new_size Stores the size available for allocation in the resized
- * block after the method call. In the case of an unsuccessful resize,
- * zero is returned in this parameter
- *
- * @retval HEAP_RESIZE_SUCCESSFUL The resize was successful.
- * @retval HEAP_RESIZE_UNSATISFIED The least possible size @a size was too big.
- * Resize not possible.
- * @retval HEAP_RESIZE_FATAL_ERROR The block starting at @a addr is not part of
- * the heap.
- */
-bool _Protected_heap_Resize_block(
- Heap_Control *heap,
- void *addr,
- uintptr_t size
-);
-
-/**
* @brief Frees the allocated memory area.
*
* Inappropriate values for @a addr may corrupt the heap. This method first locks
@@ -245,22 +192,6 @@ bool _Protected_heap_Free( Heap_Control *heap, void *addr );
bool _Protected_heap_Walk( Heap_Control *heap, int source, bool dump );
/**
- * @brief Iterates over all blocks of the heap.
- *
- * This method first locks the allocator and after the operation, unlocks it again.
- *
- * @param[in, out] heap The heap to iterate over.
- * @param visitor This will be called for each heap block with
- * the argument @a visitor_arg.
- * @param[in, out] visitor_arg The argument for all calls of @a visitor.
- */
-void _Protected_heap_Iterate(
- Heap_Control *heap,
- Heap_Block_visitor visitor,
- void *visitor_arg
-);
-
-/**
* @brief Returns information about used and free blocks for the heap.
*
* This method first locks the allocator and after the operation, unlocks it again.
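The surviving allocation wrappers still lock the allocator around each call. A brief usage sketch, assuming heap points at an initialized Heap_Control (the function name and buffer sizes are arbitrary):

#include <rtems/score/protectedheap.h>
#include <stdbool.h>

/* Illustrative only: allocate and release a 64-byte aligned block. */
static bool use_protected_heap( Heap_Control *heap )
{
  void *p = _Protected_heap_Allocate_aligned( heap, 128, 64 );

  if ( p == NULL ) {
    return false;
  }

  /* ... use the at least 128-byte, 64-byte aligned block ... */

  return _Protected_heap_Free( heap, p );
}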
diff --git a/cpukit/include/rtems/score/rbtree.h b/cpukit/include/rtems/score/rbtree.h
index aa8d1a32a6..253ef296bc 100644
--- a/cpukit/include/rtems/score/rbtree.h
+++ b/cpukit/include/rtems/score/rbtree.h
@@ -103,7 +103,7 @@ typedef RB_HEAD(RBTree_Control, RBTree_Node) RBTree_Control;
*
* @see _RBTree_Is_node_off_tree().
*/
-RTEMS_INLINE_ROUTINE void _RBTree_Set_off_tree( RBTree_Node *the_node )
+static inline void _RBTree_Set_off_tree( RBTree_Node *the_node )
{
RB_COLOR( the_node, Node ) = -1;
}
@@ -118,7 +118,7 @@ RTEMS_INLINE_ROUTINE void _RBTree_Set_off_tree( RBTree_Node *the_node )
*
* @see _RBTree_Set_off_tree().
*/
-RTEMS_INLINE_ROUTINE bool _RBTree_Is_node_off_tree(
+static inline bool _RBTree_Is_node_off_tree(
const RBTree_Node *the_node
)
{
@@ -144,7 +144,7 @@ void _RBTree_Insert_color(
*
* @param[out] the_node The red-black tree node to initialize.
*/
-RTEMS_INLINE_ROUTINE void _RBTree_Initialize_node( RBTree_Node *the_node )
+static inline void _RBTree_Initialize_node( RBTree_Node *the_node )
{
#if defined(RTEMS_DEBUG)
_RBTree_Set_off_tree( the_node );
@@ -160,7 +160,7 @@ RTEMS_INLINE_ROUTINE void _RBTree_Initialize_node( RBTree_Node *the_node )
* @param[out] parent The parent node.
* @param[out] link The child node link of the parent node.
*/
-RTEMS_INLINE_ROUTINE void _RBTree_Add_child(
+static inline void _RBTree_Add_child(
RBTree_Node *child,
RBTree_Node *parent,
RBTree_Node **link
@@ -221,7 +221,7 @@ RTEMS_INLINE_ROUTINE void _RBTree_Add_child(
* }
* @endcode
*/
-RTEMS_INLINE_ROUTINE void _RBTree_Insert_with_parent(
+static inline void _RBTree_Insert_with_parent(
RBTree_Control *the_rbtree,
RBTree_Node *the_node,
RBTree_Node *parent,
@@ -261,7 +261,7 @@ void _RBTree_Extract(
*
* @see _RBTree_Is_root().
*/
-RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Root(
+static inline RBTree_Node *_RBTree_Root(
const RBTree_Control *the_rbtree
)
{
@@ -276,7 +276,7 @@ RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Root(
* @retval pointer Pointer to the root node.
* @retval NULL The tree is empty.
*/
-RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Root_reference(
+static inline RBTree_Node **_RBTree_Root_reference(
RBTree_Control *the_rbtree
)
{
@@ -291,7 +291,7 @@ RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Root_reference(
* @retval pointer Pointer to the root node.
* @retval NULL The tree is empty.
*/
-RTEMS_INLINE_ROUTINE RBTree_Node * const *_RBTree_Root_const_reference(
+static inline RBTree_Node * const *_RBTree_Root_const_reference(
const RBTree_Control *the_rbtree
)
{
@@ -310,7 +310,7 @@ RTEMS_INLINE_ROUTINE RBTree_Node * const *_RBTree_Root_const_reference(
* @retval parent The parent of this node.
* @retval undefined The node is the root node or not part of a tree.
*/
-RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Parent(
+static inline RBTree_Node *_RBTree_Parent(
const RBTree_Node *the_node
)
{
@@ -326,7 +326,7 @@ RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Parent(
*
* @return This method returns the left node on the rbtree.
*/
-RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Left(
+static inline RBTree_Node *_RBTree_Left(
const RBTree_Node *the_node
)
{
@@ -341,7 +341,7 @@ RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Left(
*
* @return This method returns a reference to the left child pointer on the rbtree.
*/
-RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Left_reference(
+static inline RBTree_Node **_RBTree_Left_reference(
RBTree_Node *the_node
)
{
@@ -357,7 +357,7 @@ RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Left_reference(
*
* @return This method returns the right node on the rbtree.
*/
-RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Right(
+static inline RBTree_Node *_RBTree_Right(
const RBTree_Node *the_node
)
{
@@ -372,7 +372,7 @@ RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Right(
*
* @return This method returns a reference to the right child pointer on the rbtree.
*/
-RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Right_reference(
+static inline RBTree_Node **_RBTree_Right_reference(
RBTree_Node *the_node
)
{
@@ -390,7 +390,7 @@ RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Right_reference(
* @retval true There are no nodes on @a the_rbtree.
* @retval false There are nodes on @a the_rbtree.
*/
-RTEMS_INLINE_ROUTINE bool _RBTree_Is_empty(
+static inline bool _RBTree_Is_empty(
const RBTree_Control *the_rbtree
)
{
@@ -411,7 +411,7 @@ RTEMS_INLINE_ROUTINE bool _RBTree_Is_empty(
*
* @see _RBTree_Root().
*/
-RTEMS_INLINE_ROUTINE bool _RBTree_Is_root(
+static inline bool _RBTree_Is_root(
const RBTree_Node *the_node
)
{
@@ -425,7 +425,7 @@ RTEMS_INLINE_ROUTINE bool _RBTree_Is_root(
*
* @param[out] the_rbtree The rbtree to initialize.
*/
-RTEMS_INLINE_ROUTINE void _RBTree_Initialize_empty(
+static inline void _RBTree_Initialize_empty(
RBTree_Control *the_rbtree
)
{
@@ -439,7 +439,7 @@ RTEMS_INLINE_ROUTINE void _RBTree_Initialize_empty(
* @param[out] the_rbtree The red-black tree control.
* @param[out] the_node The one and only node.
*/
-RTEMS_INLINE_ROUTINE void _RBTree_Initialize_one(
+static inline void _RBTree_Initialize_one(
RBTree_Control *the_rbtree,
RBTree_Node *the_node
)
@@ -523,7 +523,7 @@ void _RBTree_Replace_node(
* @retval false The inserted node is not the new minimum node according to the
* specified less order function.
*/
-RTEMS_INLINE_ROUTINE bool _RBTree_Insert_inline(
+static inline bool _RBTree_Insert_inline(
RBTree_Control *the_rbtree,
RBTree_Node *the_node,
const void *key,
@@ -572,7 +572,7 @@ RTEMS_INLINE_ROUTINE bool _RBTree_Insert_inline(
* @retval object An object with the specified key.
* @retval NULL No object with the specified key exists in the red-black tree.
*/
-RTEMS_INLINE_ROUTINE void *_RBTree_Find_inline(
+static inline void *_RBTree_Find_inline(
const RBTree_Control *the_rbtree,
const void *key,
bool ( *equal )( const void *, const RBTree_Node * ),
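The inline insert helper takes the key as a type-erased pointer together with a less-order function, which is why comparators such as _Priority_Less and _Scheduler_EDF_Less in this patch now cast the key back explicitly. A hedged sketch of the pattern (the Item type and the item_* names are made up for illustration):

#include <rtems/score/rbtree.h>
#include <stdint.h>

typedef struct {
  RBTree_Node Node;
  uint32_t    key;
} Item;

/* Illustrative only: less-order function for _RBTree_Insert_inline(). */
static bool item_less( const void *left, const RBTree_Node *right )
{
  const uint32_t *the_left;
  const Item     *the_right;

  the_left  = (const uint32_t *) left;
  the_right = RTEMS_CONTAINER_OF( right, Item, Node );

  return *the_left < the_right->key;
}

/* Illustrative only: insert an item keyed by item->key. */
static bool item_insert( RBTree_Control *tree, Item *item )
{
  _RBTree_Initialize_node( &item->Node );
  return _RBTree_Insert_inline( tree, &item->Node, &item->key, item_less );
}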
diff --git a/cpukit/include/rtems/score/schedulercbsimpl.h b/cpukit/include/rtems/score/schedulercbsimpl.h
index c51400b135..95e19f149d 100644
--- a/cpukit/include/rtems/score/schedulercbsimpl.h
+++ b/cpukit/include/rtems/score/schedulercbsimpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2014 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,7 +57,7 @@ extern "C" {
*
* @return Pointer to the scheduler node of @a the_thread.
*/
-RTEMS_INLINE_ROUTINE Scheduler_CBS_Node *_Scheduler_CBS_Thread_get_node(
+static inline Scheduler_CBS_Node *_Scheduler_CBS_Thread_get_node(
Thread_Control *the_thread
)
{
@@ -71,7 +71,7 @@ RTEMS_INLINE_ROUTINE Scheduler_CBS_Node *_Scheduler_CBS_Thread_get_node(
*
* @return CBS Node pointer to @a node.
*/
-RTEMS_INLINE_ROUTINE Scheduler_CBS_Node *_Scheduler_CBS_Node_downcast(
+static inline Scheduler_CBS_Node *_Scheduler_CBS_Node_downcast(
Scheduler_Node *node
)
{
diff --git a/cpukit/include/rtems/score/scheduleredfimpl.h b/cpukit/include/rtems/score/scheduleredfimpl.h
index e0a07a8915..8aa0388537 100644
--- a/cpukit/include/rtems/score/scheduleredfimpl.h
+++ b/cpukit/include/rtems/score/scheduleredfimpl.h
@@ -39,7 +39,7 @@
#define _RTEMS_SCORE_SCHEDULEREDFIMPL_H
#include <rtems/score/scheduleredf.h>
-#include <rtems/score/schedulerimpl.h>
+#include <rtems/score/scheduleruniimpl.h>
#ifdef __cplusplus
extern "C" {
@@ -67,7 +67,7 @@ extern "C" {
*
* @return The scheduler context of @a scheduler.
*/
-RTEMS_INLINE_ROUTINE Scheduler_EDF_Context *
+static inline Scheduler_EDF_Context *
_Scheduler_EDF_Get_context( const Scheduler_Control *scheduler )
{
return (Scheduler_EDF_Context *) _Scheduler_Get_context( scheduler );
@@ -80,7 +80,7 @@ RTEMS_INLINE_ROUTINE Scheduler_EDF_Context *
*
* @return The EDF scheduler node of @a the_thread.
*/
-RTEMS_INLINE_ROUTINE Scheduler_EDF_Node *_Scheduler_EDF_Thread_get_node(
+static inline Scheduler_EDF_Node *_Scheduler_EDF_Thread_get_node(
Thread_Control *the_thread
)
{
@@ -94,7 +94,7 @@ RTEMS_INLINE_ROUTINE Scheduler_EDF_Node *_Scheduler_EDF_Thread_get_node(
*
* @return The corresponding scheduler EDF node.
*/
-RTEMS_INLINE_ROUTINE Scheduler_EDF_Node * _Scheduler_EDF_Node_downcast(
+static inline Scheduler_EDF_Node * _Scheduler_EDF_Node_downcast(
Scheduler_Node *node
)
{
@@ -110,7 +110,7 @@ RTEMS_INLINE_ROUTINE Scheduler_EDF_Node * _Scheduler_EDF_Node_downcast(
* @retval true @a left is less than the priority of @a right.
* @retval false @a left is greater or equal than the priority of @a right.
*/
-RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less(
+static inline bool _Scheduler_EDF_Less(
const void *left,
const RBTree_Node *right
)
@@ -120,7 +120,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less(
Priority_Control prio_left;
Priority_Control prio_right;
- the_left = left;
+ the_left = (const Priority_Control *) left;
the_right = RTEMS_CONTAINER_OF( right, Scheduler_EDF_Node, Node );
prio_left = *the_left;
@@ -138,7 +138,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less(
* @retval true @a left is less or equal than the priority of @a right.
* @retval false @a left is greater than the priority of @a right.
*/
-RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Priority_less_equal(
+static inline bool _Scheduler_EDF_Priority_less_equal(
const void *left,
const RBTree_Node *right
)
@@ -148,7 +148,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Priority_less_equal(
Priority_Control prio_left;
Priority_Control prio_right;
- the_left = left;
+ the_left = (const Priority_Control *) left;
the_right = RTEMS_CONTAINER_OF( right, Scheduler_EDF_Node, Node );
prio_left = *the_left;
@@ -165,7 +165,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Priority_less_equal(
* @param node The node to be inserted.
* @param insert_priority The priority with which the node will be inserted.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue(
+static inline void _Scheduler_EDF_Enqueue(
Scheduler_EDF_Context *context,
Scheduler_EDF_Node *node,
Priority_Control insert_priority
@@ -185,7 +185,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue(
* @param[in, out] context The context to extract the node from.
* @param[in, out] node The node to extract.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Extract(
+static inline void _Scheduler_EDF_Extract(
Scheduler_EDF_Context *context,
Scheduler_EDF_Node *node
)
@@ -200,7 +200,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Extract(
* @param the_thread The thread is not used in this method.
* @param[in, out] node The node to be extracted.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Extract_body(
+static inline void _Scheduler_EDF_Extract_body(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node
@@ -216,30 +216,23 @@ RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Extract_body(
}
/**
- * @brief Schedules the next ready thread as the heir.
+ * @brief Gets the highest priority ready thread of the scheduler.
*
- * @param scheduler The scheduler instance to schedule the minimum of the context of.
- * @param the_thread This parameter is not used.
- * @param force_dispatch Indicates whether the current heir is blocked even if it is
- * not set as preemptible.
+ * @param scheduler is the scheduler.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Schedule_body(
- const Scheduler_Control *scheduler,
- Thread_Control *the_thread,
- bool force_dispatch
+static inline Thread_Control *_Scheduler_EDF_Get_highest_ready(
+ const Scheduler_Control *scheduler
)
{
Scheduler_EDF_Context *context;
RBTree_Node *first;
Scheduler_EDF_Node *node;
- (void) the_thread;
-
context = _Scheduler_EDF_Get_context( scheduler );
first = _RBTree_Minimum( &context->Ready );
node = RTEMS_CONTAINER_OF( first, Scheduler_EDF_Node, Node );
- _Scheduler_Update_heir( node->Base.owner, force_dispatch );
+ return node->Base.owner;
}
/** @} */
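With the schedule body gone, the EDF uniprocessor scheduler now only reports its highest priority ready thread and leaves the heir update to the common uniprocessor implementation in scheduleruniimpl.h. A rough sketch of how such a hook could be consumed, modelled on the _Scheduler_Update_heir() logic removed from schedulerimpl.h later in this patch; the helper name is an assumption and the CPU time accounting is omitted:

#include <rtems/score/scheduleredfimpl.h>

/* Illustrative only: a generic uniprocessor schedule step built on a
   get-highest-ready hook.  The name and the omitted CPU time accounting
   are assumptions, not the contents of scheduleruniimpl.h. */
static void uni_schedule_sketch(
  const Scheduler_Control *scheduler,
  bool                     force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;
  Thread_Control *highest_ready;

  highest_ready = _Scheduler_EDF_Get_highest_ready( scheduler );

  if ( heir != highest_ready && ( heir->is_preemptible || force_dispatch ) ) {
    _Thread_Heir = highest_ready;
    _Thread_Dispatch_necessary = true;
  }
}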
diff --git a/cpukit/include/rtems/score/scheduleredfsmp.h b/cpukit/include/rtems/score/scheduleredfsmp.h
index ef1a116eb1..f915154241 100644
--- a/cpukit/include/rtems/score/scheduleredfsmp.h
+++ b/cpukit/include/rtems/score/scheduleredfsmp.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2017, 2018 embedded brains GmbH.
+ * Copyright (C) 2017, 2018 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/schedulerimpl.h b/cpukit/include/rtems/score/schedulerimpl.h
index 806cb4e145..2ca3e6e8b7 100644
--- a/cpukit/include/rtems/score/schedulerimpl.h
+++ b/cpukit/include/rtems/score/schedulerimpl.h
@@ -12,7 +12,7 @@
/*
* Copyright (C) 2010 Gedare Bloom.
* Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
- * Copyright (c) 2014, 2017 embedded brains GmbH
+ * Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -101,7 +101,7 @@ void _Scheduler_Handler_initialization( void );
*
* @return The context of @a scheduler.
*/
-RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
+static inline Scheduler_Context *_Scheduler_Get_context(
const Scheduler_Control *scheduler
)
{
@@ -115,7 +115,7 @@ RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
*
* @return The scheduler for the cpu.
*/
-RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
+static inline const Scheduler_Control *_Scheduler_Get_by_CPU(
const Per_CPU_Control *cpu
)
{
@@ -135,7 +135,7 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
* @param lock_context The lock context to use for
* _Scheduler_Release_critical().
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
+static inline void _Scheduler_Acquire_critical(
const Scheduler_Control *scheduler,
ISR_lock_Context *lock_context
)
@@ -159,7 +159,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
* @param lock_context The lock context used for
* _Scheduler_Acquire_critical().
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
+static inline void _Scheduler_Release_critical(
const Scheduler_Control *scheduler,
ISR_lock_Context *lock_context
)
@@ -185,7 +185,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
* @return True if the non-preempt mode for threads is supported by the
* scheduler, otherwise false.
*/
-RTEMS_INLINE_ROUTINE bool _Scheduler_Is_non_preempt_mode_supported(
+static inline bool _Scheduler_Is_non_preempt_mode_supported(
const Scheduler_Control *scheduler
)
{
@@ -216,7 +216,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Is_non_preempt_mode_supported(
*
* @param the_thread The thread which state changed previously.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
+static inline void _Scheduler_Schedule( Thread_Control *the_thread )
{
const Scheduler_Control *scheduler;
ISR_lock_Context lock_context;
@@ -237,7 +237,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
*
* @param the_thread The yielding thread.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
+static inline void _Scheduler_Yield( Thread_Control *the_thread )
{
const Scheduler_Control *scheduler;
ISR_lock_Context lock_context;
@@ -262,7 +262,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
*
* @param the_thread The thread.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
+static inline void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
Chain_Node *node;
@@ -324,7 +324,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
*
* @see _Scheduler_Node_get_priority().
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
+static inline void _Scheduler_Unblock( Thread_Control *the_thread )
{
Scheduler_Node *scheduler_node;
const Scheduler_Control *scheduler;
@@ -359,7 +359,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
*
* @see _Scheduler_Node_get_priority().
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
+static inline void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
Chain_Node *node;
@@ -413,7 +413,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread
*
* @return The corresponding thread priority of the scheduler domain is returned.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
+static inline Priority_Control _Scheduler_Map_priority(
const Scheduler_Control *scheduler,
Priority_Control priority
)
@@ -429,7 +429,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
*
* @return The corresponding thread priority of the user domain is returned.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
+static inline Priority_Control _Scheduler_Unmap_priority(
const Scheduler_Control *scheduler,
Priority_Control priority
)
@@ -450,7 +450,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
* @param the_thread The thread of the scheduler node to initialize.
* @param priority The thread priority.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
+static inline void _Scheduler_Node_initialize(
const Scheduler_Control *scheduler,
Scheduler_Node *node,
Thread_Control *the_thread,
@@ -474,7 +474,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
* @param scheduler The scheduler instance.
* @param[out] node The scheduler node to destroy.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
+static inline void _Scheduler_Node_destroy(
const Scheduler_Control *scheduler,
Scheduler_Node *node
)
@@ -491,7 +491,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
* @param queue_context The thread queue context to provide the set of
* threads for _Thread_Priority_update().
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
+static inline void _Scheduler_Release_job(
Thread_Control *the_thread,
Priority_Node *priority_node,
uint64_t deadline,
@@ -518,7 +518,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
* @param queue_context The thread queue context to provide the set of
* threads for _Thread_Priority_update().
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
+static inline void _Scheduler_Cancel_job(
Thread_Control *the_thread,
Priority_Node *priority_node,
Thread_queue_Context *queue_context
@@ -544,7 +544,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
*
* @see _Thread_Create_idle().
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
+static inline void _Scheduler_Start_idle(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Per_CPU_Control *cpu
@@ -563,7 +563,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
* @retval true The scheduler of the cpu is the given @a scheduler.
* @retval false The scheduler of the cpu is not the given @a scheduler.
*/
-RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
+static inline bool _Scheduler_Has_processor_ownership(
const Scheduler_Control *scheduler,
uint32_t cpu_index
)
@@ -591,7 +591,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
*
* @return The processors of the context of the given scheduler.
*/
-RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
+static inline const Processor_mask *_Scheduler_Get_processors(
const Scheduler_Control *scheduler
)
{
@@ -632,7 +632,7 @@ Status_Control _Scheduler_Get_affinity(
* @retval STATUS_INVALID_NUMBER The affinity is not a subset of the online
* processors.
*/
-RTEMS_INLINE_ROUTINE Status_Control _Scheduler_default_Set_affinity_body(
+static inline Status_Control _Scheduler_default_Set_affinity_body(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node,
@@ -669,47 +669,13 @@ Status_Control _Scheduler_Set_affinity(
);
/**
- * @brief Blocks the thread.
- *
- * @param scheduler The scheduler instance.
- * @param the_thread The thread to block.
- * @param node The corresponding scheduler node.
- * @param extract Method to extract the thread.
- * @param schedule Method for scheduling threads.
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
- const Scheduler_Control *scheduler,
- Thread_Control *the_thread,
- Scheduler_Node *node,
- void ( *extract )(
- const Scheduler_Control *,
- Thread_Control *,
- Scheduler_Node *
- ),
- void ( *schedule )(
- const Scheduler_Control *,
- Thread_Control *,
- bool
- )
-)
-{
- ( *extract )( scheduler, the_thread, node );
-
- /* TODO: flash critical section? */
-
- if ( _Thread_Is_heir( the_thread ) ) {
- ( *schedule )( scheduler, the_thread, true );
- }
-}
-
-/**
* @brief Gets the number of processors of the scheduler.
*
* @param scheduler The scheduler instance to get the number of processors of.
*
* @return The number of processors.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
+static inline uint32_t _Scheduler_Get_processor_count(
const Scheduler_Control *scheduler
)
{
@@ -731,7 +697,7 @@ RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
*
* @return The build id.
*/
-RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
+static inline Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
return _Objects_Build_id(
OBJECTS_FAKE_OBJECTS_API,
@@ -748,7 +714,7 @@ RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
*
* @return The scheduler index.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
+static inline uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
uint32_t minimum_id = _Scheduler_Build_id( 0 );
@@ -762,7 +728,7 @@ RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
*
* @return The scheduler to the object id.
*/
-RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
+static inline const Scheduler_Control *_Scheduler_Get_by_id(
Objects_Id id
)
{
@@ -784,7 +750,7 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
*
* @return The index of the given scheduler.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
+static inline uint32_t _Scheduler_Get_index(
const Scheduler_Control *scheduler
)
{
@@ -821,7 +787,7 @@ typedef void ( *Scheduler_Release_idle_node )(
* @param[out] the_thread The thread to change the state of.
* @param new_state The new state for @a the_thread.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
+static inline void _Scheduler_Thread_change_state(
Thread_Control *the_thread,
Thread_Scheduler_state new_state
)
@@ -844,7 +810,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
*
* @param arg is the handler argument.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
+static inline Thread_Control *_Scheduler_Use_idle_thread(
Scheduler_Node *node,
Scheduler_Get_idle_node get_idle_node,
void *arg
@@ -872,7 +838,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
*
* @param arg is the handler argument.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Release_idle_thread(
+static inline void _Scheduler_Release_idle_thread(
Scheduler_Node *node,
const Thread_Control *idle,
Scheduler_Release_idle_node release_idle_node,
@@ -904,7 +870,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Release_idle_thread(
*
* @return Returns the idle thread used by the scheduler node.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
+static inline Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
Scheduler_Node *node,
Scheduler_Release_idle_node release_idle_node,
void *arg
@@ -932,7 +898,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread_if_necessary
*
* @param arg is the handler argument.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
+static inline void _Scheduler_Discard_idle_thread(
Thread_Control *the_thread,
Scheduler_Node *node,
Scheduler_Release_idle_node release_idle_node,
@@ -952,37 +918,6 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
#endif
/**
- * @brief Updates the heir.
- *
- * @param[in, out] new_heir The new heir.
- * @param force_dispatch Indicates whether the dispatch happens also if the
- * currently running thread is set as not preemptible.
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
- Thread_Control *new_heir,
- bool force_dispatch
-)
-{
- Thread_Control *heir = _Thread_Heir;
-
- if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
-#if defined(RTEMS_SMP)
- /*
- * We need this state only for _Thread_Get_CPU_time_used_locked(). Cannot
- * use _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
- * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
- * schedulers.
- */
- heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
- new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
-#endif
- _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
- _Thread_Heir = new_heir;
- _Thread_Dispatch_necessary = true;
- }
-}
-
-/**
* @brief Sets a new scheduler.
*
* @param new_scheduler The new scheduler to set.
@@ -993,7 +928,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
* @retval STATUS_RESOURCE_IN_USE The thread's wait queue is not empty.
* @retval STATUS_UNSATISFIED The new scheduler has no processors.
*/
-RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
+static inline Status_Control _Scheduler_Set(
const Scheduler_Control *new_scheduler,
Thread_Control *the_thread,
Priority_Control priority
diff --git a/cpukit/include/rtems/score/schedulernode.h b/cpukit/include/rtems/score/schedulernode.h
index 0a2de3b6e3..65a33a1485 100644
--- a/cpukit/include/rtems/score/schedulernode.h
+++ b/cpukit/include/rtems/score/schedulernode.h
@@ -11,7 +11,7 @@
*/
/*
- * Copyright (c) 2014, 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2014, 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/schedulernodeimpl.h b/cpukit/include/rtems/score/schedulernodeimpl.h
index 5231c6aef2..db14184723 100644
--- a/cpukit/include/rtems/score/schedulernodeimpl.h
+++ b/cpukit/include/rtems/score/schedulernodeimpl.h
@@ -11,7 +11,7 @@
*/
/*
- * Copyright (c) 2014, 2017 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -100,7 +100,7 @@ extern "C" {
*
* @param priority is the initial priority of the node.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
+static inline void _Scheduler_Node_do_initialize(
const struct _Scheduler_Control *scheduler,
Scheduler_Node *node,
Thread_Control *the_thread,
@@ -132,7 +132,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
*
* @param[in, out] node is the node to destroy.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_destroy(
+static inline void _Scheduler_Node_do_destroy(
const struct _Scheduler_Control *scheduler,
Scheduler_Node *node
)
@@ -153,7 +153,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_destroy(
*
* @return The scheduler of the node.
*/
-RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Node_get_scheduler(
+static inline const Scheduler_Control *_Scheduler_Node_get_scheduler(
const Scheduler_Node *node
)
{
@@ -167,7 +167,7 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Node_get_scheduler(
*
* @return The owner of the node.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
+static inline Thread_Control *_Scheduler_Node_get_owner(
const Scheduler_Node *node
)
{
@@ -181,7 +181,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
*
* @return The priority of the node.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(
+static inline Priority_Control _Scheduler_Node_get_priority(
Scheduler_Node *node
)
{
@@ -214,7 +214,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(
* @param group_order is the priority group order, see #PRIORITY_GROUP_FIRST
* and #PRIORITY_GROUP_LAST.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_priority(
+static inline void _Scheduler_Node_set_priority(
Scheduler_Node *node,
Priority_Control new_priority,
Priority_Group_order group_order
@@ -243,7 +243,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_priority(
*
* @return The user of the node.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
+static inline Thread_Control *_Scheduler_Node_get_user(
const Scheduler_Node *node
)
{
@@ -256,7 +256,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
* @param[out] node The node to set the user of.
* @param user The new user for @a node.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
+static inline void _Scheduler_Node_set_user(
Scheduler_Node *node,
Thread_Control *user
)
@@ -271,7 +271,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
*
* @return The idle thread of @a node.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
+static inline Thread_Control *_Scheduler_Node_get_idle(
const Scheduler_Node *node
)
{
@@ -285,7 +285,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
*
* @param idle is the idle thread to use.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_idle_user(
+static inline void _Scheduler_Node_set_idle_user(
Scheduler_Node *node,
Thread_Control *idle
)
diff --git a/cpukit/include/rtems/score/schedulerpriority.h b/cpukit/include/rtems/score/schedulerpriority.h
index cf5d0762a9..86681cd201 100644
--- a/cpukit/include/rtems/score/schedulerpriority.h
+++ b/cpukit/include/rtems/score/schedulerpriority.h
@@ -3,10 +3,10 @@
/**
* @file
*
- * @ingroup RTEMSScoreSchedulerDPS
+ * @ingroup RTEMSScoreSchedulerPriority
*
* @brief This header file provides interfaces of the
- * @ref RTEMSScoreSchedulerDPS which are used by the implementation and the
+ * @ref RTEMSScoreSchedulerPriority which are used by the implementation and the
* @ref RTEMSImplApplConfig.
*/
@@ -48,7 +48,7 @@ extern "C" {
#endif
/**
- * @defgroup RTEMSScoreSchedulerDPS Deterministic Priority Scheduler
+ * @defgroup RTEMSScoreSchedulerPriority Deterministic Priority Scheduler
*
* @ingroup RTEMSScoreScheduler
*
@@ -94,7 +94,7 @@ typedef struct {
/**
* @brief One ready queue per priority level.
*/
- Chain_Control Ready[ 0 ];
+ Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
} Scheduler_priority_Context;
/**
diff --git a/cpukit/include/rtems/score/schedulerpriorityimpl.h b/cpukit/include/rtems/score/schedulerpriorityimpl.h
index d8d226d6f1..5e80918b20 100644
--- a/cpukit/include/rtems/score/schedulerpriorityimpl.h
+++ b/cpukit/include/rtems/score/schedulerpriorityimpl.h
@@ -3,10 +3,10 @@
/**
* @file
*
- * @ingroup RTEMSScoreSchedulerDPS
+ * @ingroup RTEMSScoreSchedulerPriority
*
* @brief This header file provides interfaces of the
- * @ref RTEMSScoreSchedulerDPS which are only used by the implementation.
+ * @ref RTEMSScoreSchedulerPriority which are only used by the implementation.
*/
/*
@@ -41,7 +41,7 @@
#include <rtems/score/schedulerpriority.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/prioritybitmapimpl.h>
-#include <rtems/score/schedulerimpl.h>
+#include <rtems/score/scheduleruniimpl.h>
#include <rtems/score/thread.h>
#ifdef __cplusplus
@@ -49,7 +49,7 @@ extern "C" {
#endif
/**
- * @addtogroup RTEMSScoreSchedulerDPS
+ * @addtogroup RTEMSScoreSchedulerPriority
*
* @{
*/
@@ -61,7 +61,7 @@ extern "C" {
*
* @return The context of the scheduler.
*/
-RTEMS_INLINE_ROUTINE Scheduler_priority_Context *
+static inline Scheduler_priority_Context *
_Scheduler_priority_Get_context( const Scheduler_Control *scheduler )
{
return (Scheduler_priority_Context *) _Scheduler_Get_context( scheduler );
@@ -74,7 +74,7 @@ RTEMS_INLINE_ROUTINE Scheduler_priority_Context *
*
* @return The scheduler node of @a the_thread.
*/
-RTEMS_INLINE_ROUTINE Scheduler_priority_Node *_Scheduler_priority_Thread_get_node(
+static inline Scheduler_priority_Node *_Scheduler_priority_Thread_get_node(
Thread_Control *the_thread
)
{
@@ -88,7 +88,7 @@ RTEMS_INLINE_ROUTINE Scheduler_priority_Node *_Scheduler_priority_Thread_get_nod
*
* @return The priority node.
*/
-RTEMS_INLINE_ROUTINE Scheduler_priority_Node *_Scheduler_priority_Node_downcast(
+static inline Scheduler_priority_Node *_Scheduler_priority_Node_downcast(
Scheduler_Node *node
)
{
@@ -103,7 +103,7 @@ RTEMS_INLINE_ROUTINE Scheduler_priority_Node *_Scheduler_priority_Node_downcast(
* @param[out] ready_queues The ready queue to initialize.
* @param maximum_priority The maximum priority in the ready queue.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_initialize(
+static inline void _Scheduler_priority_Ready_queue_initialize(
Chain_Control *ready_queues,
Priority_Control maximum_priority
)
@@ -124,7 +124,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_initialize(
* @param[in, out] ready_queue The ready queue.
* @param[out] bit_map The priority bit map of the scheduler instance.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_enqueue(
+static inline void _Scheduler_priority_Ready_queue_enqueue(
Chain_Node *node,
Scheduler_priority_Ready_queue *ready_queue,
Priority_bit_map_Control *bit_map
@@ -145,7 +145,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_enqueue(
* @param[in, out] ready_queue The ready queue.
* @param[out] bit_map The priority bit map of the scheduler instance.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_enqueue_first(
+static inline void _Scheduler_priority_Ready_queue_enqueue_first(
Chain_Node *node,
Scheduler_priority_Ready_queue *ready_queue,
Priority_bit_map_Control *bit_map
@@ -164,7 +164,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_enqueue_first(
* @param[in, out] ready_queue The ready queue.
* @param[out] bit_map The priority bit map of the scheduler instance.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_extract(
+static inline void _Scheduler_priority_Ready_queue_extract(
Chain_Node *node,
Scheduler_priority_Ready_queue *ready_queue,
Priority_bit_map_Control *bit_map
@@ -188,7 +188,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_extract(
* @param the_thread The thread of which the node will be extracted.
 * @param[in, out] node The node which preserves the ready queue.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_priority_Extract_body(
+static inline void _Scheduler_priority_Extract_body(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node
@@ -217,7 +217,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_priority_Extract_body(
*
* @return This method returns the first node.
*/
-RTEMS_INLINE_ROUTINE Chain_Node *_Scheduler_priority_Ready_queue_first(
+static inline Chain_Node *_Scheduler_priority_Ready_queue_first(
Priority_bit_map_Control *bit_map,
Chain_Control *ready_queues
)
@@ -231,33 +231,21 @@ RTEMS_INLINE_ROUTINE Chain_Node *_Scheduler_priority_Ready_queue_first(
}
/**
- * @brief Scheduling decision logic.
+ * @brief Gets the highest priority ready thread of the scheduler.
*
- * This kernel routine implements scheduling decision logic
- * for priority-based scheduling.
- *
- * @param[in, out] scheduler The scheduler instance.
- * @param the_thread This parameter is unused.
- * @param force_dispatch Indicates whether the dispatch happens also if
- * the currently executing thread is set as not preemptible.
+ * @param scheduler is the scheduler.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_priority_Schedule_body(
- const Scheduler_Control *scheduler,
- Thread_Control *the_thread,
- bool force_dispatch
+static inline Thread_Control *_Scheduler_priority_Get_highest_ready(
+ const Scheduler_Control *scheduler
)
{
Scheduler_priority_Context *context =
_Scheduler_priority_Get_context( scheduler );
- Thread_Control *heir = (Thread_Control *)
- _Scheduler_priority_Ready_queue_first(
- &context->Bit_map,
- &context->Ready[ 0 ]
- );
- ( void ) the_thread;
-
- _Scheduler_Update_heir( heir, force_dispatch );
+ return (Thread_Control *) _Scheduler_priority_Ready_queue_first(
+ &context->Bit_map,
+ &context->Ready[ 0 ]
+ );
}
/**
@@ -269,7 +257,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_priority_Schedule_body(
* @param bit_map The priority bit map of the scheduler instance.
* @param ready_queues The ready queues of the scheduler instance.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_update(
+static inline void _Scheduler_priority_Ready_queue_update(
Scheduler_priority_Ready_queue *ready_queue,
unsigned int new_priority,
Priority_bit_map_Control *bit_map,
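
With the heir update moved into scheduleruniimpl.h (added later in this patch), the Schedule operation of the Deterministic Priority Scheduler reduces to picking the highest priority ready thread and handing it to the uniprocessor helper. A minimal sketch of how the operation in schedulerpriority.c might be wired after this change (illustrative only; the actual implementation lives outside this header):

void _Scheduler_priority_Schedule(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread
)
{
  ( void ) the_thread;

  /* Make the highest priority ready thread the heir if the current heir
     is preemptible. */
  _Scheduler_uniprocessor_Schedule(
    scheduler,
    _Scheduler_priority_Get_highest_ready
  );
}
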
diff --git a/cpukit/include/rtems/score/schedulerprioritysmp.h b/cpukit/include/rtems/score/schedulerprioritysmp.h
index af4ad2aaf3..476036b3bd 100644
--- a/cpukit/include/rtems/score/schedulerprioritysmp.h
+++ b/cpukit/include/rtems/score/schedulerprioritysmp.h
@@ -11,7 +11,7 @@
*/
/*
- * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2013, 2018 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
index c6e2dbb285..12fe6b1004 100644
--- a/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
+++ b/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
@@ -11,7 +11,7 @@
*/
/*
- * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2013, 2017 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/schedulersimpleimpl.h b/cpukit/include/rtems/score/schedulersimpleimpl.h
index 2aaf0fe17e..da052ba1b4 100644
--- a/cpukit/include/rtems/score/schedulersimpleimpl.h
+++ b/cpukit/include/rtems/score/schedulersimpleimpl.h
@@ -39,7 +39,7 @@
#include <rtems/score/schedulersimple.h>
#include <rtems/score/chainimpl.h>
-#include <rtems/score/schedulerimpl.h>
+#include <rtems/score/scheduleruniimpl.h>
#ifdef __cplusplus
extern "C" {
@@ -58,7 +58,7 @@ extern "C" {
*
* @return The context of @a scheduler.
*/
-RTEMS_INLINE_ROUTINE Scheduler_simple_Context *
+static inline Scheduler_simple_Context *
_Scheduler_simple_Get_context( const Scheduler_Control *scheduler )
{
return (Scheduler_simple_Context *) _Scheduler_Get_context( scheduler );
@@ -76,7 +76,7 @@ RTEMS_INLINE_ROUTINE Scheduler_simple_Context *
 * @retval true @a to_insert is smaller than or equal to the priority of @a next.
* @retval false @a to_insert is greater than the priority of @a next.
*/
-RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Priority_less_equal(
+static inline bool _Scheduler_simple_Priority_less_equal(
const void *key,
const Chain_Node *to_insert,
const Chain_Node *next
@@ -99,7 +99,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Priority_less_equal(
* @param[in, out] to_insert The node to insert into @a chain.
* @param insert_priority The priority to insert @a to_insert with.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_simple_Insert(
+static inline void _Scheduler_simple_Insert(
Chain_Control *chain,
Thread_Control *to_insert,
unsigned int insert_priority
@@ -120,7 +120,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_simple_Insert(
* @param[in, out] the_thread The thread of which to extract the node out of its chain.
* @param node This parameter is unused.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_simple_Extract(
+static inline void _Scheduler_simple_Extract(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node
@@ -133,28 +133,18 @@ RTEMS_INLINE_ROUTINE void _Scheduler_simple_Extract(
}
/**
- * @brief Scheduling decision logic.
+ * @brief Gets the highest priority ready thread of the scheduler.
*
- * This kernel routine implements scheduling decision logic for the simple scheduler.
- *
- * @param[in, out] scheduler The scheduler instance.
- * @param the_thread This parameter is unused.
- * @param force_dispatch Indicates whether the dispatch happens also if
- * the currently executing thread is set as not preemptible.
+ * @param scheduler is the scheduler.
*/
-RTEMS_INLINE_ROUTINE void _Scheduler_simple_Schedule_body(
- const Scheduler_Control *scheduler,
- Thread_Control *the_thread,
- bool force_dispatch
+static inline Thread_Control *_Scheduler_simple_Get_highest_ready(
+ const Scheduler_Control *scheduler
)
{
Scheduler_simple_Context *context =
_Scheduler_simple_Get_context( scheduler );
- Thread_Control *heir = (Thread_Control *) _Chain_First( &context->Ready );
-
- ( void ) the_thread;
- _Scheduler_Update_heir( heir, force_dispatch );
+ return (Thread_Control *) _Chain_First( &context->Ready );
}
/** @} */
diff --git a/cpukit/include/rtems/score/schedulersimplesmp.h b/cpukit/include/rtems/score/schedulersimplesmp.h
index 5781a11b2b..4ef34847b8 100644
--- a/cpukit/include/rtems/score/schedulersimplesmp.h
+++ b/cpukit/include/rtems/score/schedulersimplesmp.h
@@ -12,7 +12,7 @@
/*
* Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2013, 2018 embedded brains GmbH.
+ * Copyright (C) 2013, 2018 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/schedulersmp.h b/cpukit/include/rtems/score/schedulersmp.h
index f6421504c9..3d1fe86582 100644
--- a/cpukit/include/rtems/score/schedulersmp.h
+++ b/cpukit/include/rtems/score/schedulersmp.h
@@ -11,7 +11,7 @@
*/
/*
- * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2013, 2014 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/schedulersmpimpl.h b/cpukit/include/rtems/score/schedulersmpimpl.h
index 93716be256..c1839c4517 100644
--- a/cpukit/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/include/rtems/score/schedulersmpimpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2013, 2021 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2013, 2021 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h
index 8db3ae8634..9bf0e615b6 100644
--- a/cpukit/include/rtems/score/schedulerstrongapa.h
+++ b/cpukit/include/rtems/score/schedulerstrongapa.h
@@ -11,7 +11,7 @@
/*
* Copyright (C) 2020 Richi Dubey
- * Copyright (C) 2013, 2018 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2013, 2018 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/scheduleruniimpl.h b/cpukit/include/rtems/score/scheduleruniimpl.h
new file mode 100644
index 0000000000..9fe9ec394c
--- /dev/null
+++ b/cpukit/include/rtems/score/scheduleruniimpl.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreScheduler
+ *
+ * @brief This header file provides interfaces supporting the
+ *   implementation of uniprocessor schedulers.
+ */
+
+/*
+ * Copyright (C) 2010 Gedare Bloom.
+ * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ * Copyright (C) 2014, 2022 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERUNIIMPL_H
+#define _RTEMS_SCORE_SCHEDULERUNIIMPL_H
+
+#include <rtems/score/schedulerimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSScoreScheduler
+ *
+ * @{
+ */
+
+/**
+ * @brief Updates the heir thread of the processor.
+ *
+ * @param[in, out] heir is the current heir thread.
+ * @param[in, out] new_heir is the new heir thread.
+ */
+static inline void _Scheduler_uniprocessor_Update_heir(
+ Thread_Control *heir,
+ Thread_Control *new_heir
+)
+{
+ _Assert( heir != new_heir );
+#if defined(RTEMS_SMP)
+ /*
+ * We need this state only for _Thread_Get_CPU_time_used_locked(). Cannot
+ * use _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
+ * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
+ * schedulers.
+ */
+ heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
+ new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
+#endif
+ _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
+ _Thread_Heir = new_heir;
+ _Thread_Dispatch_necessary = true;
+}
+
+/**
+ * @brief Updates the heir thread of the processor if the current heir is
+ * not equal to the new heir thread.
+ *
+ * The update takes place even if the current heir thread is not preemptible.
+ *
+ * @param[in, out] new_heir is the new heir thread.
+ */
+static inline void _Scheduler_uniprocessor_Update_heir_if_necessary(
+ Thread_Control *new_heir
+)
+{
+ Thread_Control *heir = _Thread_Heir;
+
+ if ( heir != new_heir ) {
+ _Scheduler_uniprocessor_Update_heir( heir, new_heir );
+ }
+}
+
+/**
+ * @brief Updates the heir thread of the processor if the current heir thread
+ * is preemptible.
+ *
+ * @param[in, out] heir is the current heir thread.
+ * @param[in, out] new_heir is the new heir thread.
+ */
+static inline void _Scheduler_uniprocessor_Update_heir_if_preemptible(
+ Thread_Control *heir,
+ Thread_Control *new_heir
+)
+{
+ if ( heir != new_heir && heir->is_preemptible ) {
+ _Scheduler_uniprocessor_Update_heir( heir, new_heir );
+ }
+}
+
+/**
+ * @brief Blocks the thread.
+ *
+ * @param scheduler is the scheduler.
+ * @param the_thread is the thread to block.
+ * @param node is the scheduler node of the thread.
+ * @param extract is the handler to extract the thread.
+ * @param get_highest_ready is the handler to get the highest ready thread.
+ */
+static inline void _Scheduler_uniprocessor_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ void ( *extract )(
+ const Scheduler_Control *,
+ Thread_Control *,
+ Scheduler_Node *
+ ),
+ Thread_Control *( *get_highest_ready )( const Scheduler_Control * )
+)
+{
+ ( *extract )( scheduler, the_thread, node );
+
+ /* TODO: flash critical section? */
+
+ if ( _Thread_Is_heir( the_thread ) ) {
+ Thread_Control *highest_ready;
+
+ highest_ready = ( *get_highest_ready )( scheduler );
+ _Scheduler_uniprocessor_Update_heir( _Thread_Heir, highest_ready );
+ }
+}
+
+/**
+ * @brief Schedules the unblocked thread if it is the highest ready thread.
+ *
+ * @param scheduler is the scheduler.
+ * @param the_thread is the thread.
+ * @param priority is the priority of the thread.
+ */
+static inline void _Scheduler_uniprocessor_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Control priority
+)
+{
+ Thread_Control *heir;
+
+ heir = _Thread_Heir;
+
+ /*
+ * If the thread is more important than the heir, then we have a new heir.
+ * This may or may not result in a context switch. If the current heir
+ * thread is preemptible, then we need to do a context switch.
+ */
+ if ( priority < _Thread_Get_priority( heir ) ) {
+ _Scheduler_uniprocessor_Update_heir_if_preemptible( heir, the_thread );
+ }
+}
+
+/**
+ * @brief Schedules the highest ready thread if the current heir thread of the
+ * processor is preemptible.
+ *
+ * @param scheduler is the scheduler.
+ * @param get_highest_ready is the handler to get the highest ready thread.
+ */
+static inline void _Scheduler_uniprocessor_Schedule(
+ const Scheduler_Control *scheduler,
+ Thread_Control *( *get_highest_ready )( const Scheduler_Control * )
+)
+{
+ Thread_Control *highest_ready;
+
+ highest_ready = ( *get_highest_ready )( scheduler );
+ _Scheduler_uniprocessor_Update_heir_if_preemptible(
+ _Thread_Heir,
+ highest_ready
+ );
+}
+
+/**
+ * @brief Yields to the highest ready thread.
+ *
+ * @param scheduler is the scheduler.
+ * @param get_highest_ready is the handler to get the highest ready thread.
+ */
+static inline void _Scheduler_uniprocessor_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *( *get_highest_ready )( const Scheduler_Control * )
+)
+{
+ Thread_Control *highest_ready;
+
+ highest_ready = ( *get_highest_ready )( scheduler );
+ _Scheduler_uniprocessor_Update_heir_if_necessary( highest_ready );
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_SCHEDULERUNIIMPL_H */
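
As a usage sketch, a uniprocessor scheduler can implement its Block operation by delegating extraction and heir selection to the helpers above. Assuming the Simple Priority Scheduler handlers shown earlier in this patch, the wiring might look like this (the actual code in schedulersimple.c may differ in detail):

void _Scheduler_simple_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  /* Remove the thread from the ready chain; if it was the heir, select
     the first thread of the ready chain as the new heir. */
  _Scheduler_uniprocessor_Block(
    scheduler,
    the_thread,
    node,
    _Scheduler_simple_Extract,
    _Scheduler_simple_Get_highest_ready
  );
}
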
diff --git a/cpukit/include/rtems/score/semaphoreimpl.h b/cpukit/include/rtems/score/semaphoreimpl.h
index e28375521c..6c0b62adcd 100644
--- a/cpukit/include/rtems/score/semaphoreimpl.h
+++ b/cpukit/include/rtems/score/semaphoreimpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2015, 2017 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2015, 2017 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/smpbarrier.h b/cpukit/include/rtems/score/smpbarrier.h
index 51dfddae56..fc14859c41 100644
--- a/cpukit/include/rtems/score/smpbarrier.h
+++ b/cpukit/include/rtems/score/smpbarrier.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2013, 2014 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/smpimpl.h b/cpukit/include/rtems/score/smpimpl.h
index 8e965968a1..a8e3a3be15 100644
--- a/cpukit/include/rtems/score/smpimpl.h
+++ b/cpukit/include/rtems/score/smpimpl.h
@@ -40,7 +40,7 @@
#include <rtems/score/smp.h>
#include <rtems/score/percpu.h>
-#include <rtems/score/processormask.h>
+#include <rtems/score/processormaskimpl.h>
#include <rtems/fatal.h>
#ifdef __cplusplus
@@ -166,12 +166,9 @@ RTEMS_NO_RETURN void _SMP_Start_multitasking_on_secondary_processor(
* @param[in, out] cpu_self is the processor control of the processor executing
* this function.
*
- * @return Returns the processed message.
+ * @param message is the message to process.
*/
-long unsigned _SMP_Process_message(
- Per_CPU_Control *cpu_self,
- long unsigned message
-);
+void _SMP_Process_message( Per_CPU_Control *cpu_self, long unsigned message );
/**
* @brief Tries to process the current SMP message.
@@ -200,10 +197,8 @@ void _SMP_Try_to_process_message(
*
* @param[in, out] cpu_self is the processor control of the processor executing
* this function.
- *
- * @return Returns the processed message.
*/
-static inline long unsigned _SMP_Inter_processor_interrupt_handler(
+static inline void _SMP_Inter_processor_interrupt_handler(
Per_CPU_Control *cpu_self
)
{
@@ -222,10 +217,8 @@ static inline long unsigned _SMP_Inter_processor_interrupt_handler(
);
if ( RTEMS_PREDICT_FALSE( message != 0 ) ) {
- return _SMP_Process_message( cpu_self, message );
+ _SMP_Process_message( cpu_self, message );
}
-
- return message;
}
/**
@@ -355,7 +348,7 @@ void _SMP_Synchronize( void );
*
* @return The processor mask with all online processors.
*/
-RTEMS_INLINE_ROUTINE const Processor_mask *_SMP_Get_online_processors( void )
+static inline const Processor_mask *_SMP_Get_online_processors( void )
{
#if defined(RTEMS_SMP)
return &_SMP_Online_processors;
@@ -370,7 +363,7 @@ RTEMS_INLINE_ROUTINE const Processor_mask *_SMP_Get_online_processors( void )
* @return True if inter-processor interrupts are needed for the correct system
* operation, otherwise false.
*/
-RTEMS_INLINE_ROUTINE bool _SMP_Need_inter_processor_interrupts( void )
+static inline bool _SMP_Need_inter_processor_interrupts( void )
{
/*
* Use the configured processor maximum instead of the actual to allow
diff --git a/cpukit/include/rtems/score/smplock.h b/cpukit/include/rtems/score/smplock.h
index ca874fef08..52324fc76c 100644
--- a/cpukit/include/rtems/score/smplock.h
+++ b/cpukit/include/rtems/score/smplock.h
@@ -13,7 +13,7 @@
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2013, 2016 embedded brains GmbH
+ * Copyright (C) 2013, 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/smplockmcs.h b/cpukit/include/rtems/score/smplockmcs.h
index deb6ddeb3e..89c66e9ebf 100644
--- a/cpukit/include/rtems/score/smplockmcs.h
+++ b/cpukit/include/rtems/score/smplockmcs.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2016 embedded brains GmbH
+ * Copyright (c) 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/smplockseq.h b/cpukit/include/rtems/score/smplockseq.h
index d96cc6acaf..be0225b4dc 100644
--- a/cpukit/include/rtems/score/smplockseq.h
+++ b/cpukit/include/rtems/score/smplockseq.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2016 embedded brains GmbH
+ * Copyright (c) 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/smplockstats.h b/cpukit/include/rtems/score/smplockstats.h
index 102fb6eac5..913d551418 100644
--- a/cpukit/include/rtems/score/smplockstats.h
+++ b/cpukit/include/rtems/score/smplockstats.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2013, 2018 embedded brains GmbH
+ * Copyright (C) 2013, 2018 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -235,7 +235,7 @@ static inline void _SMP_lock_Stats_acquire_end(
second = _CPU_Counter_read();
stats_context->acquire_instant = second;
- delta = _CPU_Counter_difference( second, acquire_context->first );
+ delta = second - acquire_context->first;
++stats->usage_count;
@@ -270,7 +270,7 @@ static inline void _SMP_lock_Stats_release_update(
stats = stats_context->stats;
first = stats_context->acquire_instant;
second = _CPU_Counter_read();
- delta = _CPU_Counter_difference( second, first );
+ delta = second - first;
stats->total_section_time += delta;
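
Dropping _CPU_Counter_difference() assumes the CPU counter is a free-running unsigned integer, so a plain subtraction yields the elapsed ticks even across a counter wrap. A small sketch of the measurement pattern, assuming CPU_Counter_ticks is the unsigned counter type:

CPU_Counter_ticks begin;
CPU_Counter_ticks end;
CPU_Counter_ticks delta;

begin = _CPU_Counter_read();
/* ... section under measurement ... */
end = _CPU_Counter_read();

/* Unsigned arithmetic wraps modulo 2^N, so the difference is valid as long
   as the measured interval is shorter than one full counter period. */
delta = end - begin;
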
diff --git a/cpukit/include/rtems/score/smplockticket.h b/cpukit/include/rtems/score/smplockticket.h
index 1f6172baa8..d317ea5dd2 100644
--- a/cpukit/include/rtems/score/smplockticket.h
+++ b/cpukit/include/rtems/score/smplockticket.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2013, 2016 embedded brains GmbH
+ * Copyright (C) 2013, 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/stack.h b/cpukit/include/rtems/score/stack.h
index 360e4d61f6..6746d6991b 100644
--- a/cpukit/include/rtems/score/stack.h
+++ b/cpukit/include/rtems/score/stack.h
@@ -11,8 +11,8 @@
*/
/*
- * COPYRIGHT (c) 1989-2006.
- * On-Line Applications Research Corporation (OAR).
+ * Copyright (C) 2022 embedded brains GmbH & Co. KG
+ * Copyright (C) 1989, 2021 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -105,15 +105,22 @@ typedef void ( *Stack_Allocator_free )( void *addr );
* The allocate for idle handler is optional even when the user thread stack
* allocator and deallocator are configured.
*
- * @param cpu Index of the CPU for the IDLE thread using this stack
- * @param stack_size The size of the stack area to allocate in bytes.
+ * @param cpu is the index of the CPU for the IDLE thread using this stack.
*
- * @retval NULL Not enough memory.
- * @retval other Pointer to begin of stack area.
+ * @param stack_size[in, out] is a pointer to a size_t object. On function
+ * entry, the object contains the proposed size of the stack area to allocate
+ * in bytes. The proposed size does not take the actual thread-local storage
+ * size of the application into account. The stack allocator can modify the
+ * size to ensure that there is enough space available in the stack area for
+ * the thread-local storage.
+ *
+ * @retval NULL There was not enough memory available to allocate a stack area.
+ *
+ * @return Returns a pointer to the beginning of the allocated stack area.
*/
typedef void *( *Stack_Allocator_allocate_for_idle )(
uint32_t cpu,
- size_t stack_size
+ size_t *stack_size
);
/**
@@ -165,7 +172,57 @@ extern const Stack_Allocator_free _Stack_Allocator_free;
*/
void _Stack_Allocator_do_initialize( void );
-/** @} */
+/**
+ * @brief Allocates the IDLE thread storage area from the workspace.
+ *
+ * If the thread storage area cannot be allocated, then the
+ * ::INTERNAL_ERROR_NO_MEMORY_FOR_IDLE_TASK_STACK fatal error will occur.
+ *
+ * @param unused is an unused parameter.
+ *
+ * @param storage_size[in] is a pointer to a size_t object. On function entry, the
+ * object contains the size of the task storage area to allocate in bytes.
+ *
+ * @return Returns a pointer to the beginning of the allocated task storage area.
+ */
+void *_Stack_Allocator_allocate_for_idle_workspace(
+ uint32_t unused,
+ size_t *storage_size
+);
+
+/**
+ * @brief The size in bytes of the idle thread storage area used by
+ * _Stack_Allocator_allocate_for_idle_static().
+ *
+ * Application provided via <rtems/confdefs.h>.
+ */
+extern const size_t _Stack_Allocator_allocate_for_idle_storage_size;
+
+/**
+ * @brief The thread storage areas used by
+ * _Stack_Allocator_allocate_for_idle_static().
+ *
+ * Application provided via <rtems/confdefs.h>.
+ */
+extern char _Stack_Allocator_allocate_for_idle_storage_areas[];
+
+/**
+ * @brief Allocates the IDLE thread storage from the memory statically
+ * allocated by <rtems/confdefs.h>.
+ *
+ * @param cpu_index is the index of the CPU for the IDLE thread using this stack.
+ *
+ * @param storage_size[out] is a pointer to a size_t object. On function return, the
+ * object value is set to the value of
+ * ::_Stack_Allocator_allocate_for_idle_storage_size.
+ *
+ * @return Returns a pointer to the beginning of the allocated task storage area.
+ */
+void *_Stack_Allocator_allocate_for_idle_static(
+ uint32_t cpu_index,
+ size_t *storage_size
+);
+
/**
* @brief The stack allocator allocate stack for idle thread handler.
*
@@ -174,6 +231,8 @@ void _Stack_Allocator_do_initialize( void );
extern const Stack_Allocator_allocate_for_idle
_Stack_Allocator_allocate_for_idle;
+/** @} */
+
#ifdef __cplusplus
}
#endif
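
With the new handler signature the allocator receives a pointer to the proposed stack size and may adjust it, for example to report the size of a statically reserved area. A hedged sketch of an application-provided handler; the names MY_IDLE_STACK_SIZE, MY_IDLE_PROCESSORS, and my_idle_stacks are hypothetical:

#define MY_IDLE_STACK_SIZE ( 32 * 1024 )
#define MY_IDLE_PROCESSORS 4

static char my_idle_stacks[ MY_IDLE_PROCESSORS ][ MY_IDLE_STACK_SIZE ]
  RTEMS_ALIGNED( CPU_STACK_ALIGNMENT );

void *my_allocate_for_idle( uint32_t cpu, size_t *stack_size )
{
  /* Report the size actually available; the area must also provide room
     for the thread-local storage of the application. */
  *stack_size = MY_IDLE_STACK_SIZE;
  return my_idle_stacks[ cpu ];
}
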
diff --git a/cpukit/include/rtems/score/stackimpl.h b/cpukit/include/rtems/score/stackimpl.h
index 3fe9d2cd33..1df2f151e1 100644
--- a/cpukit/include/rtems/score/stackimpl.h
+++ b/cpukit/include/rtems/score/stackimpl.h
@@ -63,7 +63,7 @@ extern "C" {
* @param starting_address The starting_address for the new stack.
* @param size The size of the stack in bytes.
*/
-RTEMS_INLINE_ROUTINE void _Stack_Initialize (
+static inline void _Stack_Initialize (
Stack_Control *the_stack,
void *starting_address,
size_t size
@@ -81,7 +81,7 @@ RTEMS_INLINE_ROUTINE void _Stack_Initialize (
*
* @return The minimum stack size.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Stack_Minimum (void)
+static inline uint32_t _Stack_Minimum (void)
{
return rtems_minimum_stack_size;
}
@@ -98,7 +98,7 @@ RTEMS_INLINE_ROUTINE uint32_t _Stack_Minimum (void)
* @retval true @a size is large enough.
* @retval false @a size is not large enough.
*/
-RTEMS_INLINE_ROUTINE bool _Stack_Is_enough(
+static inline bool _Stack_Is_enough(
size_t size,
bool is_fp
)
@@ -128,7 +128,7 @@ RTEMS_INLINE_ROUTINE bool _Stack_Is_enough(
*
* @return The appropriate stack size.
*/
-RTEMS_INLINE_ROUTINE size_t _Stack_Ensure_minimum (
+static inline size_t _Stack_Ensure_minimum (
size_t size
)
{
@@ -148,7 +148,7 @@ RTEMS_INLINE_ROUTINE size_t _Stack_Ensure_minimum (
*
* @return Returns the extended stack size.
*/
-RTEMS_INLINE_ROUTINE size_t _Stack_Extend_size(
+static inline size_t _Stack_Extend_size(
size_t stack_size,
bool is_fp
)
diff --git a/cpukit/include/rtems/score/statesimpl.h b/cpukit/include/rtems/score/statesimpl.h
index 2121731430..aa90f3c27f 100644
--- a/cpukit/include/rtems/score/statesimpl.h
+++ b/cpukit/include/rtems/score/statesimpl.h
@@ -171,7 +171,7 @@ extern "C" {
*
* @return This method returns the updated states value.
*/
-RTEMS_INLINE_ROUTINE States_Control _States_Set (
+static inline States_Control _States_Set (
States_Control states_to_set,
States_Control current_state
)
@@ -190,7 +190,7 @@ RTEMS_INLINE_ROUTINE States_Control _States_Set (
*
* @return This method returns the updated states value.
*/
-RTEMS_INLINE_ROUTINE States_Control _States_Clear (
+static inline States_Control _States_Clear (
States_Control states_to_clear,
States_Control current_state
)
@@ -209,7 +209,7 @@ RTEMS_INLINE_ROUTINE States_Control _States_Clear (
* @retval true The state is ready.
* @retval false The state is not ready.
*/
-RTEMS_INLINE_ROUTINE bool _States_Is_ready (
+static inline bool _States_Is_ready (
States_Control the_states
)
{
@@ -227,7 +227,7 @@ RTEMS_INLINE_ROUTINE bool _States_Is_ready (
* @retval true DORMANT state is set in @a the_states.
* @retval false DORMANT state is not set in @a the_states.
*/
-RTEMS_INLINE_ROUTINE bool _States_Is_dormant (
+static inline bool _States_Is_dormant (
States_Control the_states
)
{
@@ -245,7 +245,7 @@ RTEMS_INLINE_ROUTINE bool _States_Is_dormant (
* @retval true SUSPENDED state is set in @a the_states.
* @retval false SUSPENDED state is not set in @a the_states.
*/
-RTEMS_INLINE_ROUTINE bool _States_Is_suspended (
+static inline bool _States_Is_suspended (
States_Control the_states
)
{
@@ -263,7 +263,7 @@ RTEMS_INLINE_ROUTINE bool _States_Is_suspended (
* @retval true WAITING_FOR_TIME state is set in @a the_states.
* @retval false WAITING_FOR_TIME state is not set in @a the_states.
*/
-RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_rpc_reply (
+static inline bool _States_Is_waiting_for_rpc_reply (
States_Control the_states
)
{
@@ -281,7 +281,7 @@ RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_rpc_reply (
* @retval true WAITING_FOR_JOIN_AT_EXIT state is set in @a the_states.
* @retval false WAITING_FOR_JOIN_AT_EXIT state is not set in @a the_states.
*/
-RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_join_at_exit(
+static inline bool _States_Is_waiting_for_join_at_exit(
States_Control the_states
)
{
@@ -299,7 +299,7 @@ RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_join_at_exit(
* @retval true @a the_states is interruptible.
* @retval false @a the_states is not interruptible.
*/
-RTEMS_INLINE_ROUTINE bool _States_Is_interruptible_by_signal (
+static inline bool _States_Is_interruptible_by_signal (
States_Control the_states
)
{
@@ -320,7 +320,7 @@ RTEMS_INLINE_ROUTINE bool _States_Is_interruptible_by_signal (
* @retval false The state indicates that the task is not blocked waiting on a
* local resource.
*/
-RTEMS_INLINE_ROUTINE bool _States_Is_locally_blocked (
+static inline bool _States_Is_locally_blocked (
States_Control the_states
)
{
diff --git a/cpukit/include/rtems/score/status.h b/cpukit/include/rtems/score/status.h
index ac2da9b7d7..7fdf6a9017 100644
--- a/cpukit/include/rtems/score/status.h
+++ b/cpukit/include/rtems/score/status.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/sysstate.h b/cpukit/include/rtems/score/sysstate.h
index 773987b14b..1086569867 100644
--- a/cpukit/include/rtems/score/sysstate.h
+++ b/cpukit/include/rtems/score/sysstate.h
@@ -95,7 +95,7 @@ extern System_state_Codes _System_state_Current;
*
* @param state The state to set.
*/
-RTEMS_INLINE_ROUTINE void _System_state_Set (
+static inline void _System_state_Set (
System_state_Codes state
)
{
@@ -107,7 +107,7 @@ RTEMS_INLINE_ROUTINE void _System_state_Set (
*
* @return The current system state.
*/
-RTEMS_INLINE_ROUTINE System_state_Codes _System_state_Get ( void )
+static inline System_state_Codes _System_state_Get ( void )
{
return _System_state_Current;
}
@@ -120,7 +120,7 @@ RTEMS_INLINE_ROUTINE System_state_Codes _System_state_Get ( void )
* @retval true @a state is before initialization.
* @retval false @a state is not before initialization.
*/
-RTEMS_INLINE_ROUTINE bool _System_state_Is_before_initialization (
+static inline bool _System_state_Is_before_initialization (
System_state_Codes state
)
{
@@ -135,7 +135,7 @@ RTEMS_INLINE_ROUTINE bool _System_state_Is_before_initialization (
* @retval true @a state is before multitasking.
* @retval false @a state is not before multitasking.
*/
-RTEMS_INLINE_ROUTINE bool _System_state_Is_before_multitasking (
+static inline bool _System_state_Is_before_multitasking (
System_state_Codes state
)
{
@@ -150,7 +150,7 @@ RTEMS_INLINE_ROUTINE bool _System_state_Is_before_multitasking (
* @retval true @a state is up.
* @retval false @a state is not up.
*/
-RTEMS_INLINE_ROUTINE bool _System_state_Is_up (
+static inline bool _System_state_Is_up (
System_state_Codes state
)
{
@@ -165,7 +165,7 @@ RTEMS_INLINE_ROUTINE bool _System_state_Is_up (
* @retval true @a state is terminated.
* @retval false @a state is not terminated.
*/
-RTEMS_INLINE_ROUTINE bool _System_state_Is_terminated (
+static inline bool _System_state_Is_terminated (
System_state_Codes state
)
{
diff --git a/cpukit/include/rtems/score/thread.h b/cpukit/include/rtems/score/thread.h
index dd32b51a5f..8ca7d85205 100644
--- a/cpukit/include/rtems/score/thread.h
+++ b/cpukit/include/rtems/score/thread.h
@@ -14,7 +14,7 @@
* COPYRIGHT (c) 1989-2014.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2014, 2016 embedded brains GmbH.
+ * Copyright (C) 2014, 2016 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -719,50 +719,50 @@ typedef struct {
* The individual state flags must be a power of two to allow use of bit
* operations to manipulate and evaluate the thread life state.
*/
-typedef enum {
- /**
- * @brief Indicates that the thread life is protected.
- *
- * If this flag is set, then the thread restart or delete requests are deferred
- * until the protection and deferred change flags are cleared. It is used by
- * _Thread_Set_life_protection().
- */
- THREAD_LIFE_PROTECTED = 0x1,
+typedef unsigned int Thread_Life_state;
- /**
- * @brief Indicates that thread is restarting.
- *
- * If this flag is set, then a thread restart request is in pending. See
- * _Thread_Restart_self() and _Thread_Restart_other().
- */
- THREAD_LIFE_RESTARTING = 0x2,
+/**
+ * @brief Indicates that the thread life is protected.
+ *
+ * If this flag is set, then the thread restart or delete requests are deferred
+ * until the protection and deferred change flags are cleared. It is used by
+ * _Thread_Set_life_protection().
+ */
+#define THREAD_LIFE_PROTECTED 0x1U
- /**
- * @brief Indicates that thread is terminating.
- *
- * If this flag is set, then a thread termination request is in pending. See
- * _Thread_Exit() and _Thread_Cancel().
- */
- THREAD_LIFE_TERMINATING = 0x4,
+/**
+ * @brief Indicates that thread is restarting.
+ *
+ * If this flag is set, then a thread restart request is in pending. See
+ * _Thread_Restart_self() and _Thread_Restart_other().
+ */
+#define THREAD_LIFE_RESTARTING 0x2U
- /**
- * @brief Indicates that thread life changes are deferred.
- *
- * If this flag is set, then the thread restart or delete requests are deferred
- * until the protection and deferred change flags are cleared. It is used by
- * pthread_setcanceltype().
- */
- THREAD_LIFE_CHANGE_DEFERRED = 0x8,
+/**
+ * @brief Indicates that the thread is terminating.
+ *
+ * If this flag is set, then a thread termination request is pending. See
+ * _Thread_Exit() and _Thread_Cancel().
+ */
+#define THREAD_LIFE_TERMINATING 0x4U
- /**
- * @brief Indicates that thread is detached.
- *
- * If this flag is set, then the thread is detached. Detached threads do not
- * wait during termination for other threads to join. See rtems_task_delete(),
- * rtems_task_exit(), and pthread_detach().
- */
- THREAD_LIFE_DETACHED = 0x10
-} Thread_Life_state;
+/**
+ * @brief Indicates that thread life changes are deferred.
+ *
+ * If this flag is set, then the thread restart or delete requests are deferred
+ * until the protection and deferred change flags are cleared. It is used by
+ * pthread_setcanceltype().
+ */
+#define THREAD_LIFE_CHANGE_DEFERRED 0x8U
+
+/**
+ * @brief Indicates that the thread is detached.
+ *
+ * If this flag is set, then the thread is detached. Detached threads do not
+ * wait during termination for other threads to join. See rtems_task_delete(),
+ * rtems_task_exit(), and pthread_detach().
+ */
+#define THREAD_LIFE_DETACHED 0x10U
/**
* @brief Thread life control.
@@ -858,6 +858,15 @@ struct _Thread_Control {
#endif
/*================= end of common block =================*/
+ /**
+ * @brief This member contains the context of this thread.
+ *
+ * This member is placed directly after the end of the common block so that
+ * the structure offsets are as small as possible. This helps on instruction
+   * set architectures with a very limited range for immediate values.
+ */
+ Context_Control Registers;
+
#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
/**
* @brief Potpourri lock statistics.
@@ -913,16 +922,18 @@ struct _Thread_Control {
Thread_Action_control Post_switch_actions;
- /** This field contains the context of this thread. */
- Context_Control Registers;
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
/** This field points to the floating point context for this thread.
* If NULL, the thread is integer only.
*/
Context_Control_fp *fp_context;
#endif
+
+#ifndef _REENT_THREAD_LOCAL
/** This field points to the newlib reentrancy structure for this thread. */
struct _reent *libc_reent;
+#endif
+
/** This array contains the API extension area pointers. */
void *API_Extensions[ THREAD_API_LAST + 1 ];
@@ -1147,9 +1158,11 @@ Objects_Control *_Thread_Allocate_unlimited( Objects_Information *information );
#define THREAD_INFORMATION_DEFINE( name, api, cls, max ) \
static Objects_Control * \
name##_Local_table[ _Objects_Maximum_per_allocation( max ) ]; \
-static Thread_Configured_control \
+static RTEMS_SECTION( ".noinit.rtems.content.objects." #name ) \
+Thread_Configured_control \
name##_Objects[ _Objects_Maximum_per_allocation( max ) ]; \
-static Thread_queue_Configured_heads \
+static RTEMS_SECTION( ".noinit.rtems.content.objects." #name ) \
+Thread_queue_Configured_heads \
name##_Heads[ _Objects_Maximum_per_allocation( max ) ]; \
Thread_Information name##_Information = { \
{ \
@@ -1173,13 +1186,6 @@ Thread_Information name##_Information = { \
} \
}
-/**
- * @brief The idle thread stacks.
- *
- * Provided by the application via <rtems/confdefs.h>.
- */
-extern char _Thread_Idle_stacks[];
-
#if defined(RTEMS_MULTIPROCESSING)
/**
* @brief The configured thread control block.
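
Since Thread_Life_state is now a plain unsigned integer, the life state flags can be combined and tested with ordinary bit operations, for example:

Thread_Life_state life_state;

life_state = THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED;

if ( ( life_state & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0 ) {
  /* A restart or termination request is pending for the thread. */
}
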
diff --git a/cpukit/include/rtems/score/threadcpubudget.h b/cpukit/include/rtems/score/threadcpubudget.h
index bcbaa11bdb..e1d18ef6ed 100644
--- a/cpukit/include/rtems/score/threadcpubudget.h
+++ b/cpukit/include/rtems/score/threadcpubudget.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2021 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/threaddispatch.h b/cpukit/include/rtems/score/threaddispatch.h
index b4124ea0c2..b06ebe8fec 100644
--- a/cpukit/include/rtems/score/threaddispatch.h
+++ b/cpukit/include/rtems/score/threaddispatch.h
@@ -72,7 +72,7 @@ extern "C" {
* @retval false The executing thread is inside a thread dispatch critical
* section and dispatching is not allowed.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Dispatch_is_enabled(void)
+static inline bool _Thread_Dispatch_is_enabled(void)
{
bool enabled;
@@ -96,7 +96,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Dispatch_is_enabled(void)
*
* @return The value of the thread dispatch level.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_get_disable_level(void)
+static inline uint32_t _Thread_Dispatch_get_disable_level(void)
{
return _Thread_Dispatch_disable_level;
}
@@ -106,7 +106,7 @@ RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_get_disable_level(void)
*
* This routine initializes the thread dispatching subsystem.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Dispatch_initialization( void )
+static inline void _Thread_Dispatch_initialization( void )
{
_Thread_Dispatch_disable_level = 1;
}
@@ -180,7 +180,7 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level );
*
* @return The current processor.
*/
-RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_with_CPU(
+static inline Per_CPU_Control *_Thread_Dispatch_disable_with_CPU(
Per_CPU_Control *cpu_self,
const ISR_lock_Context *lock_context
)
@@ -207,7 +207,7 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_with_CPU(
*
* @return The current processor.
*/
-RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_critical(
+static inline Per_CPU_Control *_Thread_Dispatch_disable_critical(
const ISR_lock_Context *lock_context
)
{
@@ -219,19 +219,20 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_critical(
*
* @return The current processor.
*/
-RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable( void )
+static inline Per_CPU_Control *_Thread_Dispatch_disable( void )
{
Per_CPU_Control *cpu_self;
- ISR_lock_Context lock_context;
#if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING )
+ ISR_lock_Context lock_context;
+
_ISR_lock_ISR_disable( &lock_context );
-#endif
cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
-#if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING )
_ISR_lock_ISR_enable( &lock_context );
+#else
+ cpu_self = _Thread_Dispatch_disable_critical( NULL );
#endif
return cpu_self;
@@ -251,7 +252,7 @@ void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self );
*
* @param[in, out] cpu_self The current processor.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Dispatch_unnest( Per_CPU_Control *cpu_self )
+static inline void _Thread_Dispatch_unnest( Per_CPU_Control *cpu_self )
{
_Assert( cpu_self->thread_dispatch_disable_level > 0 );
--cpu_self->thread_dispatch_disable_level;
@@ -263,7 +264,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Dispatch_unnest( Per_CPU_Control *cpu_self )
* @param[in, out] cpu_self The current processor.
* @param[in, out] cpu_target The target processor to request a thread dispatch.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Dispatch_request(
+static inline void _Thread_Dispatch_request(
Per_CPU_Control *cpu_self,
Per_CPU_Control *cpu_target
)
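
The usual pairing of the disable and enable routines is unchanged by this cleanup; a short usage sketch:

Per_CPU_Control *cpu_self;

cpu_self = _Thread_Dispatch_disable();
/* ... perform work while thread dispatching is disabled ... */
_Thread_Dispatch_enable( cpu_self );
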
diff --git a/cpukit/include/rtems/score/threadidledata.h b/cpukit/include/rtems/score/threadidledata.h
index 4f2a785ccd..8e458de345 100644
--- a/cpukit/include/rtems/score/threadidledata.h
+++ b/cpukit/include/rtems/score/threadidledata.h
@@ -11,7 +11,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2020 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/threadimpl.h b/cpukit/include/rtems/score/threadimpl.h
index 10800aa023..36ddb785e9 100644
--- a/cpukit/include/rtems/score/threadimpl.h
+++ b/cpukit/include/rtems/score/threadimpl.h
@@ -13,7 +13,7 @@
* COPYRIGHT (c) 1989-2008.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2014, 2017 embedded brains GmbH.
+ * Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -380,15 +380,23 @@ RTEMS_NO_RETURN void _Thread_Exit(
);
/**
- * @brief Joins the currently executing thread with the given thread to wait
- * for.
+ * @brief Joins the currently executing thread with the given thread.
*
- * @param[in, out] the_thread The thread to wait for.
- * @param waiting_for_join The states control for the join.
- * @param[in, out] executing The currently executing thread.
- * @param queue_context The thread queue context.
+ * @param[in, out] the_thread is the thread to join.
+ *
+ * @param waiting_for_join is the thread state for the currently executing
+ * thread.
+ *
+ * @param[in, out] executing is the currently executing thread.
+ *
+ * @param queue_context[in, out] is the thread queue context. The caller shall
+ * have acquired the thread state lock using the thread queue context.
+ *
+ * @retval STATUS_SUCCESSFUL The operation was successful.
+ *
+ * @retval STATUS_DEADLOCK A deadlock occurred.
*/
-void _Thread_Join(
+Status_Control _Thread_Join(
Thread_Control *the_thread,
States_Control waiting_for_join,
Thread_Control *executing,
@@ -396,23 +404,38 @@ void _Thread_Join(
);
/**
+ * @brief Indicates the resulting state of _Thread_Cancel().
+ */
+typedef enum {
+ /**
+ * @brief Indicates that the thread cancel operation is done.
+ */
+ THREAD_CANCEL_DONE,
+
+ /**
+ * @brief Indicates that the thread cancel operation is in progress.
+ *
+ * The cancelled thread is terminating. It may be joined.
+ */
+ THREAD_CANCEL_IN_PROGRESS
+} Thread_Cancel_state;
+
+/**
* @brief Cancels the thread.
*
- * @param[in, out] the_thread The thread to cancel.
- * @param executing The currently executing thread.
- * @param exit_value The exit value for the thread.
+ * @param[in, out] the_thread is the thread to cancel.
+ *
+ * @param[in, out] executing is the currently executing thread.
+ *
+ * @param[in, out] life_states_to_clear is the set of thread life states to
+ * clear for the thread to cancel.
*/
-void _Thread_Cancel(
- Thread_Control *the_thread,
- Thread_Control *executing,
- void *exit_value
+Thread_Cancel_state _Thread_Cancel(
+ Thread_Control *the_thread,
+ Thread_Control *executing,
+ Thread_Life_state life_states_to_clear
);
-typedef struct {
- Thread_queue_Context Base;
- Thread_Control *cancel;
-} Thread_Close_context;
-
/**
* @brief Closes the thread.
*
@@ -420,14 +443,21 @@ typedef struct {
* case the executing thread is not terminated, then this function waits until
* the terminating thread reached the zombie state.
*
- * @param the_thread The thread to close.
- * @param executing The currently executing thread.
- * @param[in, out] context The thread close context.
+ * @param the_thread is the thread to close.
+ *
+ * @param[in, out] executing is the currently executing thread.
+ *
+ * @param[in, out] queue_context is the thread queue context. The caller shall
+ * have disabled interrupts using the thread queue context.
+ *
+ * @retval STATUS_SUCCESSFUL The operation was successful.
+ *
+ * @retval STATUS_DEADLOCK A deadlock occurred.
*/
-void _Thread_Close(
+Status_Control _Thread_Close(
Thread_Control *the_thread,
Thread_Control *executing,
- Thread_Close_context *context
+ Thread_queue_Context *queue_context
);
/**
@@ -438,7 +468,7 @@ void _Thread_Close(
* @retval true The thread is currently in the ready state.
* @retval false The thread is currently not ready.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
+static inline bool _Thread_Is_ready( const Thread_Control *the_thread )
{
return _States_Is_ready( the_thread->current_state );
}
@@ -565,7 +595,7 @@ void _Thread_Handler( void );
* @param the_thread The thread to acquire the lock context.
* @param lock_context The lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
+static inline void _Thread_State_acquire_critical(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -579,7 +609,7 @@ RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
* @param the_thread The thread to acquire the lock context.
* @param lock_context The lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
+static inline void _Thread_State_acquire(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -596,7 +626,7 @@ RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
*
* @return The currently executing thread.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
+static inline Thread_Control *_Thread_State_acquire_for_executing(
ISR_lock_Context *lock_context
)
{
@@ -615,7 +645,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
* @param the_thread The thread to release the lock context.
* @param lock_context The lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
+static inline void _Thread_State_release_critical(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -629,7 +659,7 @@ RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
* @param[in, out] the_thread The thread to release the lock context.
* @param[out] lock_context The lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_State_release(
+static inline void _Thread_State_release(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -647,7 +677,7 @@ RTEMS_INLINE_ROUTINE void _Thread_State_release(
* @retval false The thread is not owner of the lock of the join queue.
*/
#if defined(RTEMS_DEBUG)
-RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
+static inline bool _Thread_State_is_owner(
const Thread_Control *the_thread
)
{
@@ -761,7 +791,7 @@ void _Thread_Priority_changed(
*
* @see _Thread_Wait_acquire().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
+static inline void _Thread_Priority_change(
Thread_Control *the_thread,
Priority_Node *priority_node,
Priority_Control new_priority,
@@ -778,6 +808,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
);
}
+#if defined(RTEMS_SMP)
/**
* @brief Replaces the victim priority node with the replacement priority node
* in the corresponding thread priority aggregation.
@@ -795,6 +826,7 @@ void _Thread_Priority_replace(
Priority_Node *victim_node,
Priority_Node *replacement_node
);
+#endif
/**
* @brief Updates the priority of all threads in the set
@@ -844,7 +876,7 @@ void _Thread_Priority_update_ignore_sticky( Thread_Control *the_thread );
* @retval true The left priority is less in the intuitive sense.
* @retval false The left priority is greater or equal in the intuitive sense.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
+static inline bool _Thread_Priority_less_than(
Priority_Control left,
Priority_Control right
)
@@ -861,7 +893,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
*
* @return The highest priority in the intuitive sense of priority.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
+static inline Priority_Control _Thread_Priority_highest(
Priority_Control left,
Priority_Control right
)
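/*
 * Illustrative note, not part of the patch: assuming, as elsewhere in the
 * SuperCore, that numerically smaller Priority_Control values are more
 * important, the comparison helpers order priorities as shown below.
 */
static void example_priority_order( void )
{
  /* Priority value 2 is less important than priority value 1. */
  _Assert( _Thread_Priority_less_than( 2, 1 ) );

  /* The more important of the two values is 1. */
  _Assert( _Thread_Priority_highest( 1, 2 ) == 1 );
}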
@@ -881,7 +913,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
* @return Returns the thread object information associated with the API of the
* object identifier.
*/
-RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information_by_id(
+static inline Objects_Information *_Thread_Get_objects_information_by_id(
Objects_Id id
)
{
@@ -909,7 +941,7 @@ RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information_by_id(
*
* @return Returns the thread object information of the thread.
*/
-RTEMS_INLINE_ROUTINE Thread_Information *_Thread_Get_objects_information(
+static inline Thread_Information *_Thread_Get_objects_information(
Thread_Control *the_thread
)
{
@@ -953,7 +985,7 @@ Objects_Id _Thread_Self_id( void );
*
* @return The cpu of the thread's scheduler.
*/
-RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
+static inline Per_CPU_Control *_Thread_Get_CPU(
const Thread_Control *thread
)
{
@@ -972,7 +1004,7 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
* @param[out] thread The thread.
* @param cpu The cpu to set.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
+static inline void _Thread_Set_CPU(
Thread_Control *thread,
Per_CPU_Control *cpu
)
@@ -996,7 +1028,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
* @retval true @a the_thread is the currently executing one.
* @retval false @a the_thread is not the currently executing one.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
+static inline bool _Thread_Is_executing (
const Thread_Control *the_thread
)
{
@@ -1016,7 +1048,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
* @retval true @a the_thread is the currently executing one.
* @retval false @a the_thread is not the currently executing one.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
+static inline bool _Thread_Is_executing_on_a_processor(
const Thread_Control *the_thread
)
{
@@ -1035,7 +1067,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
* @retval true @a the_thread is the heir.
* @retval false @a the_thread is not the heir.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
+static inline bool _Thread_Is_heir (
const Thread_Control *the_thread
)
{
@@ -1051,7 +1083,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
*
* @param[in, out] the_thread The thread to unblock.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Unblock (
+static inline void _Thread_Unblock (
Thread_Control *the_thread
)
{
@@ -1074,7 +1106,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Unblock (
* loaded in the floating point unit.
*/
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
-RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
+static inline bool _Thread_Is_allocated_fp (
const Thread_Control *the_thread
)
{
@@ -1100,7 +1132,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
*
* @param executing The currently executing thread.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
+static inline void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
@@ -1115,7 +1147,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
*
* @param executing The currently executing thread.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
+static inline void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
@@ -1140,7 +1172,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
 * point context is no longer associated with an active thread.
*/
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
-RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
+static inline void _Thread_Deallocate_fp( void )
{
_Thread_Allocated_fp = NULL;
}
@@ -1155,7 +1187,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
* @retval true Dispatching is disabled.
* @retval false Dispatching is enabled.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
+static inline bool _Thread_Is_context_switch_necessary( void )
{
return ( _Thread_Dispatch_necessary );
}
@@ -1165,7 +1197,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
*
* @return The maximum number of internal threads.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
+static inline uint32_t _Thread_Get_maximum_internal_threads(void)
{
/* Idle threads */
uint32_t maximum_internal_threads =
@@ -1187,7 +1219,7 @@ RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
* @retval pointer Pointer to the allocated Thread_Control.
* @retval NULL The operation failed.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
+static inline Thread_Control *_Thread_Internal_allocate( void )
{
return (Thread_Control *)
_Objects_Allocate_unprotected( &_Thread_Information.Objects );
@@ -1206,7 +1238,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
* @see _Thread_Dispatch(), _Thread_Start_multitasking() and
* _Thread_Dispatch_update_heir().
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
+static inline Thread_Control *_Thread_Get_heir_and_make_it_executing(
Per_CPU_Control *cpu_self
)
{
@@ -1226,7 +1258,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
* used.
* @param cpu The cpu.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
+static inline void _Thread_Update_CPU_time_used(
Thread_Control *the_thread,
Per_CPU_Control *cpu
)
@@ -1248,7 +1280,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
* @param heir The new heir for @a cpu_for_heir.
*/
#if defined( RTEMS_SMP )
-RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
+static inline void _Thread_Dispatch_update_heir(
Per_CPU_Control *cpu_self,
Per_CPU_Control *cpu_for_heir,
Thread_Control *heir
@@ -1305,7 +1337,7 @@ Timestamp_Control _Thread_Get_CPU_time_used_after_last_reset(
*
* @param[out] action_control The action control to initialize.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
+static inline void _Thread_Action_control_initialize(
Thread_Action_control *action_control
)
{
@@ -1317,7 +1349,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
*
* @param[out] action The Thread_Action to initialize.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
+static inline void _Thread_Action_initialize(
Thread_Action *action
)
{
@@ -1336,7 +1368,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
*
* @param handler is the handler for the action.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
+static inline void _Thread_Add_post_switch_action(
Thread_Control *the_thread,
Thread_Action *action,
Thread_Action_handler handler
@@ -1369,7 +1401,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
*
* @param[in, out] action is the action to add.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Append_post_switch_action(
+static inline void _Thread_Append_post_switch_action(
Thread_Control *the_thread,
Thread_Action *action
)
@@ -1391,7 +1423,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Append_post_switch_action(
* @retval true @a life_state is restarting.
* @retval false @a life_state is not restarting.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
+static inline bool _Thread_Is_life_restarting(
Thread_Life_state life_state
)
{
@@ -1406,7 +1438,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
* @retval true @a life_state is terminating.
* @retval false @a life_state is not terminating.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
+static inline bool _Thread_Is_life_terminating(
Thread_Life_state life_state
)
{
@@ -1421,7 +1453,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
* @retval true @a life_state allows life change.
* @retval false @a life_state does not allow life change.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
+static inline bool _Thread_Is_life_change_allowed(
Thread_Life_state life_state
)
{
@@ -1437,7 +1469,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
* @retval true @a life_state is life changing.
* @retval false @a life_state is not life changing.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
+static inline bool _Thread_Is_life_changing(
Thread_Life_state life_state
)
{
@@ -1453,7 +1485,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
* @retval true @a life_state is joinable.
* @retval false @a life_state is not joinable.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
+static inline bool _Thread_Is_joinable(
const Thread_Control *the_thread
)
{
@@ -1466,7 +1498,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
*
* @param[in, out] the_thread The thread to increase the resource count of.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
+static inline void _Thread_Resource_count_increment(
Thread_Control *the_thread
)
{
@@ -1482,7 +1514,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
*
* @param[in, out] the_thread The thread to decrement the resource count of.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
+static inline void _Thread_Resource_count_decrement(
Thread_Control *the_thread
)
{
@@ -1505,7 +1537,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
* @retval true The thread owns resources.
* @retval false The thread does not own resources.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
+static inline bool _Thread_Owns_resources(
const Thread_Control *the_thread
)
{
@@ -1520,7 +1552,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
*
* @return The thread's home scheduler.
*/
-RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
+static inline const Scheduler_Control *_Thread_Scheduler_get_home(
const Thread_Control *the_thread
)
{
@@ -1539,7 +1571,7 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
*
* @return The thread's home node.
*/
-RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
+static inline Scheduler_Node *_Thread_Scheduler_get_home_node(
const Thread_Control *the_thread
)
{
@@ -1561,17 +1593,17 @@ RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
*
* @return The scheduler node with the specified index.
*/
-RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
+static inline Scheduler_Node *_Thread_Scheduler_get_node_by_index(
const Thread_Control *the_thread,
size_t scheduler_index
)
{
+ _Assert( scheduler_index < _Scheduler_Count );
#if defined(RTEMS_SMP)
return (Scheduler_Node *)
( (uintptr_t) the_thread->Scheduler.nodes
+ scheduler_index * _Scheduler_Node_size );
#else
- _Assert( scheduler_index == 0 );
(void) scheduler_index;
return the_thread->Scheduler.nodes;
#endif
@@ -1584,7 +1616,7 @@ RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
* @param the_thread The thread to acquire the lock context.
* @param lock_context The lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
+static inline void _Thread_Scheduler_acquire_critical(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -1598,7 +1630,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
* @param the_thread The thread to release the lock context.
* @param lock_context The lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
+static inline void _Thread_Scheduler_release_critical(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -1620,7 +1652,7 @@ void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
* @param[in, out] scheduler_node The scheduler node for the request.
* @param request The request to add.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
+static inline void _Thread_Scheduler_add_request(
Thread_Control *the_thread,
Scheduler_Node *scheduler_node,
Scheduler_Node_request request
@@ -1663,7 +1695,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
* @param[in, out] the_thread The thread to add the wait node to.
* @param scheduler_node The scheduler node which provides the wait node.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
+static inline void _Thread_Scheduler_add_wait_node(
Thread_Control *the_thread,
Scheduler_Node *scheduler_node
)
@@ -1686,7 +1718,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
* @param the_thread The thread to add the request to remove a wait node.
* @param scheduler_node The scheduler node to remove a wait node from.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
+static inline void _Thread_Scheduler_remove_wait_node(
Thread_Control *the_thread,
Scheduler_Node *scheduler_node
)
@@ -1711,7 +1743,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
*
* @return The priority of the thread.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
+static inline Priority_Control _Thread_Get_priority(
const Thread_Control *the_thread
)
{
@@ -1728,7 +1760,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
*
* @return The unmapped priority of the thread.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_priority(
+static inline Priority_Control _Thread_Get_unmapped_priority(
const Thread_Control *the_thread
)
{
@@ -1742,7 +1774,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_priority(
*
* @return The unmapped real priority of the thread.
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_real_priority(
+static inline Priority_Control _Thread_Get_unmapped_real_priority(
const Thread_Control *the_thread
)
{
@@ -1759,7 +1791,7 @@ RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_real_priority(
*
* @see _Thread_Wait_release_default_critical().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
+static inline void _Thread_Wait_acquire_default_critical(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -1778,7 +1810,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
*
* @see _Thread_Wait_release_default().
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
+static inline Thread_Control *_Thread_Wait_acquire_default_for_executing(
ISR_lock_Context *lock_context
)
{
@@ -1800,7 +1832,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
*
* @see _Thread_Wait_release_default().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
+static inline void _Thread_Wait_acquire_default(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -1819,7 +1851,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
* @param lock_context The lock context used for the corresponding lock
* acquire.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
+static inline void _Thread_Wait_release_default_critical(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -1835,7 +1867,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
* @param[out] lock_context The lock context used for the corresponding lock
* acquire.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
+static inline void _Thread_Wait_release_default(
Thread_Control *the_thread,
ISR_lock_Context *lock_context
)
@@ -1854,7 +1886,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
* @param the_thread The thread to remove the request from.
* @param queue_lock_context The queue lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
+static inline void _Thread_Wait_remove_request_locked(
Thread_Control *the_thread,
Thread_queue_Lock_context *queue_lock_context
)
@@ -1875,7 +1907,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
* @param queue The queue that acquires.
* @param queue_lock_context The queue lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
+static inline void _Thread_Wait_acquire_queue_critical(
Thread_queue_Queue *queue,
Thread_queue_Lock_context *queue_lock_context
)
@@ -1893,7 +1925,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
* @param queue The queue that releases.
* @param queue_lock_context The queue lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
+static inline void _Thread_Wait_release_queue_critical(
Thread_queue_Queue *queue,
Thread_queue_Lock_context *queue_lock_context
)
@@ -1913,7 +1945,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
* @param[in, out] queue_context The thread queue context for the corresponding
* _Thread_Wait_release_critical().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
+static inline void _Thread_Wait_acquire_critical(
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
@@ -1969,7 +2001,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
* @param[in, out] queue_context The thread queue context for the corresponding
* _Thread_Wait_release().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
+static inline void _Thread_Wait_acquire(
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
@@ -1988,7 +2020,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
* @param[in, out] queue_context The thread queue context used for corresponding
* _Thread_Wait_acquire_critical().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
+static inline void _Thread_Wait_release_critical(
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
@@ -2030,7 +2062,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
* @param[in, out] queue_context The thread queue context used for corresponding
* _Thread_Wait_acquire().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
+static inline void _Thread_Wait_release(
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
@@ -2053,7 +2085,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
*
* @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
+static inline void _Thread_Wait_claim(
Thread_Control *the_thread,
Thread_queue_Queue *queue
)
@@ -2082,7 +2114,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
* @param[in, out] the_thread The thread.
* @param operations The corresponding thread queue operations.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
+static inline void _Thread_Wait_claim_finalize(
Thread_Control *the_thread,
const Thread_queue_Operations *operations
)
@@ -2101,7 +2133,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
* @param[in, out] queue_lock_context The thread queue lock context used for
* corresponding _Thread_Wait_acquire().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
+static inline void _Thread_Wait_remove_request(
Thread_Control *the_thread,
Thread_queue_Lock_context *queue_lock_context
)
@@ -2130,7 +2162,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
*
* @see _Thread_Wait_claim().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
+static inline void _Thread_Wait_restore_default(
Thread_Control *the_thread
)
{
@@ -2189,7 +2221,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
*
* @param the_thread The thread.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
+static inline void _Thread_Wait_tranquilize(
Thread_Control *the_thread
)
{
@@ -2207,7 +2239,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
* @param queue_context The thread queue context used for corresponding
* _Thread_Wait_acquire().
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
+static inline void _Thread_Wait_cancel(
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
@@ -2294,7 +2326,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
* @param[in, out] the_thread The thread to set the wait flags of.
* @param flags The flags to set.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
+static inline void _Thread_Wait_flags_set(
Thread_Control *the_thread,
Thread_Wait_flags flags
)
@@ -2313,7 +2345,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
*
* @return The thread's wait flags.
*/
-RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
+static inline Thread_Wait_flags _Thread_Wait_flags_get(
const Thread_Control *the_thread
)
{
@@ -2331,7 +2363,7 @@ RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
*
* @return The thread's wait flags.
*/
-RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
+static inline Thread_Wait_flags _Thread_Wait_flags_get_acquire(
const Thread_Control *the_thread
)
{
@@ -2358,7 +2390,7 @@ RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
* @retval true The wait flags were equal to the expected wait flags.
* @retval false The wait flags were not equal to the expected wait flags.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
+static inline bool _Thread_Wait_flags_try_change_release(
Thread_Control *the_thread,
Thread_Wait_flags expected_flags,
Thread_Wait_flags desired_flags
@@ -2398,7 +2430,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
* @retval true The wait flags were equal to the expected wait flags.
* @retval false The wait flags were not equal to the expected wait flags.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
+static inline bool _Thread_Wait_flags_try_change_acquire(
Thread_Control *the_thread,
Thread_Wait_flags expected_flags,
Thread_Wait_flags desired_flags
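/*
 * Illustrative sketch, not part of the patch: the compare-and-swap style
 * usage pattern of the wait flag try-change operations.  The two flag values
 * are hypothetical placeholders; real callers use the thread wait state and
 * class constants.
 */
static void example_wait_flags( Thread_Control *the_thread )
{
  Thread_Wait_flags intend_to_block = 0x1; /* hypothetical value */
  Thread_Wait_flags ready_again = 0x2;     /* hypothetical value */
  bool              unblocked;

  /* A waiter first publishes its intention to block. */
  _Thread_Wait_flags_set( the_thread, intend_to_block );

  /*
   * A concurrent satisfier tries to switch the flags before the waiter
   * actually blocks.  On success the waiter never blocks; on failure the
   * waiter already blocked and must be unblocked by other means.
   */
  unblocked = _Thread_Wait_flags_try_change_release(
    the_thread,
    intend_to_block,
    ready_again
  );
  (void) unblocked;
}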
@@ -2452,7 +2484,7 @@ Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
*
* @param the_thread The thread to get the status of the wait return code of.
*/
-RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
+static inline Status_Control _Thread_Wait_get_status(
const Thread_Control *the_thread
)
{
@@ -2486,7 +2518,7 @@ void _Thread_Timeout( Watchdog_Control *the_watchdog );
* @param [in, out] timer The timer to initialize.
* @param cpu The cpu for the operation.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
+static inline void _Thread_Timer_initialize(
Thread_Timer_information *timer,
Per_CPU_Control *cpu
)
@@ -2503,7 +2535,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
* @param cpu The cpu for the operation.
* @param ticks The ticks to add to the timeout ticks.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
+static inline void _Thread_Add_timeout_ticks(
Thread_Control *the_thread,
Per_CPU_Control *cpu,
Watchdog_Interval ticks
@@ -2529,7 +2561,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
* @param routine The watchdog routine for the thread.
* @param expire Expiration for the watchdog.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
+static inline void _Thread_Timer_insert_realtime(
Thread_Control *the_thread,
Per_CPU_Control *cpu,
Watchdog_Service_routine_entry routine,
@@ -2554,7 +2586,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
*
* @param[in, out] the_thread The thread to remove the watchdog from.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
+static inline void _Thread_Timer_remove( Thread_Control *the_thread )
{
ISR_lock_Context lock_context;
@@ -2580,7 +2612,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
* if necessary.
* @param queue The thread queue.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
+static inline void _Thread_Remove_timer_and_unblock(
Thread_Control *the_thread,
Thread_queue_Queue *queue
)
@@ -2651,7 +2683,7 @@ void _Thread_Do_unpin(
*
* @param executing The currently executing thread.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
+static inline void _Thread_Pin( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
_Assert( executing == _Thread_Get_executing() );
@@ -2668,7 +2700,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
* @param executing The currently executing thread.
* @param cpu_self The cpu for the operation.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Unpin(
+static inline void _Thread_Unpin(
Thread_Control *executing,
Per_CPU_Control *cpu_self
)
@@ -2720,7 +2752,7 @@ extern "C" {
*
* @param status is the thread wait status.
*/
-RTEMS_INLINE_ROUTINE void _Thread_Timer_remove_and_continue(
+static inline void _Thread_Timer_remove_and_continue(
Thread_Control *the_thread,
Status_Control status
)
diff --git a/cpukit/include/rtems/score/threadmp.h b/cpukit/include/rtems/score/threadmp.h
index 59c2ea675c..30c7c84bd4 100644
--- a/cpukit/include/rtems/score/threadmp.h
+++ b/cpukit/include/rtems/score/threadmp.h
@@ -145,7 +145,7 @@ void _Thread_MP_Free_proxy( Thread_Control *the_thread );
 * @retval false The object id is not valid or the thread MP with this object
* id is not remote.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_MP_Is_remote( Objects_Id id )
+static inline bool _Thread_MP_Is_remote( Objects_Id id )
{
Objects_Information *information;
diff --git a/cpukit/include/rtems/score/threadqimpl.h b/cpukit/include/rtems/score/threadqimpl.h
index 95289dc872..44b2010226 100644
--- a/cpukit/include/rtems/score/threadqimpl.h
+++ b/cpukit/include/rtems/score/threadqimpl.h
@@ -166,7 +166,7 @@ void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread );
*
* @param[out] queue_context The thread queue context to initialize.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
+static inline void _Thread_queue_Context_initialize(
Thread_queue_Context *queue_context
)
{
@@ -191,7 +191,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void
+static inline void
_Thread_queue_Context_set_thread_state(
Thread_queue_Context *queue_context,
States_Control thread_state
@@ -208,7 +208,7 @@ _Thread_queue_Context_set_thread_state(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void
+static inline void
_Thread_queue_Context_set_timeout_ticks(
Thread_queue_Context *queue_context,
Watchdog_Interval ticks
@@ -229,7 +229,7 @@ _Thread_queue_Context_set_timeout_ticks(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void
+static inline void
_Thread_queue_Context_set_timeout_argument(
Thread_queue_Context *queue_context,
const void *arg,
@@ -248,7 +248,7 @@ _Thread_queue_Context_set_timeout_argument(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void
+static inline void
_Thread_queue_Context_set_enqueue_callout(
Thread_queue_Context *queue_context,
Thread_queue_Enqueue_callout enqueue_callout
@@ -264,7 +264,7 @@ _Thread_queue_Context_set_enqueue_callout(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void
+static inline void
_Thread_queue_Context_set_enqueue_do_nothing_extra(
Thread_queue_Context *queue_context
)
@@ -281,7 +281,7 @@ _Thread_queue_Context_set_enqueue_do_nothing_extra(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void
+static inline void
_Thread_queue_Context_set_enqueue_timeout_ticks(
Thread_queue_Context *queue_context,
Watchdog_Interval ticks
@@ -304,7 +304,7 @@ _Thread_queue_Context_set_enqueue_timeout_ticks(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void
+static inline void
_Thread_queue_Context_set_enqueue_timeout_monotonic_timespec(
Thread_queue_Context *queue_context,
const struct timespec *timeout,
@@ -330,7 +330,7 @@ _Thread_queue_Context_set_enqueue_timeout_monotonic_timespec(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void
+static inline void
_Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
Thread_queue_Context *queue_context,
const struct timespec *timeout,
@@ -356,7 +356,7 @@ _Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
*
* @see _Thread_queue_Enqueue().
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout(
+static inline void _Thread_queue_Context_set_deadlock_callout(
Thread_queue_Context *queue_context,
Thread_queue_Deadlock_callout deadlock_callout
)
@@ -370,7 +370,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout(
* @param[out] queue_context The thread queue context to clear the priority
* update count.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Context_clear_priority_updates(
+static inline void _Thread_queue_Context_clear_priority_updates(
Thread_queue_Context *queue_context
)
{
@@ -385,7 +385,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_clear_priority_updates(
*
* @return The priority update count of @a queue_context.
*/
-RTEMS_INLINE_ROUTINE size_t _Thread_queue_Context_get_priority_updates(
+static inline size_t _Thread_queue_Context_get_priority_updates(
const Thread_queue_Context *queue_context
)
{
@@ -399,7 +399,7 @@ RTEMS_INLINE_ROUTINE size_t _Thread_queue_Context_get_priority_updates(
* update count of.
* @param update_count The priority update count.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Context_restore_priority_updates(
+static inline void _Thread_queue_Context_restore_priority_updates(
Thread_queue_Context *queue_context,
size_t update_count
)
@@ -415,7 +415,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_restore_priority_updates(
* array.
* @param the_thread The thread for the priority update.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Context_add_priority_update(
+static inline void _Thread_queue_Context_add_priority_update(
Thread_queue_Context *queue_context,
Thread_Control *the_thread
)
@@ -443,7 +443,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_add_priority_update(
* @param[out] queue_context The thread queue context to set the ISR level of.
* @param level The ISR level to set @a queue_context to.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_ISR_level(
+static inline void _Thread_queue_Context_set_ISR_level(
Thread_queue_Context *queue_context,
ISR_Level level
)
@@ -461,7 +461,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_ISR_level(
*
* @return The current processor.
*/
-RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_queue_Dispatch_disable(
+static inline Per_CPU_Control *_Thread_queue_Dispatch_disable(
Thread_queue_Context *queue_context
)
{
@@ -480,7 +480,7 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_queue_Dispatch_disable(
* objects with multiprocessing (MP) support.
*/
#if defined(RTEMS_MULTIPROCESSING)
-RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_MP_callout(
+static inline void _Thread_queue_Context_set_MP_callout(
Thread_queue_Context *queue_context,
Thread_queue_MP_callout mp_callout
)
@@ -500,7 +500,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_MP_callout(
*
* @param[out] gate The gate to close.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_close(
+static inline void _Thread_queue_Gate_close(
Thread_queue_Gate *gate
)
{
@@ -513,7 +513,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_close(
* @param[in, out] chain The chain to add the gate to.
* @param gate The gate to add to the chain.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_add(
+static inline void _Thread_queue_Gate_add(
Chain_Control *chain,
Thread_queue_Gate *gate
)
@@ -526,7 +526,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_add(
*
* @param[out] gate The gate to open.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_open(
+static inline void _Thread_queue_Gate_open(
Thread_queue_Gate *gate
)
{
@@ -540,7 +540,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_open(
*
* @param gate The gate to wait for.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_wait(
+static inline void _Thread_queue_Gate_wait(
Thread_queue_Gate *gate
)
{
@@ -555,7 +555,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Gate_wait(
*
* @param[out] heads The thread queue heads to initialize.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
+static inline void _Thread_queue_Heads_initialize(
Thread_queue_Heads *heads
)
{
@@ -579,7 +579,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
* @param[out] queue The thread queue queue to initialize.
* @param name The name for the @a queue.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_initialize(
+static inline void _Thread_queue_Queue_initialize(
Thread_queue_Queue *queue,
const char *name
)
@@ -599,7 +599,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_initialize(
* @param lock_stats The lock statistics.
* @param[out] lock_context The interrupt lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_do_acquire_critical(
+static inline void _Thread_queue_Queue_do_acquire_critical(
Thread_queue_Queue *queue,
#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
SMP_lock_Stats *lock_stats,
@@ -635,7 +635,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_do_acquire_critical(
* @param queue The thread queue queue to release in a critical section.
* @param[out] lock_context The interrupt lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release_critical(
+static inline void _Thread_queue_Queue_release_critical(
Thread_queue_Queue *queue,
ISR_lock_Context *lock_context
)
@@ -657,7 +657,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release_critical(
* @param queue The thread queue queue to release.
* @param[out] lock_context The interrupt lock context to enable interrupts.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release(
+static inline void _Thread_queue_Queue_release(
Thread_queue_Queue *queue,
ISR_lock_Context *lock_context
)
@@ -697,7 +697,7 @@ void _Thread_queue_Do_acquire_critical(
ISR_lock_Context *lock_context
);
#else
-RTEMS_INLINE_ROUTINE void _Thread_queue_Do_acquire_critical(
+static inline void _Thread_queue_Do_acquire_critical(
Thread_queue_Control *the_thread_queue,
ISR_lock_Context *lock_context
)
@@ -713,7 +713,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Do_acquire_critical(
* @param the_thread_queue The thread queue control to acquire.
* @param[out] lock_context The interrupt lock context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire_critical(
+static inline void _Thread_queue_Acquire_critical(
Thread_queue_Control *the_thread_queue,
Thread_queue_Context *queue_context
)
@@ -736,7 +736,7 @@ void _Thread_queue_Acquire(
Thread_queue_Context *queue_context
);
#else
-RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire(
+static inline void _Thread_queue_Acquire(
Thread_queue_Control *the_thread_queue,
Thread_queue_Context *queue_context
)
@@ -755,7 +755,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire(
* @retval false The thread queue control is not the owner of the lock.
*/
#if defined(RTEMS_DEBUG)
-RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_lock_owner(
+static inline bool _Thread_queue_Is_lock_owner(
const Thread_queue_Control *the_thread_queue
)
{
@@ -779,7 +779,7 @@ void _Thread_queue_Do_release_critical(
ISR_lock_Context *lock_context
);
#else
-RTEMS_INLINE_ROUTINE void _Thread_queue_Do_release_critical(
+static inline void _Thread_queue_Do_release_critical(
Thread_queue_Control *the_thread_queue,
ISR_lock_Context *lock_context
)
@@ -796,7 +796,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Do_release_critical(
* @param the_thread_queue The thread queue control to release.
* @param[out] queue_context The thread queue context.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Release_critical(
+static inline void _Thread_queue_Release_critical(
Thread_queue_Control *the_thread_queue,
Thread_queue_Context *queue_context
)
@@ -819,7 +819,7 @@ void _Thread_queue_Release(
Thread_queue_Context *queue_context
);
#else
-RTEMS_INLINE_ROUTINE void _Thread_queue_Release(
+static inline void _Thread_queue_Release(
Thread_queue_Control *the_thread_queue,
Thread_queue_Context *queue_context
)
@@ -1107,7 +1107,7 @@ void _Thread_queue_Surrender_sticky(
* @retval true @a queue is empty.
* @retval false @a queue is not empty.
*/
-RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_empty(
+static inline bool _Thread_queue_Is_empty(
const Thread_queue_Queue *queue
)
{
@@ -1305,7 +1305,7 @@ void _Thread_queue_Initialize(
*
* @param[out] the_thread_queue The thread queue to destroy.
*/
-RTEMS_INLINE_ROUTINE void _Thread_queue_Destroy(
+static inline void _Thread_queue_Destroy(
Thread_queue_Control *the_thread_queue
)
{
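/*
 * Illustrative sketch, not part of the patch: the typical order in which the
 * thread queue context setters above are used before an enqueue.  The
 * blocking state STATES_WAITING_FOR_MUTEX and the tick count are placeholders
 * for an actual thread queue user.
 */
static void example_enqueue_setup( Thread_queue_Context *queue_context )
{
  _Thread_queue_Context_initialize( queue_context );
  _Thread_queue_Context_set_thread_state(
    queue_context,
    STATES_WAITING_FOR_MUTEX
  );
  _Thread_queue_Context_set_enqueue_timeout_ticks( queue_context, 10 );

  /* Acquire the thread queue and perform the enqueue afterwards. */
}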
diff --git a/cpukit/include/rtems/score/threadqops.h b/cpukit/include/rtems/score/threadqops.h
index 504383e98d..d05823eefb 100644
--- a/cpukit/include/rtems/score/threadqops.h
+++ b/cpukit/include/rtems/score/threadqops.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2021 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/timecounter.h b/cpukit/include/rtems/score/timecounter.h
index 6559801559..ced3d7c60c 100644
--- a/cpukit/include/rtems/score/timecounter.h
+++ b/cpukit/include/rtems/score/timecounter.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2015, 2021 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2015, 2021 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/timecounterimpl.h b/cpukit/include/rtems/score/timecounterimpl.h
index a1f79f2ee2..ee8f795bba 100644
--- a/cpukit/include/rtems/score/timecounterimpl.h
+++ b/cpukit/include/rtems/score/timecounterimpl.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2015 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2015 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/timespec.h b/cpukit/include/rtems/score/timespec.h
index 2090f19b32..2e419d69de 100644
--- a/cpukit/include/rtems/score/timespec.h
+++ b/cpukit/include/rtems/score/timespec.h
@@ -3,7 +3,7 @@
/**
* @file
*
- * @ingroup Timespec
+ * @ingroup RTEMSScoreTimespec
*
* @brief This header file provides the interfaces of the
* @ref RTEMSScoreTimespec.
diff --git a/cpukit/include/rtems/score/timestampimpl.h b/cpukit/include/rtems/score/timestampimpl.h
index 22df1dd3a1..fee1994bfd 100644
--- a/cpukit/include/rtems/score/timestampimpl.h
+++ b/cpukit/include/rtems/score/timestampimpl.h
@@ -63,7 +63,7 @@ extern "C" {
* @param _seconds The seconds portion of the timestamp.
* @param _nanoseconds The nanoseconds portion of the timestamp.
*/
-RTEMS_INLINE_ROUTINE void _Timestamp_Set(
+static inline void _Timestamp_Set(
Timestamp_Control *_time,
time_t _seconds,
long _nanoseconds
@@ -86,7 +86,7 @@ RTEMS_INLINE_ROUTINE void _Timestamp_Set(
* @param[out] _time The timestamp instance to zero.
*/
-RTEMS_INLINE_ROUTINE void _Timestamp_Set_to_zero(
+static inline void _Timestamp_Set_to_zero(
Timestamp_Control *_time
)
{
@@ -105,7 +105,7 @@ RTEMS_INLINE_ROUTINE void _Timestamp_Set_to_zero(
 * @retval false @a _lhs is greater than or equal to @a _rhs.
*/
-RTEMS_INLINE_ROUTINE bool _Timestamp_Less_than(
+static inline bool _Timestamp_Less_than(
const Timestamp_Control *_lhs,
const Timestamp_Control *_rhs
)
@@ -125,7 +125,7 @@ RTEMS_INLINE_ROUTINE bool _Timestamp_Less_than(
 * @retval false @a _lhs is less than or equal to @a _rhs.
*/
-RTEMS_INLINE_ROUTINE bool _Timestamp_Greater_than(
+static inline bool _Timestamp_Greater_than(
const Timestamp_Control *_lhs,
const Timestamp_Control *_rhs
)
@@ -145,7 +145,7 @@ RTEMS_INLINE_ROUTINE bool _Timestamp_Greater_than(
* @retval false @a _lhs is not equal to @a _rhs.
*/
-RTEMS_INLINE_ROUTINE bool _Timestamp_Equal_to(
+static inline bool _Timestamp_Equal_to(
const Timestamp_Control *_lhs,
const Timestamp_Control *_rhs
)
@@ -162,7 +162,7 @@ RTEMS_INLINE_ROUTINE bool _Timestamp_Equal_to(
* @param[in, out] _time The base time to be added to.
 * @param _add points to the timestamp to add to the first argument.
*/
-RTEMS_INLINE_ROUTINE void _Timestamp_Add_to(
+static inline void _Timestamp_Add_to(
Timestamp_Control *_time,
const Timestamp_Control *_add
)
@@ -181,7 +181,7 @@ RTEMS_INLINE_ROUTINE void _Timestamp_Add_to(
* @param[out] _result Contains the difference between starting and ending
* time after the method call.
*/
-RTEMS_INLINE_ROUTINE void _Timestamp_Subtract(
+static inline void _Timestamp_Subtract(
const Timestamp_Control *_start,
const Timestamp_Control *_end,
Timestamp_Control *_result
@@ -201,7 +201,7 @@ RTEMS_INLINE_ROUTINE void _Timestamp_Subtract(
* @param[out] _ival_percentage The integer portion of the average.
* @param[out] _fval_percentage The thousandths of percentage.
*/
-RTEMS_INLINE_ROUTINE void _Timestamp_Divide(
+static inline void _Timestamp_Divide(
const Timestamp_Control *_lhs,
const Timestamp_Control *_rhs,
uint32_t *_ival_percentage,
@@ -231,7 +231,7 @@ RTEMS_INLINE_ROUTINE void _Timestamp_Divide(
*
* @return The seconds portion of @a _time.
*/
-RTEMS_INLINE_ROUTINE time_t _Timestamp_Get_seconds(
+static inline time_t _Timestamp_Get_seconds(
const Timestamp_Control *_time
)
{
@@ -247,7 +247,7 @@ RTEMS_INLINE_ROUTINE time_t _Timestamp_Get_seconds(
*
* @return The nanoseconds portion of @a _time.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Timestamp_Get_nanoseconds(
+static inline uint32_t _Timestamp_Get_nanoseconds(
const Timestamp_Control *_time
)
{
@@ -267,7 +267,7 @@ RTEMS_INLINE_ROUTINE uint32_t _Timestamp_Get_nanoseconds(
*
* @return The time in nanoseconds.
*/
-RTEMS_INLINE_ROUTINE uint64_t _Timestamp_Get_as_nanoseconds(
+static inline uint64_t _Timestamp_Get_as_nanoseconds(
const Timestamp_Control *_time
)
{
@@ -284,7 +284,7 @@ RTEMS_INLINE_ROUTINE uint64_t _Timestamp_Get_as_nanoseconds(
* @param _timestamp The timestamp.
* @param[out] _timespec The timespec to be filled in by the method.
*/
-RTEMS_INLINE_ROUTINE void _Timestamp_To_timespec(
+static inline void _Timestamp_To_timespec(
const Timestamp_Control *_timestamp,
struct timespec *_timespec
)
@@ -298,7 +298,7 @@ RTEMS_INLINE_ROUTINE void _Timestamp_To_timespec(
* @param _timestamp The timestamp.
* @param[out] _timeval The timeval to be filled in by the method.
*/
-RTEMS_INLINE_ROUTINE void _Timestamp_To_timeval(
+static inline void _Timestamp_To_timeval(
const Timestamp_Control *_timestamp,
struct timeval *_timeval
)
diff --git a/cpukit/include/rtems/score/tls.h b/cpukit/include/rtems/score/tls.h
index f3bcb2c96a..8716c5230c 100644
--- a/cpukit/include/rtems/score/tls.h
+++ b/cpukit/include/rtems/score/tls.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2014, 2023 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,7 +37,7 @@
#ifndef _RTEMS_SCORE_TLS_H
#define _RTEMS_SCORE_TLS_H
-#include <rtems/score/cpu.h>
+#include <rtems/score/cpuimpl.h>
#include <string.h>
@@ -59,31 +59,51 @@ extern "C" {
* @{
*/
-extern char _TLS_Data_begin[];
-
-extern char _TLS_Data_end[];
+/**
+ * @brief Represents the TLS configuration.
+ */
+typedef struct {
+ /**
+ * @brief This member is initialized to _TLS_Data_begin.
+ */
+ const char *data_begin;
-extern char _TLS_Data_size[];
+ /**
+ * @brief This member is initialized to _TLS_Data_size.
+ */
+ const char *data_size;
-extern char _TLS_BSS_begin[];
+ /**
+ * @brief This member is initialized to _TLS_BSS_begin.
+ */
+ const char *bss_begin;
-extern char _TLS_BSS_end[];
+ /**
+ * @brief This member is initialized to _TLS_BSS_size.
+ */
+ const char *bss_size;
-extern char _TLS_BSS_size[];
+ /**
+ * @brief This member is initialized to _TLS_Size.
+ */
+ const char *size;
-extern char _TLS_Size[];
+ /**
+ * @brief This member is initialized to _TLS_Alignment.
+ */
+ const char *alignment;
+} TLS_Configuration;
/**
- * @brief The TLS section alignment.
+ * @brief Provides the TLS configuration.
*
- * This symbol is provided by the linker command file as the maximum alignment
- * of the .tdata and .tbss sections. The linker ensures that the first TLS
- * output section is aligned to the maximum alignment of all TLS output
- * sections, see function _bfd_elf_tls_setup() in bfd/elflink.c of the GNU
- * Binutils sources. The linker command file must take into account the case
- * that the .tdata section is empty and the .tbss section is non-empty.
+ * Directly using symbols with an arbitrary absolute address such as
+ * _TLS_Alignment may not work with all code models (for example the AArch64
+ * tiny and small code models). Store the addresses in a read-only object.
+ * Using the volatile qualifier ensures that the compiler actually loads the
+ * address from the object.
*/
-extern char _TLS_Alignment[];
+extern const volatile TLS_Configuration _TLS_Configuration;
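/*
 * Illustrative sketch, not part of the patch: one way the read-only
 * configuration object could be defined from the linker-provided symbols
 * which were previously used directly.  The translation unit holding this
 * definition is an assumption of this sketch; the member names are the ones
 * declared above.
 */
extern char _TLS_Data_begin[];
extern char _TLS_Data_size[];
extern char _TLS_BSS_begin[];
extern char _TLS_BSS_size[];
extern char _TLS_Size[];
extern char _TLS_Alignment[];

const volatile TLS_Configuration _TLS_Configuration = {
  .data_begin = _TLS_Data_begin,
  .data_size = _TLS_Data_size,
  .bss_begin = _TLS_BSS_begin,
  .bss_size = _TLS_BSS_size,
  .size = _TLS_Size,
  .alignment = _TLS_Alignment
};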
typedef struct {
/*
@@ -116,101 +136,71 @@ typedef struct {
} TLS_Index;
/**
- * @brief Gets the TLS size.
+ * @brief Gets the size of the thread control block area in bytes.
*
- * @return The TLS size.
- */
-static inline uintptr_t _TLS_Get_size( void )
-{
- uintptr_t size;
-
- /*
- * We must be careful with using _TLS_Size here since this could lead GCC to
- * assume that this symbol is not 0 and the tests for 0 will be optimized
- * away.
- */
- size = (uintptr_t) _TLS_Size;
- RTEMS_OBFUSCATE_VARIABLE( size );
- return size;
-}
-
-/**
- * @brief Returns the value aligned up to the stack alignment.
+ * @param config is the TLS configuration.
*
- * @param val The value to align.
- *
- * @return The value aligned to the stack alignment.
- */
-static inline uintptr_t _TLS_Align_up( uintptr_t val )
-{
- uintptr_t alignment = CPU_STACK_ALIGNMENT;
-
- return RTEMS_ALIGN_UP( val, alignment );
-}
-
-/**
- * @brief Returns the size of the thread control block area size for this
- * alignment, or the minimum size if alignment is too small.
- *
- * @param alignment The alignment for the operation.
- *
- * @return The size of the thread control block area.
+ * @return Returns the size of the thread control block area in bytes.
*/
static inline uintptr_t _TLS_Get_thread_control_block_area_size(
- uintptr_t alignment
+ const volatile TLS_Configuration *config
)
{
- return alignment <= sizeof(TLS_Thread_control_block) ?
- sizeof(TLS_Thread_control_block) : alignment;
+#if CPU_THREAD_LOCAL_STORAGE_VARIANT == 11
+ uintptr_t alignment;
+
+ alignment = (uintptr_t) config->alignment;
+
+ return RTEMS_ALIGN_UP( sizeof( TLS_Thread_control_block ), alignment );
+#else
+ (void) config;
+ return sizeof( TLS_Thread_control_block );
+#endif
}
/**
- * @brief Return the TLS area allocation size.
+ * @brief Gets the allocation size of the thread-local storage area in bytes.
*
- * @return The TLS area allocation size.
+ * @return Returns the allocation size of the thread-local storage area in
+ * bytes.
*/
uintptr_t _TLS_Get_allocation_size( void );
/**
- * @brief Copies TLS size bytes from the address tls_area and returns a pointer
- * to the start of the area after clearing it.
+ * @brief Initializes the thread-local storage data.
*
- * @param tls_area The starting address of the area to clear.
+ * @param config is the TLS configuration.
*
- * @return The pointer to the beginning of the cleared section.
+ * @param[out] tls_data is the thread-local storage data to initialize.
*/
-static inline void *_TLS_Copy_and_clear( void *tls_area )
+static inline void _TLS_Copy_and_clear(
+ const volatile TLS_Configuration *config,
+ void *tls_data
+)
{
- tls_area = memcpy(
- tls_area,
- _TLS_Data_begin,
- (size_t) ((uintptr_t)_TLS_Data_size)
- );
-
+ tls_data =
+ memcpy( tls_data, config->data_begin, (uintptr_t) config->data_size );
memset(
- (char *) tls_area + (size_t)((intptr_t) _TLS_BSS_begin) -
- (size_t)((intptr_t) _TLS_Data_begin),
+ (char *) tls_data +
+ (uintptr_t) config->bss_begin - (uintptr_t) config->data_begin,
0,
- ((size_t) (intptr_t)_TLS_BSS_size)
+ (uintptr_t) config->bss_size
);
-
- return tls_area;
}
/**
- * @brief Initializes the dynamic thread vector.
+ * @brief Initializes the thread control block and the dynamic thread vector.
*
- * @param tls_block The tls block for @a dtv.
- * @param tcb The thread control block for @a dtv.
- * @param[out] dtv The dynamic thread vector to initialize.
+ * @param tls_data is the thread-local storage data address.
*
- * @return Pointer to an area that was copied and cleared from tls_block
- * onwards (@see _TLS_Copy_and_clear).
+ * @param[out] tcb is the thread control block to initialize.
+ *
+ * @param[out] dtv is the dynamic thread vector to initialize.
*/
-static inline void *_TLS_Initialize(
- void *tls_block,
- TLS_Thread_control_block *tcb,
+static inline void _TLS_Initialize_TCB_and_DTV(
+ void *tls_data,
+ TLS_Thread_control_block *tcb,
TLS_Dynamic_thread_vector *dtv
)
{
@@ -220,86 +210,72 @@ static inline void *_TLS_Initialize(
#else
tcb->dtv = dtv;
dtv->generation_number = 1;
- dtv->tls_blocks[0] = tls_block;
+ dtv->tls_blocks[0] = tls_data;
#endif
-
- return _TLS_Copy_and_clear( tls_block );
}
/**
- * @brief Initializes a dynamic thread vector beginning at the given starting
- * address.
+ * @brief Initializes the thread-local storage area.
*
- * Use Variant I, TLS offsets emitted by linker takes the TCB into account.
+ * @param[out] tls_area is the thread-local storage area to initialize.
*
- * @param tls_area The tls area for the initialization.
- *
- * @return Pointer to an area that was copied and cleared from tls_block
- * onwards (@see _TLS_Copy_and_clear).
+ * @return Where the architecture uses Variant I and the TLS offsets emitted
+ * by the linker neglect the TCB, returns the address of the thread-local
+ * storage data. Otherwise, returns the address of the thread control block.
*/
-static inline void *_TLS_TCB_at_area_begin_initialize( void *tls_area )
+static inline void *_TLS_Initialize_area( void *tls_area )
{
- void *tls_block = (char *) tls_area
- + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment );
- TLS_Thread_control_block *tcb = tls_area;
- uintptr_t aligned_size = _TLS_Align_up( (uintptr_t) _TLS_Size );
- TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
- ((char *) tls_block + aligned_size);
-
- return _TLS_Initialize( tls_block, tcb, dtv );
-}
+ const volatile TLS_Configuration *config;
+ uintptr_t alignment;
+ void *tls_data;
+ TLS_Thread_control_block *tcb;
+ TLS_Dynamic_thread_vector *dtv;
+ void *return_value;
+#if CPU_THREAD_LOCAL_STORAGE_VARIANT == 11
+ uintptr_t tcb_size;
+#endif
+#if CPU_THREAD_LOCAL_STORAGE_VARIANT == 20
+ uintptr_t size;
+ uintptr_t alignment_2;
+#endif
-/**
- * @brief Initializes a dynamic thread vector with the area before a given
- * starting address as thread control block.
- *
- * Use Variant I, TLS offsets emitted by linker neglects the TCB.
- *
- * @param tls_area The tls area for the initialization.
- *
- * @return Pointer to an area that was copied and cleared from tls_block
- * onwards (@see _TLS_Copy_and_clear).
- */
-static inline void *_TLS_TCB_before_TLS_block_initialize( void *tls_area )
-{
- void *tls_block = (char *) tls_area
- + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment );
- TLS_Thread_control_block *tcb = (TLS_Thread_control_block *)
- ((char *) tls_block - sizeof(*tcb));
- uintptr_t aligned_size = _TLS_Align_up( (uintptr_t) _TLS_Size );
- TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
- ((char *) tls_block + aligned_size);
-
- return _TLS_Initialize( tls_block, tcb, dtv );
-}
+ config = &_TLS_Configuration;
+ alignment = (uintptr_t) config->alignment;
-/**
- * @brief Initializes a dynamic thread vector with the area after a given
- * starting address as thread control block.
- *
- * Use Variant II
- *
- * @param tls_area The tls area for the initialization.
- *
- * @return Pointer to an area that was copied and cleared from tls_block
- * onwards (@see _TLS_Copy_and_clear).
- */
-static inline void *_TLS_TCB_after_TLS_block_initialize( void *tls_area )
-{
- uintptr_t size = (uintptr_t) _TLS_Size;
- uintptr_t tls_align = (uintptr_t) _TLS_Alignment;
- uintptr_t tls_mask = tls_align - 1;
- uintptr_t heap_align = _TLS_Align_up( tls_align );
- uintptr_t heap_mask = heap_align - 1;
- TLS_Thread_control_block *tcb = (TLS_Thread_control_block *)
- ((char *) tls_area + ((size + heap_mask) & ~heap_mask));
- void *tls_block = (char *) tcb - ((size + tls_mask) & ~tls_mask);
- TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
- ((char *) tcb + sizeof(*tcb));
-
- _TLS_Initialize( tls_block, tcb, dtv );
-
- return tcb;
+#ifdef __i386__
+ dtv = NULL;
+#else
+ dtv = (TLS_Dynamic_thread_vector *) tls_area;
+ tls_area = (char *) tls_area + sizeof( *dtv );
+#endif
+
+#if CPU_THREAD_LOCAL_STORAGE_VARIANT == 10
+ tls_data = (void *)
+ RTEMS_ALIGN_UP( (uintptr_t) tls_area + sizeof( *tcb ), alignment );
+ tcb = (TLS_Thread_control_block *) ((char *) tls_data - sizeof( *tcb ));
+ return_value = tls_data;
+#elif CPU_THREAD_LOCAL_STORAGE_VARIANT == 11
+ tcb_size = RTEMS_ALIGN_UP( sizeof( *tcb ), alignment );
+ tls_data = (void *)
+ RTEMS_ALIGN_UP( (uintptr_t) tls_area + tcb_size, alignment );
+ tcb = (TLS_Thread_control_block *) ((char *) tls_data - tcb_size);
+ return_value = tcb;
+#elif CPU_THREAD_LOCAL_STORAGE_VARIANT == 20
+ alignment_2 = RTEMS_ALIGN_UP( alignment, CPU_SIZEOF_POINTER );
+ tls_area = (void *) RTEMS_ALIGN_UP( (uintptr_t) tls_area, alignment_2 );
+ size = (uintptr_t) config->size;
+ tcb = (TLS_Thread_control_block *)
+ ((char *) tls_area + RTEMS_ALIGN_UP( size, alignment_2 ));
+ tls_data = (char *) tcb - RTEMS_ALIGN_UP( size, alignment );
+ return_value = tcb;
+#else
+#error "unexpected CPU_THREAD_LOCAL_STORAGE_VARIANT value"
+#endif
+
+ _TLS_Initialize_TCB_and_DTV( tls_data, tcb, dtv );
+ _TLS_Copy_and_clear( config, tls_data );
+
+ return return_value;
}
/** @} */
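
The new initialization path above selects the thread-local storage layout at compile time via CPU_THREAD_LOCAL_STORAGE_VARIANT (10 and 11 for the two Variant I flavours, 20 for Variant II). The following is a minimal, hypothetical sketch of the arithmetic in the `== 10` branch; the addresses and sizes are made up, and ALIGN_UP is only assumed to behave like RTEMS_ALIGN_UP (round up to a power-of-two boundary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to behave like RTEMS_ALIGN_UP(): round value up to the next
 * multiple of alignment, which must be a power of two. */
#define ALIGN_UP( value, alignment ) \
  ( ( ( value ) + ( alignment ) - 1 ) & ~( (uintptr_t) ( alignment ) - 1 ) )

int main( void )
{
  /* Illustrative values only; the real ones come from the TLS
   * configuration and sizeof( TLS_Thread_control_block ). */
  uintptr_t tls_area  = 0x1004;
  uintptr_t tcb_size  = 8;
  uintptr_t alignment = 16;

  /* Variant I (value 10): align the TLS data up past the TCB and place
   * the TCB directly in front of it, as in the branch above. */
  uintptr_t tls_data = ALIGN_UP( tls_area + tcb_size, alignment );
  uintptr_t tcb      = tls_data - tcb_size;

  printf( "tls_data = 0x%" PRIxPTR ", tcb = 0x%" PRIxPTR "\n", tls_data, tcb );
  return 0;
}

With these illustrative numbers the TCB ends up immediately in front of the aligned TLS data, which is the address the `== 10` branch returns as tls_data.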
diff --git a/cpukit/include/rtems/score/todimpl.h b/cpukit/include/rtems/score/todimpl.h
index 5d1e0613a1..565e047c7f 100644
--- a/cpukit/include/rtems/score/todimpl.h
+++ b/cpukit/include/rtems/score/todimpl.h
@@ -374,7 +374,7 @@ static inline uint32_t _TOD_Seconds_since_epoch( void )
*
* @param[out] time The timeval to be filled in by the method.
*/
-RTEMS_INLINE_ROUTINE void _TOD_Get_timeval(
+static inline void _TOD_Get_timeval(
struct timeval *time
)
{
@@ -382,27 +382,12 @@ RTEMS_INLINE_ROUTINE void _TOD_Get_timeval(
}
/**
- * @brief Adjusts the Time of Day.
- *
- * This method is used to adjust the current time of day by the
- * specified amount.
- *
- * @param delta is the amount to adjust.
- *
- * @retval STATUS_SUCCESSFUL Successful operation.
- * @retval other Some error occurred.
- */
-Status_Control _TOD_Adjust(
- const struct timespec *delta
-);
-
-/**
* @brief Check if the TOD is Set
*
* @retval true The time is set.
* @retval false The time is not set.
*/
-RTEMS_INLINE_ROUTINE bool _TOD_Is_set( void )
+static inline bool _TOD_Is_set( void )
{
return _TOD.is_set;
}
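
Most hunks in this patch only swap the RTEMS_INLINE_ROUTINE marker for plain static inline. As far as I recall, the macro in <rtems/score/basedefs.h> already expands to exactly that, so the conversion should be purely textual; the short sketch below (not part of the patch) shows the assumed equivalence:

#include <stdbool.h>

/* Presumed definition in <rtems/score/basedefs.h>: */
#define RTEMS_INLINE_ROUTINE static inline

/* Both spellings therefore declare the same thing: */
RTEMS_INLINE_ROUTINE bool _TOD_Is_set( void );
static inline bool _TOD_Is_set( void );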
diff --git a/cpukit/include/rtems/score/userextimpl.h b/cpukit/include/rtems/score/userextimpl.h
index bedc82606a..70a612f402 100644
--- a/cpukit/include/rtems/score/userextimpl.h
+++ b/cpukit/include/rtems/score/userextimpl.h
@@ -123,7 +123,7 @@ void _User_extensions_Add_set(
*
* @param extension The user extension to add.
*/
-RTEMS_INLINE_ROUTINE void _User_extensions_Add_API_set(
+static inline void _User_extensions_Add_API_set(
User_extensions_Control *extension
)
{
@@ -136,7 +136,7 @@ RTEMS_INLINE_ROUTINE void _User_extensions_Add_API_set(
* @param[in, out] extension The user extension to add.
* @param extension_table Is set as callouts for @a extension.
*/
-RTEMS_INLINE_ROUTINE void _User_extensions_Add_set_with_table(
+static inline void _User_extensions_Add_set_with_table(
User_extensions_Control *extension,
const User_extensions_Table *extension_table
)
diff --git a/cpukit/include/rtems/score/watchdogimpl.h b/cpukit/include/rtems/score/watchdogimpl.h
index 65259674ae..df479aeeed 100644
--- a/cpukit/include/rtems/score/watchdogimpl.h
+++ b/cpukit/include/rtems/score/watchdogimpl.h
@@ -114,7 +114,7 @@ typedef enum {
*
* @param[out] header The header to initialize.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Header_initialize(
+static inline void _Watchdog_Header_initialize(
Watchdog_Header *header
)
{
@@ -129,7 +129,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Header_initialize(
*
* @return The first of @a header.
*/
-RTEMS_INLINE_ROUTINE Watchdog_Control *_Watchdog_Header_first(
+static inline Watchdog_Control *_Watchdog_Header_first(
const Watchdog_Header *header
)
{
@@ -141,7 +141,7 @@ RTEMS_INLINE_ROUTINE Watchdog_Control *_Watchdog_Header_first(
*
* @param header The watchdog header to destroy.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Header_destroy(
+static inline void _Watchdog_Header_destroy(
Watchdog_Header *header
)
{
@@ -163,7 +163,7 @@ void _Watchdog_Tick( struct Per_CPU_Control *cpu );
*
* @return The RB_COLOR of @a the_watchdog.
*/
-RTEMS_INLINE_ROUTINE Watchdog_State _Watchdog_Get_state(
+static inline Watchdog_State _Watchdog_Get_state(
const Watchdog_Control *the_watchdog
)
{
@@ -176,7 +176,7 @@ RTEMS_INLINE_ROUTINE Watchdog_State _Watchdog_Get_state(
* @param[out] the_watchdog The watchdog to set the state of.
* @param state The state to set the watchdog to.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Set_state(
+static inline void _Watchdog_Set_state(
Watchdog_Control *the_watchdog,
Watchdog_State state
)
@@ -191,7 +191,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Set_state(
*
* @return The cpu of the watchdog.
*/
-RTEMS_INLINE_ROUTINE Per_CPU_Control *_Watchdog_Get_CPU(
+static inline Per_CPU_Control *_Watchdog_Get_CPU(
const Watchdog_Control *the_watchdog
)
{
@@ -208,7 +208,7 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Watchdog_Get_CPU(
* @param[out] the_watchdog The watchdog to set the cpu of.
* @param cpu The cpu to be set as @a the_watchdog's cpu.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Set_CPU(
+static inline void _Watchdog_Set_CPU(
Watchdog_Control *the_watchdog,
Per_CPU_Control *cpu
)
@@ -228,7 +228,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Set_CPU(
*
* @param[out] the_watchdog The uninitialized watchdog.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Preinitialize(
+static inline void _Watchdog_Preinitialize(
Watchdog_Control *the_watchdog,
Per_CPU_Control *cpu
)
@@ -250,7 +250,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Preinitialize(
* @param[out] the_watchdog The watchdog to initialize.
 * @param routine The service routine for @a the_watchdog.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Initialize(
+static inline void _Watchdog_Initialize(
Watchdog_Control *the_watchdog,
Watchdog_Service_routine_entry routine
)
@@ -332,7 +332,7 @@ void _Watchdog_Remove(
* @retval 0 The now time is greater than or equal to the expiration time of
* the watchdog.
*/
-RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Cancel(
+static inline uint64_t _Watchdog_Cancel(
Watchdog_Header *header,
Watchdog_Control *the_watchdog,
uint64_t now
@@ -362,7 +362,7 @@ RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Cancel(
* @retval true The watchdog is scheduled.
* @retval false The watchdog is inactive.
*/
-RTEMS_INLINE_ROUTINE bool _Watchdog_Is_scheduled(
+static inline bool _Watchdog_Is_scheduled(
const Watchdog_Control *the_watchdog
)
{
@@ -381,7 +381,7 @@ RTEMS_INLINE_ROUTINE bool _Watchdog_Is_scheduled(
* @param first is the current first watchdog which should be removed
* afterwards.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Next_first(
+static inline void _Watchdog_Next_first(
Watchdog_Header *header,
const Watchdog_Control *first
)
@@ -450,7 +450,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Next_first(
* @retval true The timespec is a valid timespec.
* @retval false The timespec is invalid.
*/
-RTEMS_INLINE_ROUTINE bool _Watchdog_Is_valid_timespec(
+static inline bool _Watchdog_Is_valid_timespec(
const struct timespec *ts
)
{
@@ -466,7 +466,7 @@ RTEMS_INLINE_ROUTINE bool _Watchdog_Is_valid_timespec(
* @retval true The timespec is a valid interval timespec.
* @retval false The timespec is invalid.
*/
-RTEMS_INLINE_ROUTINE bool _Watchdog_Is_valid_interval_timespec(
+static inline bool _Watchdog_Is_valid_interval_timespec(
const struct timespec *ts
)
{
@@ -483,7 +483,7 @@ RTEMS_INLINE_ROUTINE bool _Watchdog_Is_valid_interval_timespec(
* @retval pointer Pointer to the now timespec.
* @retval NULL @a delta is not a valid interval timespec.
*/
-RTEMS_INLINE_ROUTINE const struct timespec * _Watchdog_Future_timespec(
+static inline const struct timespec * _Watchdog_Future_timespec(
struct timespec *now,
const struct timespec *delta
)
@@ -521,7 +521,7 @@ RTEMS_INLINE_ROUTINE const struct timespec * _Watchdog_Future_timespec(
* @retval true @a ts is too far in the future.
* @retval false @a ts is not too far in the future.
*/
-RTEMS_INLINE_ROUTINE bool _Watchdog_Is_far_future_timespec(
+static inline bool _Watchdog_Is_far_future_timespec(
const struct timespec *ts
)
{
@@ -535,7 +535,7 @@ RTEMS_INLINE_ROUTINE bool _Watchdog_Is_far_future_timespec(
*
* @return @a seconds converted to ticks.
*/
-RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Ticks_from_seconds(
+static inline uint64_t _Watchdog_Ticks_from_seconds(
uint32_t seconds
)
{
@@ -553,7 +553,7 @@ RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Ticks_from_seconds(
*
* @return @a ts converted to ticks.
*/
-RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Ticks_from_timespec(
+static inline uint64_t _Watchdog_Ticks_from_timespec(
const struct timespec *ts
)
{
@@ -577,7 +577,7 @@ RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Ticks_from_timespec(
*
 * @param[out] ts is the timespec in which the converted ticks are returned.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Ticks_to_timespec(
+static inline void _Watchdog_Ticks_to_timespec(
uint64_t ticks,
struct timespec *ts
)
@@ -593,7 +593,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Ticks_to_timespec(
*
* @return @a sbt converted to ticks.
*/
-RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Ticks_from_sbintime( int64_t sbt )
+static inline uint64_t _Watchdog_Ticks_from_sbintime( int64_t sbt )
{
uint64_t ticks = ( sbt >> 32 ) << WATCHDOG_BITS_FOR_1E9_NANOSECONDS;
@@ -608,7 +608,7 @@ RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Ticks_from_sbintime( int64_t sbt )
* @param cpu The cpu to acquire the watchdog lock of.
* @param lock_context The lock context.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_acquire_critical(
+static inline void _Watchdog_Per_CPU_acquire_critical(
Per_CPU_Control *cpu,
ISR_lock_Context *lock_context
)
@@ -622,7 +622,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_acquire_critical(
* @param cpu The cpu to release the watchdog lock of.
* @param lock_context The lock context.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_release_critical(
+static inline void _Watchdog_Per_CPU_release_critical(
Per_CPU_Control *cpu,
ISR_lock_Context *lock_context
)
@@ -640,7 +640,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_release_critical(
*
* @return The new expiration time of the watchdog.
*/
-RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Per_CPU_insert_ticks(
+static inline uint64_t _Watchdog_Per_CPU_insert_ticks(
Watchdog_Control *the_watchdog,
Per_CPU_Control *cpu,
Watchdog_Interval ticks
@@ -672,7 +672,7 @@ RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Per_CPU_insert_ticks(
*
* @return The expiration time of the watchdog.
*/
-RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Per_CPU_insert(
+static inline uint64_t _Watchdog_Per_CPU_insert(
Watchdog_Control *the_watchdog,
Per_CPU_Control *cpu,
Watchdog_Header *header,
@@ -696,7 +696,7 @@ RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Per_CPU_insert(
* @param cpu The cpu to remove the watchdog from.
 * @param[in, out] header The scheduled watchdogs.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_remove(
+static inline void _Watchdog_Per_CPU_remove(
Watchdog_Control *the_watchdog,
Per_CPU_Control *cpu,
Watchdog_Header *header
@@ -717,7 +717,7 @@ RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_remove(
*
* @param[in, out] the_watchdog The watchdog to remove.
*/
-RTEMS_INLINE_ROUTINE void _Watchdog_Per_CPU_remove_ticks(
+static inline void _Watchdog_Per_CPU_remove_ticks(
Watchdog_Control *the_watchdog
)
{
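
The tick conversions above rely on a fixed-point encoding: the upper bits of the 64-bit expiration value count seconds and the low WATCHDOG_BITS_FOR_1E9_NANOSECONDS bits count nanoseconds, which is why _Watchdog_Ticks_from_sbintime only needs to shift the 32.32 sbintime value. Below is a standalone sketch of the same split, assuming the bit count is 30 (2^30 > 10^9); the helper name and the test values are made up:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Assumed to match WATCHDOG_BITS_FOR_1E9_NANOSECONDS in watchdogimpl.h:
 * 30 bits are enough to hold any nanosecond value below one second. */
#define BITS_FOR_1E9_NANOSECONDS 30

/* Hypothetical helper mirroring the watchdog tick representation:
 * seconds in the upper bits, nanoseconds of the current second in the
 * lower 30 bits. */
static uint64_t ticks_from_timespec( const struct timespec *ts )
{
  uint64_t ticks = (uint64_t) ts->tv_sec << BITS_FOR_1E9_NANOSECONDS;

  return ticks | (uint32_t) ts->tv_nsec;
}

int main( void )
{
  struct timespec ts = { .tv_sec = 2, .tv_nsec = 500000000 };

  printf(
    "ticks = 0x%llx\n",
    (unsigned long long) ticks_from_timespec( &ts )
  );
  return 0;
}

With this encoding, _Watchdog_Ticks_from_seconds above reduces to a left shift by the same constant.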
diff --git a/cpukit/include/rtems/score/wkspacedata.h b/cpukit/include/rtems/score/wkspacedata.h
index f1ca524fa0..6d809ca788 100644
--- a/cpukit/include/rtems/score/wkspacedata.h
+++ b/cpukit/include/rtems/score/wkspacedata.h
@@ -11,7 +11,7 @@
*/
/*
- * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2020 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/include/rtems/score/wkspaceinitmulti.h b/cpukit/include/rtems/score/wkspaceinitmulti.h
index 18520199ce..dfb2b3cbde 100644
--- a/cpukit/include/rtems/score/wkspaceinitmulti.h
+++ b/cpukit/include/rtems/score/wkspaceinitmulti.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (C) 2012, 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2012, 2020 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,7 +57,7 @@ extern "C" {
* memory area via _Memory_Get() to implement
* _Workspace_Handler_initialization().
*/
-RTEMS_INLINE_ROUTINE void _Workspace_Initialize_for_multiple_areas( void )
+static inline void _Workspace_Initialize_for_multiple_areas( void )
{
const Memory_Information *mem;
Heap_Initialization_or_extend_handler init_or_extend;
diff --git a/cpukit/include/rtems/score/wkspaceinitone.h b/cpukit/include/rtems/score/wkspaceinitone.h
index ce26a1cf8f..0dc252ad43 100644
--- a/cpukit/include/rtems/score/wkspaceinitone.h
+++ b/cpukit/include/rtems/score/wkspaceinitone.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright (C) 2012, 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2012, 2020 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,7 +57,7 @@ extern "C" {
* This implementation should be used by BSPs which provide exactly one memory
* area via _Memory_Get() to implement _Workspace_Handler_initialization().
*/
-RTEMS_INLINE_ROUTINE void _Workspace_Initialize_for_one_area( void )
+static inline void _Workspace_Initialize_for_one_area( void )
{
uintptr_t page_size;
uintptr_t wkspace_size;