author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-10-21 09:23:58 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-11-02 10:05:43 +0100
commit    3a2724805421098df505c0acea106fb294bc2f6a (patch)
tree      07b225c4fdc140786851750df34433526ed2ddff /cpukit
parent    score: Use scheduler instance specific locks (diff)
score: First part of new MrsP implementation
Update #2556.
Diffstat (limited to 'cpukit')
-rw-r--r--  cpukit/libmisc/monitor/mon-sema.c                 |   2
-rw-r--r--  cpukit/sapi/src/interrtext.c                      |   3
-rw-r--r--  cpukit/score/include/rtems/score/interr.h         |   3
-rw-r--r--  cpukit/score/include/rtems/score/mrsp.h           |  80
-rw-r--r--  cpukit/score/include/rtems/score/mrspimpl.h       | 284
-rw-r--r--  cpukit/score/include/rtems/score/schedulerimpl.h  |  56
-rw-r--r--  cpukit/score/include/rtems/score/status.h         |   2
-rw-r--r--  cpukit/score/include/rtems/score/threadimpl.h     |  11
-rw-r--r--  cpukit/score/include/rtems/score/threadqimpl.h    |  62
-rw-r--r--  cpukit/score/src/threadchangepriority.c           |  17
-rw-r--r--  cpukit/score/src/threadqenqueue.c                 | 151
11 files changed, 361 insertions, 310 deletions
diff --git a/cpukit/libmisc/monitor/mon-sema.c b/cpukit/libmisc/monitor/mon-sema.c
index 7334611465..3dc7172501 100644
--- a/cpukit/libmisc/monitor/mon-sema.c
+++ b/cpukit/libmisc/monitor/mon-sema.c
@@ -84,7 +84,7 @@ rtems_monitor_sema_canonical(
#if defined(RTEMS_SMP)
case SEMAPHORE_VARIANT_MRSP:
canonical_sema->cur_count =
- rtems_sema->Core_control.MRSP.Resource.owner == NULL;
+ _MRSP_Get_owner( &rtems_sema->Core_control.MRSP ) == NULL;
canonical_sema->max_count = 1;
break;
#endif
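
The monitor change is mechanical, but it shows the core of the rework: ownership is no longer kept in a Resource_Control inside MRSP_Control, it lives directly on the thread queue, so callers go through _MRSP_Get_owner() instead of reaching into structure internals. A minimal standalone model of that shape, with stand-in types rather than the real RTEMS definitions:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Thread Thread;   /* stand-in for Thread_Control */

    /* Ownership lives on the thread queue itself, not in a separate
     * resource control block. */
    typedef struct {
      struct {
        struct {
          Thread *owner;
        } Queue;
      } Wait_queue;
    } MRSP_Model;

    static inline Thread *mrsp_get_owner( const MRSP_Model *mrsp )
    {
      return mrsp->Wait_queue.Queue.owner;
    }

    static inline bool mrsp_is_available( const MRSP_Model *mrsp )
    {
      /* The monitor reports cur_count as 1 exactly when nobody owns it. */
      return mrsp_get_owner( mrsp ) == NULL;
    }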
diff --git a/cpukit/sapi/src/interrtext.c b/cpukit/sapi/src/interrtext.c
index 840806153e..665302283b 100644
--- a/cpukit/sapi/src/interrtext.c
+++ b/cpukit/sapi/src/interrtext.c
@@ -55,7 +55,8 @@ static const char *const internal_error_text[] = {
"INTERNAL_ERROR_RESOURCE_IN_USE",
"INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL",
"INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL",
- "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK"
+ "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK",
+ "INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE"
};
const char *rtems_internal_error_text( rtems_fatal_code error )
diff --git a/cpukit/score/include/rtems/score/interr.h b/cpukit/score/include/rtems/score/interr.h
index 845dc6f198..dff61012d7 100644
--- a/cpukit/score/include/rtems/score/interr.h
+++ b/cpukit/score/include/rtems/score/interr.h
@@ -164,7 +164,8 @@ typedef enum {
INTERNAL_ERROR_RESOURCE_IN_USE,
INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL,
INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL,
- INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
+ INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
} Internal_errors_Core_list;
typedef CPU_Uint32ptr Internal_errors_t;
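
Both hunks above append the new code at the tail of a parallel enum/string-table pair, so the entries stay aligned by position. A standalone sketch, with hypothetical names rather than the actual RTEMS lookup, of the usual guarded pattern for such a pair:

    #include <stddef.h>

    typedef enum {
      MODEL_ERROR_THREAD_QUEUE_DEADLOCK,
      MODEL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
      /* new codes are appended here ... */
    } model_error_code;

    static const char *const model_error_text[] = {
      "MODEL_ERROR_THREAD_QUEUE_DEADLOCK",
      "MODEL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE"
      /* ... and here, in the same position */
    };

    const char *model_error_to_text( model_error_code error )
    {
      size_t index = (size_t) error;

      /* Codes from a newer kernel than this table map to a placeholder. */
      if ( index >= sizeof( model_error_text ) / sizeof( model_error_text[ 0 ] ) ) {
        return "?";
      }

      return model_error_text[ index ];
    }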
diff --git a/cpukit/score/include/rtems/score/mrsp.h b/cpukit/score/include/rtems/score/mrsp.h
index 5af3a06901..85cbff784f 100644
--- a/cpukit/score/include/rtems/score/mrsp.h
+++ b/cpukit/score/include/rtems/score/mrsp.h
@@ -19,9 +19,6 @@
#if defined(RTEMS_SMP)
-#include <rtems/score/chain.h>
-#include <rtems/score/scheduler.h>
-#include <rtems/score/thread.h>
#include <rtems/score/threadq.h>
#ifdef __cplusplus
@@ -51,87 +48,16 @@ extern "C" {
* @{
*/
-typedef struct MRSP_Control MRSP_Control;
-
-/**
- * @brief MrsP rival.
- *
- * The rivals are used by threads waiting for resource ownership. They are
- * registered in the MrsP control block.
- */
-typedef struct {
- /**
- * @brief The node for registration in the MrsP rival chain.
- *
- * The chain operations are protected by the MrsP control lock.
- *
- * @see MRSP_Control::Rivals.
- */
- Chain_Node Node;
-
- /**
- * @brief The corresponding MrsP control block.
- */
- MRSP_Control *resource;
-
- /**
- * @brief Identification of the rival thread.
- */
- Thread_Control *thread;
-
- /**
- * @brief The ceiling priority used by the rival thread.
- */
- Priority_Node Ceiling_priority;
-
- /**
- * @brief The initial help state of the thread at the begin of the resource
- * obtain sequence.
- *
- * Used to restore this state after a timeout.
- */
- Scheduler_Help_state initial_help_state;
-
- /**
- * @brief The rival status.
- *
- * Initially the status is set to MRSP_WAIT_FOR_OWNERSHIP. The rival will
- * busy wait until a status change happens. This can be STATUS_SUCCESSFUL or
- * STATUS_TIMEOUT. State changes are protected by the MrsP control lock.
- */
- volatile int status;
-
- /**
- * @brief Watchdog for timeouts.
- */
- Watchdog_Control Watchdog;
-} MRSP_Rival;
-
/**
* @brief MrsP control block.
*/
-struct MRSP_Control {
+typedef struct {
/**
- * @brief Lock to protect the resource dependency tree.
- *
- * This is a thread queue since this simplifies the Classic semaphore
- * implementation. Only the lock part of the thread queue is used.
+ * @brief The thread queue to manage ownership and waiting threads.
*/
Thread_queue_Control Wait_queue;
/**
- * @brief Basic resource control.
- */
- Resource_Control Resource;
-
- /**
- * @brief A chain of MrsP rivals waiting for resource ownership.
- *
- * @see MRSP_Rival::Node.
- */
- Chain_Control Rivals;
-
- /**
* @brief The ceiling priority used by the owner thread.
*/
Priority_Node Ceiling_priority;
@@ -140,7 +66,7 @@ struct MRSP_Control {
* @brief One ceiling priority per scheduler instance.
*/
Priority_Control *ceiling_priorities;
-};
+} MRSP_Control;
/** @} */
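
After the removals, MRSP_Control is just a thread queue (which carries the owner and the waiting threads), the owner's ceiling priority node, and one ceiling priority per scheduler instance. A stand-in sketch of the per-scheduler ceiling array; the real reader is _MRSP_Get_priority() in mrspimpl.h:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t priority_model;   /* stand-in for Priority_Control */

    /* One ceiling priority per scheduler instance, indexed by the
     * scheduler's index (mirrors MRSP_Control::ceiling_priorities). */
    typedef struct {
      priority_model *ceiling_priorities;
      size_t          scheduler_count;
    } ceiling_model;

    static priority_model ceiling_get(
      const ceiling_model *c,
      size_t               scheduler_index
    )
    {
      return c->ceiling_priorities[ scheduler_index ];
    }

    static void ceiling_set(
      ceiling_model  *c,
      size_t          scheduler_index,
      priority_model  priority
    )
    {
      c->ceiling_priorities[ scheduler_index ] = priority;
    }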
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index 92cc56614c..4b4e8c3c67 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -20,9 +20,6 @@
#if defined(RTEMS_SMP)
#include <rtems/score/assert.h>
-#include <rtems/score/chainimpl.h>
-#include <rtems/score/resourceimpl.h>
-#include <rtems/score/schedulerimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/watchdogimpl.h>
@@ -38,28 +35,7 @@ extern "C" {
* @{
*/
-/**
- * @brief Internal state used for MRSP_Rival::status to indicate that this
- * rival waits for resource ownership.
- */
-#define MRSP_WAIT_FOR_OWNERSHIP STATUS_MINUS_ONE
-
-/*
- * FIXME: Operations with the resource dependency tree are protected by the
- * global scheduler lock. Since the scheduler lock should be scheduler
- * instance specific in the future this will only work temporarily. A more
- * sophisticated locking strategy is necessary.
- */
-
-RTEMS_INLINE_ROUTINE void _MRSP_Giant_acquire( ISR_lock_Context *lock_context )
-{
- /* FIXME: MrsP protocol implementation will be reworked soon */
-}
-
-RTEMS_INLINE_ROUTINE void _MRSP_Giant_release( ISR_lock_Context *lock_context )
-{
- /* FIXME: MrsP protocol implementation will be reworked soon */
-}
+#define MRSP_TQ_OPERATIONS &_Thread_queue_Operations_priority_inherit
RTEMS_INLINE_ROUTINE void _MRSP_Acquire_critical(
MRSP_Control *mrsp,
@@ -77,6 +53,19 @@ RTEMS_INLINE_ROUTINE void _MRSP_Release(
_Thread_queue_Release( &mrsp->Wait_queue, queue_context );
}
+RTEMS_INLINE_ROUTINE Thread_Control *_MRSP_Get_owner( MRSP_Control *mrsp )
+{
+ return mrsp->Wait_queue.Queue.owner;
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Set_owner(
+ MRSP_Control *mrsp,
+ Thread_Control *owner
+)
+{
+ mrsp->Wait_queue.Queue.owner = owner;
+}
+
RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
const MRSP_Control *mrsp,
const Scheduler_Control *scheduler
@@ -149,23 +138,23 @@ RTEMS_INLINE_ROUTINE void _MRSP_Remove_priority(
RTEMS_INLINE_ROUTINE void _MRSP_Replace_priority(
MRSP_Control *mrsp,
Thread_Control *thread,
- MRSP_Rival *rival
+ Priority_Node *ceiling_priority
)
{
ISR_lock_Context lock_context;
- _Thread_Wait_acquire_default_critical( thread, &lock_context );
+ _Thread_Wait_acquire_default( thread, &lock_context );
_Thread_Priority_replace(
thread,
- &rival->Ceiling_priority,
+ ceiling_priority,
&mrsp->Ceiling_priority
);
- _Thread_Wait_release_default_critical( thread, &lock_context );
+ _Thread_Wait_release_default( thread, &lock_context );
}
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
MRSP_Control *mrsp,
- Thread_Control *new_owner,
+ Thread_Control *executing,
Thread_queue_Context *queue_context
)
{
@@ -174,7 +163,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
status = _MRSP_Raise_priority(
mrsp,
- new_owner,
+ executing,
&mrsp->Ceiling_priority,
queue_context
);
@@ -184,17 +173,12 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
return status;
}
- _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
- _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
- _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
-
+ _MRSP_Set_owner( mrsp, executing );
cpu_self = _Thread_Dispatch_disable_critical(
&queue_context->Lock_context.Lock_context
);
_MRSP_Release( mrsp, queue_context );
-
- _Thread_Priority_update( queue_context );
-
+ _Thread_Priority_and_sticky_update( executing, 1 );
_Thread_Dispatch_enable( cpu_self );
return STATUS_SUCCESSFUL;
}
@@ -234,74 +218,23 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
}
}
- _Resource_Initialize( &mrsp->Resource );
- _Chain_Initialize_empty( &mrsp->Rivals );
_Thread_queue_Initialize( &mrsp->Wait_queue );
-
return STATUS_SUCCESSFUL;
}
-RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
-{
- MRSP_Rival *rival;
- MRSP_Control *mrsp;
- Thread_Control *thread;
- Thread_queue_Context queue_context;
-
- rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
- mrsp = rival->resource;
- thread = rival->thread;
-
- _Thread_queue_Context_initialize( &queue_context );
- _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
- _MRSP_Acquire_critical( mrsp, &queue_context );
-
- if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) {
- ISR_lock_Context giant_lock_context;
-
- _MRSP_Remove_priority( thread, &rival->Ceiling_priority, &queue_context );
-
- _MRSP_Giant_acquire( &giant_lock_context );
-
- _Chain_Extract_unprotected( &rival->Node );
- _Resource_Node_extract( &thread->Resource_node );
- _Resource_Node_set_dependency( &thread->Resource_node, NULL );
- _Scheduler_Thread_change_help_state( thread, rival->initial_help_state );
- _Scheduler_Thread_change_resource_root( thread, thread );
-
- _MRSP_Giant_release( &giant_lock_context );
-
- rival->status = STATUS_TIMEOUT;
-
- _MRSP_Release( mrsp, &queue_context );
-
- _Thread_Priority_update( &queue_context );
- } else {
- _MRSP_Release( mrsp, &queue_context );
- }
-}
-
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
MRSP_Control *mrsp,
- Resource_Node *owner,
Thread_Control *executing,
Thread_queue_Context *queue_context
)
{
- Status_Control status;
- MRSP_Rival rival;
- Thread_Life_state life_state;
- Per_CPU_Control *cpu_self;
- ISR_lock_Context giant_lock_context;
- ISR_Level level;
- Watchdog_Interval timeout;
-
- _Assert( queue_context->timeout_discipline == WATCHDOG_RELATIVE );
+ Status_Control status;
+ Priority_Node ceiling_priority;
status = _MRSP_Raise_priority(
mrsp,
executing,
- &rival.Ceiling_priority,
+ &ceiling_priority,
queue_context
);
@@ -310,64 +243,38 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
return status;
}
- rival.thread = executing;
- rival.resource = mrsp;
- _Chain_Initialize_node( &rival.Node );
-
- _MRSP_Giant_acquire( &giant_lock_context );
-
- rival.initial_help_state =
- _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL );
- rival.status = MRSP_WAIT_FOR_OWNERSHIP;
-
- _Chain_Initialize_node( &rival.Node );
- _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node );
- _Resource_Add_rival( &mrsp->Resource, &executing->Resource_node );
- _Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource );
- _Scheduler_Thread_change_resource_root(
- executing,
- THREAD_RESOURCE_NODE_TO_THREAD( _Resource_Node_get_root( owner ) )
+ _Thread_queue_Context_set_deadlock_callout(
+ queue_context,
+ _Thread_queue_Deadlock_status
);
-
- _MRSP_Giant_release( &giant_lock_context );
-
- cpu_self = _Thread_Dispatch_disable_critical(
- &queue_context->Lock_context.Lock_context
+ status = _Thread_queue_Enqueue_sticky(
+ &mrsp->Wait_queue.Queue,
+ MRSP_TQ_OPERATIONS,
+ executing,
+ queue_context
);
- _MRSP_Release( mrsp, queue_context );
-
- _Thread_Priority_update( queue_context );
- timeout = (Watchdog_Interval) queue_context->timeout;
-
- if ( timeout > 0 ) {
- _Watchdog_Preinitialize( &rival.Watchdog, cpu_self );
- _Watchdog_Initialize( &rival.Watchdog, _MRSP_Timeout );
- _ISR_Local_disable( level );
- _Watchdog_Per_CPU_insert_relative( &rival.Watchdog, cpu_self, timeout );
- _ISR_Local_enable( level );
- }
-
- life_state = _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
- _Thread_Dispatch_enable( cpu_self );
-
- _Assert( _Debug_Is_thread_dispatching_allowed() );
-
- /* Wait for state change */
- do {
- status = rival.status;
- } while ( status == MRSP_WAIT_FOR_OWNERSHIP );
+ if ( status == STATUS_SUCCESSFUL ) {
+ _MRSP_Replace_priority( mrsp, executing, &ceiling_priority );
+ } else {
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ int sticky_level_change;
- _Thread_Set_life_protection( life_state );
+ if ( status != STATUS_DEADLOCK ) {
+ sticky_level_change = -1;
+ } else {
+ sticky_level_change = 0;
+ }
- if ( timeout > 0 ) {
- _ISR_Local_disable( level );
- _Watchdog_Per_CPU_remove(
- &rival.Watchdog,
- cpu_self,
- &cpu_self->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ]
+ _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
+ _MRSP_Remove_priority( executing, &ceiling_priority, &queue_context );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context.Lock_context.Lock_context
);
- _ISR_Local_enable( level );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+ _Thread_Priority_and_sticky_update( executing, sticky_level_change );
+ _Thread_Dispatch_enable( cpu_self );
}
return status;
@@ -381,22 +288,21 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
)
{
Status_Control status;
- Resource_Node *owner;
+ Thread_Control *owner;
_MRSP_Acquire_critical( mrsp, queue_context );
- owner = _Resource_Get_owner( &mrsp->Resource );
+ owner = _MRSP_Get_owner( mrsp );
if ( owner == NULL ) {
status = _MRSP_Claim_ownership( mrsp, executing, queue_context );
- } else if (
- wait
- && _Resource_Node_get_root( owner ) != &executing->Resource_node
- ) {
- status = _MRSP_Wait_for_ownership( mrsp, owner, executing, queue_context );
+ } else if ( owner == executing ) {
+ _MRSP_Release( mrsp, queue_context );
+ status = STATUS_UNAVAILABLE;
+ } else if ( wait ) {
+ status = _MRSP_Wait_for_ownership( mrsp, executing, queue_context );
} else {
_MRSP_Release( mrsp, queue_context );
- /* Not available, nested access or deadlock */
status = STATUS_UNAVAILABLE;
}
@@ -409,77 +315,45 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
Thread_queue_Context *queue_context
)
{
- ISR_lock_Context giant_lock_context;
- Per_CPU_Control *cpu_self;
+ Thread_queue_Heads *heads;
- if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
+ if ( _MRSP_Get_owner( mrsp ) != executing ) {
_ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
return STATUS_NOT_OWNER;
}
- if (
- !_Resource_Is_most_recently_obtained(
- &mrsp->Resource,
- &executing->Resource_node
- )
- ) {
- _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
- return STATUS_RELEASE_ORDER_VIOLATION;
- }
-
_MRSP_Acquire_critical( mrsp, queue_context );
- _MRSP_Remove_priority( executing, &mrsp->Ceiling_priority, queue_context );
- _MRSP_Giant_acquire( &giant_lock_context );
-
- _Resource_Extract( &mrsp->Resource );
- if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
- _Resource_Set_owner( &mrsp->Resource, NULL );
- } else {
- MRSP_Rival *rival;
- Thread_Control *new_owner;
-
- rival = (MRSP_Rival *) _Chain_Get_first_unprotected( &mrsp->Rivals );
-
- /*
- * This must be inside the critical section since the status prevents a
- * potential double extraction in _MRSP_Timeout().
- */
- rival->status = STATUS_SUCCESSFUL;
-
- new_owner = rival->thread;
+ _MRSP_Set_owner( mrsp, NULL );
+ _MRSP_Remove_priority( executing, &mrsp->Ceiling_priority, queue_context );
- _MRSP_Replace_priority( mrsp, new_owner, rival );
+ heads = mrsp->Wait_queue.Queue.heads;
- _Resource_Node_extract( &new_owner->Resource_node );
- _Resource_Node_set_dependency( &new_owner->Resource_node, NULL );
- _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
- _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
- _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
- _Scheduler_Thread_change_resource_root( new_owner, new_owner );
- }
+ if ( heads == NULL ) {
+ Per_CPU_Control *cpu_self;
- if ( !_Resource_Node_owns_resources( &executing->Resource_node ) ) {
- _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_YOURSELF );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _MRSP_Release( mrsp, queue_context );
+ _Thread_Priority_and_sticky_update( executing, -1 );
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
}
- _MRSP_Giant_release( &giant_lock_context );
-
- cpu_self = _Thread_Dispatch_disable_critical(
- &queue_context->Lock_context.Lock_context
+ _Thread_queue_Surrender_sticky(
+ &mrsp->Wait_queue.Queue,
+ heads,
+ executing,
+ queue_context,
+ MRSP_TQ_OPERATIONS
);
- _MRSP_Release( mrsp, queue_context );
-
- _Thread_Priority_update( queue_context );
-
- _Thread_Dispatch_enable( cpu_self );
-
return STATUS_SUCCESSFUL;
}
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
{
- if ( _Resource_Get_owner( &mrsp->Resource ) != NULL ) {
+ if ( _MRSP_Get_owner( mrsp ) != NULL ) {
return STATUS_RESOURCE_IN_USE;
}
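
Condensing the hunks above: the rewritten _MRSP_Seize() has exactly four outcomes, and the former resource-tree root test is replaced by a direct owner comparison, so a nested obtain now simply reports STATUS_UNAVAILABLE (which is also why STATUS_RELEASE_ORDER_VIOLATION disappears from status.h below). A self-contained model of the decision flow, with the protocol work stubbed out and hypothetical names throughout:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct task task_t;                       /* stand-in thread type */
    typedef enum { ST_SUCCESSFUL, ST_UNAVAILABLE } st_model;

    static st_model claim_ownership( task_t *executing )
    {
      (void) executing;     /* stub: raise to ceiling, set the owner */
      return ST_SUCCESSFUL;
    }

    static st_model wait_for_ownership( task_t *executing )
    {
      (void) executing;     /* stub: raise to ceiling, sticky enqueue, spin */
      return ST_SUCCESSFUL;
    }

    static st_model seize_model( task_t *owner, task_t *executing, bool wait )
    {
      if ( owner == NULL ) {
        return claim_ownership( executing );
      } else if ( owner == executing ) {
        return ST_UNAVAILABLE;    /* nested obtain, rejected immediately */
      } else if ( wait ) {
        return wait_for_ownership( executing );
      } else {
        return ST_UNAVAILABLE;    /* polling obtain while owned */
      }
    }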
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 25b961fcd6..92b08e5bde 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -553,6 +553,62 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread
#endif
}
+#if defined(RTEMS_SMP)
+/**
+ * @brief Changes the sticky level of the home scheduler node and propagates a
+ * priority change of a thread to the scheduler.
+ *
+ * @param[in] the_thread The thread changing its priority or sticky level.
+ *
+ * @see _Scheduler_Update_priority().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
+ Thread_Control *the_thread,
+ int sticky_level_change
+)
+{
+ Chain_Node *node;
+ const Chain_Node *tail;
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ _Thread_Scheduler_process_requests( the_thread );
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+ node = _Chain_Next( node );
+
+ while ( node != tail ) {
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ node = _Chain_Next( node );
+ }
+}
+#endif
+
/**
* @brief Maps a thread priority from the user domain to the scheduler domain.
*
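
The new helper visits the home scheduler node first and then every other scheduler node of the thread, calling each instance's update_priority operation under that instance's own lock, consistent with the parent commit's switch to scheduler-instance-specific locks. A standalone model of this per-instance locking pattern, with pthread mutexes standing in for the scheduler locks:

    #include <pthread.h>
    #include <stddef.h>

    /* Stand-in for a scheduler instance with its own lock. */
    typedef struct {
      pthread_mutex_t lock;       /* per-instance lock, not a global one */
      int             priority;   /* state guarded by that lock */
    } sched_instance;

    static void update_priority( sched_instance *inst, int new_priority )
    {
      inst->priority = new_priority;
    }

    /* Visit every scheduler node of the thread, home node first, taking
     * only the lock of the instance being updated. */
    static void priority_update_all(
      sched_instance *nodes[],
      size_t          count,
      int             new_priority
    )
    {
      for ( size_t i = 0; i < count; ++i ) {
        pthread_mutex_lock( &nodes[ i ]->lock );
        update_priority( nodes[ i ], new_priority );
        pthread_mutex_unlock( &nodes[ i ]->lock );
      }
    }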
diff --git a/cpukit/score/include/rtems/score/status.h b/cpukit/score/include/rtems/score/status.h
index 453bf1143c..6b6f3c51d6 100644
--- a/cpukit/score/include/rtems/score/status.h
+++ b/cpukit/score/include/rtems/score/status.h
@@ -113,8 +113,6 @@ typedef enum {
STATUS_BUILD( STATUS_CLASSIC_NOT_OWNER_OF_RESOURCE, EPERM ),
STATUS_OBJECT_WAS_DELETED =
STATUS_BUILD( STATUS_CLASSIC_OBJECT_WAS_DELETED, EINVAL ),
- STATUS_RELEASE_ORDER_VIOLATION =
- STATUS_BUILD( STATUS_CLASSIC_INCORRECT_STATE, EPERM ),
STATUS_RESOURCE_IN_USE =
STATUS_BUILD( STATUS_CLASSIC_RESOURCE_IN_USE, EBUSY ),
STATUS_SUCCESSFUL =
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 19fad0ba18..bb891bd37b 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -1712,6 +1712,17 @@ RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
#endif
}
+RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
+#else
+ return the_thread->Wait.flags;
+#endif
+}
+
/**
* @brief Tries to change the thread wait flags with release semantics in case
* of success.
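
The acquire variant exists for the busy wait in _Thread_queue_Enqueue_sticky() below: the spinning thread's acquire loads pair with the release store made when it is dequeued, so everything written before the flag change is visible once the loop exits. A standalone C11 illustration with hypothetical flag values:

    #include <stdatomic.h>

    #define MODEL_INTEND_TO_BLOCK 1u
    #define MODEL_READY_AGAIN     2u

    static _Atomic unsigned int wait_flags = MODEL_INTEND_TO_BLOCK;
    static int wait_result;   /* written by the waker before the flag change */

    /* Waker: publish the result, then flip the flags with release semantics. */
    void model_make_ready_again( int result )
    {
      wait_result = result;
      atomic_store_explicit( &wait_flags, MODEL_READY_AGAIN,
                             memory_order_release );
    }

    /* Waiter: spin with acquire loads; once MODEL_READY_AGAIN is observed,
     * the earlier write to wait_result is guaranteed to be visible too. */
    int model_busy_wait( void )
    {
      while ( atomic_load_explicit( &wait_flags, memory_order_acquire )
                == MODEL_INTEND_TO_BLOCK ) {
        /* spin: the thread sticks to its processor instead of blocking */
      }

      return wait_result;
    }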
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index e24beec1bb..f74436df21 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -24,6 +24,7 @@
#include <rtems/score/priorityimpl.h>
#include <rtems/score/scheduler.h>
#include <rtems/score/smp.h>
+#include <rtems/score/status.h>
#include <rtems/score/thread.h>
#if defined(RTEMS_DEBUG)
@@ -553,6 +554,37 @@ void _Thread_queue_Enqueue_critical(
Thread_queue_Context *queue_context
);
+#if defined(RTEMS_SMP)
+/**
+ * @brief Enqueues the thread on the thread queue and busy waits for dequeue.
+ *
+ * Optionally starts the thread timer in case the timeout discipline is not
+ * WATCHDOG_NO_TIMEOUT. Timeout discipline and value are in the queue_context.
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock and register it as the new thread lock.
+ *
+ * The thread priorities of the owner and of the thread are updated with
+ * respect to the scheduler. The sticky level of the thread is incremented.
+ * A thread dispatch is performed if necessary.
+ *
+ * Afterwards, the thread busy waits on the thread wait flags until a timeout
+ * occurs or the thread queue is surrendered to this thread. So, it sticks to
+ * the processor instead of blocking with respect to the scheduler.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] operations The thread queue operations.
+ * @param[in] the_thread The thread to enqueue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ */
+Status_Control _Thread_queue_Enqueue_sticky(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+#endif
+
/**
* @brief Acquires the thread queue lock and calls
* _Thread_queue_Enqueue_critical().
@@ -733,6 +765,36 @@ void _Thread_queue_Surrender(
const Thread_queue_Operations *operations
);
+#if defined(RTEMS_SMP)
+/**
+ * @brief Surrenders the thread queue previously owned by the thread to the
+ * first enqueued thread.
+ *
+ * The owner of the thread queue must be set to NULL by the caller.
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock.
+ *
+ * The thread priorities of the previous owner and the new owner are updated. The
+ * sticky level of the previous owner is decremented. A thread dispatch is
+ * performed if necessary.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] heads The thread queue heads. It must not be NULL.
+ * @param[in] previous_owner The previous owner thread surrendering the thread
+ * queue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ * @param[in] operations The thread queue operations.
+ */
+void _Thread_queue_Surrender_sticky(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
+);
+#endif
+
RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_empty(
const Thread_queue_Queue *queue
)
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index e107bcc758..4fd4c02f01 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -353,3 +353,20 @@ void _Thread_Priority_update( Thread_queue_Context *queue_context )
_Thread_State_release( the_thread, &lock_context );
}
}
+
+#if defined(RTEMS_SMP)
+void _Thread_Priority_and_sticky_update(
+ Thread_Control *the_thread,
+ int sticky_level_change
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_State_acquire( the_thread, &lock_context );
+ _Scheduler_Priority_and_sticky_update(
+ the_thread,
+ sticky_level_change
+ );
+ _Thread_State_release( the_thread, &lock_context );
+}
+#endif
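
Collecting the _Thread_Priority_and_sticky_update() call sites from the MrsP hunks above, the sticky level argument follows one rule: +1 when a thread becomes owner or enqueues sticky, -1 when such an increment has to be undone, 0 when no increment ever happened. A sketch of that mapping, using a hypothetical enum rather than any RTEMS API:

    typedef enum {
      EV_CLAIM_OWNERSHIP,           /* _MRSP_Claim_ownership()              */
      EV_WAIT_FAILED_TIMEOUT,       /* _MRSP_Wait_for_ownership(), timeout  */
      EV_WAIT_FAILED_DEADLOCK,      /* _MRSP_Wait_for_ownership(), deadlock */
      EV_SURRENDER_PREVIOUS_OWNER,  /* _Thread_queue_Surrender_sticky()     */
      EV_SURRENDER_NEW_OWNER
    } sticky_event;

    static int sticky_level_change( sticky_event event )
    {
      switch ( event ) {
        case EV_CLAIM_OWNERSHIP:
          return 1;    /* the owner must stay schedulable (sticky)      */
        case EV_WAIT_FAILED_TIMEOUT:
          return -1;   /* undo the +1 done inside Enqueue_sticky        */
        case EV_WAIT_FAILED_DEADLOCK:
          return 0;    /* the enqueue was refused before any increment  */
        case EV_SURRENDER_PREVIOUS_OWNER:
          return -1;   /* the previous owner may block normally again   */
        case EV_SURRENDER_NEW_OWNER:
        default:
          return 0;    /* its +1 already happened when it enqueued      */
      }
    }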
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 362ce8f5e8..335ee0f21d 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -370,6 +370,37 @@ void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
);
}
+static void _Thread_queue_Timeout(
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+)
+{
+ switch ( queue_context->timeout_discipline ) {
+ case WATCHDOG_RELATIVE:
+ /* A relative timeout of 0 is a special case indefinite (no) timeout */
+ if ( queue_context->timeout != 0 ) {
+ _Thread_Timer_insert_relative(
+ the_thread,
+ cpu_self,
+ _Thread_Timeout,
+ (Watchdog_Interval) queue_context->timeout
+ );
+ }
+ break;
+ case WATCHDOG_ABSOLUTE:
+ _Thread_Timer_insert_absolute(
+ the_thread,
+ cpu_self,
+ _Thread_Timeout,
+ queue_context->timeout
+ );
+ break;
+ default:
+ break;
+ }
+}
+
void _Thread_queue_Enqueue_critical(
Thread_queue_Queue *queue,
const Thread_queue_Operations *operations,
@@ -430,29 +461,7 @@ void _Thread_queue_Enqueue_critical(
/*
* If the thread wants to timeout, then schedule its timer.
*/
- switch ( queue_context->timeout_discipline ) {
- case WATCHDOG_RELATIVE:
- /* A relative timeout of 0 is a special case indefinite (no) timeout */
- if ( queue_context->timeout != 0 ) {
- _Thread_Timer_insert_relative(
- the_thread,
- cpu_self,
- _Thread_Timeout,
- (Watchdog_Interval) queue_context->timeout
- );
- }
- break;
- case WATCHDOG_ABSOLUTE:
- _Thread_Timer_insert_absolute(
- the_thread,
- cpu_self,
- _Thread_Timeout,
- queue_context->timeout
- );
- break;
- default:
- break;
- }
+ _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
/*
* At this point thread dispatching is disabled, however, we already released
@@ -476,6 +485,65 @@ void _Thread_queue_Enqueue_critical(
_Thread_Dispatch_enable( cpu_self );
}
+#if defined(RTEMS_SMP)
+Status_Control _Thread_queue_Enqueue_sticky(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+ Per_CPU_Control *cpu_self;
+
+ _Thread_Wait_claim( the_thread, queue );
+
+ if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
+ _Thread_queue_Path_release_critical( queue_context );
+ _Thread_Wait_restore_default( the_thread );
+ _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
+ _Thread_Wait_tranquilize( the_thread );
+ ( *queue_context->deadlock_callout )( the_thread );
+ return _Thread_Wait_get_status( the_thread );
+ }
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_claim_finalize( the_thread, operations );
+ ( *operations->enqueue )( queue, the_thread, queue_context );
+
+ _Thread_queue_Path_release_critical( queue_context );
+
+ the_thread->Wait.return_code = STATUS_SUCCESSFUL;
+ _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
+
+ if ( cpu_self->thread_dispatch_disable_level != 1 ) {
+ _Terminate(
+ INTERNAL_ERROR_CORE,
+ false,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
+ );
+ }
+
+ _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
+ _Thread_Priority_update( queue_context );
+ _Thread_Priority_and_sticky_update( the_thread, 1 );
+ _Thread_Dispatch_enable( cpu_self );
+
+ while (
+ _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
+ ) {
+ /* Wait */
+ }
+
+ _Thread_Wait_tranquilize( the_thread );
+ _Thread_Timer_remove( the_thread );
+ return _Thread_Wait_get_status( the_thread );
+}
+#endif
+
#if defined(RTEMS_MULTIPROCESSING)
static bool _Thread_queue_MP_set_callout(
Thread_Control *the_thread,
@@ -666,6 +734,43 @@ void _Thread_queue_Surrender(
_Thread_Dispatch_enable( cpu_self );
}
+#if defined(RTEMS_SMP)
+void _Thread_queue_Surrender_sticky(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
+)
+{
+ Thread_Control *new_owner;
+ Per_CPU_Control *cpu_self;
+
+ _Assert( heads != NULL );
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ new_owner = ( *operations->surrender )(
+ queue,
+ heads,
+ previous_owner,
+ queue_context
+ );
+ queue->owner = new_owner;
+ _Thread_queue_Make_ready_again( new_owner );
+
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_queue_Queue_release(
+ queue,
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_Priority_and_sticky_update( previous_owner, -1 );
+ _Thread_Priority_and_sticky_update( new_owner, 0 );
+ _Thread_Dispatch_enable( cpu_self );
+}
+#endif
+
Thread_Control *_Thread_queue_Do_dequeue(
Thread_queue_Control *the_thread_queue,
const Thread_queue_Operations *operations
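
One detail worth noting in _Thread_queue_Enqueue_sticky(): when _Thread_queue_Path_acquire_critical() detects a dependency cycle, the reaction is delegated to the deadlock callout stored in the queue context. MrsP installs _Thread_queue_Deadlock_status, so the caller gets STATUS_DEADLOCK back; ordinary mutexes use _Thread_queue_Deadlock_fatal, which terminates the system. A standalone model of this configurable deadlock response, with stand-in types and values:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      int status;                 /* stand-in for the thread's wait status */
    } thread_model;

    typedef void ( *deadlock_callout )( thread_model * );

    static void deadlock_status( thread_model *thread )
    {
      thread->status = -1;        /* stand-in for STATUS_DEADLOCK */
    }

    static void deadlock_fatal( thread_model *thread )
    {
      (void) thread;
      fputs( "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK\n", stderr );
      abort();
    }

    static int enqueue_model(
      thread_model     *thread,
      bool              path_ok,  /* result of acquiring the dependency path */
      deadlock_callout  callout
    )
    {
      if ( !path_ok ) {
        ( *callout )( thread );   /* cycle found: caller chooses the policy */
        return thread->status;
      }

      return 0;                   /* enqueued normally */
    }

    int main( void )
    {
      thread_model thread = { 0 };

      /* MrsP's choice: a cycle surfaces as an ordinary error status. */
      enqueue_model( &thread, false, deadlock_status );
      printf( "status: %d\n", thread.status );

      /* A mutex would install deadlock_fatal instead (would abort here). */
      (void) deadlock_fatal;
      return 0;
    }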